From 90f2c07391ab165c68a30e8592e9c98983a27145 Mon Sep 17 00:00:00 2001 From: Stanimal Date: Mon, 1 Nov 2021 10:25:26 +0400 Subject: [PATCH 01/11] fix: header sync must allow transition to archival/pruned if tip is behind --- .../sync/header_sync/synchronizer.rs | 37 +++++++++++++------ base_layer/core/src/chain_storage/async_db.rs | 2 + .../src/chain_storage/blockchain_backend.rs | 2 + .../src/chain_storage/blockchain_database.rs | 5 +++ .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 25 +++++++++++++ .../core/src/test_helpers/blockchain.rs | 4 ++ 6 files changed, 64 insertions(+), 11 deletions(-) diff --git a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs index 16c7c41494..c5a69a4ea0 100644 --- a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs @@ -246,22 +246,37 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { ); // Fetch the local tip header at the beginning of the sync process - let local_tip_header = self.db.fetch_tip_header().await?; + let local_tip_header = self.db.fetch_last_chain_header().await?; let local_total_accumulated_difficulty = local_tip_header.accumulated_data().total_accumulated_difficulty; + let header_tip_height = local_tip_header.height(); let sync_status = self .determine_sync_status(sync_peer, local_tip_header, &mut client) .await?; match sync_status { - SyncStatus::InSync => Err(BlockHeaderSyncError::PeerSentInaccurateChainMetadata { - claimed: sync_peer.claimed_chain_metadata().accumulated_difficulty(), - actual: None, - local: local_total_accumulated_difficulty, - }), - SyncStatus::WereAhead => Err(BlockHeaderSyncError::PeerSentInaccurateChainMetadata { - claimed: sync_peer.claimed_chain_metadata().accumulated_difficulty(), - actual: None, - local: local_total_accumulated_difficulty, - }), + SyncStatus::InSync | SyncStatus::WereAhead => { + let metadata 
= self.db.get_chain_metadata().await?; + if metadata.height_of_longest_chain() < header_tip_height { + debug!( + target: LOG_TARGET, + "Headers are in sync at height {} but tip is {}. Proceeding to archival/pruned block sync", + header_tip_height, + metadata.height_of_longest_chain() + ); + Ok(()) + } else { + debug!( + target: LOG_TARGET, + "Headers and block state are already in-sync (Header Tip: {}, Block tip: {})", + header_tip_height, + metadata.height_of_longest_chain() + ); + Err(BlockHeaderSyncError::PeerSentInaccurateChainMetadata { + claimed: sync_peer.claimed_chain_metadata().accumulated_difficulty(), + actual: None, + local: local_total_accumulated_difficulty, + }) + } + }, SyncStatus::Lagging(split_info) => { self.hooks.call_on_progress_header_hooks( split_info.local_tip_header.height(), diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index 0c1e743842..bf7c9b4a06 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -195,6 +195,8 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_last_header() -> BlockHeader, "fetch_last_header"); + make_async_fn!(fetch_last_chain_header() -> ChainHeader, "fetch_last_chain_header"); + make_async_fn!(fetch_tip_header() -> ChainHeader, "fetch_tip_header"); make_async_fn!(insert_valid_headers(headers: Vec) -> (), "insert_valid_headers"); diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index 8c4612806a..bd1a3acdef 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -133,6 +133,8 @@ pub trait BlockchainBackend: Send + Sync { fn orphan_count(&self) -> Result; /// Returns the stored header with the highest corresponding height. fn fetch_last_header(&self) -> Result; + /// Returns the stored header and accumulated data with the highest height. 
+ fn fetch_last_chain_header(&self) -> Result; /// Returns the stored header with the highest corresponding height. fn fetch_tip_header(&self) -> Result; /// Returns the stored chain metadata. diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 4d7dbf5754..52c1ce2099 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -566,6 +566,11 @@ where B: BlockchainBackend db.fetch_last_header() } + pub fn fetch_last_chain_header(&self) -> Result { + let db = self.db_read_access()?; + db.fetch_last_chain_header() + } + /// Returns the sum of all kernels pub fn fetch_kernel_commitment_sum(&self, at_hash: &HashOutput) -> Result { Ok(self.fetch_block_accumulated_data(at_hash.clone())?.kernel_sum) diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index a4e3d4cea8..0b8e6333af 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -1942,6 +1942,31 @@ impl BlockchainBackend for LMDBDatabase { }) } + /// Finds and returns the last stored header. + fn fetch_last_chain_header(&self) -> Result { + let txn = self.read_transaction()?; + let header = self.fetch_last_header_in_txn(&txn)?.ok_or_else(|| { + ChainStorageError::InvalidOperation("Cannot fetch last header because database is empty".to_string()) + })?; + let height = header.height; + let accumulated_data = self + .fetch_header_accumulated_data_by_height(&txn, height)? 
+ .ok_or_else(|| ChainStorageError::ValueNotFound { + entity: "BlockHeaderAccumulatedData", + field: "height", + value: height.to_string(), + })?; + + let chain_header = ChainHeader::try_construct(header, accumulated_data).ok_or_else(|| { + ChainStorageError::DataInconsistencyDetected { + function: "fetch_tip_header", + details: format!("Accumulated data mismatch at height #{}", height), + } + })?; + + Ok(chain_header) + } + fn fetch_tip_header(&self) -> Result { let txn = self.read_transaction()?; diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index 30c838cbf2..66afe677c9 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -314,6 +314,10 @@ impl BlockchainBackend for TempDatabase { self.db.as_ref().unwrap().fetch_last_header() } + fn fetch_last_chain_header(&self) -> Result { + self.db.as_ref().unwrap().fetch_last_chain_header() + } + fn fetch_tip_header(&self) -> Result { self.db.as_ref().unwrap().fetch_tip_header() } From e131408c29ad815c6a8d10eaf74022e3059a80d4 Mon Sep 17 00:00:00 2001 From: Stanimal Date: Mon, 1 Nov 2021 16:17:38 +0400 Subject: [PATCH 02/11] fix(pruned mode): prune inputs, keep track of kernel/utxo sum --- applications/tari_base_node/src/bootstrap.rs | 2 +- .../state_machine_service/initializer.rs | 2 +- .../state_machine_service/state_machine.rs | 10 +- .../states/block_sync.rs | 21 +- .../states/events_and_states.rs | 6 +- .../states/horizon_state_sync.rs | 37 +-- .../states/horizon_state_sync/error.rs | 9 +- .../horizon_state_synchronization.rs | 297 ++++++++++-------- .../base_node/sync/block_sync/synchronizer.rs | 31 +- .../sync/header_sync/synchronizer.rs | 4 +- .../core/src/blocks/accumulated_data.rs | 49 ++- base_layer/core/src/blocks/mod.rs | 1 + base_layer/core/src/chain_storage/async_db.rs | 27 +- .../src/chain_storage/blockchain_database.rs | 115 +++++-- .../core/src/chain_storage/db_transaction.rs | 78 
++--- base_layer/core/src/chain_storage/error.rs | 8 +- .../core/src/chain_storage/horizon_data.rs | 8 +- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 130 +++----- base_layer/core/src/mempool/config.rs | 11 +- .../core/src/transactions/aggregated_body.rs | 6 +- .../core/src/transactions/coinbase_builder.rs | 2 +- .../core/src/transactions/transaction.rs | 15 +- .../transaction_protocol/recipient.rs | 2 +- .../transaction_protocol/sender.rs | 22 +- .../transaction_protocol/single_receiver.rs | 5 +- base_layer/mmr/src/merkle_proof.rs | 12 +- common/src/build/protobuf.rs | 9 +- comms/src/test_utils/factories/net_address.rs | 11 +- 28 files changed, 453 insertions(+), 477 deletions(-) diff --git a/applications/tari_base_node/src/bootstrap.rs b/applications/tari_base_node/src/bootstrap.rs index dc551a6379..36ab592ac7 100644 --- a/applications/tari_base_node/src/bootstrap.rs +++ b/applications/tari_base_node/src/bootstrap.rs @@ -169,7 +169,7 @@ where B: BlockchainBackend + 'static orphan_db_clean_out_threshold: config.orphan_db_clean_out_threshold, max_randomx_vms: config.max_randomx_vms, blocks_behind_before_considered_lagging: self.config.blocks_behind_before_considered_lagging, - block_sync_validation_concurrency: num_cpus::get(), + sync_validation_concurrency: num_cpus::get(), ..Default::default() }, self.rules, diff --git a/base_layer/core/src/base_node/state_machine_service/initializer.rs b/base_layer/core/src/base_node/state_machine_service/initializer.rs index b3dea2f6af..019b2bf8f9 100644 --- a/base_layer/core/src/base_node/state_machine_service/initializer.rs +++ b/base_layer/core/src/base_node/state_machine_service/initializer.rs @@ -104,7 +104,7 @@ where B: BlockchainBackend + 'static rules.clone(), factories, config.bypass_range_proof_verification, - config.block_sync_validation_concurrency, + config.sync_validation_concurrency, ); let max_randomx_vms = config.max_randomx_vms; diff --git 
a/base_layer/core/src/base_node/state_machine_service/state_machine.rs b/base_layer/core/src/base_node/state_machine_service/state_machine.rs index ab534cda52..f2a4d4bc11 100644 --- a/base_layer/core/src/base_node/state_machine_service/state_machine.rs +++ b/base_layer/core/src/base_node/state_machine_service/state_machine.rs @@ -54,7 +54,7 @@ pub struct BaseNodeStateMachineConfig { pub max_randomx_vms: usize, pub blocks_behind_before_considered_lagging: u64, pub bypass_range_proof_verification: bool, - pub block_sync_validation_concurrency: usize, + pub sync_validation_concurrency: usize, } impl Default for BaseNodeStateMachineConfig { @@ -68,7 +68,7 @@ impl Default for BaseNodeStateMachineConfig { max_randomx_vms: 0, blocks_behind_before_considered_lagging: 0, bypass_range_proof_verification: false, - block_sync_validation_concurrency: 8, + sync_validation_concurrency: 8, } } } @@ -138,11 +138,11 @@ impl BaseNodeStateMachine { use self::{BaseNodeState::*, StateEvent::*, SyncStatus::*}; match (state, event) { (Starting(s), Initialized) => Listening(s.into()), - (HeaderSync(_), HeadersSynchronized(conn)) => { + (HeaderSync(_), HeadersSynchronized(sync_peer)) => { if self.config.pruning_horizon > 0 { - HorizonStateSync(states::HorizonStateSync::with_peer(conn)) + HorizonStateSync(states::HorizonStateSync::new(sync_peer)) } else { - BlockSync(states::BlockSync::with_peer(conn)) + BlockSync(states::BlockSync::new(sync_peer)) } }, (HeaderSync(s), HeaderSyncFailed) => Waiting(s.into()), diff --git a/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs b/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs index 96e2a1f2c6..620de2fc84 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs @@ -24,7 +24,7 @@ use crate::{ base_node::{ comms_interface::BlockEvent, state_machine_service::states::{BlockSyncInfo, HorizonStateSync, 
StateEvent, StateInfo, StatusInfo}, - sync::BlockSynchronizer, + sync::{BlockSynchronizer, SyncPeer}, BaseNodeStateMachine, }, chain_storage::{BlockAddResult, BlockchainBackend}, @@ -32,24 +32,19 @@ use crate::{ use log::*; use randomx_rs::RandomXFlag; use std::time::Instant; -use tari_comms::PeerConnection; const LOG_TARGET: &str = "c::bn::block_sync"; -#[derive(Debug, Default)] +#[derive(Debug)] pub struct BlockSync { - sync_peer: Option, + sync_peer: SyncPeer, is_synced: bool, } impl BlockSync { - pub fn new() -> Self { - Default::default() - } - - pub fn with_peer(sync_peer: PeerConnection) -> Self { + pub fn new(sync_peer: SyncPeer) -> Self { Self { - sync_peer: Some(sync_peer), + sync_peer, is_synced: false, } } @@ -62,7 +57,7 @@ impl BlockSync { shared.config.block_sync_config.clone(), shared.db.clone(), shared.connectivity.clone(), - self.sync_peer.take(), + self.sync_peer.clone(), shared.sync_validators.block_body.clone(), ); @@ -122,7 +117,7 @@ impl BlockSync { } impl From for BlockSync { - fn from(_: HorizonStateSync) -> Self { - BlockSync::new() + fn from(state: HorizonStateSync) -> Self { + BlockSync::new(state.sync_peer().clone()) } } diff --git a/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs b/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs index 080fd9df21..0b843b8c2f 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs @@ -36,7 +36,7 @@ use crate::base_node::{ use randomx_rs::RandomXFlag; use std::fmt::{Display, Error, Formatter}; use tari_common_types::chain_metadata::ChainMetadata; -use tari_comms::{peer_manager::NodeId, PeerConnection}; +use tari_comms::peer_manager::NodeId; #[derive(Debug)] pub enum BaseNodeState { @@ -54,7 +54,7 @@ pub enum BaseNodeState { #[derive(Debug, Clone, PartialEq)] pub enum StateEvent { Initialized, - 
HeadersSynchronized(PeerConnection), + HeadersSynchronized(SyncPeer), HeaderSyncFailed, HorizonStateSynchronized, HorizonStateSyncFailure, @@ -125,7 +125,7 @@ impl Display for StateEvent { match self { Initialized => f.write_str("Initialized"), BlocksSynchronized => f.write_str("Synchronised Blocks"), - HeadersSynchronized(conn) => write!(f, "Headers Synchronized from peer `{}`", conn.peer_node_id()), + HeadersSynchronized(sync_peer) => write!(f, "Headers Synchronized from peer `{}`", sync_peer), HeaderSyncFailed => f.write_str("Header Synchronization Failed"), HorizonStateSynchronized => f.write_str("Horizon State Synchronized"), HorizonStateSyncFailure => f.write_str("Horizon State Synchronization Failed"), diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs index c1b8412028..30222dac83 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs @@ -25,40 +25,41 @@ // TODO: Move the horizon synchronizer to the `sync` module -use log::*; - -pub use error::HorizonSyncError; -use horizon_state_synchronization::HorizonStateSynchronization; -use tari_comms::PeerConnection; +mod config; +pub use self::config::HorizonSyncConfig; -use crate::{base_node::BaseNodeStateMachine, chain_storage::BlockchainBackend, transactions::CryptoFactories}; +mod error; +mod horizon_state_synchronization; use super::{ events_and_states::{HorizonSyncInfo, HorizonSyncStatus}, StateEvent, StateInfo, }; - -pub use self::config::HorizonSyncConfig; - -mod config; - -mod error; - -mod horizon_state_synchronization; +use crate::{ + base_node::{sync::SyncPeer, BaseNodeStateMachine}, + chain_storage::BlockchainBackend, + transactions::CryptoFactories, +}; +use horizon_state_synchronization::HorizonStateSynchronization; +use log::*; const LOG_TARGET: &str = 
"c::bn::state_machine_service::states::horizon_state_sync"; #[derive(Clone, Debug)] pub struct HorizonStateSync { - sync_peer: PeerConnection, + sync_peer: SyncPeer, } impl HorizonStateSync { - pub fn with_peer(sync_peer: PeerConnection) -> Self { + pub fn new(sync_peer: SyncPeer) -> Self { Self { sync_peer } } + pub fn sync_peer(&self) -> &SyncPeer { + &self.sync_peer + } + pub async fn next_event( &mut self, shared: &mut BaseNodeStateMachine, @@ -83,12 +84,12 @@ impl HorizonStateSync { return StateEvent::HorizonStateSynchronized; } - let info = HorizonSyncInfo::new(vec![self.sync_peer.peer_node_id().clone()], HorizonSyncStatus::Starting); + let info = HorizonSyncInfo::new(vec![self.sync_peer.node_id().clone()], HorizonSyncStatus::Starting); shared.set_state_info(StateInfo::HorizonSync(info)); let prover = CryptoFactories::default().range_proof; let mut horizon_state = - HorizonStateSynchronization::new(shared, self.sync_peer.clone(), horizon_sync_height, &prover); + HorizonStateSynchronization::new(shared, self.sync_peer.clone(), horizon_sync_height, prover); match horizon_state.synchronize().await { Ok(()) => { diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/error.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/error.rs index 4669bdce09..6381602bc1 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/error.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/error.rs @@ -27,7 +27,10 @@ use crate::{ validation::ValidationError, }; use std::num::TryFromIntError; -use tari_comms::protocol::rpc::{RpcError, RpcStatus}; +use tari_comms::{ + connectivity::ConnectivityError, + protocol::rpc::{RpcError, RpcStatus}, +}; use tari_mmr::error::MerkleMountainRangeError; use thiserror::Error; use tokio::task; @@ -36,8 +39,6 @@ use tokio::task; pub enum HorizonSyncError { #[error("Peer sent an invalid response: {0}")] 
IncorrectResponse(String), - // #[error("Exceeded maximum sync attempts")] - // MaxSyncAttemptsReached, #[error("Chain storage error: {0}")] ChainStorageError(#[from] ChainStorageError), #[error("Comms interface error: {0}")] @@ -67,6 +68,8 @@ pub enum HorizonSyncError { ConversionError(String), #[error("MerkleMountainRangeError: {0}")] MerkleMountainRangeError(#[from] MerkleMountainRangeError), + #[error("Connectivity error: {0}")] + ConnectivityError(#[from] ConnectivityError), } impl From for HorizonSyncError { diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs index 7e10d59745..f8ce7afbf0 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs @@ -27,10 +27,10 @@ use crate::{ states::events_and_states::{HorizonSyncInfo, HorizonSyncStatus, StateInfo}, BaseNodeStateMachine, }, - sync::rpc, + sync::{rpc, SyncPeer}, }, - blocks::BlockHeader, - chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, ChainStorageError, MmrTree, PrunedOutput}, + blocks::{BlockHeader, UpdateBlockAccumulatedData}, + chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, ChainStorageError, MmrTree}, proto::base_node::{ sync_utxo as proto_sync_utxo, sync_utxos_response::UtxoOrDeleted, @@ -42,37 +42,39 @@ use crate::{ transactions::transaction::{TransactionKernel, TransactionOutput}, }; use croaring::Bitmap; -use futures::StreamExt; +use futures::{stream::FuturesUnordered, StreamExt}; use log::*; use std::{ + cmp, convert::{TryFrom, TryInto}, + mem, sync::Arc, + time::Instant, }; -use tari_common_types::types::{HashDigest, RangeProofService}; -use tari_comms::PeerConnection; -use tari_crypto::{ - 
commitment::HomomorphicCommitment, - tari_utilities::{hex::Hex, Hashable}, -}; +use tari_common_types::types::{Commitment, HashDigest, RangeProofService}; +use tari_crypto::tari_utilities::{hex::Hex, Hashable}; use tari_mmr::{MerkleMountainRange, MutableMmr}; +use tokio::task; const LOG_TARGET: &str = "c::bn::state_machine_service::states::horizon_state_sync"; pub struct HorizonStateSynchronization<'a, B: BlockchainBackend> { shared: &'a mut BaseNodeStateMachine, - sync_peer: PeerConnection, + sync_peer: SyncPeer, horizon_sync_height: u64, - prover: &'a RangeProofService, + prover: Arc, num_kernels: u64, num_outputs: u64, + kernel_sum: Commitment, + utxo_sum: Commitment, } impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { pub fn new( shared: &'a mut BaseNodeStateMachine, - sync_peer: PeerConnection, + sync_peer: SyncPeer, horizon_sync_height: u64, - prover: &'a RangeProofService, + prover: Arc, ) -> Self { Self { shared, @@ -81,6 +83,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { prover, num_kernels: 0, num_outputs: 0, + kernel_sum: Default::default(), + utxo_sum: Default::default(), } } @@ -89,6 +93,13 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { target: LOG_TARGET, "Preparing database for horizon sync to height (#{})", self.horizon_sync_height ); + let mut connection = self + .shared + .connectivity + .dial_peer(self.sync_peer.node_id().clone()) + .await?; + let mut client = connection.connect_rpc::().await?; + let header = self.db().fetch_header(self.horizon_sync_height).await?.ok_or_else(|| { ChainStorageError::ValueNotFound { entity: "Header", @@ -97,8 +108,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { } })?; - let mut client = self.sync_peer.connect_rpc::().await?; - match self.begin_sync(&mut client, &header).await { Ok(_) => match self.finalize_horizon_sync().await { Ok(_) => Ok(()), @@ -119,6 +128,8 @@ impl<'a, B: 
BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { client: &mut rpc::BaseNodeSyncRpcClient, to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { + debug!(target: LOG_TARGET, "Initializing"); + self.initialize().await?; debug!(target: LOG_TARGET, "Synchronizing kernels"); self.synchronize_kernels(client, to_header).await?; debug!(target: LOG_TARGET, "Synchronizing outputs"); @@ -126,12 +137,45 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { Ok(()) } + async fn initialize(&mut self) -> Result<(), HorizonSyncError> { + let local_metadata = self.db().get_chain_metadata().await?; + let remote_metadata = self.sync_peer.claimed_chain_metadata(); + + // If the target pruning horizon is greater than our current tip, prune the blockchain to our current tip + // and continue from there. This will update the horizon data accordingly. + let target_pruning_horizon = local_metadata.horizon_block(remote_metadata.height_of_longest_chain()); + if target_pruning_horizon >= local_metadata.height_of_longest_chain() { + info!( + target: LOG_TARGET, + "Target horizon height {} is past the current tip height {}. 
Pruning blockchain to tip", + target_pruning_horizon, + local_metadata.height_of_longest_chain() + ); + self.db() + .prune_to_height(local_metadata.height_of_longest_chain()) + .await?; + } + + let data = self.db().fetch_horizon_data().await?; + debug!( + target: LOG_TARGET, + "Loaded from horizon data utxo_sum = {}, kernel_sum = {}", + data.utxo_sum().to_hex(), + data.kernel_sum().to_hex(), + ); + self.utxo_sum = data.utxo_sum().clone(); + self.kernel_sum = data.kernel_sum().clone(); + + Ok(()) + } + async fn synchronize_kernels( &mut self, client: &mut rpc::BaseNodeSyncRpcClient, to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { let local_num_kernels = self.db().fetch_mmr_size(MmrTree::Kernel).await?; + let metadata = self.db().get_chain_metadata().await?; let remote_num_kernels = to_header.kernel_mmr_size; self.num_kernels = remote_num_kernels; @@ -142,7 +186,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { } let info = HorizonSyncInfo::new( - vec![self.sync_peer.peer_node_id().clone()], + vec![self.sync_peer.node_id().clone()], HorizonSyncStatus::Kernels(local_num_kernels, remote_num_kernels), ); self.shared.set_state_info(StateInfo::HorizonSync(info)); @@ -158,9 +202,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let latency = client.get_last_request_latency().await?; debug!( target: LOG_TARGET, - "Initiating kernel sync with peer `{}` `{}` (latency = {}ms)", - self.sync_peer.peer_node_id(), - self.sync_peer.address(), + "Initiating kernel sync with peer `{}` (latency = {}ms)", + self.sync_peer.node_id(), latency.unwrap_or_default().as_millis() ); @@ -192,6 +235,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { .map_err(HorizonSyncError::InvalidKernelSignature)?; kernels.push(kernel.clone()); + self.kernel_sum = &self.kernel_sum + &kernel.excess; txn.insert_kernel_via_horizon_sync(kernel, current_header.hash().clone(), mmr_position as u32); if 
mmr_position == current_header.header().kernel_mmr_size - 1 { debug!( @@ -221,13 +265,24 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { }); } - txn.update_pruned_hash_set( - MmrTree::Kernel, + let kernel_hash_set = kernel_mmr.get_pruned_hash_set()?; + debug!( + target: LOG_TARGET, + "Updating block data at height {}", + current_header.height() + ); + txn.update_block_accumulated_data_via_horizon_sync( current_header.hash().clone(), - kernel_mmr.get_pruned_hash_set()?, + UpdateBlockAccumulatedData { + kernel_sum: Some(self.kernel_sum.clone()), + kernel_hash_set: Some(kernel_hash_set), + ..Default::default() + }, ); + txn.set_pruned_height(metadata.pruned_height(), self.kernel_sum.clone(), self.utxo_sum.clone()); txn.commit().await?; + if mmr_position < end - 1 { current_header = db.fetch_chain_header(current_header.height() + 1).await?; } @@ -236,7 +291,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { if mmr_position % 100 == 0 || mmr_position == self.num_kernels { let info = HorizonSyncInfo::new( - vec![self.sync_peer.peer_node_id().clone()], + vec![self.sync_peer.node_id().clone()], HorizonSyncStatus::Kernels(mmr_position, self.num_kernels), ); self.shared.set_state_info(StateInfo::HorizonSync(info)); @@ -258,6 +313,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { ) -> Result<(), HorizonSyncError> { let local_num_outputs = self.db().fetch_mmr_size(MmrTree::Utxo).await?; + let metadata = self.db().get_chain_metadata().await?; + let remote_num_outputs = to_header.output_mmr_size; self.num_outputs = remote_num_outputs; @@ -267,7 +324,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { } let info = HorizonSyncInfo::new( - vec![self.sync_peer.peer_node_id().clone()], + vec![self.sync_peer.node_id().clone()], HorizonSyncStatus::Outputs(local_num_outputs, self.num_outputs), ); self.shared.set_state_info(StateInfo::HorizonSync(info)); @@ 
-288,7 +345,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { debug!( target: LOG_TARGET, "Initiating output sync with peer `{}` (latency = {}ms)", - self.sync_peer.peer_node_id(), + self.sync_peer.node_id(), latency.unwrap_or_default().as_millis() ); @@ -318,14 +375,16 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let mut mmr_position = start; let mut height_utxo_counter = 0u64; let mut height_txo_counter = 0u64; + let mut timer = Instant::now(); let block_data = db .fetch_block_accumulated_data(current_header.header().prev_hash.clone()) .await?; - let (_, output_pruned_set, rp_pruned_set, mut full_bitmap) = block_data.dissolve(); + let (_, output_pruned_set, witness_pruned_set, _) = block_data.dissolve(); + let mut full_bitmap = self.db().fetch_deleted_bitmap_at_tip().await?.into_bitmap(); let mut output_mmr = MerkleMountainRange::::new(output_pruned_set); - let mut witness_mmr = MerkleMountainRange::::new(rp_pruned_set); + let mut witness_mmr = MerkleMountainRange::::new(witness_pruned_set); while let Some(response) = output_stream.next().await { let res: SyncUtxosResponse = response?; @@ -356,6 +415,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { output_hashes.push(output.hash()); witness_hashes.push(output.witness_hash()); unpruned_outputs.push(output.clone()); + self.utxo_sum = &self.utxo_sum + &output.commitment; txn.insert_output_via_horizon_sync( output, current_header.hash().clone(), @@ -394,18 +454,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { ))); } - debug!( - target: LOG_TARGET, - "UTXO: {} (Header #{}), added {} utxos, added {} txos", - mmr_position, - current_header.height(), - height_utxo_counter, - height_txo_counter - ); - - height_txo_counter = 0; - height_utxo_counter = 0; - // Validate root for hash in output_hashes.drain(..) 
{ output_mmr.push(hash)?; @@ -415,8 +463,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { witness_mmr.push(hash)?; } - // Check that the difference bitmap is excessively large. Bitmap::deserialize panics if greater than - // isize::MAX, however isize::MAX is still an inordinate amount of data. An + // Check that the difference bitmap isn't excessively large. Bitmap::deserialize panics if greater + // than isize::MAX, however isize::MAX is still an inordinate amount of data. An // arbitrary 4 MiB limit is used. const MAX_DIFF_BITMAP_BYTE_LEN: usize = 4 * 1024 * 1024; if diff_bitmap.len() > MAX_DIFF_BITMAP_BYTE_LEN { @@ -425,14 +473,14 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { peer {}", diff_bitmap.len(), MAX_DIFF_BITMAP_BYTE_LEN, - self.sync_peer.peer_node_id() + self.sync_peer.node_id() ))); } let diff_bitmap = Bitmap::try_deserialize(&diff_bitmap).ok_or_else(|| { HorizonSyncError::IncorrectResponse(format!( "Peer {} sent an invalid difference bitmap", - self.sync_peer.peer_node_id() + self.sync_peer.node_id() )) })?; @@ -464,23 +512,38 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { }); } - // Validate rangeproofs if the MMR matches - for o in unpruned_outputs.drain(..) 
{ - o.verify_range_proof(self.prover) - .map_err(|err| HorizonSyncError::InvalidRangeProof(o.hash().to_hex(), err.to_string()))?; - } + self.validate_rangeproofs(mem::take(&mut unpruned_outputs)).await?; txn.update_deleted_bitmap(diff_bitmap.clone()); - txn.update_pruned_hash_set(MmrTree::Utxo, current_header.hash().clone(), pruned_output_set); - txn.update_pruned_hash_set( - MmrTree::Witness, + + let witness_hash_set = witness_mmr.get_pruned_hash_set()?; + txn.update_block_accumulated_data_via_horizon_sync( current_header.hash().clone(), - witness_mmr.get_pruned_hash_set()?, + UpdateBlockAccumulatedData { + utxo_sum: Some(self.utxo_sum.clone()), + utxo_hash_set: Some(pruned_output_set), + witness_hash_set: Some(witness_hash_set), + deleted_diff: Some(diff_bitmap.into()), + ..Default::default() + }, ); - txn.update_block_accumulated_data_with_deleted_diff(current_header.hash().clone(), diff_bitmap); - + txn.set_pruned_height(metadata.pruned_height(), self.kernel_sum.clone(), self.utxo_sum.clone()); txn.commit().await?; + debug!( + target: LOG_TARGET, + "UTXO: {}, Header #{}, added {} utxos, added {} txos in {:.2?}", + mmr_position, + current_header.height(), + height_utxo_counter, + height_txo_counter, + timer.elapsed() + ); + + height_txo_counter = 0; + height_utxo_counter = 0; + timer = Instant::now(); + current_header = db.fetch_chain_header(current_header.height() + 1).await?; debug!( target: LOG_TARGET, @@ -498,7 +561,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { if mmr_position % 100 == 0 || mmr_position == self.num_outputs { let info = HorizonSyncInfo::new( - vec![self.sync_peer.peer_node_id().clone()], + vec![self.sync_peer.node_id().clone()], HorizonSyncStatus::Outputs(mmr_position, self.num_outputs), ); self.shared.set_state_info(StateInfo::HorizonSync(info)); @@ -513,104 +576,60 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { Ok(()) } + async fn validate_rangeproofs(&self, mut 
unpruned_outputs: Vec) -> Result<(), HorizonSyncError> { + let concurrency = self.shared.config.sync_validation_concurrency; + let mut chunk_size = unpruned_outputs.len() / concurrency; + if unpruned_outputs.len() % concurrency > 0 { + chunk_size += 1; + } + // Validate rangeproofs in parallel + let mut tasks = (0..concurrency) + .map(|_| { + let end = cmp::min(unpruned_outputs.len(), chunk_size); + unpruned_outputs.drain(..end).collect::>() + }) + .map(|chunk| { + let prover = self.prover.clone(); + task::spawn_blocking(move || -> Result<(), HorizonSyncError> { + for o in chunk { + o.verify_range_proof(&prover) + .map_err(|err| HorizonSyncError::InvalidRangeProof(o.hash().to_hex(), err.to_string()))?; + } + Ok(()) + }) + }) + .collect::>(); + + while let Some(result) = tasks.next().await { + result??; + } + Ok(()) + } + // Finalize the horizon state synchronization by setting the chain metadata to the local tip and committing // the horizon state to the blockchain backend. async fn finalize_horizon_sync(&mut self) -> Result<(), HorizonSyncError> { debug!(target: LOG_TARGET, "Validating horizon state"); - let info = HorizonSyncInfo::new( - vec![self.sync_peer.peer_node_id().clone()], + self.shared.set_state_info(StateInfo::HorizonSync(HorizonSyncInfo::new( + vec![self.sync_peer.node_id().clone()], HorizonSyncStatus::Finalizing, - ); - self.shared.set_state_info(StateInfo::HorizonSync(info)); + ))); let header = self.db().fetch_chain_header(self.horizon_sync_height).await?; - let mut pruned_utxo_sum = HomomorphicCommitment::default(); - let mut pruned_kernel_sum = HomomorphicCommitment::default(); - - let mut prev_mmr = 0; - let mut prev_kernel_mmr = 0; - let bitmap = Arc::new( - self.db() - .fetch_complete_deleted_bitmap_at(header.hash().clone()) - .await? 
- .into_bitmap(), - ); - let expected_prev_best_block = self.shared.db.get_chain_metadata().await?.best_block().clone(); - for h in 0..=header.height() { - let curr_header = self.db().fetch_chain_header(h).await?; - - trace!( - target: LOG_TARGET, - "Fetching utxos from db: height:{}, header.output_mmr:{}, prev_mmr:{}, end:{}", - curr_header.height(), - curr_header.header().output_mmr_size, - prev_mmr, - curr_header.header().output_mmr_size - 1 - ); - let (utxos, _) = self - .db() - .fetch_utxos_by_mmr_position(prev_mmr, curr_header.header().output_mmr_size - 1, bitmap.clone()) - .await?; - trace!( - target: LOG_TARGET, - "Fetching kernels from db: height:{}, header.kernel_mmr:{}, prev_mmr:{}, end:{}", - curr_header.height(), - curr_header.header().kernel_mmr_size, - prev_kernel_mmr, - curr_header.header().kernel_mmr_size - 1 - ); - let kernels = self - .db() - .fetch_kernels_by_mmr_position(prev_kernel_mmr, curr_header.header().kernel_mmr_size - 1) - .await?; - - let mut utxo_sum = HomomorphicCommitment::default(); - debug!(target: LOG_TARGET, "Number of kernels returned: {}", kernels.len()); - debug!(target: LOG_TARGET, "Number of utxos returned: {}", utxos.len()); - let mut prune_counter = 0; - for u in utxos { - match u { - PrunedOutput::NotPruned { output } => { - utxo_sum = &output.commitment + &utxo_sum; - }, - _ => { - prune_counter += 1; - }, - } - } - if prune_counter > 0 { - debug!(target: LOG_TARGET, "Pruned {} outputs", prune_counter); - } - prev_mmr = curr_header.header().output_mmr_size; - - pruned_utxo_sum = &utxo_sum + &pruned_utxo_sum; - - for k in kernels { - pruned_kernel_sum = &k.excess + &pruned_kernel_sum; - } - prev_kernel_mmr = curr_header.header().kernel_mmr_size; - - trace!( - target: LOG_TARGET, - "Height: {} Kernel sum:{:?} Pruned UTXO sum: {:?}", - h, - pruned_kernel_sum, - pruned_utxo_sum - ); - } self.shared .sync_validators .final_horizon_state .validate( - &*self.db().clone().into_inner().db_read_access()?, + 
&*self.db().inner().db_read_access()?, header.height(), - &pruned_utxo_sum, - &pruned_kernel_sum, + &self.utxo_sum, + &self.kernel_sum, ) .map_err(HorizonSyncError::FinalStateValidationFailed)?; + let metadata = self.db().get_chain_metadata().await?; info!( target: LOG_TARGET, "Horizon state validation succeeded! Committing horizon state." @@ -621,9 +640,9 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { header.height(), header.hash().clone(), header.accumulated_data().total_accumulated_difficulty, - expected_prev_best_block, + metadata.best_block().clone(), ) - .set_pruned_height(header.height(), pruned_kernel_sum, pruned_utxo_sum) + .set_pruned_height(header.height(), self.kernel_sum.clone(), self.utxo_sum.clone()) .commit() .await?; diff --git a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs index 7cf95f2d4f..b17da13bc3 100644 --- a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs @@ -23,7 +23,7 @@ use super::error::BlockSyncError; use crate::{ base_node::{ - sync::{hooks::Hooks, rpc}, + sync::{hooks::Hooks, rpc, SyncPeer}, BlockSyncConfig, }, blocks::{Block, ChainBlock}, @@ -41,11 +41,7 @@ use std::{ sync::Arc, time::{Duration, Instant}, }; -use tari_comms::{ - connectivity::{ConnectivityRequester, ConnectivitySelection}, - peer_manager::NodeId, - PeerConnection, -}; +use tari_comms::{connectivity::ConnectivityRequester, peer_manager::NodeId, PeerConnection}; use tracing; const LOG_TARGET: &str = "c::bn::block_sync"; @@ -54,7 +50,7 @@ pub struct BlockSynchronizer { config: BlockSyncConfig, db: AsyncBlockchainDb, connectivity: ConnectivityRequester, - sync_peer: Option, + sync_peer: SyncPeer, block_validator: Arc, hooks: Hooks, } @@ -64,7 +60,7 @@ impl BlockSynchronizer { config: BlockSyncConfig, db: AsyncBlockchainDb, connectivity: ConnectivityRequester, - sync_peer: 
Option, + sync_peer: SyncPeer, block_validator: Arc, ) -> Self { Self { @@ -89,7 +85,7 @@ impl BlockSynchronizer { #[tracing::instrument(skip(self), err)] pub async fn synchronize(&mut self) -> Result<(), BlockSyncError> { - let peer_conn = self.get_next_sync_peer().await?; + let peer_conn = self.connect_to_sync_peer().await?; let node_id = peer_conn.peer_node_id().clone(); info!( target: LOG_TARGET, @@ -108,20 +104,9 @@ impl BlockSynchronizer { } } - async fn get_next_sync_peer(&mut self) -> Result { - match self.sync_peer { - Some(ref peer) => Ok(peer.clone()), - None => { - let mut peers = self - .connectivity - .select_connections(ConnectivitySelection::random_nodes(1, vec![])) - .await?; - if peers.is_empty() { - return Err(BlockSyncError::NoSyncPeers); - } - Ok(peers.remove(0)) - }, - } + async fn connect_to_sync_peer(&mut self) -> Result { + let connection = self.connectivity.dial_peer(self.sync_peer.node_id().clone()).await?; + Ok(connection) } async fn attempt_block_sync(&mut self, mut conn: PeerConnection) -> Result<(), BlockSyncError> { diff --git a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs index c5a69a4ea0..a332a53420 100644 --- a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs @@ -97,7 +97,7 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { self.hooks.add_on_rewind_hook(hook); } - pub async fn synchronize(&mut self) -> Result { + pub async fn synchronize(&mut self) -> Result { debug!(target: LOG_TARGET, "Starting header sync.",); self.hooks.call_on_starting_hook(); @@ -115,7 +115,7 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { "Attempting to synchronize headers with `{}`", node_id ); match self.attempt_sync(&sync_peer, peer_conn.clone()).await { - Ok(()) => return Ok(peer_conn), + Ok(()) => return Ok(sync_peer), // Try another peer 
Err(err @ BlockHeaderSyncError::NotInSync) => { warn!(target: LOG_TARGET, "{}", err); diff --git a/base_layer/core/src/blocks/accumulated_data.rs b/base_layer/core/src/blocks/accumulated_data.rs index c14f494c2e..7ba435dfd0 100644 --- a/base_layer/core/src/blocks/accumulated_data.rs +++ b/base_layer/core/src/blocks/accumulated_data.rs @@ -49,29 +49,32 @@ use tari_mmr::{pruned_hashset::PrunedHashSet, ArrayLike}; const LOG_TARGET: &str = "c::bn::acc_data"; -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize, Clone)] pub struct BlockAccumulatedData { pub(crate) kernels: PrunedHashSet, pub(crate) outputs: PrunedHashSet, + pub(crate) witness: PrunedHashSet, pub(crate) deleted: DeletedBitmap, - pub(crate) range_proofs: PrunedHashSet, - pub(crate) kernel_sum: Commitment, + pub(crate) cumulative_kernel_sum: Commitment, + pub(crate) cumulative_utxo_sum: Commitment, } impl BlockAccumulatedData { pub fn new( kernels: PrunedHashSet, outputs: PrunedHashSet, - range_proofs: PrunedHashSet, + witness: PrunedHashSet, deleted: Bitmap, - total_kernel_sum: Commitment, + cumulative_kernel_sum: Commitment, + cumulative_utxo_sum: Commitment, ) -> Self { Self { kernels, outputs, - range_proofs, + witness, deleted: DeletedBitmap { deleted }, - kernel_sum: total_kernel_sum, + cumulative_kernel_sum, + cumulative_utxo_sum, } } @@ -79,12 +82,21 @@ impl BlockAccumulatedData { &self.deleted.deleted } + pub fn set_deleted(&mut self, deleted: DeletedBitmap) -> &mut Self { + self.deleted = deleted; + self + } + pub fn dissolve(self) -> (PrunedHashSet, PrunedHashSet, PrunedHashSet, Bitmap) { - (self.kernels, self.outputs, self.range_proofs, self.deleted.deleted) + (self.kernels, self.outputs, self.witness, self.deleted.deleted) } - pub fn kernel_sum(&self) -> &Commitment { - &self.kernel_sum + pub fn cumulative_kernel_sum(&self) -> &Commitment { + &self.cumulative_kernel_sum + } + + pub fn cumulative_utxo_sum(&self) -> &Commitment { + &self.cumulative_utxo_sum } } @@ 
-96,8 +108,9 @@ impl Default for BlockAccumulatedData { deleted: DeletedBitmap { deleted: Bitmap::create(), }, - range_proofs: Default::default(), - kernel_sum: Default::default(), + witness: Default::default(), + cumulative_kernel_sum: Default::default(), + cumulative_utxo_sum: Default::default(), } } } @@ -110,11 +123,21 @@ impl Display for BlockAccumulatedData { self.outputs.len().unwrap_or(0), self.deleted.deleted.cardinality(), self.kernels.len().unwrap_or(0), - self.range_proofs.len().unwrap_or(0) + self.witness.len().unwrap_or(0) ) } } +#[derive(Debug, Clone, Default)] +pub struct UpdateBlockAccumulatedData { + pub kernel_hash_set: Option, + pub utxo_hash_set: Option, + pub witness_hash_set: Option, + pub deleted_diff: Option, + pub utxo_sum: Option, + pub kernel_sum: Option, +} + /// Wrapper struct to serialize and deserialize Bitmap #[derive(Debug, Clone)] pub struct DeletedBitmap { diff --git a/base_layer/core/src/blocks/mod.rs b/base_layer/core/src/blocks/mod.rs index 19a49c3071..3b7eb851f1 100644 --- a/base_layer/core/src/blocks/mod.rs +++ b/base_layer/core/src/blocks/mod.rs @@ -30,6 +30,7 @@ pub use accumulated_data::{ ChainHeader, CompleteDeletedBitmap, DeletedBitmap, + UpdateBlockAccumulatedData, }; mod error; diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index bf7c9b4a06..d07925e225 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -32,6 +32,7 @@ use crate::{ DeletedBitmap, HistoricalBlock, NewBlockTemplate, + UpdateBlockAccumulatedData, }, chain_storage::{ blockchain_database::MmrRoots, @@ -61,7 +62,6 @@ use tari_common_types::{ chain_metadata::ChainMetadata, types::{BlockHash, Commitment, HashOutput, Signature}, }; -use tari_mmr::pruned_hashset::PrunedHashSet; const LOG_TARGET: &str = "c::bn::async_db"; @@ -145,7 +145,7 @@ impl AsyncBlockchainDb { //---------------------------------- Metadata 
--------------------------------------------// make_async_fn!(get_chain_metadata() -> ChainMetadata, "get_chain_metadata"); - make_async_fn!(fetch_horizon_data() -> Option, "fetch_horizon_data"); + make_async_fn!(fetch_horizon_data() -> HorizonData, "fetch_horizon_data"); //---------------------------------- TXO --------------------------------------------// make_async_fn!(fetch_utxo(hash: HashOutput) -> Option, "fetch_utxo"); @@ -228,6 +228,8 @@ impl AsyncBlockchainDb { //---------------------------------- Misc. --------------------------------------------// + make_async_fn!(prune_to_height(height: u64) -> (), "prune_to_height"); + make_async_fn!(rewind_to_height(height: u64) -> Vec>, "rewind_to_height"); make_async_fn!(rewind_to_hash(hash: BlockHash) -> Vec>, "rewind_to_hash"); @@ -280,11 +282,11 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> { &mut self, height: u64, hash: HashOutput, - accumulated_data: u128, + accumulated_difficulty: u128, expected_prev_best_block: HashOutput, ) -> &mut Self { self.transaction - .set_best_block(height, hash, accumulated_data, expected_prev_best_block); + .set_best_block(height, hash, accumulated_difficulty, expected_prev_best_block); self } @@ -328,23 +330,12 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> { self } - pub fn update_pruned_hash_set( - &mut self, - mmr_tree: MmrTree, - header_hash: HashOutput, - pruned_hash_set: PrunedHashSet, - ) -> &mut Self { - self.transaction - .update_pruned_hash_set(mmr_tree, header_hash, pruned_hash_set); - self - } - - pub fn update_block_accumulated_data_with_deleted_diff( + pub fn update_block_accumulated_data_via_horizon_sync( &mut self, header_hash: HashOutput, - deleted: Bitmap, + values: UpdateBlockAccumulatedData, ) -> &mut Self { - self.transaction.update_deleted_with_diff(header_hash, deleted); + self.transaction.update_block_accumulated_data(header_hash, values); self } diff --git 
a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 52c1ce2099..1178a9ccae 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -31,6 +31,7 @@ use crate::{ DeletedBitmap, HistoricalBlock, NewBlockTemplate, + UpdateBlockAccumulatedData, }, chain_storage::{ consts::{ @@ -208,8 +209,30 @@ where B: BlockchainBackend }; if is_empty { info!(target: LOG_TARGET, "Blockchain db is empty. Adding genesis block."); - let genesis_block = blockchain_db.consensus_manager.get_genesis_block(); - blockchain_db.insert_block(Arc::new(genesis_block))?; + let genesis_block = Arc::new(blockchain_db.consensus_manager.get_genesis_block()); + blockchain_db.insert_block(genesis_block.clone())?; + let mut txn = DbTransaction::new(); + let utxo_sum = genesis_block + .block() + .body + .outputs() + .iter() + .map(|k| &k.commitment) + .sum::(); + let kernel_sum = genesis_block + .block() + .body + .kernels() + .iter() + .map(|k| &k.excess) + .sum::(); + txn.update_block_accumulated_data(genesis_block.hash().clone(), UpdateBlockAccumulatedData { + utxo_sum: Some(utxo_sum.clone()), + kernel_sum: Some(kernel_sum.clone()), + ..Default::default() + }); + txn.set_pruned_height(0, kernel_sum, utxo_sum); + blockchain_db.write(txn)?; blockchain_db.store_pruning_horizon(config.pruning_horizon)?; } if cleanup_orphans_at_startup { @@ -573,7 +596,10 @@ where B: BlockchainBackend /// Returns the sum of all kernels pub fn fetch_kernel_commitment_sum(&self, at_hash: &HashOutput) -> Result { - Ok(self.fetch_block_accumulated_data(at_hash.clone())?.kernel_sum) + Ok(self + .fetch_block_accumulated_data(at_hash.clone())? + .cumulative_kernel_sum() + .clone()) } /// Returns `n` hashes from height _h - offset_ where _h_ is the tip header height back to `h - n - offset`. 
@@ -869,6 +895,12 @@ where B: BlockchainBackend store_pruning_horizon(&mut *db, pruning_horizon) } + /// Prunes the blockchain up to and including the given height + pub fn prune_to_height(&self, height: u64) -> Result<(), ChainStorageError> { + let mut db = self.db_write_access()?; + prune_to_height(&mut *db, height) + } + /// Fetch a block from the blockchain database. /// /// # Returns @@ -972,9 +1004,9 @@ where B: BlockchainBackend rewind_to_hash(&mut *db, hash) } - pub fn fetch_horizon_data(&self) -> Result, ChainStorageError> { + pub fn fetch_horizon_data(&self) -> Result { let db = self.db_read_access()?; - db.fetch_horizon_data() + Ok(db.fetch_horizon_data()?.unwrap_or_default()) } pub fn fetch_complete_deleted_bitmap_at( @@ -1071,7 +1103,7 @@ pub fn calculate_mmr_roots(db: &T, block: &Block) -> Resul let BlockAccumulatedData { kernels, outputs, - range_proofs, + witness: range_proofs, .. } = db .fetch_block_accumulated_data(&header.prev_hash)? @@ -2034,6 +2066,7 @@ fn cleanup_orphans(db: &mut T, orphan_storage_capacity: us db.delete_oldest_orphans(horizon_height, orphan_storage_capacity) } + fn prune_database_if_needed( db: &mut T, pruning_horizon: u64, @@ -2055,34 +2088,60 @@ fn prune_database_if_needed( pruning_interval, ); if metadata.pruned_height() < abs_pruning_horizon.saturating_sub(pruning_interval) { - let last_pruned = metadata.pruned_height(); - info!( - target: LOG_TARGET, - "Pruning blockchain database at height {} (was={})", abs_pruning_horizon, last_pruned, - ); - let mut last_block = db.fetch_block_accumulated_data_by_height(last_pruned).or_not_found( + prune_to_height(db, abs_pruning_horizon - 1)?; + } + + Ok(()) +} + +fn prune_to_height(db: &mut T, target_horizon_height: u64) -> Result<(), ChainStorageError> { + let metadata = db.fetch_chain_metadata()?; + let last_pruned = metadata.pruned_height(); + if target_horizon_height < last_pruned { + return Err(ChainStorageError::InvalidArguments { + func: "prune_to_block", + arg: 
"target_horizon_height", + message: format!( + "Target pruning horizon {} is less than current pruning horizon {}", + target_horizon_height, last_pruned + ), + }); + } + + info!( + target: LOG_TARGET, + "Pruning blockchain database at height {} (was={})", target_horizon_height, last_pruned, + ); + let mut last_block = db.fetch_block_accumulated_data_by_height(last_pruned).or_not_found( + "BlockAccumulatedData", + "height", + last_pruned.to_string(), + )?; + let mut txn = DbTransaction::new(); + for block_to_prune in (last_pruned + 1)..=target_horizon_height { + let header = db.fetch_chain_header_by_height(block_to_prune)?; + let curr_block = db.fetch_block_accumulated_data_by_height(block_to_prune).or_not_found( "BlockAccumulatedData", "height", - last_pruned.to_string(), + block_to_prune.to_string(), )?; - let mut txn = DbTransaction::new(); - for block_to_prune in (last_pruned + 1)..abs_pruning_horizon { - let curr_block = db.fetch_block_accumulated_data_by_height(block_to_prune).or_not_found( - "BlockAccumulatedData", - "height", - block_to_prune.to_string(), - )?; - // Note, this could actually be done in one step instead of each block, since deleted is - // accumulated - let inputs_to_prune = curr_block.deleted.bitmap().clone() - last_block.deleted.bitmap(); - last_block = curr_block; - - txn.prune_outputs_and_update_horizon(inputs_to_prune.to_vec(), block_to_prune); - } + // Note, this could actually be done in one step instead of each block, since deleted is + // accumulated + let output_mmr_positions = curr_block.deleted() - last_block.deleted(); + last_block = curr_block; - db.write(txn)?; + txn.prune_outputs_at_positions(output_mmr_positions.to_vec()); + txn.delete_all_inputs_in_block(header.hash().clone()); } + txn.set_pruned_height( + target_horizon_height, + last_block.cumulative_kernel_sum().clone(), + last_block.cumulative_utxo_sum().clone(), + ); + // TODO: prune block accumulated data + + db.write(txn)?; Ok(()) } diff --git 
a/base_layer/core/src/chain_storage/db_transaction.rs b/base_layer/core/src/chain_storage/db_transaction.rs index 970988d655..01e7bcb78d 100644 --- a/base_layer/core/src/chain_storage/db_transaction.rs +++ b/base_layer/core/src/chain_storage/db_transaction.rs @@ -20,8 +20,8 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use crate::{ - blocks::{Block, BlockHeader, BlockHeaderAccumulatedData, ChainBlock, ChainHeader}, - chain_storage::{error::ChainStorageError, MmrTree}, + blocks::{Block, BlockHeader, BlockHeaderAccumulatedData, ChainBlock, ChainHeader, UpdateBlockAccumulatedData}, + chain_storage::error::ChainStorageError, transactions::transaction::{TransactionKernel, TransactionOutput}, }; use croaring::Bitmap; @@ -35,7 +35,6 @@ use tari_crypto::tari_utilities::{ hex::{to_hex, Hex}, Hashable, }; -use tari_mmr::pruned_hashset::PrunedHashSet; #[derive(Debug)] pub struct DbTransaction { @@ -142,31 +141,26 @@ impl DbTransaction { self } - pub fn update_pruned_hash_set( - &mut self, - mmr_tree: MmrTree, - header_hash: HashOutput, - pruned_hash_set: PrunedHashSet, - ) -> &mut Self { - self.operations.push(WriteOperation::UpdatePrunedHashSet { - mmr_tree, - header_hash, - pruned_hash_set: Box::new(pruned_hash_set), + pub fn prune_outputs_at_positions(&mut self, output_mmr_positions: Vec) -> &mut Self { + self.operations.push(WriteOperation::PruneOutputsAtMmrPositions { + output_positions: output_mmr_positions, }); self } - pub fn prune_outputs_and_update_horizon(&mut self, output_mmr_positions: Vec, horizon: u64) -> &mut Self { - self.operations.push(WriteOperation::PruneOutputsAndUpdateHorizon { - output_positions: output_mmr_positions, - horizon, - }); + pub fn delete_all_inputs_in_block(&mut self, block_hash: BlockHash) -> &mut Self { + self.operations + .push(WriteOperation::DeleteAllInputsInBlock { block_hash }); self } 
- pub fn update_deleted_with_diff(&mut self, header_hash: HashOutput, deleted: Bitmap) -> &mut Self { + pub fn update_block_accumulated_data( + &mut self, + header_hash: HashOutput, + values: UpdateBlockAccumulatedData, + ) -> &mut Self { self.operations - .push(WriteOperation::UpdateDeletedBlockAccumulatedDataWithDiff { header_hash, deleted }); + .push(WriteOperation::UpdateBlockAccumulatedData { header_hash, values }); self } @@ -298,25 +292,18 @@ pub enum WriteOperation { DeleteOrphanChainTip(HashOutput), InsertOrphanChainTip(HashOutput), InsertMoneroSeedHeight(Vec, u64), - UpdatePrunedHashSet { - mmr_tree: MmrTree, - header_hash: HashOutput, - pruned_hash_set: Box, - }, - UpdateDeletedBlockAccumulatedDataWithDiff { + UpdateBlockAccumulatedData { header_hash: HashOutput, - deleted: Bitmap, + values: UpdateBlockAccumulatedData, }, UpdateDeletedBitmap { deleted: Bitmap, }, - PruneOutputsAndUpdateHorizon { + PruneOutputsAtMmrPositions { output_positions: Vec, - horizon: u64, }, - UpdateKernelSum { - header_hash: HashOutput, - kernel_sum: Commitment, + DeleteAllInputsInBlock { + block_hash: BlockHash, }, SetAccumulatedDataForOrphan(BlockHeaderAccumulatedData), SetBestBlock { @@ -383,14 +370,6 @@ impl fmt::Display for WriteOperation { write!(f, "Insert Monero seed string {} for height: {}", data.to_hex(), height) }, InsertChainOrphanBlock(block) => write!(f, "InsertChainOrphanBlock({})", block.hash().to_hex()), - UpdatePrunedHashSet { - mmr_tree, header_hash, .. - } => write!( - f, - "Update pruned hash set: {} header: {}", - mmr_tree, - header_hash.to_hex() - ), InsertPrunedOutput { header_hash: _, header_height: _, @@ -398,23 +377,14 @@ impl fmt::Display for WriteOperation { witness_hash: _, mmr_position: _, } => write!(f, "Insert pruned output"), - UpdateDeletedBlockAccumulatedDataWithDiff { - header_hash: _, - deleted: _, - } => write!(f, "Add deleted data for block"), + UpdateBlockAccumulatedData { header_hash, .. 
} => { + write!(f, "Update Block data for block {}", header_hash.to_hex()) + }, UpdateDeletedBitmap { deleted } => { write!(f, "Merge deleted bitmap at tip ({} new indexes)", deleted.cardinality()) }, - PruneOutputsAndUpdateHorizon { - output_positions, - horizon, - } => write!( - f, - "Prune {} outputs and set horizon to {}", - output_positions.len(), - horizon - ), - UpdateKernelSum { header_hash, .. } => write!(f, "Update kernel sum for block: {}", header_hash.to_hex()), + PruneOutputsAtMmrPositions { output_positions } => write!(f, "Prune {} output(s)", output_positions.len()), + DeleteAllInputsInBlock { block_hash } => write!(f, "Delete outputs in block {}", block_hash.to_hex()), SetAccumulatedDataForOrphan(accumulated_data) => { write!(f, "Set accumulated data for orphan {}", accumulated_data) }, diff --git a/base_layer/core/src/chain_storage/error.rs b/base_layer/core/src/chain_storage/error.rs index 14cbd7a53f..c776ec2222 100644 --- a/base_layer/core/src/chain_storage/error.rs +++ b/base_layer/core/src/chain_storage/error.rs @@ -169,13 +169,7 @@ pub trait OrNotFound { impl OrNotFound for Result, ChainStorageError> { fn or_not_found(self, entity: &'static str, field: &'static str, value: String) -> Result { - match self { - Ok(inner) => match inner { - None => Err(ChainStorageError::ValueNotFound { entity, field, value }), - Some(v) => Ok(v), - }, - Err(err) => Err(err), - } + self.and_then(|inner| inner.ok_or(ChainStorageError::ValueNotFound { entity, field, value })) } } diff --git a/base_layer/core/src/chain_storage/horizon_data.rs b/base_layer/core/src/chain_storage/horizon_data.rs index 6213d490f3..ae6a120bb4 100644 --- a/base_layer/core/src/chain_storage/horizon_data.rs +++ b/base_layer/core/src/chain_storage/horizon_data.rs @@ -21,9 +21,8 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use serde::{Deserialize, Serialize}; use tari_common_types::types::Commitment; -use tari_crypto::tari_utilities::ByteArray; -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, Default)] pub struct HorizonData { kernel_sum: Commitment, utxo_sum: Commitment, @@ -35,10 +34,7 @@ impl HorizonData { } pub fn zero() -> Self { - HorizonData { - kernel_sum: Commitment::from_bytes(&[0u8; 32]).expect("Could not create commitment"), - utxo_sum: Commitment::from_bytes(&[0u8; 32]).expect("Could not create commitment"), - } + Default::default() } pub fn kernel_sum(&self) -> &Commitment { diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 0b8e6333af..8859d77db6 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -33,6 +33,7 @@ use crate::{ ChainBlock, ChainHeader, DeletedBitmap, + UpdateBlockAccumulatedData, }, chain_storage::{ db_transaction::{DbKey, DbTransaction, DbValue, WriteOperation}, @@ -85,7 +86,7 @@ use tari_common_types::{ types::{BlockHash, Commitment, HashDigest, HashOutput, Signature, BLOCK_HASH_LENGTH}, }; use tari_crypto::tari_utilities::{hash::Hashable, hex::Hex, ByteArray}; -use tari_mmr::{pruned_hashset::PrunedHashSet, Hash, MerkleMountainRange, MutableMmr}; +use tari_mmr::{Hash, MerkleMountainRange, MutableMmr}; use tari_storage::lmdb_store::{db, LMDBBuilder, LMDBConfig, LMDBStore}; type DatabaseRef = Arc>; @@ -311,32 +312,19 @@ impl LMDBDatabase { self.insert_orphan_block(&write_txn, chain_block.block())?; self.set_accumulated_data_for_orphan(&write_txn, chain_block.accumulated_data())?; }, - UpdatePrunedHashSet { - mmr_tree, - header_hash, - pruned_hash_set, - } => { - self.update_pruned_hash_set(&write_txn, *mmr_tree, header_hash, (**pruned_hash_set).clone())?; - }, - UpdateDeletedBlockAccumulatedDataWithDiff { header_hash, deleted } => { - 
self.update_deleted_block_accumulated_data_with_diff(&write_txn, header_hash, deleted.clone())?; + UpdateBlockAccumulatedData { header_hash, values } => { + self.update_block_accumulated_data(&write_txn, header_hash, values.clone())?; }, UpdateDeletedBitmap { deleted } => { let mut bitmap = self.load_deleted_bitmap_model(&write_txn)?; bitmap.merge(deleted)?; bitmap.finish()?; }, - PruneOutputsAndUpdateHorizon { - output_positions, - horizon, - } => { - self.prune_outputs_and_update_horizon(&write_txn, output_positions, *horizon)?; + PruneOutputsAtMmrPositions { output_positions } => { + self.prune_outputs_at_positions(&write_txn, output_positions)?; }, - UpdateKernelSum { - header_hash, - kernel_sum, - } => { - self.update_block_accumulated_data_kernel_sum(&write_txn, header_hash, kernel_sum.clone())?; + DeleteAllInputsInBlock { block_hash } => { + self.delete_all_inputs_in_block(&write_txn, block_hash)?; }, SetBestBlock { height, @@ -845,6 +833,7 @@ impl LMDBDatabase { bitmap.remove(block_accum_data.deleted())?; bitmap.finish()?; + info!(target: LOG_TARGET, "delete accum {}", height); lmdb_delete( write_txn, &self.block_accumulated_data_db, @@ -1040,24 +1029,24 @@ impl LMDBDatabase { self.fetch_block_accumulated_data(&*txn, header.height - 1)? .ok_or_else(|| ChainStorageError::ValueNotFound { entity: "BlockAccumulatedData", - field: "prev_hash", - value: header.prev_hash.to_hex(), + field: "height", + value: (header.height - 1).to_string(), })? }; - let mut total_kernel_sum = Commitment::default(); - let mut total_utxo_sum = Commitment::default(); let BlockAccumulatedData { kernels: pruned_kernel_set, outputs: pruned_output_set, - range_proofs: pruned_proof_set, + witness: pruned_proof_set, + cumulative_kernel_sum: mut kernel_sum, + cumulative_utxo_sum: mut utxo_sum, .. 
} = data; let mut kernel_mmr = MerkleMountainRange::::new(pruned_kernel_set); for kernel in kernels { - total_kernel_sum = &total_kernel_sum + &kernel.excess; + kernel_sum = &kernel_sum + &kernel.excess; let pos = kernel_mmr.push(kernel.hash())?; trace!( target: LOG_TARGET, @@ -1070,7 +1059,7 @@ impl LMDBDatabase { let mut output_mmr = MutableMmr::::new(pruned_output_set, Bitmap::create())?; let mut witness_mmr = MerkleMountainRange::::new(pruned_proof_set); for output in outputs { - total_utxo_sum = &total_utxo_sum + &output.commitment; + utxo_sum = &utxo_sum + &output.commitment; output_mmr.push(output.hash())?; witness_mmr.push(output.witness_hash())?; debug!(target: LOG_TARGET, "Inserting output `{}`", output.commitment.to_hex()); @@ -1084,7 +1073,7 @@ impl LMDBDatabase { } for input in inputs { - total_utxo_sum = &total_utxo_sum - &input.commitment; + utxo_sum = &utxo_sum - &input.commitment; let index = self .fetch_mmr_leaf_index(&**txn, MmrTree::Utxo, &input.output_hash())? .ok_or(ChainStorageError::UnspendableInput)?; @@ -1119,7 +1108,8 @@ impl LMDBDatabase { output_mmr.mmr().get_pruned_hash_set()?, witness_mmr.get_pruned_hash_set()?, deleted_at_current_height, - total_kernel_sum, + kernel_sum, + utxo_sum, ), )?; @@ -1133,6 +1123,7 @@ impl LMDBDatabase { header_height: u64, data: &BlockAccumulatedData, ) -> Result<(), ChainStorageError> { + info!(target: LOG_TARGET, "insert accum {}", data); lmdb_insert( txn, &self.block_accumulated_data_db, @@ -1142,31 +1133,11 @@ impl LMDBDatabase { ) } - fn update_block_accumulated_data_kernel_sum( - &self, - write_txn: &WriteTransaction<'_>, - header_hash: &HashOutput, - kernel_sum: Commitment, - ) -> Result<(), ChainStorageError> { - let height = self.fetch_height_from_hash(write_txn, header_hash).or_not_found( - "BlockHash", - "hash", - header_hash.to_hex(), - )?; - let mut block_accum_data = self - .fetch_block_accumulated_data(write_txn, height)? 
- .unwrap_or_default(); - - block_accum_data.kernel_sum = kernel_sum; - lmdb_replace(write_txn, &self.block_accumulated_data_db, &height, &block_accum_data)?; - Ok(()) - } - - fn update_deleted_block_accumulated_data_with_diff( + fn update_block_accumulated_data( &self, write_txn: &WriteTransaction<'_>, header_hash: &HashOutput, - deleted: Bitmap, + values: UpdateBlockAccumulatedData, ) -> Result<(), ChainStorageError> { let height = self.fetch_height_from_hash(write_txn, header_hash).or_not_found( "BlockHash", @@ -1175,10 +1146,28 @@ impl LMDBDatabase { )?; let mut block_accum_data = self - .fetch_block_accumulated_data(write_txn, height)? + .fetch_block_accumulated_data(&*write_txn, height)? .unwrap_or_default(); - block_accum_data.deleted = deleted.into(); + if let Some(deleted_diff) = values.deleted_diff { + block_accum_data.deleted = deleted_diff; + } + if let Some(utxo_sum) = values.utxo_sum { + block_accum_data.cumulative_utxo_sum = utxo_sum; + } + if let Some(kernel_sum) = values.kernel_sum { + block_accum_data.cumulative_kernel_sum = kernel_sum; + } + if let Some(kernel_hash_set) = values.kernel_hash_set { + block_accum_data.kernels = kernel_hash_set; + } + if let Some(utxo_hash_set) = values.utxo_hash_set { + block_accum_data.outputs = utxo_hash_set; + } + if let Some(witness_hash_set) = values.witness_hash_set { + block_accum_data.witness = witness_hash_set; + } + lmdb_replace(write_txn, &self.block_accumulated_data_db, &height, &block_accum_data)?; Ok(()) } @@ -1206,36 +1195,21 @@ impl LMDBDatabase { Ok(()) } - fn update_pruned_hash_set( + fn delete_all_inputs_in_block( &self, - write_txn: &WriteTransaction<'_>, - mmr_tree: MmrTree, - header_hash: &HashOutput, - pruned_hash_set: PrunedHashSet, + txn: &WriteTransaction<'_>, + block_hash: &BlockHash, ) -> Result<(), ChainStorageError> { - let height = self.fetch_height_from_hash(write_txn, header_hash).or_not_found( - "BlockHash", - "hash", - header_hash.to_hex(), - )?; - let mut block_accum_data = self - 
.fetch_block_accumulated_data(write_txn, height)? - .unwrap_or_default(); - match mmr_tree { - MmrTree::Kernel => block_accum_data.kernels = pruned_hash_set, - MmrTree::Utxo => block_accum_data.outputs = pruned_hash_set, - MmrTree::Witness => block_accum_data.range_proofs = pruned_hash_set, - } - - lmdb_replace(write_txn, &self.block_accumulated_data_db, &height, &block_accum_data)?; + let inputs = + lmdb_delete_keys_starting_with::(txn, &self.inputs_db, block_hash.to_hex().as_str())?; + debug!(target: LOG_TARGET, "Deleted {} input(s)", inputs.len()); Ok(()) } - fn prune_outputs_and_update_horizon( + fn prune_outputs_at_positions( &self, write_txn: &WriteTransaction<'_>, output_positions: &[u32], - horizon: u64, ) -> Result<(), ChainStorageError> { for pos in output_positions { let (_height, hash) = lmdb_first_after::<_, (u64, Vec)>( @@ -1249,12 +1223,6 @@ impl LMDBDatabase { self.prune_output(write_txn, &key)?; } - self.set_metadata( - write_txn, - MetadataKey::PrunedHeight, - MetadataValue::PrunedHeight(horizon), - )?; - Ok(()) } diff --git a/base_layer/core/src/mempool/config.rs b/base_layer/core/src/mempool/config.rs index 6db181b14a..10f4852ead 100644 --- a/base_layer/core/src/mempool/config.rs +++ b/base_layer/core/src/mempool/config.rs @@ -26,21 +26,12 @@ use std::time::Duration; use tari_common::{configuration::seconds, NetworkConfigPath}; /// Configuration for the Mempool. 
-#[derive(Clone, Copy, Deserialize, Serialize)] +#[derive(Clone, Copy, Deserialize, Serialize, Default)] pub struct MempoolConfig { pub unconfirmed_pool: UnconfirmedPoolConfig, pub reorg_pool: ReorgPoolConfig, } -impl Default for MempoolConfig { - fn default() -> Self { - Self { - unconfirmed_pool: UnconfirmedPoolConfig::default(), - reorg_pool: ReorgPoolConfig::default(), - } - } -} - impl NetworkConfigPath for MempoolConfig { fn main_key_prefix() -> &'static str { "mempool" diff --git a/base_layer/core/src/transactions/aggregated_body.rs b/base_layer/core/src/transactions/aggregated_body.rs index a7f3134678..685ab6c47b 100644 --- a/base_layer/core/src/transactions/aggregated_body.rs +++ b/base_layer/core/src/transactions/aggregated_body.rs @@ -451,11 +451,7 @@ impl AggregateBody { fn validate_range_proofs(&self, range_proof_service: &RangeProofService) -> Result<(), TransactionError> { trace!(target: LOG_TARGET, "Checking range proofs"); for o in &self.outputs { - if !o.verify_range_proof(range_proof_service)? 
{ - return Err(TransactionError::ValidationError( - "Range proof could not be verified".into(), - )); - } + o.verify_range_proof(range_proof_service)?; } Ok(()) } diff --git a/base_layer/core/src/transactions/coinbase_builder.rs b/base_layer/core/src/transactions/coinbase_builder.rs index 3c75d7fa84..90041e5876 100644 --- a/base_layer/core/src/transactions/coinbase_builder.rs +++ b/base_layer/core/src/transactions/coinbase_builder.rs @@ -323,7 +323,7 @@ mod test { assert!(factories .commitment .open_value(&p.spend_key, block_reward.into(), utxo.commitment())); - assert!(utxo.verify_range_proof(&factories.range_proof).unwrap()); + utxo.verify_range_proof(&factories.range_proof).unwrap(); assert!(utxo.features.flags.contains(OutputFlags::COINBASE_OUTPUT)); assert_eq!( tx.body.check_coinbase_output( diff --git a/base_layer/core/src/transactions/transaction.rs b/base_layer/core/src/transactions/transaction.rs index 9c59bbaddf..5731e5cbed 100644 --- a/base_layer/core/src/transactions/transaction.rs +++ b/base_layer/core/src/transactions/transaction.rs @@ -263,6 +263,8 @@ pub enum TransactionError { NoSignatureError, #[error("A range proof construction or verification has produced an error: {0}")] RangeProofError(#[from] RangeProofError), + #[error("Range proof verification has failed")] + InvalidRangeProof, #[error("An error occurred while performing a commitment signature: {0}")] SigningError(#[from] CommitmentSignatureError), #[error("Invalid kernel in body")] @@ -687,8 +689,12 @@ impl TransactionOutput { } /// Verify that range proof is valid - pub fn verify_range_proof(&self, prover: &RangeProofService) -> Result { - Ok(prover.verify(&self.proof.0, &self.commitment)) + pub fn verify_range_proof(&self, prover: &RangeProofService) -> Result<(), TransactionError> { + if prover.verify(&self.proof.0, &self.commitment) { + Ok(()) + } else { + Err(TransactionError::InvalidRangeProof) + } } /// Verify that the metadata signature is valid @@ -1477,7 +1483,7 @@ mod test { }); 
let script = unblinded_output1.script.clone(); let tx_output1 = unblinded_output1.as_transaction_output(&factories).unwrap(); - assert!(tx_output1.verify_range_proof(&factories.range_proof).unwrap()); + tx_output1.verify_range_proof(&factories.range_proof).unwrap(); let unblinded_output2 = test_params_2.create_unblinded_output(UtxoTestParams { value: (2u64.pow(32) + 1u64).into(), @@ -1517,7 +1523,8 @@ mod test { ) .unwrap(), ); - assert!(!tx_output3.verify_range_proof(&factories.range_proof).unwrap()); + let err = tx_output3.verify_range_proof(&factories.range_proof).unwrap_err(); + assert!(matches!(err, TransactionError::InvalidRangeProof)); } #[test] diff --git a/base_layer/core/src/transactions/transaction_protocol/recipient.rs b/base_layer/core/src/transactions/transaction_protocol/recipient.rs index c3c6f99ac1..f8cecc7c1e 100644 --- a/base_layer/core/src/transactions/transaction_protocol/recipient.rs +++ b/base_layer/core/src/transactions/transaction_protocol/recipient.rs @@ -263,7 +263,7 @@ mod test { assert!(factories .commitment .open_value(&p.spend_key, 500, &data.output.commitment)); - assert!(data.output.verify_range_proof(&factories.range_proof).unwrap()); + data.output.verify_range_proof(&factories.range_proof).unwrap(); let r_sum = &msg.public_nonce + &p.public_nonce; let e = build_challenge(&r_sum, &m); let s = Signature::sign(p.spend_key.clone(), p.nonce, &e).unwrap(); diff --git a/base_layer/core/src/transactions/transaction_protocol/sender.rs b/base_layer/core/src/transactions/transaction_protocol/sender.rs index b38577a2ad..ac642232b5 100644 --- a/base_layer/core/src/transactions/transaction_protocol/sender.rs +++ b/base_layer/core/src/transactions/transaction_protocol/sender.rs @@ -385,11 +385,7 @@ impl SenderTransactionProtocol { ) -> Result<(), TPE> { match &mut self.state { SenderState::CollectingSingleSignature(info) => { - if !rec.output.verify_range_proof(prover)? 
{ - return Err(TPE::ValidationError( - "Recipient output range proof failed to verify".into(), - )); - } + rec.output.verify_range_proof(prover)?; // Consolidate transaction info info.outputs.push(rec.output.clone()); @@ -738,7 +734,7 @@ mod test { crypto_factories::CryptoFactories, tari_amount::*, test_helpers::{create_test_input, create_unblinded_output, TestParams}, - transaction::{KernelFeatures, OutputFeatures, TransactionOutput}, + transaction::{KernelFeatures, OutputFeatures, TransactionError, TransactionOutput}, transaction_protocol::{ sender::SenderTransactionProtocol, single_receiver::SingleReceiverTransactionProtocol, @@ -1027,13 +1023,13 @@ mod test { // Receiver gets message, deserializes it etc, and creates his response let bob_info = SingleReceiverTransactionProtocol::create(&msg, b.nonce, b.spend_key, features, &factories, None).unwrap(); // Alice gets message back, deserializes it, etc - match alice.add_single_recipient_info(bob_info, &factories.range_proof) { - Ok(_) => panic!("Range proof should have failed to verify"), - Err(e) => assert_eq!( - e, - TransactionProtocolError::ValidationError("Recipient output range proof failed to verify".into()) - ), - } + let err = alice + .add_single_recipient_info(bob_info, &factories.range_proof) + .unwrap_err(); + assert!(matches!( + err, + TransactionProtocolError::TransactionBuildError(TransactionError::InvalidRangeProof) + )); } #[test] diff --git a/base_layer/core/src/transactions/transaction_protocol/single_receiver.rs b/base_layer/core/src/transactions/transaction_protocol/single_receiver.rs index ab26e3e80c..3a15d6a134 100644 --- a/base_layer/core/src/transactions/transaction_protocol/single_receiver.rs +++ b/base_layer/core/src/transactions/transaction_protocol/single_receiver.rs @@ -219,10 +219,7 @@ mod test { factories.commitment.open_value(&k, info.amount.into(), &out.commitment), "Output commitment is invalid" ); - assert!( - out.verify_range_proof(&factories.range_proof).unwrap(), - "Range 
proof is invalid" - ); + out.verify_range_proof(&factories.range_proof).unwrap(); assert!(out.features.flags.is_empty(), "Output features flags have changed"); } } diff --git a/base_layer/mmr/src/merkle_proof.rs b/base_layer/mmr/src/merkle_proof.rs index a5a458e4cc..60ab82bb37 100644 --- a/base_layer/mmr/src/merkle_proof.rs +++ b/base_layer/mmr/src/merkle_proof.rs @@ -54,7 +54,7 @@ pub enum MerkleProofError { } /// A Merkle proof that proves a particular element at a particular position exists in an MMR. -#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, PartialOrd, Ord)] +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, PartialOrd, Ord, Default)] pub struct MerkleProof { /// The size of the MMR at the time the proof was created. mmr_size: usize, @@ -66,16 +66,6 @@ pub struct MerkleProof { peaks: Vec, } -impl Default for MerkleProof { - fn default() -> MerkleProof { - MerkleProof { - mmr_size: 0, - path: Vec::default(), - peaks: Vec::default(), - } - } -} - impl MerkleProof { /// Build a Merkle Proof the given MMR at the given *leaf* position. This is usually the version you'll want to /// call, since you'll know the leaf index more often than the MMR index. 
diff --git a/common/src/build/protobuf.rs b/common/src/build/protobuf.rs index 6898052e94..875320a468 100644 --- a/common/src/build/protobuf.rs +++ b/common/src/build/protobuf.rs @@ -24,9 +24,12 @@ where P: AsRef + Display { .output() .unwrap(); - if !out.status.success() { - panic!("status: {} - {}", out.status, String::from_utf8_lossy(&out.stderr)); - } + assert!( + out.status.success(), + "status: {} - {}", + out.status, + String::from_utf8_lossy(&out.stderr) + ); } } diff --git a/comms/src/test_utils/factories/net_address.rs b/comms/src/test_utils/factories/net_address.rs index 8a4f0e31d2..09d2d3e03e 100644 --- a/comms/src/test_utils/factories/net_address.rs +++ b/comms/src/test_utils/factories/net_address.rs @@ -61,21 +61,12 @@ impl TestFactory for NetAddressesFactory { //---------------------------------- NetAddressFactory --------------------------------------------// -#[derive(Clone)] +#[derive(Clone, Default)] pub struct NetAddressFactory { port: Option, is_use_os_port: bool, } -impl Default for NetAddressFactory { - fn default() -> Self { - Self { - port: None, - is_use_os_port: false, - } - } -} - impl NetAddressFactory { factory_setter!(with_port, port, Option); From 25acb67beceffbd90646910385ec2f49535b36b0 Mon Sep 17 00:00:00 2001 From: Stanimal Date: Tue, 2 Nov 2021 15:42:16 +0400 Subject: [PATCH 03/11] re-add the commiment sum calculation at end of horizon sync --- .../horizon_state_synchronization.rs | 93 ++++++++++++++++++- 1 file changed, 89 insertions(+), 4 deletions(-) diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs index f8ce7afbf0..ffd6005f06 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs +++ 
b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs @@ -29,8 +29,9 @@ use crate::{ }, sync::{rpc, SyncPeer}, }, - blocks::{BlockHeader, UpdateBlockAccumulatedData}, - chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, ChainStorageError, MmrTree}, + blocks::{BlockHeader, ChainHeader, UpdateBlockAccumulatedData}, + chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, ChainStorageError, MmrTree, PrunedOutput}, + crypto::commitment::HomomorphicCommitment, proto::base_node::{ sync_utxo as proto_sync_utxo, sync_utxos_response::UtxoOrDeleted, @@ -617,6 +618,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { ))); let header = self.db().fetch_chain_header(self.horizon_sync_height).await?; + // TODO: Use accumulated sums + let (utxo_sum, kernel_sum) = self.calculate_commitment_sums(&header).await?; self.shared .sync_validators @@ -624,8 +627,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { .validate( &*self.db().inner().db_read_access()?, header.height(), - &self.utxo_sum, - &self.kernel_sum, + &utxo_sum, + &kernel_sum, ) .map_err(HorizonSyncError::FinalStateValidationFailed)?; @@ -649,6 +652,88 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { Ok(()) } + /// (UTXO sum, Kernel sum) + async fn calculate_commitment_sums( + &self, + header: &ChainHeader, + ) -> Result<(Commitment, Commitment), HorizonSyncError> { + let mut pruned_utxo_sum = HomomorphicCommitment::default(); + let mut pruned_kernel_sum = HomomorphicCommitment::default(); + + let mut prev_mmr = 0; + let mut prev_kernel_mmr = 0; + let bitmap = Arc::new( + self.db() + .fetch_complete_deleted_bitmap_at(header.hash().clone()) + .await? 
+ .into_bitmap(), + ); + for h in 0..=header.height() { + let curr_header = self.db().fetch_chain_header(h).await?; + + trace!( + target: LOG_TARGET, + "Fetching utxos from db: height:{}, header.output_mmr:{}, prev_mmr:{}, end:{}", + curr_header.height(), + curr_header.header().output_mmr_size, + prev_mmr, + curr_header.header().output_mmr_size - 1 + ); + let (utxos, _) = self + .db() + .fetch_utxos_by_mmr_position(prev_mmr, curr_header.header().output_mmr_size - 1, bitmap.clone()) + .await?; + trace!( + target: LOG_TARGET, + "Fetching kernels from db: height:{}, header.kernel_mmr:{}, prev_mmr:{}, end:{}", + curr_header.height(), + curr_header.header().kernel_mmr_size, + prev_kernel_mmr, + curr_header.header().kernel_mmr_size - 1 + ); + let kernels = self + .db() + .fetch_kernels_by_mmr_position(prev_kernel_mmr, curr_header.header().kernel_mmr_size - 1) + .await?; + + let mut utxo_sum = HomomorphicCommitment::default(); + debug!(target: LOG_TARGET, "Number of kernels returned: {}", kernels.len()); + debug!(target: LOG_TARGET, "Number of utxos returned: {}", utxos.len()); + let mut prune_counter = 0; + for u in utxos { + match u { + PrunedOutput::NotPruned { output } => { + utxo_sum = &output.commitment + &utxo_sum; + }, + _ => { + prune_counter += 1; + }, + } + } + if prune_counter > 0 { + debug!(target: LOG_TARGET, "Pruned {} outputs", prune_counter); + } + prev_mmr = curr_header.header().output_mmr_size; + + pruned_utxo_sum = &utxo_sum + &pruned_utxo_sum; + + for k in kernels { + pruned_kernel_sum = &k.excess + &pruned_kernel_sum; + } + prev_kernel_mmr = curr_header.header().kernel_mmr_size; + + trace!( + target: LOG_TARGET, + "Height: {} Kernel sum:{:?} Pruned UTXO sum: {:?}", + h, + pruned_kernel_sum, + pruned_utxo_sum + ); + } + + Ok((pruned_utxo_sum, pruned_kernel_sum)) + } + #[inline] fn db(&self) -> &AsyncBlockchainDb { &self.shared.db From c0e0717640b91a29e9051e59617d540d2001f444 Mon Sep 17 00:00:00 2001 From: Stanimal Date: Thu, 18 Nov 2021 15:29:56 +0400 
Subject: [PATCH 04/11] fix bad merge --- .../state_machine_service/states/horizon_state_sync.rs | 1 - .../horizon_state_sync/horizon_state_synchronization.rs | 6 ------ .../core/src/base_node/sync/header_sync/synchronizer.rs | 4 ++-- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs index 5d08f1f1f5..1e634cca80 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs @@ -44,7 +44,6 @@ use crate::{ chain_storage::BlockchainBackend, transactions::CryptoFactories, }; -use horizon_state_synchronization::HorizonStateSynchronization; use log::*; const LOG_TARGET: &str = "c::bn::state_machine_service::states::horizon_state_sync"; diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs index e3eb93d6d5..53802f00e2 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs @@ -94,12 +94,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { target: LOG_TARGET, "Preparing database for horizon sync to height (#{})", self.horizon_sync_height ); - let mut connection = self - .shared - .connectivity - .dial_peer(self.sync_peer.node_id().clone()) - .await?; - let mut client = connection.connect_rpc::().await?; let header = self.db().fetch_header(self.horizon_sync_height).await?.ok_or_else(|| { ChainStorageError::ValueNotFound { diff --git a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs 
b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs index 4688be7107..4da220ff7c 100644 --- a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs @@ -114,8 +114,8 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { target: LOG_TARGET, "Attempting to synchronize headers with `{}`", node_id ); - match self.attempt_sync(&sync_peer, peer_conn.clone()).await { - Ok(()) => return Ok(sync_peer), + match self.attempt_sync(sync_peer, peer_conn.clone()).await { + Ok(()) => return Ok(sync_peer.clone()), // Try another peer Err(err @ BlockHeaderSyncError::NotInSync) => { warn!(target: LOG_TARGET, "{}", err); From 6e46b9e3430df8d77e6f17d1fc30df654dd8c0b9 Mon Sep 17 00:00:00 2001 From: Stanimal Date: Thu, 18 Nov 2021 23:25:57 +0400 Subject: [PATCH 05/11] wip2 --- applications/tari_base_node/src/main.rs | 5 +++ .../state_machine_service/state_machine.rs | 25 +++++++++--- .../horizon_state_synchronization.rs | 39 ++++++++++++++++++- .../src/chain_storage/blockchain_database.rs | 3 +- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 7 ++-- 5 files changed, 68 insertions(+), 11 deletions(-) diff --git a/applications/tari_base_node/src/main.rs b/applications/tari_base_node/src/main.rs index e5817d1268..33d8197b9e 100644 --- a/applications/tari_base_node/src/main.rs +++ b/applications/tari_base_node/src/main.rs @@ -282,6 +282,11 @@ async fn run_node(node_config: Arc, bootstrap: ConfigBootstrap) -> ctx.run().await; + task::spawn_blocking(|| { + std::thread::sleep(Duration::from_secs(100)); + }) + .await + .unwrap(); println!("Goodbye!"); Ok(()) } diff --git a/base_layer/core/src/base_node/state_machine_service/state_machine.rs b/base_layer/core/src/base_node/state_machine_service/state_machine.rs index 3b30f342b0..35a176a4ca 100644 --- a/base_layer/core/src/base_node/state_machine_service/state_machine.rs +++ 
b/base_layer/core/src/base_node/state_machine_service/state_machine.rs @@ -36,10 +36,13 @@ use crate::{ use futures::{future, future::Either}; use log::*; use randomx_rs::RandomXFlag; -use std::{future::Future, sync::Arc}; +use std::{future::Future, sync::Arc, time::Duration}; use tari_comms::{connectivity::ConnectivityRequester, PeerManager}; use tari_shutdown::ShutdownSignal; -use tokio::sync::{broadcast, watch}; +use tokio::{ + sync::{broadcast, watch}, + time, +}; const LOG_TARGET: &str = "c::bn::base_node"; @@ -220,7 +223,7 @@ impl BaseNodeStateMachine { // Get the next `StateEvent`, returning a `UserQuit` state event if the interrupt signal is triggered let mut mdc = vec![]; log_mdc::iter(|k, v| mdc.push((k.to_owned(), v.to_owned()))); - let next_event = select_next_state_event(interrupt_signal, next_state_future).await; + let next_event = select_next_state_event(delayed(interrupt_signal), next_state_future).await; log_mdc::extend(mdc); // Publish the event on the event bus let _ = self.event_publisher.send(Arc::new(next_event.clone())); @@ -259,12 +262,24 @@ impl BaseNodeStateMachine { /// Polls both the interrupt signal and the given future. If the given future `state_fut` is ready first it's value is /// returned, otherwise if the interrupt signal is triggered, `StateEvent::UserQuit` is returned. 
-async fn select_next_state_event(interrupt_signal: ShutdownSignal, state_fut: F) -> StateEvent -where F: Future { +async fn select_next_state_event(interrupt_signal: I, state_fut: F) -> StateEvent +where + F: Future, + I: Future, +{ futures::pin_mut!(state_fut); + futures::pin_mut!(interrupt_signal); // If future A and B are both ready `future::select` will prefer A match future::select(interrupt_signal, state_fut).await { Either::Left(_) => StateEvent::UserQuit, Either::Right((state, _)) => state, } } + +async fn delayed(fut: F) -> R +where F: Future { + let ret = fut.await; + error!(target: LOG_TARGET, "SLEEEPIN",); + time::sleep(Duration::from_secs(100)).await; + ret +} diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs index 53802f00e2..690e5919f9 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs @@ -120,6 +120,19 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { }, Err(err) => { warn!(target: LOG_TARGET, "Error during sync:{}", err); + let data = self.db().inner().fetch_horizon_data()?; + error!( + target: LOG_TARGET, + "***************** kernal = {} - utxo = {} ********************* ", + data.kernel_sum().to_hex(), + data.utxo_sum().to_hex() + ); + error!( + target: LOG_TARGET, + "IN MEM: ***************** kernal = {} - utxo = {} ********************* ", + self.kernel_sum.to_hex(), + self.utxo_sum.to_hex() + ); Err(err) }, } @@ -159,12 +172,19 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { } let data = self.db().fetch_horizon_data().await?; + let dd = crate::chain_storage::HorizonData::default(); debug!( target: LOG_TARGET, 
"Loaded from horizon data utxo_sum = {}, kernel_sum = {}", data.utxo_sum().to_hex(), data.kernel_sum().to_hex(), ); + error!( + target: LOG_TARGET, + "DEFAULT: utxo_sum = {}, kernel_sum = {}", + dd.utxo_sum().to_hex(), + dd.kernel_sum().to_hex(), + ); self.utxo_sum = data.utxo_sum().clone(); self.kernel_sum = data.kernel_sum().clone(); @@ -418,6 +438,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { witness_hashes.push(output.witness_hash()); unpruned_outputs.push(output.clone()); self.utxo_sum = &self.utxo_sum + &output.commitment; + + error!(target: LOG_TARGET, "UTXO = {}", self.utxo_sum.to_hex()); txn.insert_output_via_horizon_sync( output, current_header.hash().clone(), @@ -532,6 +554,13 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { txn.set_pruned_height(metadata.pruned_height(), self.kernel_sum.clone(), self.utxo_sum.clone()); txn.commit().await?; + let data = self.db().fetch_horizon_data().await?; + error!( + target: LOG_TARGET, + "***************** kernal = {} - utxo = {} ********************* ", + data.kernel_sum().to_hex(), + data.utxo_sum().to_hex() + ); debug!( target: LOG_TARGET, "UTXO: {}, Header #{}, added {} utxos, added {} txos in {:.2?}", @@ -620,7 +649,15 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let header = self.db().fetch_chain_header(self.horizon_sync_height).await?; // TODO: Use accumulated sums - let (utxo_sum, kernel_sum) = self.calculate_commitment_sums(&header).await?; + let (calc_utxo_sum, calc_kernel_sum) = self.calculate_commitment_sums(&header).await?; + let utxo_sum = &self.utxo_sum; + let kernel_sum = &self.kernel_sum; + if *utxo_sum != calc_utxo_sum { + error!(target: LOG_TARGET, "UTXO sum isnt equal!"); + } + if *kernel_sum != calc_kernel_sum { + error!(target: LOG_TARGET, "KERNEL sum isnt equal!"); + } self.shared .sync_validators diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs 
b/base_layer/core/src/chain_storage/blockchain_database.rs index b4e5dd7c94..ca12a52043 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -2088,7 +2088,8 @@ fn prune_database_if_needed( pruning_interval, ); if metadata.pruned_height() < abs_pruning_horizon.saturating_sub(pruning_interval) { - prune_to_height(db, abs_pruning_horizon - 1)?; + debug!(target: LOG_TARGET, "GONNA PRUNNEEEEE",); + // prune_to_height(db, abs_pruning_horizon - 1)?; } Ok(()) diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 416a7e70ea..9d64885c7a 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -2205,10 +2205,9 @@ fn fetch_horizon_data(txn: &ConstTransaction<'_>, db: &Database) -> Result Ok(Some(data)), None => Ok(None), - _ => Err(ChainStorageError::ValueNotFound { - entity: "ChainMetadata", - field: "HorizonData", - value: "".to_string(), + Some(k) => Err(ChainStorageError::DataInconsistencyDetected { + function: "fetch_horizon_data", + details: format!("Received incorrect value {:?} for key horizon data", k), }), } } From 31a5b5b3d3c26cd6f9605964308eebd405a298f8 Mon Sep 17 00:00:00 2001 From: Stanimal Date: Tue, 23 Nov 2021 18:27:00 +0400 Subject: [PATCH 06/11] refactor: sync_utxos, sync_kernels and horizon sync --- applications/tari_base_node/src/main.rs | 5 - base_layer/core/src/base_node/proto/rpc.proto | 2 +- .../states/horizon_state_sync.rs | 22 +- .../states/horizon_state_sync/error.rs | 10 + .../horizon_state_synchronization.rs | 200 +++++++++--------- .../states/sync_decide.rs | 35 ++- .../core/src/base_node/sync/rpc/service.rs | 104 ++++++--- .../src/base_node/sync/rpc/sync_utxos_task.rs | 192 ++++++++++------- base_layer/core/src/chain_storage/async_db.rs | 5 +- .../src/chain_storage/blockchain_backend.rs | 7 + 
.../src/chain_storage/blockchain_database.rs | 68 +++++- .../core/src/chain_storage/db_transaction.rs | 23 +- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 165 ++++++++++----- .../tests/blockchain_database.rs | 112 ++++++++-- base_layer/core/src/lib.rs | 8 +- .../core/src/test_helpers/blockchain.rs | 8 + .../transaction_protocol/sender.rs | 16 +- comms/dht/src/config.rs | 1 + 18 files changed, 649 insertions(+), 334 deletions(-) diff --git a/applications/tari_base_node/src/main.rs b/applications/tari_base_node/src/main.rs index 33d8197b9e..e5817d1268 100644 --- a/applications/tari_base_node/src/main.rs +++ b/applications/tari_base_node/src/main.rs @@ -282,11 +282,6 @@ async fn run_node(node_config: Arc, bootstrap: ConfigBootstrap) -> ctx.run().await; - task::spawn_blocking(|| { - std::thread::sleep(Duration::from_secs(100)); - }) - .await - .unwrap(); println!("Goodbye!"); Ok(()) } diff --git a/base_layer/core/src/base_node/proto/rpc.proto b/base_layer/core/src/base_node/proto/rpc.proto index f1c4ee9d6f..ac7fb7ec9d 100644 --- a/base_layer/core/src/base_node/proto/rpc.proto +++ b/base_layer/core/src/base_node/proto/rpc.proto @@ -45,7 +45,7 @@ message FindChainSplitResponse { } message SyncKernelsRequest { - uint64 start = 1; + bytes start_header_hash = 1; bytes end_header_hash = 2; } diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs index 1e634cca80..bd1ed5a5f9 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs @@ -68,21 +68,23 @@ impl HorizonStateSync { ) -> StateEvent { let local_metadata = match shared.db.get_chain_metadata().await { Ok(metadata) => metadata, - Err(err) => return StateEvent::FatalError(err.to_string()), + Err(err) => return err.into(), }; - if local_metadata.height_of_longest_chain() > 0 && - 
local_metadata.height_of_longest_chain() >= local_metadata.pruned_height() - { + let last_header = match shared.db.fetch_last_header().await { + Ok(h) => h, + Err(err) => return err.into(), + }; + + let horizon_sync_height = local_metadata.horizon_block(last_header.height); + if local_metadata.pruned_height() >= horizon_sync_height { + info!(target: LOG_TARGET, "Horizon state was already synchronized."); return StateEvent::HorizonStateSynchronized; } - let horizon_sync_height = match shared.db.fetch_last_header().await { - Ok(header) => header.height.saturating_sub(local_metadata.pruning_horizon()), - Err(err) => return StateEvent::FatalError(err.to_string()), - }; - - if local_metadata.height_of_longest_chain() > horizon_sync_height { + // We're already synced because we have full blocks higher than our target pruned height + if local_metadata.height_of_longest_chain() >= horizon_sync_height { + info!(target: LOG_TARGET, "Horizon state was already synchronized."); return StateEvent::HorizonStateSynchronized; } diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/error.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/error.rs index cf8779d3f2..e0371fff7a 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/error.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/error.rs @@ -27,6 +27,7 @@ use tokio::task; use tari_comms::{ connectivity::ConnectivityError, + peer_manager::NodeId, protocol::rpc::{RpcError, RpcStatus}, }; use tari_mmr::error::MerkleMountainRangeError; @@ -73,6 +74,15 @@ pub enum HorizonSyncError { MerkleMountainRangeError(#[from] MerkleMountainRangeError), #[error("Connectivity error: {0}")] ConnectivityError(#[from] ConnectivityError), + #[error( + "Sync peer {peer} has a tip height of {remote_peer_height} which is less than the target height of \ + {target_pruning_horizon}" + )] + InappropriateSyncPeer { + 
peer: NodeId, + target_pruning_horizon: u64, + remote_peer_height: u64, + }, } impl From for HorizonSyncError { diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs index 4393928d22..78d80f72c4 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs @@ -20,21 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{ - convert::{TryFrom, TryInto}, - sync::Arc, -}; - -use croaring::Bitmap; -use futures::StreamExt; -use log::*; -use tari_crypto::{ - commitment::HomomorphicCommitment, - tari_utilities::{hex::Hex, Hashable}, -}; - -use tari_common_types::types::{HashDigest, RangeProofService}; -use tari_mmr::{MerkleMountainRange, MutableMmr}; +use super::error::HorizonSyncError; use crate::{ base_node::{ state_machine_service::{ @@ -45,7 +31,6 @@ use crate::{ }, blocks::{BlockHeader, ChainHeader, UpdateBlockAccumulatedData}, chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, ChainStorageError, MmrTree, PrunedOutput}, - crypto::commitment::HomomorphicCommitment, proto::base_node::{ sync_utxo as proto_sync_utxo, sync_utxos_response::UtxoOrDeleted, @@ -58,7 +43,6 @@ use crate::{ transaction_kernel::TransactionKernel, transaction_output::TransactionOutput, }, - transactions::transaction::{TransactionKernel, TransactionOutput}, }; use croaring::Bitmap; use futures::{stream::FuturesUnordered, StreamExt}; @@ -70,9 +54,11 @@ use std::{ sync::Arc, time::Instant, }; -use super::error::HorizonSyncError; use tari_common_types::types::{Commitment, 
HashDigest, RangeProofService}; -use tari_crypto::tari_utilities::{hex::Hex, Hashable}; +use tari_crypto::{ + commitment::HomomorphicCommitment, + tari_utilities::{hex::Hex, Hashable}, +}; use tari_mmr::{MerkleMountainRange, MutableMmr}; use tokio::task; @@ -111,7 +97,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { pub async fn synchronize(&mut self) -> Result<(), HorizonSyncError> { debug!( target: LOG_TARGET, - "Preparing database for horizon sync to height (#{})", self.horizon_sync_height + "Preparing database for horizon sync to height #{}", self.horizon_sync_height ); let header = self.db().fetch_header(self.horizon_sync_height).await?.ok_or_else(|| { @@ -139,19 +125,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { }, Err(err) => { warn!(target: LOG_TARGET, "Error during sync:{}", err); - let data = self.db().inner().fetch_horizon_data()?; - error!( - target: LOG_TARGET, - "***************** kernal = {} - utxo = {} ********************* ", - data.kernel_sum().to_hex(), - data.utxo_sum().to_hex() - ); - error!( - target: LOG_TARGET, - "IN MEM: ***************** kernal = {} - utxo = {} ********************* ", - self.kernel_sum.to_hex(), - self.utxo_sum.to_hex() - ); Err(err) }, } @@ -172,40 +145,61 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { } async fn initialize(&mut self) -> Result<(), HorizonSyncError> { - let local_metadata = self.db().get_chain_metadata().await?; - let remote_metadata = self.sync_peer.claimed_chain_metadata(); - - // If the target pruning horizon is greater than our current tip, prune the blockchain to our current tip - // and continue from there. This will update the horizon data accordingly. 
- let target_pruning_horizon = local_metadata.horizon_block(remote_metadata.height_of_longest_chain()); - if target_pruning_horizon >= local_metadata.height_of_longest_chain() { - info!( - target: LOG_TARGET, - "Target horizon height {} is past the current tip height {}. Pruning blockchain to tip", - target_pruning_horizon, - local_metadata.height_of_longest_chain() - ); - self.db() - .prune_to_height(local_metadata.height_of_longest_chain()) - .await?; + let db = self.db(); + let local_metadata = db.get_chain_metadata().await?; + + if local_metadata.height_of_longest_chain() == 0 { + let horizon_data = db.fetch_horizon_data().await?; + self.utxo_sum = horizon_data.utxo_sum().clone(); + self.kernel_sum = horizon_data.kernel_sum().clone(); + + return Ok(()); + } + + // let header = self.db().fetch_chain_header(self.horizon_sync_height).await?; + // let acc = db.fetch_block_accumulated_data(header.hash().clone()).await?; + let new_prune_height = cmp::min(local_metadata.height_of_longest_chain(), self.horizon_sync_height); + if local_metadata.pruned_height() < new_prune_height { + debug!(target: LOG_TARGET, "Pruning block chain to height {}", new_prune_height); + db.prune_to_height(new_prune_height).await?; } - let data = self.db().fetch_horizon_data().await?; - let dd = crate::chain_storage::HorizonData::default(); + // let (calc_utxo_sum, calc_kernel_sum) = self.calculate_commitment_sums(&header).await?; + + // prune_to_height updates the horizon data + let horizon_data = db.fetch_horizon_data().await?; + // if *horizon_data.kernel_sum() != acc.cumulative_kernel_sum { + // error!(target: LOG_TARGET, "KERNEL SUM NOT EQUAL CALCULATED"); + // } + // if *horizon_data.utxo_sum() != acc.cumulative_utxo_sum { + // error!(target: LOG_TARGET, "UTXO SUM NOT EQUAL CALCULATED"); + // } + + // let (calc_utxo_sum, calc_kernel_sum) = self.calculate_commitment_sums(&header).await?; + // if calc_kernel_sum != acc.cumulative_kernel_sum { + // error!(target: LOG_TARGET, "KERNEL SUM 
NOT EQUAL CALCULATED"); + // } + // if calc_utxo_sum != acc.cumulative_utxo_sum { + // error!(target: LOG_TARGET, "UTXO SUM NOT EQUAL CALCULATED"); + // } + + // if calc_kernel_sum != *horizon_data.kernel_sum() { + // error!(target: LOG_TARGET, "HORIZON KERNEL SUM NOT EQUAL CALCULATED"); + // } + // if calc_utxo_sum != *horizon_data.utxo_sum() { + // error!(target: LOG_TARGET, "HORIZON UTXO SUM NOT EQUAL CALCULATED"); + // } + debug!( target: LOG_TARGET, "Loaded from horizon data utxo_sum = {}, kernel_sum = {}", - data.utxo_sum().to_hex(), - data.kernel_sum().to_hex(), - ); - error!( - target: LOG_TARGET, - "DEFAULT: utxo_sum = {}, kernel_sum = {}", - dd.utxo_sum().to_hex(), - dd.kernel_sum().to_hex(), + horizon_data.utxo_sum().to_hex(), + horizon_data.kernel_sum().to_hex(), ); - self.utxo_sum = data.utxo_sum().clone(); - self.kernel_sum = data.kernel_sum().clone(); + // self.utxo_sum = calc_utxo_sum; + // self.kernel_sum = calc_kernel_sum; + self.utxo_sum = horizon_data.utxo_sum().clone(); + self.kernel_sum = horizon_data.kernel_sum().clone(); Ok(()) } @@ -248,42 +242,44 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { latency.unwrap_or_default().as_millis() ); - let start = local_num_kernels; - let end = remote_num_kernels; - let end_hash = to_header.hash(); - + let mut current_header = self + .db() + .fetch_header_containing_kernel_mmr(local_num_kernels + 1) + .await?; let req = SyncKernelsRequest { - start, - end_header_hash: end_hash, + start_header_hash: current_header.hash().clone(), + end_header_hash: to_header.hash(), }; let mut kernel_stream = client.sync_kernels(req).await?; - let mut current_header = self.db().fetch_header_containing_kernel_mmr(start + 1).await?; debug!( target: LOG_TARGET, "Found header for kernels at mmr pos: {} height: {}", - start, + local_num_kernels, current_header.height() ); - let mut kernels = vec![]; + let mut kernel_hashes = vec![]; let db = self.db().clone(); let mut txn = 
db.write_transaction(); - let mut mmr_position = start; + let mut mmr_position = local_num_kernels; + let end = remote_num_kernels; while let Some(kernel) = kernel_stream.next().await { let kernel: TransactionKernel = kernel?.try_into().map_err(HorizonSyncError::ConversionError)?; kernel .verify_signature() .map_err(HorizonSyncError::InvalidKernelSignature)?; - kernels.push(kernel.clone()); + kernel_hashes.push(kernel.hash()); self.kernel_sum = &self.kernel_sum + &kernel.excess; + txn.insert_kernel_via_horizon_sync(kernel, current_header.hash().clone(), mmr_position as u32); if mmr_position == current_header.header().kernel_mmr_size - 1 { + let num_kernels = kernel_hashes.len(); debug!( target: LOG_TARGET, "Header #{} ({} kernels)", current_header.height(), - kernels.len() + num_kernels, ); // Validate root let block_data = db @@ -292,8 +288,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let kernel_pruned_set = block_data.dissolve().0; let mut kernel_mmr = MerkleMountainRange::::new(kernel_pruned_set); - for kernel in kernels.drain(..) { - kernel_mmr.push(kernel.hash())?; + for hash in kernel_hashes.drain(..) 
{ + kernel_mmr.push(hash)?; } let mmr_root = kernel_mmr.get_merkle_root()?; @@ -320,10 +316,18 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { ..Default::default() }, ); + debug!(target: LOG_TARGET, "Setting kernel sum = {}", self.kernel_sum.to_hex()); txn.set_pruned_height(metadata.pruned_height(), self.kernel_sum.clone(), self.utxo_sum.clone()); txn.commit().await?; - + debug!( + target: LOG_TARGET, + "Committed {} kernel(s), ({}/{}) {} remaining", + num_kernels, + mmr_position + 1, + end, + end - (mmr_position + 1) + ); if mmr_position < end - 1 { current_header = db.fetch_chain_header(current_header.height() + 1).await?; } @@ -396,9 +400,10 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { include_deleted_bitmaps: true, include_pruned_utxos: true, }; - let mut output_stream = client.sync_utxos(req).await?; let mut current_header = self.db().fetch_header_containing_utxo_mmr(start + 1).await?; + let mut output_stream = client.sync_utxos(req).await?; + debug!( target: LOG_TARGET, "Found header for utxos at mmr pos: {} - {} height: {}", @@ -458,7 +463,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { unpruned_outputs.push(output.clone()); self.utxo_sum = &self.utxo_sum + &output.commitment; - error!(target: LOG_TARGET, "UTXO = {}", self.utxo_sum.to_hex()); txn.insert_output_via_horizon_sync( output, current_header.hash().clone(), @@ -555,7 +559,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { }); } - self.validate_rangeproofs(mem::take(&mut unpruned_outputs)).await?; + // self.validate_rangeproofs(mem::take(&mut unpruned_outputs)).await?; txn.update_deleted_bitmap(diff_bitmap.clone()); @@ -576,14 +580,14 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let data = self.db().fetch_horizon_data().await?; error!( target: LOG_TARGET, - "***************** kernal = {} - utxo = {} ********************* ", - 
data.kernel_sum().to_hex(), - data.utxo_sum().to_hex() + "***************** utxo = {} ********************* ", + data.utxo_sum().to_hex(), ); debug!( target: LOG_TARGET, - "UTXO: {}, Header #{}, added {} utxos, added {} txos in {:.2?}", + "UTXO: {}/{}, Header #{}, added {} utxos, added {} txos in {:.2?}", mmr_position, + end, current_header.height(), height_utxo_counter, height_txo_counter, @@ -668,24 +672,24 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let header = self.db().fetch_chain_header(self.horizon_sync_height).await?; // TODO: Use accumulated sums - let (calc_utxo_sum, calc_kernel_sum) = self.calculate_commitment_sums(&header).await?; - let utxo_sum = &self.utxo_sum; - let kernel_sum = &self.kernel_sum; - if *utxo_sum != calc_utxo_sum { - error!(target: LOG_TARGET, "UTXO sum isnt equal!"); - } - if *kernel_sum != calc_kernel_sum { - error!(target: LOG_TARGET, "KERNEL sum isnt equal!"); - } + // let (calc_utxo_sum, calc_kernel_sum) = self.calculate_commitment_sums(&header).await?; + // let utxo_sum = &self.utxo_sum; + // let kernel_sum = &self.kernel_sum; + // if *utxo_sum != calc_utxo_sum { + // error!(target: LOG_TARGET, "UTXO sum isnt equal!"); + // } + // if *kernel_sum != calc_kernel_sum { + // error!(target: LOG_TARGET, "KERNEL sum isnt equal!"); + // } self.shared .sync_validators .final_horizon_state .validate( &*self.db().inner().db_read_access()?, - header.height(), - &utxo_sum, - &kernel_sum, + header.height() - 1, + &self.utxo_sum, + &self.kernel_sum, ) .map_err(HorizonSyncError::FinalStateValidationFailed)?; @@ -754,8 +758,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { .await?; let mut utxo_sum = HomomorphicCommitment::default(); - debug!(target: LOG_TARGET, "Number of kernels returned: {}", kernels.len()); - debug!(target: LOG_TARGET, "Number of utxos returned: {}", utxos.len()); + trace!(target: LOG_TARGET, "Number of kernels returned: {}", kernels.len()); + 
trace!(target: LOG_TARGET, "Number of utxos returned: {}", utxos.len()); let mut prune_counter = 0; for u in utxos { match u { @@ -768,7 +772,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { } } if prune_counter > 0 { - debug!(target: LOG_TARGET, "Pruned {} outputs", prune_counter); + trace!(target: LOG_TARGET, "Pruned {} outputs", prune_counter); } prev_mmr = curr_header.header().output_mmr_size; diff --git a/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs b/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs index fb7c85673b..6f50f748a9 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs @@ -56,17 +56,28 @@ impl DecideNextSync { ); if shared.config.pruning_horizon > 0 { - // Filter sync peers that claim to be able to provide full blocks up until our pruned height + let last_header = match shared.db.fetch_last_header().await { + Ok(h) => h, + Err(err) => return err.into(), + }; + + let horizon_sync_height = local_metadata.horizon_block(last_header.height); + // Filter sync peers that claim to be able to provide blocks up until our pruned height let sync_peers_iter = self.sync_peers.iter().filter(|sync_peer| { - let chain_metadata = sync_peer.claimed_chain_metadata(); - let our_pruned_height_from_peer = - local_metadata.horizon_block(chain_metadata.height_of_longest_chain()); - let their_pruned_height = chain_metadata.pruned_height(); - our_pruned_height_from_peer >= their_pruned_height + let remote_metadata = sync_peer.claimed_chain_metadata(); + remote_metadata.height_of_longest_chain() >= horizon_sync_height }); match find_best_latency(sync_peers_iter) { - Some(sync_peer) => ProceedToHorizonSync(sync_peer), + Some(sync_peer) => { + debug!( + target: LOG_TARGET, + "Proceeding to horizon sync with sync peer {} with a latency of {:.2?}", + sync_peer.node_id(), + 
sync_peer.latency() + ); + ProceedToHorizonSync(sync_peer) + }, None => Continue, } } else { @@ -76,7 +87,15 @@ impl DecideNextSync { }); match find_best_latency(sync_peers_iter) { - Some(sync_peer) => ProceedToBlockSync(sync_peer), + Some(sync_peer) => { + debug!( + target: LOG_TARGET, + "Proceeding to block sync with sync peer {} with a latency of {:.2?}", + sync_peer.node_id(), + sync_peer.latency() + ); + ProceedToBlockSync(sync_peer) + }, None => Continue, } } diff --git a/base_layer/core/src/base_node/sync/rpc/service.rs b/base_layer/core/src/base_node/sync/rpc/service.rs index 9e20df058a..d40bec4b48 100644 --- a/base_layer/core/src/base_node/sync/rpc/service.rs +++ b/base_layer/core/src/base_node/sync/rpc/service.rs @@ -22,7 +22,7 @@ use crate::{ base_node::sync::rpc::{sync_utxos_task::SyncUtxosTask, BaseNodeSyncService}, - chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, OrNotFound}, + chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend}, iterators::NonOverlappingIntegerPairIter, proto, proto::base_node::{ @@ -34,6 +34,7 @@ use crate::{ SyncUtxosRequest, SyncUtxosResponse, }, + tari_utilities::Hashable, }; use log::*; use std::{ @@ -387,47 +388,57 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ request: Request, ) -> Result, RpcStatus> { let req = request.into_message(); - const BATCH_SIZE: usize = 1000; - let (tx, rx) = mpsc::channel(BATCH_SIZE); + let (tx, rx) = mpsc::channel(100); let db = self.db(); - task::spawn(async move { - let end = match db - .fetch_chain_header_by_block_hash(req.end_header_hash.clone()) - .await - .or_not_found("BlockHeader", "hash", req.end_header_hash.to_hex()) - .map_err(RpcStatus::log_internal_error(LOG_TARGET)) - { - Ok(header) => { - if header.header().kernel_mmr_size < req.start { - let _ = tx - .send(Err(RpcStatus::bad_request("Start mmr position after requested header"))) - .await; - return; - } + let start_header = db + .fetch_header_by_block_hash(req.start_header_hash.clone()) + .await + 
.map_err(RpcStatus::log_internal_error(LOG_TARGET))? + .ok_or_else(|| RpcStatus::not_found("Unknown start header"))?; + + let end_header = db + .fetch_header_by_block_hash(req.end_header_hash.clone()) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))? + .ok_or_else(|| RpcStatus::not_found("Unknown end header"))?; + + let mut current_height = start_header.height; + let end_height = end_header.height; + let mut current_mmr_position = start_header.kernel_mmr_size; + let mut current_header_hash = start_header.hash(); + + if current_height > end_height { + return Err(RpcStatus::bad_request("start header height is after end header")); + } - header.header().kernel_mmr_size - }, - Err(err) => { - let _ = tx.send(Err(err)).await; - return; - }, - }; - let iter = NonOverlappingIntegerPairIter::new(req.start, end, BATCH_SIZE); - for (start, end) in iter { + task::spawn(async move { + while current_height <= end_height { if tx.is_closed() { break; } - debug!(target: LOG_TARGET, "Streaming kernels {} to {}", start, end); let res = db - .fetch_kernels_by_mmr_position(start, end) + .fetch_kernels_in_block(current_header_hash.clone()) .await .map_err(RpcStatus::log_internal_error(LOG_TARGET)); match res { Ok(kernels) if kernels.is_empty() => { + let _ = tx + .send(Err(RpcStatus::general(format!( + "No kernels in block {}", + current_header_hash.to_hex() + )))) + .await; break; }, Ok(kernels) => { + debug!( + target: LOG_TARGET, + "Streaming kernels {} to {}", + current_mmr_position, + current_mmr_position + kernels.len() as u64 + ); + current_mmr_position += kernels.len() as u64; let kernels = kernels.into_iter().map(proto::types::TransactionKernel::from).map(Ok); // Ensure task stops if the peer prematurely stops their RPC session if utils::mpsc::send_all(&tx, kernels).await.is_err() { @@ -439,6 +450,36 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ break; }, } + + current_height += 1; + + if current_height <= end_height { + let res = db + 
.fetch_header(current_height) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET)); + match res { + Ok(Some(header)) => { + current_header_hash = header.hash(); + }, + Ok(None) => { + let _ = tx + .send(Err(RpcStatus::not_found(format!( + "Could not find header #{} while streaming UTXOs after position {}", + current_height, current_mmr_position + )))) + .await; + break; + }, + Err(err) => { + error!(target: LOG_TARGET, "DB error while streaming kernels: {}", err); + let _ = tx + .send(Err(RpcStatus::general("DB error while streaming kernels"))) + .await; + break; + }, + } + } } }); Ok(Streaming::new(rx)) @@ -450,15 +491,18 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ let peer = request.context().peer_node_id(); debug!( target: LOG_TARGET, - "Received sync_utxos request from {} (start = {}, include_pruned_utxos = {}, include_deleted_bitmaps = {})", + "Received sync_utxos request from header {} to {} (start = {}, include_pruned_utxos = {}, \ + include_deleted_bitmaps = {})", peer, req.start, + req.end_header_hash.to_hex(), req.include_pruned_utxos, req.include_deleted_bitmaps ); let (tx, rx) = mpsc::channel(200); - task::spawn(SyncUtxosTask::new(self.db(), request.into_message()).run(tx)); + let task = SyncUtxosTask::new(self.db()); + task.run(request.into_message(), tx).await?; Ok(Streaming::new(rx)) } diff --git a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs index 343be2425b..9a6a59f612 100644 --- a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs +++ b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs @@ -21,137 +21,171 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use crate::{ + blocks::BlockHeader, chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend}, proto, proto::base_node::{SyncUtxo, SyncUtxosRequest, SyncUtxosResponse}, }; +use croaring::Bitmap; use log::*; use std::{cmp, sync::Arc, time::Instant}; use tari_comms::{protocol::rpc::RpcStatus, utils}; use tari_crypto::tari_utilities::{hex::Hex, Hashable}; -use tokio::sync::mpsc; +use tokio::{sync::mpsc, task}; const LOG_TARGET: &str = "c::base_node::sync_rpc::sync_utxo_task"; pub(crate) struct SyncUtxosTask { db: AsyncBlockchainDb, - request: SyncUtxosRequest, } impl SyncUtxosTask where B: BlockchainBackend + 'static { - pub(crate) fn new(db: AsyncBlockchainDb, request: SyncUtxosRequest) -> Self { - Self { db, request } + pub(crate) fn new(db: AsyncBlockchainDb) -> Self { + Self { db } } - pub(crate) async fn run(self, mut tx: mpsc::Sender>) { - if let Err(err) = self.start_streaming(&mut tx).await { - let _ = tx.send(Err(err)).await; - } - } - - async fn start_streaming( - &self, - tx: &mut mpsc::Sender>, + pub(crate) async fn run( + self, + request: SyncUtxosRequest, + mut tx: mpsc::Sender>, ) -> Result<(), RpcStatus> { + let start_header = self + .db + .fetch_header_containing_utxo_mmr(request.start + 1) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))?; + let end_header = self .db - .fetch_header_by_block_hash(self.request.end_header_hash.clone()) + .fetch_header_by_block_hash(request.end_header_hash.clone()) .await .map_err(RpcStatus::log_internal_error(LOG_TARGET))? 
- .ok_or_else(|| { - RpcStatus::not_found(format!( - "End header hash {} is was not found", - self.request.end_header_hash.to_hex() - )) - })?; + .ok_or_else(|| RpcStatus::not_found("End header hash is was not found"))?; - if self.request.start > end_header.output_mmr_size - 1 { + if start_header.height() > end_header.height { return Err(RpcStatus::bad_request(format!( - "start index {} cannot be greater than the end header's output MMR size ({})", - self.request.start, end_header.output_mmr_size + "start header height {} cannot be greater than the end header height ({})", + start_header.height(), + end_header.height ))); } - let prev_header = self - .db - .fetch_header_containing_utxo_mmr(self.request.start) - .await - .map_err(RpcStatus::log_internal_error(LOG_TARGET))?; - let (mut prev_header, _) = prev_header.into_parts(); + let (skip_outputs, prev_utxo_mmr_size) = if start_header.height() == 0 { + (request.start, 0) + } else { + let prev_header = self + .db + .fetch_header_by_block_hash(start_header.header().prev_hash.clone()) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))? 
+ .ok_or_else(|| RpcStatus::not_found("Previous start header hash is was not found"))?; - if prev_header.height > end_header.height { - return Err(RpcStatus::bad_request("start index is greater than end index")); - } - // we need to construct a temp bitmap for the height the client requested + let skip = request.start.checked_sub(prev_header.output_mmr_size) + // This is a data inconsistency because fetch_header_containing_utxo_mmr returned the header we are basing this on + .ok_or_else(|| RpcStatus::general(format!("Data inconsistency: output mmr size of header at {} was more than the start index {}", prev_header.height, request.start)))?; + (skip, prev_header.output_mmr_size) + }; + + // we need to fetch the spent bitmap for the height the client requested let bitmap = self .db .fetch_complete_deleted_bitmap_at(end_header.hash()) .await - .map_err(|_| RpcStatus::not_found("Could not get tip deleted bitmap"))? + .map_err(|_| { + RpcStatus::general(format!( + "Could not get tip deleted bitmap at hash {}", + end_header.hash().to_hex() + )) + })? .into_bitmap(); - let bitmap = Arc::new(bitmap); - loop { - let timer = Instant::now(); - if prev_header.height == end_header.height { - break; - } - let current_header = self - .db - .fetch_header(prev_header.height + 1) + let include_pruned_utxos = request.include_pruned_utxos; + let include_deleted_bitmaps = request.include_deleted_bitmaps; + task::spawn(async move { + if let Err(err) = self + .start_streaming( + &mut tx, + start_header.into_header(), + skip_outputs, + prev_utxo_mmr_size, + end_header, + bitmap, + include_pruned_utxos, + include_deleted_bitmaps, + ) .await - .map_err(RpcStatus::log_internal_error(LOG_TARGET))? 
- .ok_or_else(|| { - RpcStatus::general(format!( - "Potential data consistency issue: header {} not found", - prev_header.height + 1 - )) - })?; + { + let _ = tx.send(Err(err)).await; + } + }); + + Ok(()) + } + + async fn start_streaming( + &self, + tx: &mut mpsc::Sender>, + mut current_header: BlockHeader, + mut skip_outputs: u64, + mut prev_utxo_mmr_size: u64, + end_header: BlockHeader, + bitmap: Arc, + include_pruned_utxos: bool, + include_deleted_bitmaps: bool, + ) -> Result<(), RpcStatus> { + debug!( + target: LOG_TARGET, + "Starting stream task with current_header: {}, skip_outputs: {}, prev_utxo_mmr_size: {}, end_header: {}, \ + include_pruned_utxos: {:?}, include_deleted_bitmaps: {:?}", + current_header.hash().to_hex(), + skip_outputs, + prev_utxo_mmr_size, + end_header.hash().to_hex(), + include_pruned_utxos, + include_deleted_bitmaps + ); + while current_header.height <= end_header.height { + let timer = Instant::now(); + let current_header_hash = current_header.hash(); debug!( target: LOG_TARGET, - "previous header = {} ({}) current header = {} ({})", - prev_header.height, - prev_header.hash().to_hex(), + "current header = {} ({})", current_header.height, - current_header.hash().to_hex() + current_header_hash.to_hex() ); - let start = cmp::max(self.request.start, prev_header.output_mmr_size); - let end = current_header.output_mmr_size - 1; + let start = prev_utxo_mmr_size + skip_outputs; + let end = current_header.output_mmr_size; if tx.is_closed() { debug!(target: LOG_TARGET, "Exiting sync_utxos early because client has gone",); break; } - debug!( - target: LOG_TARGET, - "Streaming UTXOs {}-{} ({}) for block #{}", - start, - end, - end.saturating_sub(start).saturating_add(1), - current_header.height - ); let (utxos, deleted_diff) = self .db - .fetch_utxos_by_mmr_position(start, end, bitmap.clone()) + .fetch_utxos_in_block(current_header.hash(), bitmap.clone()) .await .map_err(RpcStatus::log_internal_error(LOG_TARGET))?; - trace!( + debug!( target: 
LOG_TARGET, - "Loaded {} UTXO(s) and |deleted_diff| = {}", + "Streaming UTXO(s) {}-{} ({}) for block #{}. Deleted diff len = {}", + start, + end, utxos.len(), + current_header.height, deleted_diff.cardinality(), ); let utxos = utxos .into_iter() .enumerate() + .skip(skip_outputs as usize) // Only include pruned UTXOs if include_pruned_utxos is true - .filter(|(_, utxo)| self.request.include_pruned_utxos || !utxo.is_pruned()) + .filter(|(_, utxo)| include_pruned_utxos || !utxo.is_pruned()) .map(|(i, utxo)| { SyncUtxosResponse { utxo_or_deleted: Some(proto::base_node::sync_utxos_response::UtxoOrDeleted::Utxo( @@ -167,7 +201,10 @@ where B: BlockchainBackend + 'static break; } - if self.request.include_deleted_bitmaps { + // We only want to skip the first block UTXOs + skip_outputs = 0; + + if include_deleted_bitmaps { let bitmaps = SyncUtxosResponse { utxo_or_deleted: Some(proto::base_node::sync_utxos_response::UtxoOrDeleted::DeletedDiff( deleted_diff.serialize(), @@ -187,14 +224,25 @@ where B: BlockchainBackend + 'static timer.elapsed() ); - prev_header = current_header; + prev_utxo_mmr_size = current_header.output_mmr_size; + current_header = self + .db + .fetch_header(current_header.height + 1) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))? 
+ .ok_or_else(|| { + RpcStatus::general(format!( + "Potential data consistency issue: header {} not found", + current_header.height + 1 + )) + })?; } debug!( target: LOG_TARGET, "UTXO sync completed to UTXO {} (Header hash = {})", - prev_header.output_mmr_size, - prev_header.hash().to_hex() + current_header.output_mmr_size, + current_header.hash().to_hex() ); Ok(()) diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index 0b3fed3c9d..f4f2521149 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -30,7 +30,6 @@ use tari_common_types::{ chain_metadata::ChainMetadata, types::{BlockHash, Commitment, HashOutput, Signature}, }; -use tari_mmr::pruned_hashset::PrunedHashSet; use crate::{ blocks::{ @@ -167,6 +166,8 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_utxos_and_mined_info(hashes: Vec) -> Vec>, "fetch_utxos_and_mined_info"); + make_async_fn!(fetch_utxos_in_block(hash: HashOutput, deleted: Arc) -> (Vec, Bitmap), "fetch_utxos_in_block"); + make_async_fn!(fetch_utxos_by_mmr_position(start: u64, end: u64, deleted: Arc) -> (Vec, Bitmap), "fetch_utxos_by_mmr_position"); //---------------------------------- Kernel --------------------------------------------// @@ -174,6 +175,8 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_kernels_by_mmr_position(start: u64, end: u64) -> Vec, "fetch_kernels_by_mmr_position"); + make_async_fn!(fetch_kernels_in_block(hash: HashOutput) -> Vec, "fetch_kernels_in_block"); + //---------------------------------- MMR --------------------------------------------// make_async_fn!(prepare_new_block(template: NewBlockTemplate) -> Block, "prepare_new_block"); diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index 17595aae6c..c4fa2920f9 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ 
b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -103,6 +103,13 @@ pub trait BlockchainBackend: Send + Sync { /// Fetch kernels by MMR position fn fetch_kernels_by_mmr_position(&self, start: u64, end: u64) -> Result, ChainStorageError>; + /// Fetch all UTXOs and spends in the block + fn fetch_utxos_in_block( + &self, + header_hash: &HashOutput, + deleted: &Bitmap, + ) -> Result<(Vec, Bitmap), ChainStorageError>; + fn fetch_utxos_by_mmr_position( &self, start: u64, diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index fc39bab314..4ceba06227 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -15,7 +15,7 @@ use tari_crypto::tari_utilities::{hex::Hex, ByteArray, Hashable}; use tari_common_types::{ chain_metadata::ChainMetadata, - types::{BlockHash, Commitment, HashDigest, HashOutput, Signature}, + types::{BlockHash, Commitment, HashDigest, HashOutput, Signature, BLOCK_HASH_LENGTH}, }; use tari_mmr::{pruned_hashset::PrunedHashSet, MerkleMountainRange, MutableMmr}; @@ -384,6 +384,11 @@ where B: BlockchainBackend db.fetch_kernel_by_excess_sig(&excess_sig) } + pub fn fetch_kernels_in_block(&self, hash: HashOutput) -> Result, ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_kernels_in_block(&hash) + } + pub fn fetch_kernels_by_mmr_position( &self, start: u64, @@ -393,6 +398,15 @@ where B: BlockchainBackend db.fetch_kernels_by_mmr_position(start, end) } + pub fn fetch_utxos_in_block( + &self, + hash: HashOutput, + deleted: Arc, + ) -> Result<(Vec, Bitmap), ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_utxos_in_block(&hash, &deleted) + } + pub fn fetch_utxos_by_mmr_position( &self, start: u64, @@ -2092,7 +2106,7 @@ fn prune_database_if_needed( ); if metadata.pruned_height() < abs_pruning_horizon.saturating_sub(pruning_interval) { debug!(target: LOG_TARGET, "GONNA 
PRUNNEEEEE",); - // prune_to_height(db, abs_pruning_horizon - 1)?; + // prune_to_height(db, abs_pruning_horizon)?; } Ok(()) @@ -2107,7 +2121,28 @@ fn prune_to_height(db: &mut T, target_horizon_height: u64) arg: "target_horizon_height", message: format!( "Target pruning horizon {} is less than current pruning horizon {}", - target_horizon_height, last_pruned + target_horizon_height, + last_pruned + 1 + ), + }); + } + + if target_horizon_height == last_pruned { + info!( + target: LOG_TARGET, + "Blockchain already pruned to height {}", target_horizon_height + ); + return Ok(()); + } + + if metadata.height_of_longest_chain() > 0 && target_horizon_height > metadata.height_of_longest_chain() { + return Err(ChainStorageError::InvalidArguments { + func: "prune_to_block", + arg: "target_horizon_height", + message: format!( + "Target pruning horizon {} is less than current block height {}", + target_horizon_height, + metadata.height_of_longest_chain() ), }); } @@ -2121,8 +2156,9 @@ fn prune_to_height(db: &mut T, target_horizon_height: u64) "height", last_pruned.to_string(), )?; + let mut block_before_last = None; let mut txn = DbTransaction::new(); - for block_to_prune in (last_pruned + 1)..=target_horizon_height { + for block_to_prune in (last_pruned + 1)..target_horizon_height { let header = db.fetch_chain_header_by_height(block_to_prune)?; let curr_block = db.fetch_block_accumulated_data_by_height(block_to_prune).or_not_found( "BlockAccumulatedData", @@ -2132,18 +2168,30 @@ fn prune_to_height(db: &mut T, target_horizon_height: u64) // Note, this could actually be done in one step instead of each block, since deleted is // accumulated let output_mmr_positions = curr_block.deleted() - last_block.deleted(); + block_before_last = Some(last_block); last_block = curr_block; txn.prune_outputs_at_positions(output_mmr_positions.to_vec()); txn.delete_all_inputs_in_block(header.hash().clone()); } - txn.set_pruned_height( - target_horizon_height, - 
last_block.cumulative_kernel_sum().clone(), - last_block.cumulative_utxo_sum().clone(), - ); - // TODO: prune block accumulated data + if let Some(block) = block_before_last { + txn.set_pruned_height( + target_horizon_height, + block.cumulative_kernel_sum().clone(), + block.cumulative_utxo_sum().clone(), + ); + } + // If we prune to the tip, we cannot provide any full blocks + if metadata.height_of_longest_chain() == target_horizon_height { + let genesis = db.fetch_chain_header_by_height(0)?; + txn.set_best_block( + 0, + genesis.hash().clone(), + genesis.accumulated_data().total_accumulated_difficulty, + vec![0; BLOCK_HASH_LENGTH], + ); + } db.write(txn)?; Ok(()) diff --git a/base_layer/core/src/chain_storage/db_transaction.rs b/base_layer/core/src/chain_storage/db_transaction.rs index 929b8fd917..c86dc21d77 100644 --- a/base_layer/core/src/chain_storage/db_transaction.rs +++ b/base_layer/core/src/chain_storage/db_transaction.rs @@ -1,18 +1,3 @@ -use std::{ - fmt, - fmt::{Display, Error, Formatter}, - sync::Arc, -}; - -use croaring::Bitmap; -use tari_crypto::tari_utilities::{ - hex::{to_hex, Hex}, - Hashable, -}; - -use tari_common_types::types::{BlockHash, Commitment, HashOutput}; -use tari_mmr::pruned_hashset::PrunedHashSet; - // Copyright 2019. The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the @@ -34,6 +19,7 @@ use tari_mmr::pruned_hashset::PrunedHashSet; // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ use crate::{ blocks::{Block, BlockHeader, BlockHeaderAccumulatedData, ChainBlock, ChainHeader, UpdateBlockAccumulatedData}, chain_storage::error::ChainStorageError, @@ -42,6 +28,13 @@ use crate::{ transaction_output::TransactionOutput, }, }; +use std::{ + fmt, + fmt::{Display, Error, Formatter}, + sync::Arc, +}; + +use croaring::Bitmap; use tari_common_types::types::{BlockHash, Commitment, HashOutput}; use tari_crypto::tari_utilities::{ hex::{to_hex, Hex}, diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 9a2f2d2856..a231eb87ea 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -37,7 +37,7 @@ use tari_common_types::{ chain_metadata::ChainMetadata, types::{BlockHash, Commitment, HashDigest, HashOutput, Signature, BLOCK_HASH_LENGTH}, }; -use tari_mmr::{pruned_hashset::PrunedHashSet, Hash, MerkleMountainRange, MutableMmr}; +use tari_mmr::{Hash, MerkleMountainRange, MutableMmr}; use tari_storage::lmdb_store::{db, LMDBBuilder, LMDBConfig, LMDBStore}; use crate::{ @@ -1596,12 +1596,11 @@ impl BlockchainBackend for LMDBDatabase { fn fetch_kernels_in_block(&self, header_hash: &HashOutput) -> Result, ChainStorageError> { let txn = self.read_transaction()?; - Ok( - lmdb_fetch_keys_starting_with(header_hash.to_hex().as_str(), &txn, &self.kernels_db)? - .into_iter() - .map(|f: TransactionKernelRowData| f.kernel) - .collect(), - ) + let kernels = lmdb_fetch_keys_starting_with(header_hash.to_hex().as_str(), &txn, &self.kernels_db)? 
+ .into_iter() + .map(|f: TransactionKernelRowData| f.kernel) + .collect(); + Ok(kernels) } fn fetch_kernel_by_excess( @@ -1641,62 +1640,116 @@ impl BlockchainBackend for LMDBDatabase { fn fetch_kernels_by_mmr_position(&self, start: u64, end: u64) -> Result, ChainStorageError> { let txn = self.read_transaction()?; - if let Some(start_height) = lmdb_first_after(&txn, &self.kernel_mmr_size_index, &(start + 1).to_be_bytes())? { - let end_height: u64 = - lmdb_first_after(&txn, &self.kernel_mmr_size_index, &(end + 1).to_be_bytes())?.unwrap_or(start_height); - let previous_mmr_count = if start_height == 0 { - 0 - } else { - let header: BlockHeader = - lmdb_get(&txn, &self.headers_db, &(start_height - 1))?.expect("Header should exist"); - debug!(target: LOG_TARGET, "Previous header:{}", header); - header.kernel_mmr_size - }; + let start_height = match lmdb_first_after(&txn, &self.kernel_mmr_size_index, &(start + 1).to_be_bytes())? { + Some(h) => h, + None => return Ok(vec![]), + }; + let end_height: u64 = + lmdb_first_after(&txn, &self.kernel_mmr_size_index, &(end + 1).to_be_bytes())?.unwrap_or(start_height); - let total_size = (end - start) as usize + 1; - let mut result = Vec::with_capacity(total_size); + let previous_mmr_count = if start_height == 0 { + 0 + } else { + let header: BlockHeader = + lmdb_get(&txn, &self.headers_db, &(start_height - 1))?.expect("Header should exist"); + debug!(target: LOG_TARGET, "Previous header:{}", header); + header.kernel_mmr_size + }; - let mut skip_amount = (start - previous_mmr_count) as usize; - debug!( - target: LOG_TARGET, - "Fetching kernels by MMR position. 
Start {}, end {}, in headers at height {}-{}, prev mmr count: {}, \ - skipping the first:{}", - start, - end, - start_height, - end_height, - previous_mmr_count, - skip_amount + let total_size = (end - start) as usize + 1; + let mut result = Vec::with_capacity(total_size); + + let mut skip_amount = (start - previous_mmr_count) as usize; + debug!( + target: LOG_TARGET, + "Fetching kernels by MMR position. Start {}, end {}, in headers at height {}-{}, prev mmr count: {}, \ + skipping the first:{}", + start, + end, + start_height, + end_height, + previous_mmr_count, + skip_amount + ); + + for height in start_height..=end_height { + let acc_data = lmdb_get::<_, BlockHeaderAccumulatedData>(&txn, &self.header_accumulated_data_db, &height)? + .ok_or_else(|| ChainStorageError::ValueNotFound { + entity: "BlockHeader", + field: "height", + value: height.to_string(), + })?; + + result.extend( + lmdb_fetch_keys_starting_with::( + acc_data.hash.to_hex().as_str(), + &txn, + &self.kernels_db, + )? + .into_iter() + .skip(skip_amount) + .take(total_size - result.len()) + .map(|f| f.kernel), ); - for height in start_height..=end_height { - let hash = lmdb_get::<_, BlockHeaderAccumulatedData>(&txn, &self.header_accumulated_data_db, &height)? - .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeader", - field: "height", - value: height.to_string(), - })? - .hash; - - result.extend( - lmdb_fetch_keys_starting_with::( - hash.to_hex().as_str(), - &txn, - &self.kernels_db, - )? - .into_iter() - .skip(skip_amount) - .take(total_size - result.len()) - .map(|f| f.kernel), - ); + skip_amount = 0; + } + Ok(result) + } - skip_amount = 0; + fn fetch_utxos_in_block( + &self, + header_hash: &HashOutput, + deleted: &Bitmap, + ) -> Result<(Vec, Bitmap), ChainStorageError> { + let txn = self.read_transaction()?; + + let height = + self.fetch_height_from_hash(&txn, header_hash)? 
+ .ok_or_else(|| ChainStorageError::ValueNotFound { + entity: "BlockHeader", + field: "hash", + value: header_hash.to_hex(), + })?; + + let utxos = lmdb_fetch_keys_starting_with::( + header_hash.to_hex().as_str(), + &txn, + &self.utxos_db, + )? + .into_iter() + .map(|row| { + if deleted.contains(row.mmr_position) { + return PrunedOutput::Pruned { + output_hash: row.hash, + witness_hash: row.witness_hash, + }; } - Ok(result) - } else { - Ok(vec![]) - } + if let Some(output) = row.output { + PrunedOutput::NotPruned { output } + } else { + PrunedOutput::Pruned { + output_hash: row.hash, + witness_hash: row.witness_hash, + } + } + }) + .collect(); + + // Builds a BitMap of the deleted UTXO MMR indexes that occurred at the current height + let acc_data = + self.fetch_block_accumulated_data(&txn, height)? + .ok_or_else(|| ChainStorageError::ValueNotFound { + entity: "BlockAccumulatedData", + field: "height", + value: height.to_string(), + })?; + + let mut difference_bitmap = Bitmap::create(); + difference_bitmap.or_inplace(acc_data.deleted()); + + Ok((utxos, difference_bitmap)) } fn fetch_utxos_by_mmr_position( diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index 4bcd6099ec..bcaf0040dd 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -20,15 +20,11 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::sync::Arc; - -use tari_common::configuration::Network; -use tari_test_utils::unpack_enum; - use crate::{ blocks::{Block, BlockHeader, NewBlockTemplate}, chain_storage::{BlockchainDatabase, ChainStorageError}, consensus::ConsensusManager, + crypto::tari_utilities::hex::Hex, proof_of_work::Difficulty, tari_utilities::Hashable, test_helpers::{ @@ -38,9 +34,18 @@ use crate::{ }, transactions::{ tari_amount::T, - transaction_entities::{transaction::Transaction, unblinded_output::UnblindedOutput}, + test_helpers::{schema_to_transaction, TransactionSchema}, + transaction_entities::{ + output_features::OutputFeatures, + transaction::Transaction, + unblinded_output::UnblindedOutput, + }, }, + txn_schema, }; +use std::sync::Arc; +use tari_common::configuration::Network; +use tari_test_utils::unpack_enum; fn setup() -> BlockchainDatabase { create_new_blockchain() @@ -353,16 +358,6 @@ mod fetch_block_hashes_from_header_tip { } mod add_block { - use crate::{ - chain_storage::ChainStorageError, - crypto::tari_utilities::hex::Hex, - transactions::{ - tari_amount::T, - test_helpers::{schema_to_transaction, TransactionSchema}, - transaction_entities::output_features::OutputFeatures, - }, - txn_schema, - }; use super::*; @@ -484,3 +479,88 @@ mod prepare_new_block { assert_eq!(block.header.height, 1); } } + +mod fetch_header_containing_utxo_mmr { + use super::*; + + #[test] + fn it_returns_genesis() { + let db = setup(); + let genesis = db.fetch_block(0).unwrap(); + assert_eq!(genesis.block().body.outputs().len(), 4001); + let mut mmr_position = 0; + genesis.block().body.outputs().iter().for_each(|_| { + let header = db.fetch_header_containing_utxo_mmr(mmr_position).unwrap(); + assert_eq!(header.height(), 0); + mmr_position += 1; + }); + let err = db.fetch_header_containing_utxo_mmr(4002).unwrap_err(); + matches!(err, ChainStorageError::ValueNotFound { .. 
}); + } + + #[test] + fn it_returns_corresponding_header() { + let db = setup(); + let genesis = db.fetch_block(0).unwrap(); + let _ = add_many_chained_blocks(5, &db); + let num_genesis_outputs = genesis.block().body.outputs().len() as u64; + + for i in 1..=5 { + let header = db.fetch_header_containing_utxo_mmr(num_genesis_outputs + i).unwrap(); + assert_eq!(header.height(), i); + } + let err = db + .fetch_header_containing_utxo_mmr(num_genesis_outputs + 5 + 1) + .unwrap_err(); + matches!(err, ChainStorageError::ValueNotFound { .. }); + } +} + +mod fetch_header_containing_kernel_mmr { + use super::*; + + #[test] + fn it_returns_genesis() { + let db = setup(); + let genesis = db.fetch_block(0).unwrap(); + assert_eq!(genesis.block().body.kernels().len(), 2); + let mut mmr_position = 0; + genesis.block().body.kernels().iter().for_each(|_| { + let header = db.fetch_header_containing_kernel_mmr(mmr_position).unwrap(); + assert_eq!(header.height(), 0); + mmr_position += 1; + }); + let err = db.fetch_header_containing_kernel_mmr(3).unwrap_err(); + matches!(err, ChainStorageError::ValueNotFound { .. 
}); + } + + #[test] + fn it_returns_corresponding_header() { + let db = setup(); + let genesis = db.fetch_block(0).unwrap(); + let (blocks, outputs) = add_many_chained_blocks(1, &db); + let num_genesis_kernels = genesis.block().body.kernels().len() as u64; + let (txns, _) = schema_to_transaction(&[txn_schema!(from: vec![outputs[0].clone()], to: vec![50 * T])]); + + let (block, _) = create_next_block(&blocks[0], txns); + db.add_block(block).unwrap(); + let _ = add_many_chained_blocks(3, &db); + + let header = db.fetch_header_containing_kernel_mmr(num_genesis_kernels).unwrap(); + assert_eq!(header.height(), 1); + + for i in 2..=3 { + let header = db.fetch_header_containing_kernel_mmr(num_genesis_kernels + i).unwrap(); + assert_eq!(header.height(), 2); + } + for i in 4..=5 { + let header = db.fetch_header_containing_kernel_mmr(num_genesis_kernels + i).unwrap(); + assert_eq!(header.height(), i); + } + + let err = db + .fetch_header_containing_kernel_mmr(num_genesis_kernels + 5 + 1) + .unwrap_err(); + matches!(err, ChainStorageError::ValueNotFound { .. }); + } +} diff --git a/base_layer/core/src/lib.rs b/base_layer/core/src/lib.rs index 1522d8420b..965a6b980c 100644 --- a/base_layer/core/src/lib.rs +++ b/base_layer/core/src/lib.rs @@ -20,10 +20,10 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-#![cfg_attr(not(debug_assertions), deny(unused_variables))] -#![cfg_attr(not(debug_assertions), deny(unused_imports))] -#![cfg_attr(not(debug_assertions), deny(dead_code))] -#![cfg_attr(not(debug_assertions), deny(unused_extern_crates))] +// #![cfg_attr(not(debug_assertions), deny(unused_variables))] +// #![cfg_attr(not(debug_assertions), deny(unused_imports))] +// #![cfg_attr(not(debug_assertions), deny(dead_code))] +// #![cfg_attr(not(debug_assertions), deny(unused_extern_crates))] #![deny(unused_must_use)] #![deny(unreachable_patterns)] #![deny(unknown_lints)] diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index 462c972621..51f09fe0bd 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -272,6 +272,14 @@ impl BlockchainBackend for TempDatabase { self.db.as_ref().unwrap().fetch_kernels_by_mmr_position(start, end) } + fn fetch_utxos_in_block( + &self, + header_hash: &HashOutput, + deleted: &Bitmap, + ) -> Result<(Vec, Bitmap), ChainStorageError> { + self.db.as_ref().unwrap().fetch_utxos_in_block(header_hash, deleted) + } + fn fetch_utxos_by_mmr_position( &self, start: u64, diff --git a/base_layer/core/src/transactions/transaction_protocol/sender.rs b/base_layer/core/src/transactions/transaction_protocol/sender.rs index 0a3b9d8205..cf6c4eb388 100644 --- a/base_layer/core/src/transactions/transaction_protocol/sender.rs +++ b/base_layer/core/src/transactions/transaction_protocol/sender.rs @@ -752,7 +752,7 @@ mod test { crypto_factories::CryptoFactories, tari_amount::*, test_helpers::{create_test_input, create_unblinded_output, TestParams}, - transaction_entities::{KernelFeatures, OutputFeatures, TransactionError, TransactionOutput}, + transaction_entities::{KernelFeatures, OutputFeatures, TransactionOutput}, transaction_protocol::{ sender::SenderTransactionProtocol, single_receiver::SingleReceiverTransactionProtocol, @@ -1041,13 +1041,13 @@ 
mod test { // Receiver gets message, deserializes it etc, and creates his response let bob_info = SingleReceiverTransactionProtocol::create(&msg, b.nonce, b.spend_key, features, &factories, None).unwrap(); // Alice gets message back, deserializes it, etc - let err = alice - .add_single_recipient_info(bob_info, &factories.range_proof) - .unwrap_err(); - assert!(matches!( - err, - TransactionProtocolError::TransactionBuildError(TransactionError::InvalidRangeProof) - )); + match alice.add_single_recipient_info(bob_info, &factories.range_proof) { + Ok(_) => panic!("Range proof should have failed to verify"), + Err(e) => assert_eq!( + e, + TransactionProtocolError::ValidationError("Recipient output range proof failed to verify".into()) + ), + } } #[test] diff --git a/comms/dht/src/config.rs b/comms/dht/src/config.rs index ccfd260c9b..978f3581d9 100644 --- a/comms/dht/src/config.rs +++ b/comms/dht/src/config.rs @@ -82,6 +82,7 @@ pub struct DhtConfig { /// Default: 6 hrs pub ban_duration: Duration, /// This allows the use of test addresses in the network. 
+ /// Default: false pub allow_test_addresses: bool, /// The maximum number of messages over `flood_ban_timespan` to allow before banning the peer (for `ban_duration`) /// Default: 1000 messages From 90308ec0f6d67a1af4078c9285a1837a6d14f80a Mon Sep 17 00:00:00 2001 From: Stanimal Date: Thu, 25 Nov 2021 09:42:07 +0400 Subject: [PATCH 07/11] simplify db calls for sync_utxo and sync_kernel rpc --- base_layer/core/src/base_node/proto/rpc.proto | 2 +- .../states/horizon_state_sync.rs | 5 +- .../states/horizon_state_sync/error.rs | 29 +-- .../horizon_state_synchronization.rs | 180 ++++++--------- .../base_node/sync/block_sync/synchronizer.rs | 3 +- .../core/src/base_node/sync/rpc/service.rs | 4 +- .../src/base_node/sync/rpc/sync_utxos_task.rs | 34 ++- base_layer/core/src/chain_storage/async_db.rs | 4 - .../src/chain_storage/blockchain_backend.rs | 10 - .../src/chain_storage/blockchain_database.rs | 69 ++---- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 208 +++--------------- base_layer/core/src/lib.rs | 8 +- .../core/src/test_helpers/blockchain.rs | 16 -- base_layer/core/src/validation/error.rs | 2 +- base_layer/core/tests/base_node_rpc.rs | 3 +- base_layer/key_manager/src/mnemonic.rs | 4 +- base_layer/wallet/tests/wallet/mod.rs | 1 - 17 files changed, 152 insertions(+), 430 deletions(-) diff --git a/base_layer/core/src/base_node/proto/rpc.proto b/base_layer/core/src/base_node/proto/rpc.proto index ac7fb7ec9d..f1c4ee9d6f 100644 --- a/base_layer/core/src/base_node/proto/rpc.proto +++ b/base_layer/core/src/base_node/proto/rpc.proto @@ -45,7 +45,7 @@ message FindChainSplitResponse { } message SyncKernelsRequest { - bytes start_header_hash = 1; + uint64 start = 1; bytes end_header_hash = 2; } diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs index bd1ed5a5f9..e4a163f003 100644 --- 
a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs @@ -84,7 +84,10 @@ impl HorizonStateSync { // We're already synced because we have full blocks higher than our target pruned height if local_metadata.height_of_longest_chain() >= horizon_sync_height { - info!(target: LOG_TARGET, "Horizon state was already synchronized."); + info!( + target: LOG_TARGET, + "Tip height is higher than our pruned height. Horizon state is already synchronized." + ); return StateEvent::HorizonStateSynchronized; } diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/error.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/error.rs index e0371fff7a..df62e5495a 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/error.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/error.rs @@ -20,24 +20,20 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::num::TryFromIntError; - -use thiserror::Error; -use tokio::task; - -use tari_comms::{ - connectivity::ConnectivityError, - peer_manager::NodeId, - protocol::rpc::{RpcError, RpcStatus}, -}; -use tari_mmr::error::MerkleMountainRangeError; - use crate::{ base_node::{comms_interface::CommsInterfaceError, state_machine_service::states::helpers::BaseNodeRequestError}, chain_storage::{ChainStorageError, MmrTree}, transactions::transaction_entities::error::TransactionError, validation::ValidationError, }; +use std::num::TryFromIntError; +use tari_comms::{ + connectivity::ConnectivityError, + protocol::rpc::{RpcError, RpcStatus}, +}; +use tari_mmr::error::MerkleMountainRangeError; +use thiserror::Error; +use tokio::task; #[derive(Debug, Error)] pub enum HorizonSyncError { @@ -74,15 +70,6 @@ pub enum HorizonSyncError { MerkleMountainRangeError(#[from] MerkleMountainRangeError), #[error("Connectivity error: {0}")] ConnectivityError(#[from] ConnectivityError), - #[error( - "Sync peer {peer} has a tip height of {remote_peer_height} which is less than the target height of \ - {target_pruning_horizon}" - )] - InappropriateSyncPeer { - peer: NodeId, - target_pruning_horizon: u64, - remote_peer_height: u64, - }, } impl From for HorizonSyncError { diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs index 78d80f72c4..2013ce3264 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs @@ -135,8 +135,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { client: &mut rpc::BaseNodeSyncRpcClient, to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { - debug!(target: LOG_TARGET, 
"Initializing"); - self.initialize().await?; + // debug!(target: LOG_TARGET, "Initializing"); + // self.initialize().await?; debug!(target: LOG_TARGET, "Synchronizing kernels"); self.synchronize_kernels(client, to_header).await?; debug!(target: LOG_TARGET, "Synchronizing outputs"); @@ -144,65 +144,30 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { Ok(()) } - async fn initialize(&mut self) -> Result<(), HorizonSyncError> { - let db = self.db(); - let local_metadata = db.get_chain_metadata().await?; - - if local_metadata.height_of_longest_chain() == 0 { - let horizon_data = db.fetch_horizon_data().await?; - self.utxo_sum = horizon_data.utxo_sum().clone(); - self.kernel_sum = horizon_data.kernel_sum().clone(); - - return Ok(()); - } - - // let header = self.db().fetch_chain_header(self.horizon_sync_height).await?; - // let acc = db.fetch_block_accumulated_data(header.hash().clone()).await?; - let new_prune_height = cmp::min(local_metadata.height_of_longest_chain(), self.horizon_sync_height); - if local_metadata.pruned_height() < new_prune_height { - debug!(target: LOG_TARGET, "Pruning block chain to height {}", new_prune_height); - db.prune_to_height(new_prune_height).await?; - } - - // let (calc_utxo_sum, calc_kernel_sum) = self.calculate_commitment_sums(&header).await?; - - // prune_to_height updates the horizon data - let horizon_data = db.fetch_horizon_data().await?; - // if *horizon_data.kernel_sum() != acc.cumulative_kernel_sum { - // error!(target: LOG_TARGET, "KERNEL SUM NOT EQUAL CALCULATED"); - // } - // if *horizon_data.utxo_sum() != acc.cumulative_utxo_sum { - // error!(target: LOG_TARGET, "UTXO SUM NOT EQUAL CALCULATED"); - // } - - // let (calc_utxo_sum, calc_kernel_sum) = self.calculate_commitment_sums(&header).await?; - // if calc_kernel_sum != acc.cumulative_kernel_sum { - // error!(target: LOG_TARGET, "KERNEL SUM NOT EQUAL CALCULATED"); - // } - // if calc_utxo_sum != acc.cumulative_utxo_sum { - // error!(target: 
LOG_TARGET, "UTXO SUM NOT EQUAL CALCULATED"); - // } - - // if calc_kernel_sum != *horizon_data.kernel_sum() { - // error!(target: LOG_TARGET, "HORIZON KERNEL SUM NOT EQUAL CALCULATED"); - // } - // if calc_utxo_sum != *horizon_data.utxo_sum() { - // error!(target: LOG_TARGET, "HORIZON UTXO SUM NOT EQUAL CALCULATED"); - // } - - debug!( - target: LOG_TARGET, - "Loaded from horizon data utxo_sum = {}, kernel_sum = {}", - horizon_data.utxo_sum().to_hex(), - horizon_data.kernel_sum().to_hex(), - ); - // self.utxo_sum = calc_utxo_sum; - // self.kernel_sum = calc_kernel_sum; - self.utxo_sum = horizon_data.utxo_sum().clone(); - self.kernel_sum = horizon_data.kernel_sum().clone(); - - Ok(()) - } + // async fn initialize(&mut self) -> Result<(), HorizonSyncError> { + // let db = self.db(); + // let local_metadata = db.get_chain_metadata().await?; + // + // let new_prune_height = cmp::min(local_metadata.height_of_longest_chain(), self.horizon_sync_height); + // if local_metadata.pruned_height() < new_prune_height { + // debug!(target: LOG_TARGET, "Pruning block chain to height {}", new_prune_height); + // db.prune_to_height(new_prune_height).await?; + // } + // + // // prune_to_height updates horizon data + // let horizon_data = db.fetch_horizon_data().await?; + // + // debug!( + // target: LOG_TARGET, + // "Loaded from horizon data utxo_sum = {}, kernel_sum = {}", + // horizon_data.utxo_sum().to_hex(), + // horizon_data.kernel_sum().to_hex(), + // ); + // self.utxo_sum = horizon_data.utxo_sum().clone(); + // self.kernel_sum = horizon_data.kernel_sum().clone(); + // + // Ok(()) + // } async fn synchronize_kernels( &mut self, @@ -247,7 +212,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { .fetch_header_containing_kernel_mmr(local_num_kernels + 1) .await?; let req = SyncKernelsRequest { - start_header_hash: current_header.hash().clone(), + start: local_num_kernels, end_header_hash: to_header.hash(), }; let mut kernel_stream = 
client.sync_kernels(req).await?; @@ -407,15 +372,13 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { debug!( target: LOG_TARGET, "Found header for utxos at mmr pos: {} - {} height: {}", - start + 1, + start, current_header.header().output_mmr_size, current_header.height() ); let db = self.db().clone(); - let mut output_hashes = vec![]; - let mut witness_hashes = vec![]; let mut txn = db.write_transaction(); let mut unpruned_outputs = vec![]; let mut mmr_position = start; @@ -435,7 +398,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { while let Some(response) = output_stream.next().await { let res: SyncUtxosResponse = response?; - if res.mmr_index > 0 && res.mmr_index != mmr_position { + if res.mmr_index != 0 && res.mmr_index != mmr_position { return Err(HorizonSyncError::IncorrectResponse(format!( "Expected MMR position of {} but got {}", mmr_position, res.mmr_index, @@ -458,9 +421,10 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { ); height_utxo_counter += 1; let output = TransactionOutput::try_from(output).map_err(HorizonSyncError::ConversionError)?; - output_hashes.push(output.hash()); - witness_hashes.push(output.witness_hash()); unpruned_outputs.push(output.clone()); + + output_mmr.push(output.hash())?; + witness_mmr.push(output.witness_hash())?; self.utxo_sum = &self.utxo_sum + &output.commitment; txn.insert_output_via_horizon_sync( @@ -481,8 +445,9 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { current_header.height() ); height_txo_counter += 1; - output_hashes.push(utxo.hash.clone()); - witness_hashes.push(utxo.witness_hash.clone()); + output_mmr.push(utxo.hash.clone())?; + witness_mmr.push(utxo.witness_hash.clone())?; + txn.insert_pruned_output_via_horizon_sync( utxo.hash, utxo.witness_hash, @@ -501,15 +466,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { ))); } - // Validate root - for hash 
in output_hashes.drain(..) { - output_mmr.push(hash)?; - } - - for hash in witness_hashes.drain(..) { - witness_mmr.push(hash)?; - } - // Check that the difference bitmap isn't excessively large. Bitmap::deserialize panics if greater // than isize::MAX, however isize::MAX is still an inordinate amount of data. An // arbitrary 4 MiB limit is used. @@ -559,7 +515,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { }); } - // self.validate_rangeproofs(mem::take(&mut unpruned_outputs)).await?; + self.validate_rangeproofs(mem::take(&mut unpruned_outputs)).await?; txn.update_deleted_bitmap(diff_bitmap.clone()); @@ -577,12 +533,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { txn.set_pruned_height(metadata.pruned_height(), self.kernel_sum.clone(), self.utxo_sum.clone()); txn.commit().await?; - let data = self.db().fetch_horizon_data().await?; - error!( - target: LOG_TARGET, - "***************** utxo = {} ********************* ", - data.utxo_sum().to_hex(), - ); debug!( target: LOG_TARGET, "UTXO: {}/{}, Header #{}, added {} utxos, added {} txos in {:.2?}", @@ -593,17 +543,28 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { height_txo_counter, timer.elapsed() ); - height_txo_counter = 0; height_utxo_counter = 0; timer = Instant::now(); - current_header = db.fetch_chain_header(current_header.height() + 1).await?; - debug!( - target: LOG_TARGET, - "Expecting to receive the next UTXO set for header #{}", - current_header.height() - ); + if mmr_position == end { + debug!( + target: LOG_TARGET, + "Sync complete at mmr position {}, height #{}", + mmr_position, + current_header.height() + ); + break; + } else { + current_header = db.fetch_chain_header(current_header.height() + 1).await?; + debug!( + target: LOG_TARGET, + "Expecting to receive the next UTXO set {}-{} for header #{}", + mmr_position, + current_header.header().output_mmr_size, + current_header.height() + ); + } }, v => { 
error!(target: LOG_TARGET, "Remote node returned an invalid response {:?}", v); @@ -671,25 +632,17 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { ))); let header = self.db().fetch_chain_header(self.horizon_sync_height).await?; - // TODO: Use accumulated sums - // let (calc_utxo_sum, calc_kernel_sum) = self.calculate_commitment_sums(&header).await?; - // let utxo_sum = &self.utxo_sum; - // let kernel_sum = &self.kernel_sum; - // if *utxo_sum != calc_utxo_sum { - // error!(target: LOG_TARGET, "UTXO sum isnt equal!"); - // } - // if *kernel_sum != calc_kernel_sum { - // error!(target: LOG_TARGET, "KERNEL sum isnt equal!"); - // } + // TODO: Use cumulative kernel and utxo sums + let (calc_utxo_sum, calc_kernel_sum) = self.calculate_commitment_sums(&header).await?; self.shared .sync_validators .final_horizon_state .validate( &*self.db().inner().db_read_access()?, - header.height() - 1, - &self.utxo_sum, - &self.kernel_sum, + header.height(), + &calc_utxo_sum, + &calc_kernel_sum, ) .map_err(HorizonSyncError::FinalStateValidationFailed)?; @@ -742,7 +695,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { ); let (utxos, _) = self .db() - .fetch_utxos_by_mmr_position(prev_mmr, curr_header.header().output_mmr_size - 1, bitmap.clone()) + .fetch_utxos_in_block(curr_header.hash().clone(), bitmap.clone()) .await?; trace!( target: LOG_TARGET, @@ -752,19 +705,13 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { prev_kernel_mmr, curr_header.header().kernel_mmr_size - 1 ); - let kernels = self - .db() - .fetch_kernels_by_mmr_position(prev_kernel_mmr, curr_header.header().kernel_mmr_size - 1) - .await?; - let mut utxo_sum = HomomorphicCommitment::default(); - trace!(target: LOG_TARGET, "Number of kernels returned: {}", kernels.len()); trace!(target: LOG_TARGET, "Number of utxos returned: {}", utxos.len()); let mut prune_counter = 0; for u in utxos { match u { PrunedOutput::NotPruned { 
output } => { - utxo_sum = &output.commitment + &utxo_sum; + pruned_utxo_sum = &output.commitment + &pruned_utxo_sum; }, _ => { prune_counter += 1; @@ -776,8 +723,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { } prev_mmr = curr_header.header().output_mmr_size; - pruned_utxo_sum = &utxo_sum + &pruned_utxo_sum; - + let kernels = self.db().fetch_kernels_in_block(curr_header.hash().clone()).await?; + trace!(target: LOG_TARGET, "Number of kernels returned: {}", kernels.len()); for k in kernels { pruned_kernel_sum = &k.excess + &pruned_kernel_sum; } @@ -791,7 +738,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { pruned_utxo_sum ); } - Ok((pruned_utxo_sum, pruned_kernel_sum)) } diff --git a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs index b17da13bc3..b476f4078f 100644 --- a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs @@ -31,7 +31,7 @@ use crate::{ proto::base_node::SyncBlocksRequest, tari_utilities::{hex::Hex, Hashable}, transactions::aggregated_body::AggregateBody, - validation::BlockSyncBodyValidation, + validation::{BlockSyncBodyValidation, ValidationError}, }; use futures::StreamExt; use log::*; @@ -96,6 +96,7 @@ impl BlockSynchronizer { self.db.cleanup_orphans().await?; Ok(()) }, + Err(err @ BlockSyncError::ValidationError(ValidationError::AsyncTaskFailed(_))) => Err(err), Err(err @ BlockSyncError::ValidationError(_)) | Err(err @ BlockSyncError::ReceivedInvalidBlockBody(_)) => { self.ban_peer(node_id, &err).await?; Err(err) diff --git a/base_layer/core/src/base_node/sync/rpc/service.rs b/base_layer/core/src/base_node/sync/rpc/service.rs index 47f4b6c75d..cc62c819d0 100644 --- a/base_layer/core/src/base_node/sync/rpc/service.rs +++ b/base_layer/core/src/base_node/sync/rpc/service.rs @@ -392,10 +392,10 @@ impl BaseNodeSyncService for 
BaseNodeSyncRpcServ let db = self.db(); let start_header = db - .fetch_header_by_block_hash(req.start_header_hash.clone()) + .fetch_header_containing_kernel_mmr(req.start + 1) .await .map_err(RpcStatus::log_internal_error(LOG_TARGET))? - .ok_or_else(|| RpcStatus::not_found("Unknown start header"))?; + .into_header(); let end_header = db .fetch_header_by_block_hash(req.end_header_hash.clone()) diff --git a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs index 9a6a59f612..eb13ab4941 100644 --- a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs +++ b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs @@ -26,9 +26,8 @@ use crate::{ proto, proto::base_node::{SyncUtxo, SyncUtxosRequest, SyncUtxosResponse}, }; -use croaring::Bitmap; use log::*; -use std::{cmp, sync::Arc, time::Instant}; +use std::{sync::Arc, time::Instant}; use tari_comms::{protocol::rpc::RpcStatus, utils}; use tari_crypto::tari_utilities::{hex::Hex, Hashable}; use tokio::{sync::mpsc, task}; @@ -88,20 +87,6 @@ where B: BlockchainBackend + 'static (skip, prev_header.output_mmr_size) }; - // we need to fetch the spent bitmap for the height the client requested - let bitmap = self - .db - .fetch_complete_deleted_bitmap_at(end_header.hash()) - .await - .map_err(|_| { - RpcStatus::general(format!( - "Could not get tip deleted bitmap at hash {}", - end_header.hash().to_hex() - )) - })? 
- .into_bitmap(); - let bitmap = Arc::new(bitmap); - let include_pruned_utxos = request.include_pruned_utxos; let include_deleted_bitmaps = request.include_deleted_bitmaps; task::spawn(async move { @@ -112,7 +97,6 @@ where B: BlockchainBackend + 'static skip_outputs, prev_utxo_mmr_size, end_header, - bitmap, include_pruned_utxos, include_deleted_bitmaps, ) @@ -125,6 +109,7 @@ where B: BlockchainBackend + 'static Ok(()) } + #[allow(clippy::too_many_arguments)] async fn start_streaming( &self, tx: &mut mpsc::Sender>, @@ -132,10 +117,23 @@ where B: BlockchainBackend + 'static mut skip_outputs: u64, mut prev_utxo_mmr_size: u64, end_header: BlockHeader, - bitmap: Arc, include_pruned_utxos: bool, include_deleted_bitmaps: bool, ) -> Result<(), RpcStatus> { + // we need to fetch the spent bitmap for the height the client requested + let bitmap = self + .db + .fetch_complete_deleted_bitmap_at(end_header.hash()) + .await + .map_err(|err| { + error!(target: LOG_TARGET, "Failed to get deleted bitmap: {}", err); + RpcStatus::general(format!( + "Could not get deleted bitmap at hash {}", + end_header.hash().to_hex() + )) + })? 
+ .into_bitmap(); + let bitmap = Arc::new(bitmap); debug!( target: LOG_TARGET, "Starting stream task with current_header: {}, skip_outputs: {}, prev_utxo_mmr_size: {}, end_header: {}, \ diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index f4f2521149..3d5c61d3c2 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -168,13 +168,9 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_utxos_in_block(hash: HashOutput, deleted: Arc) -> (Vec, Bitmap), "fetch_utxos_in_block"); - make_async_fn!(fetch_utxos_by_mmr_position(start: u64, end: u64, deleted: Arc) -> (Vec, Bitmap), "fetch_utxos_by_mmr_position"); - //---------------------------------- Kernel --------------------------------------------// make_async_fn!(fetch_kernel_by_excess_sig(excess_sig: Signature) -> Option<(TransactionKernel, HashOutput)>, "fetch_kernel_by_excess_sig"); - make_async_fn!(fetch_kernels_by_mmr_position(start: u64, end: u64) -> Vec, "fetch_kernels_by_mmr_position"); - make_async_fn!(fetch_kernels_in_block(hash: HashOutput) -> Vec, "fetch_kernels_in_block"); //---------------------------------- MMR --------------------------------------------// diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index c4fa2920f9..e89b8220f2 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -100,9 +100,6 @@ pub trait BlockchainBackend: Send + Sync { excess_sig: &Signature, ) -> Result, ChainStorageError>; - /// Fetch kernels by MMR position - fn fetch_kernels_by_mmr_position(&self, start: u64, end: u64) -> Result, ChainStorageError>; - /// Fetch all UTXOs and spends in the block fn fetch_utxos_in_block( &self, @@ -110,13 +107,6 @@ pub trait BlockchainBackend: Send + Sync { deleted: &Bitmap, ) -> Result<(Vec, Bitmap), ChainStorageError>; - fn 
fetch_utxos_by_mmr_position( - &self, - start: u64, - end: u64, - deleted: &Bitmap, - ) -> Result<(Vec, Bitmap), ChainStorageError>; - /// Fetch a specific output. Returns the output and the leaf index in the output MMR fn fetch_output(&self, output_hash: &HashOutput) -> Result, ChainStorageError>; diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 4ceba06227..116231350e 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -15,7 +15,7 @@ use tari_crypto::tari_utilities::{hex::Hex, ByteArray, Hashable}; use tari_common_types::{ chain_metadata::ChainMetadata, - types::{BlockHash, Commitment, HashDigest, HashOutput, Signature, BLOCK_HASH_LENGTH}, + types::{BlockHash, Commitment, HashDigest, HashOutput, Signature}, }; use tari_mmr::{pruned_hashset::PrunedHashSet, MerkleMountainRange, MutableMmr}; @@ -389,15 +389,6 @@ where B: BlockchainBackend db.fetch_kernels_in_block(&hash) } - pub fn fetch_kernels_by_mmr_position( - &self, - start: u64, - end: u64, - ) -> Result, ChainStorageError> { - let db = self.db_read_access()?; - db.fetch_kernels_by_mmr_position(start, end) - } - pub fn fetch_utxos_in_block( &self, hash: HashOutput, @@ -407,16 +398,6 @@ where B: BlockchainBackend db.fetch_utxos_in_block(&hash, &deleted) } - pub fn fetch_utxos_by_mmr_position( - &self, - start: u64, - end: u64, - deleted: Arc, - ) -> Result<(Vec, Bitmap), ChainStorageError> { - let db = self.db_read_access()?; - db.fetch_utxos_by_mmr_position(start, end, deleted.as_ref()) - } - /// Returns the block header at the given block height. 
pub fn fetch_header(&self, height: u64) -> Result, ChainStorageError> { let db = self.db_read_access()?; @@ -2105,8 +2086,7 @@ fn prune_database_if_needed( pruning_interval, ); if metadata.pruned_height() < abs_pruning_horizon.saturating_sub(pruning_interval) { - debug!(target: LOG_TARGET, "GONNA PRUNNEEEEE",); - // prune_to_height(db, abs_pruning_horizon)?; + prune_to_height(db, abs_pruning_horizon)?; } Ok(()) @@ -2117,12 +2097,11 @@ fn prune_to_height(db: &mut T, target_horizon_height: u64) let last_pruned = metadata.pruned_height(); if target_horizon_height < last_pruned { return Err(ChainStorageError::InvalidArguments { - func: "prune_to_block", + func: "prune_to_height", arg: "target_horizon_height", message: format!( "Target pruning horizon {} is less than current pruning horizon {}", - target_horizon_height, - last_pruned + 1 + target_horizon_height, last_pruned ), }); } @@ -2135,12 +2114,12 @@ fn prune_to_height(db: &mut T, target_horizon_height: u64) return Ok(()); } - if metadata.height_of_longest_chain() > 0 && target_horizon_height > metadata.height_of_longest_chain() { + if target_horizon_height > metadata.height_of_longest_chain() { return Err(ChainStorageError::InvalidArguments { - func: "prune_to_block", + func: "prune_to_height", arg: "target_horizon_height", message: format!( - "Target pruning horizon {} is less than current block height {}", + "Target pruning horizon {} is greater than current block height {}", target_horizon_height, metadata.height_of_longest_chain() ), @@ -2156,9 +2135,8 @@ fn prune_to_height(db: &mut T, target_horizon_height: u64) "height", last_pruned.to_string(), )?; - let mut block_before_last = None; let mut txn = DbTransaction::new(); - for block_to_prune in (last_pruned + 1)..target_horizon_height { + for block_to_prune in (last_pruned + 1)..=target_horizon_height { let header = db.fetch_chain_header_by_height(block_to_prune)?; let curr_block = db.fetch_block_accumulated_data_by_height(block_to_prune).or_not_found( 
"BlockAccumulatedData", @@ -2168,30 +2146,27 @@ fn prune_to_height(db: &mut T, target_horizon_height: u64) // Note, this could actually be done in one step instead of each block, since deleted is // accumulated let output_mmr_positions = curr_block.deleted() - last_block.deleted(); - block_before_last = Some(last_block); last_block = curr_block; txn.prune_outputs_at_positions(output_mmr_positions.to_vec()); txn.delete_all_inputs_in_block(header.hash().clone()); } - if let Some(block) = block_before_last { - txn.set_pruned_height( - target_horizon_height, - block.cumulative_kernel_sum().clone(), - block.cumulative_utxo_sum().clone(), - ); - } + txn.set_pruned_height( + target_horizon_height, + last_block.cumulative_kernel_sum().clone(), + last_block.cumulative_utxo_sum().clone(), + ); // If we prune to the tip, we cannot provide any full blocks - if metadata.height_of_longest_chain() == target_horizon_height { - let genesis = db.fetch_chain_header_by_height(0)?; - txn.set_best_block( - 0, - genesis.hash().clone(), - genesis.accumulated_data().total_accumulated_difficulty, - vec![0; BLOCK_HASH_LENGTH], - ); - } + // if metadata.height_of_longest_chain() == target_horizon_height - 1 { + // let genesis = db.fetch_chain_header_by_height(0)?; + // txn.set_best_block( + // 0, + // genesis.hash().clone(), + // genesis.accumulated_data().total_accumulated_difficulty, + // vec![0; BLOCK_HASH_LENGTH], + // ); + // } db.write(txn)?; Ok(()) diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index a231eb87ea..291714c4de 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -24,22 +24,14 @@ // let's ignore this clippy error in this module #![allow(clippy::ptr_arg)] -use std::{convert::TryFrom, fmt, fs, fs::File, ops::Deref, path::Path, sync::Arc, time::Instant}; - use croaring::Bitmap; use fs2::FileExt; use lmdb_zero::{ConstTransaction, 
Database, Environment, ReadTransaction, WriteTransaction}; use log::*; use serde::{Deserialize, Serialize}; +use std::{fmt, fs, fs::File, ops::Deref, path::Path, sync::Arc, time::Instant}; use tari_crypto::tari_utilities::{hash::Hashable, hex::Hex, ByteArray}; -use tari_common_types::{ - chain_metadata::ChainMetadata, - types::{BlockHash, Commitment, HashDigest, HashOutput, Signature, BLOCK_HASH_LENGTH}, -}; -use tari_mmr::{Hash, MerkleMountainRange, MutableMmr}; -use tari_storage::lmdb_store::{db, LMDBBuilder, LMDBConfig, LMDBStore}; - use crate::{ blocks::{ Block, @@ -95,6 +87,12 @@ use crate::{ }, }, }; +use tari_common_types::{ + chain_metadata::ChainMetadata, + types::{BlockHash, Commitment, HashDigest, HashOutput, Signature, BLOCK_HASH_LENGTH}, +}; +use tari_mmr::{Hash, MerkleMountainRange, MutableMmr}; +use tari_storage::lmdb_store::{db, LMDBBuilder, LMDBConfig, LMDBStore}; type DatabaseRef = Arc>; @@ -1638,66 +1636,6 @@ impl BlockchainBackend for LMDBDatabase { } } - fn fetch_kernels_by_mmr_position(&self, start: u64, end: u64) -> Result, ChainStorageError> { - let txn = self.read_transaction()?; - - let start_height = match lmdb_first_after(&txn, &self.kernel_mmr_size_index, &(start + 1).to_be_bytes())? { - Some(h) => h, - None => return Ok(vec![]), - }; - let end_height: u64 = - lmdb_first_after(&txn, &self.kernel_mmr_size_index, &(end + 1).to_be_bytes())?.unwrap_or(start_height); - - let previous_mmr_count = if start_height == 0 { - 0 - } else { - let header: BlockHeader = - lmdb_get(&txn, &self.headers_db, &(start_height - 1))?.expect("Header should exist"); - debug!(target: LOG_TARGET, "Previous header:{}", header); - header.kernel_mmr_size - }; - - let total_size = (end - start) as usize + 1; - let mut result = Vec::with_capacity(total_size); - - let mut skip_amount = (start - previous_mmr_count) as usize; - debug!( - target: LOG_TARGET, - "Fetching kernels by MMR position. 
Start {}, end {}, in headers at height {}-{}, prev mmr count: {}, \ - skipping the first:{}", - start, - end, - start_height, - end_height, - previous_mmr_count, - skip_amount - ); - - for height in start_height..=end_height { - let acc_data = lmdb_get::<_, BlockHeaderAccumulatedData>(&txn, &self.header_accumulated_data_db, &height)? - .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeader", - field: "height", - value: height.to_string(), - })?; - - result.extend( - lmdb_fetch_keys_starting_with::( - acc_data.hash.to_hex().as_str(), - &txn, - &self.kernels_db, - )? - .into_iter() - .skip(skip_amount) - .take(total_size - result.len()) - .map(|f| f.kernel), - ); - - skip_amount = 0; - } - Ok(result) - } - fn fetch_utxos_in_block( &self, header_hash: &HashOutput, @@ -1705,14 +1643,6 @@ impl BlockchainBackend for LMDBDatabase { ) -> Result<(Vec, Bitmap), ChainStorageError> { let txn = self.read_transaction()?; - let height = - self.fetch_height_from_hash(&txn, header_hash)? - .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeader", - field: "hash", - value: header_hash.to_hex(), - })?; - let utxos = lmdb_fetch_keys_starting_with::( header_hash.to_hex().as_str(), &txn, @@ -1737,6 +1667,14 @@ impl BlockchainBackend for LMDBDatabase { }) .collect(); + let height = + self.fetch_height_from_hash(&txn, header_hash)? + .ok_or_else(|| ChainStorageError::ValueNotFound { + entity: "BlockHeader", + field: "hash", + value: header_hash.to_hex(), + })?; + // Builds a BitMap of the deleted UTXO MMR indexes that occurred at the current height let acc_data = self.fetch_block_accumulated_data(&txn, height)? 
@@ -1752,105 +1690,6 @@ impl BlockchainBackend for LMDBDatabase { Ok((utxos, difference_bitmap)) } - fn fetch_utxos_by_mmr_position( - &self, - start: u64, - end: u64, - deleted: &Bitmap, - ) -> Result<(Vec, Bitmap), ChainStorageError> { - let txn = self.read_transaction()?; - let start_height = lmdb_first_after(&txn, &self.output_mmr_size_index, &(start + 1).to_be_bytes())? - .ok_or_else(|| { - ChainStorageError::InvalidQuery(format!( - "Unable to find block height from start output MMR index {}", - start - )) - })?; - let end_height: u64 = - lmdb_first_after(&txn, &self.output_mmr_size_index, &(end + 1).to_be_bytes())?.unwrap_or(start_height); - - let previous_mmr_count = if start_height == 0 { - 0 - } else { - let header: BlockHeader = - lmdb_get(&txn, &self.headers_db, &(start_height - 1))?.expect("Header should exist"); - debug!(target: LOG_TARGET, "Previous header:{}", header); - header.output_mmr_size - }; - - let total_size = end - .checked_sub(start) - .and_then(|v| v.checked_add(1)) - .and_then(|v| usize::try_from(v).ok()) - .ok_or_else(|| { - ChainStorageError::InvalidQuery("fetch_utxos_by_mmr_position: end is less than start".to_string()) - })?; - let mut result = Vec::with_capacity(total_size); - - let mut skip_amount = (start - previous_mmr_count) as usize; - debug!( - target: LOG_TARGET, - "Fetching outputs by MMR position. Start {}, end {}, starting in header at height {}, prev mmr count: \ - {}, skipping the first:{}", - start, - end, - start_height, - previous_mmr_count, - skip_amount - ); - let mut difference_bitmap = Bitmap::create(); - - for height in start_height..=end_height { - let accum_data = - lmdb_get::<_, BlockHeaderAccumulatedData>(&txn, &self.header_accumulated_data_db, &height)? - .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeader", - field: "height", - value: height.to_string(), - })?; - - result.extend( - lmdb_fetch_keys_starting_with::( - accum_data.hash.to_hex().as_str(), - &txn, - &self.utxos_db, - )? 
- .into_iter() - .skip(skip_amount) - .take(total_size - result.len()) - .map(|row| { - if deleted.contains(row.mmr_position) { - return PrunedOutput::Pruned { - output_hash: row.hash, - witness_hash: row.witness_hash, - }; - } - if let Some(output) = row.output { - PrunedOutput::NotPruned { output } - } else { - PrunedOutput::Pruned { - output_hash: row.hash, - witness_hash: row.witness_hash, - } - } - }), - ); - - // Builds a BitMap of the deleted UTXO MMR indexes that occurred at the current height - let diff_bitmap = self - .fetch_block_accumulated_data(&txn, height) - .or_not_found("BlockAccumulatedData", "height", height.to_string())? - .deleted() - .clone(); - difference_bitmap.or_inplace(&diff_bitmap); - - skip_amount = 0; - } - - difference_bitmap.run_optimize(); - Ok((result, difference_bitmap)) - } - fn fetch_output(&self, output_hash: &HashOutput) -> Result, ChainStorageError> { debug!(target: LOG_TARGET, "Fetch output: {}", output_hash.to_hex()); let txn = self.read_transaction()?; @@ -2192,7 +2031,7 @@ impl BlockchainBackend for LMDBDatabase { fn fetch_horizon_data(&self) -> Result, ChainStorageError> { let txn = self.read_transaction()?; - fetch_horizon_data(&txn, &self.metadata_db) + Ok(Some(fetch_horizon_data(&txn, &self.metadata_db)?)) } fn get_stats(&self) -> Result { @@ -2249,7 +2088,7 @@ fn fetch_chain_height(txn: &ConstTransaction<'_>, db: &Database) -> Result, db: &Database) -> Result { let k = MetadataKey::PrunedHeight; let val: Option = lmdb_get(txn, db, &k.as_u32())?; @@ -2258,13 +2097,18 @@ fn fetch_pruned_height(txn: &ConstTransaction<'_>, db: &Database) -> Result Ok(0), } } -// Fetches the best block hash from the provided metadata db. -fn fetch_horizon_data(txn: &ConstTransaction<'_>, db: &Database) -> Result, ChainStorageError> { + +/// Fetches the horizon data from the provided metadata db. 
+fn fetch_horizon_data(txn: &ConstTransaction<'_>, db: &Database) -> Result { let k = MetadataKey::HorizonData; let val: Option = lmdb_get(txn, db, &k.as_u32())?; match val { - Some(MetadataValue::HorizonData(data)) => Ok(Some(data)), - None => Ok(None), + Some(MetadataValue::HorizonData(data)) => Ok(data), + None => Err(ChainStorageError::ValueNotFound { + entity: "HorizonData", + field: "metadata", + value: "".to_string(), + }), Some(k) => Err(ChainStorageError::DataInconsistencyDetected { function: "fetch_horizon_data", details: format!("Received incorrect value {:?} for key horizon data", k), diff --git a/base_layer/core/src/lib.rs b/base_layer/core/src/lib.rs index 965a6b980c..1522d8420b 100644 --- a/base_layer/core/src/lib.rs +++ b/base_layer/core/src/lib.rs @@ -20,10 +20,10 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// #![cfg_attr(not(debug_assertions), deny(unused_variables))] -// #![cfg_attr(not(debug_assertions), deny(unused_imports))] -// #![cfg_attr(not(debug_assertions), deny(dead_code))] -// #![cfg_attr(not(debug_assertions), deny(unused_extern_crates))] +#![cfg_attr(not(debug_assertions), deny(unused_variables))] +#![cfg_attr(not(debug_assertions), deny(unused_imports))] +#![cfg_attr(not(debug_assertions), deny(dead_code))] +#![cfg_attr(not(debug_assertions), deny(unused_extern_crates))] #![deny(unused_must_use)] #![deny(unreachable_patterns)] #![deny(unknown_lints)] diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index 51f09fe0bd..3673f7e950 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -268,10 +268,6 @@ impl BlockchainBackend for TempDatabase { self.db.as_ref().unwrap().fetch_kernel_by_excess_sig(excess_sig) } - fn fetch_kernels_by_mmr_position(&self, start: u64, end: 
u64) -> Result, ChainStorageError> { - self.db.as_ref().unwrap().fetch_kernels_by_mmr_position(start, end) - } - fn fetch_utxos_in_block( &self, header_hash: &HashOutput, @@ -280,18 +276,6 @@ impl BlockchainBackend for TempDatabase { self.db.as_ref().unwrap().fetch_utxos_in_block(header_hash, deleted) } - fn fetch_utxos_by_mmr_position( - &self, - start: u64, - end: u64, - deleted: &Bitmap, - ) -> Result<(Vec, Bitmap), ChainStorageError> { - self.db - .as_ref() - .unwrap() - .fetch_utxos_by_mmr_position(start, end, deleted) - } - fn fetch_output(&self, output_hash: &HashOutput) -> Result, ChainStorageError> { self.db.as_ref().unwrap().fetch_output(output_hash) } diff --git a/base_layer/core/src/validation/error.rs b/base_layer/core/src/validation/error.rs index 369cf1831a..bc2f3bb0d5 100644 --- a/base_layer/core/src/validation/error.rs +++ b/base_layer/core/src/validation/error.rs @@ -57,7 +57,7 @@ pub enum ValidationError { InvalidAccountingBalance, #[error("Transaction contains already spent inputs")] ContainsSTxO, - #[error("Transaction contains already outputs that already exist")] + #[error("Transaction contains outputs that already exist")] ContainsTxO, #[error("Transaction contains an output commitment that already exists")] ContainsDuplicateUtxoCommitment, diff --git a/base_layer/core/tests/base_node_rpc.rs b/base_layer/core/tests/base_node_rpc.rs index 7643b5ca28..0a7ecdd856 100644 --- a/base_layer/core/tests/base_node_rpc.rs +++ b/base_layer/core/tests/base_node_rpc.rs @@ -303,8 +303,7 @@ async fn test_get_height_at_time() { let (_, service, base_node, request_mock, consensus_manager, block0, _utxo0, _temp_dir) = setup().await; let mut prev_block = block0.clone(); - let mut times = Vec::new(); - times.push(prev_block.header().timestamp); + let mut times = vec![prev_block.header().timestamp]; for _ in 0..10 { tokio::time::sleep(Duration::from_secs(2)).await; let new_block = base_node diff --git a/base_layer/key_manager/src/mnemonic.rs 
b/base_layer/key_manager/src/mnemonic.rs index 9338e82217..8df3f90734 100644 --- a/base_layer/key_manager/src/mnemonic.rs +++ b/base_layer/key_manager/src/mnemonic.rs @@ -350,7 +350,7 @@ mod test { "abandon".to_string(), "tipico".to_string(), ]; - assert_eq!(MnemonicLanguage::detect_language(&words2).is_err(), true); + assert!(MnemonicLanguage::detect_language(&words2).is_err()); // bounds check (last word is invalid) let words3 = vec![ @@ -360,7 +360,7 @@ mod test { "abandon".to_string(), "topazio".to_string(), ]; - assert_eq!(MnemonicLanguage::detect_language(&words3).is_err(), true); + assert!(MnemonicLanguage::detect_language(&words3).is_err()); // building up a word list: English/French + French -> French let mut words = Vec::with_capacity(3); diff --git a/base_layer/wallet/tests/wallet/mod.rs b/base_layer/wallet/tests/wallet/mod.rs index d54c307c01..c33481f3c2 100644 --- a/base_layer/wallet/tests/wallet/mod.rs +++ b/base_layer/wallet/tests/wallet/mod.rs @@ -69,7 +69,6 @@ use tari_wallet::{ handle::TransactionEvent, storage::sqlite_db::TransactionServiceSqliteDatabase, }, - utxo_scanner_service::utxo_scanning::UtxoScannerService, Wallet, WalletConfig, WalletSqlite, From 4b9a4a7c3d28cc8d36324c5347b466fa78592ca0 Mon Sep 17 00:00:00 2001 From: Stanimal Date: Thu, 25 Nov 2021 11:26:11 +0400 Subject: [PATCH 08/11] fix tests --- .../chain_storage/tests/blockchain_database.rs | 16 ++++++++++++---- .../src/transactions/transaction_entities/mod.rs | 4 ++-- .../transaction_entities/transaction_output.rs | 10 ++++++++-- .../transactions/transaction_protocol/sender.rs | 6 ++++-- 4 files changed, 26 insertions(+), 10 deletions(-) diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index bcaf0040dd..0d7365229d 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -67,7 +67,13 @@ fn 
add_many_chained_blocks( size: usize, db: &BlockchainDatabase, ) -> (Vec>, Vec) { - let mut prev_block = Arc::new(db.fetch_block(0).unwrap().try_into_block().unwrap()); + let last_header = db.fetch_last_header().unwrap(); + let mut prev_block = db + .fetch_block(last_header.height) + .unwrap() + .try_into_block() + .map(Arc::new) + .unwrap(); let mut blocks = Vec::with_capacity(size); let mut outputs = Vec::with_capacity(size); for _ in 1..=size as u64 { @@ -547,19 +553,21 @@ mod fetch_header_containing_kernel_mmr { let _ = add_many_chained_blocks(3, &db); let header = db.fetch_header_containing_kernel_mmr(num_genesis_kernels).unwrap(); + assert_eq!(header.height(), 0); + let header = db.fetch_header_containing_kernel_mmr(num_genesis_kernels + 1).unwrap(); assert_eq!(header.height(), 1); for i in 2..=3 { let header = db.fetch_header_containing_kernel_mmr(num_genesis_kernels + i).unwrap(); assert_eq!(header.height(), 2); } - for i in 4..=5 { + for i in 4..=6 { let header = db.fetch_header_containing_kernel_mmr(num_genesis_kernels + i).unwrap(); - assert_eq!(header.height(), i); + assert_eq!(header.height(), i - 1); } let err = db - .fetch_header_containing_kernel_mmr(num_genesis_kernels + 5 + 1) + .fetch_header_containing_kernel_mmr(num_genesis_kernels + 6 + 1) .unwrap_err(); matches!(err, ChainStorageError::ValueNotFound { .. 
}); } diff --git a/base_layer/core/src/transactions/transaction_entities/mod.rs b/base_layer/core/src/transactions/transaction_entities/mod.rs index 34a1d76771..d5f2201eaa 100644 --- a/base_layer/core/src/transactions/transaction_entities/mod.rs +++ b/base_layer/core/src/transactions/transaction_entities/mod.rs @@ -156,7 +156,7 @@ mod test { }); let script = unblinded_output1.script.clone(); let tx_output1 = unblinded_output1.as_transaction_output(&factories).unwrap(); - assert!(tx_output1.verify_range_proof(&factories.range_proof).unwrap()); + tx_output1.verify_range_proof(&factories.range_proof).unwrap(); let unblinded_output2 = test_params_2.create_unblinded_output(UtxoTestParams { value: (2u64.pow(32) + 1u64).into(), @@ -196,7 +196,7 @@ mod test { ) .unwrap(), ); - assert!(!tx_output3.verify_range_proof(&factories.range_proof).unwrap()); + assert!(tx_output3.verify_range_proof(&factories.range_proof).is_ok()); } #[test] diff --git a/base_layer/core/src/transactions/transaction_entities/transaction_output.rs b/base_layer/core/src/transactions/transaction_entities/transaction_output.rs index 204278b27a..a9f5a290eb 100644 --- a/base_layer/core/src/transactions/transaction_entities/transaction_output.rs +++ b/base_layer/core/src/transactions/transaction_entities/transaction_output.rs @@ -117,8 +117,14 @@ impl TransactionOutput { } /// Verify that range proof is valid - pub fn verify_range_proof(&self, prover: &RangeProofService) -> Result { - Ok(prover.verify(&self.proof.0, &self.commitment)) + pub fn verify_range_proof(&self, prover: &RangeProofService) -> Result<(), TransactionError> { + if prover.verify(&self.proof.0, &self.commitment) { + Ok(()) + } else { + Err(TransactionError::ValidationError( + "Recipient output range proof failed to verify".to_string(), + )) + } } /// Verify that the metadata signature is valid diff --git a/base_layer/core/src/transactions/transaction_protocol/sender.rs b/base_layer/core/src/transactions/transaction_protocol/sender.rs 
index cf6c4eb388..3961ba05cc 100644 --- a/base_layer/core/src/transactions/transaction_protocol/sender.rs +++ b/base_layer/core/src/transactions/transaction_protocol/sender.rs @@ -752,7 +752,7 @@ mod test { crypto_factories::CryptoFactories, tari_amount::*, test_helpers::{create_test_input, create_unblinded_output, TestParams}, - transaction_entities::{KernelFeatures, OutputFeatures, TransactionOutput}, + transaction_entities::{KernelFeatures, OutputFeatures, TransactionError, TransactionOutput}, transaction_protocol::{ sender::SenderTransactionProtocol, single_receiver::SingleReceiverTransactionProtocol, @@ -1045,7 +1045,9 @@ mod test { Ok(_) => panic!("Range proof should have failed to verify"), Err(e) => assert_eq!( e, - TransactionProtocolError::ValidationError("Recipient output range proof failed to verify".into()) + TransactionProtocolError::TransactionBuildError(TransactionError::ValidationError( + "Recipient output range proof failed to verify".into() + )) ), } } From 4b1e91332f1b7f3562a13314967d4479054fe12b Mon Sep 17 00:00:00 2001 From: Stanimal Date: Fri, 26 Nov 2021 11:45:27 +0400 Subject: [PATCH 09/11] prune outputs at end of horizon sync --- .../horizon_state_synchronization.rs | 137 +++++++++--------- .../src/base_node/sync/rpc/sync_utxos_task.rs | 2 +- .../core/src/blocks/accumulated_data.rs | 23 +-- base_layer/core/src/chain_storage/async_db.rs | 7 +- .../src/chain_storage/blockchain_backend.rs | 2 +- .../src/chain_storage/blockchain_database.rs | 43 +----- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 19 +-- .../core/src/test_helpers/blockchain.rs | 2 +- comms/Cargo.toml | 2 +- 9 files changed, 101 insertions(+), 136 deletions(-) diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs index 2013ce3264..287d562c91 100644 --- 
a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs @@ -71,8 +71,7 @@ pub struct HorizonStateSynchronization<'a, B: BlockchainBackend> { prover: Arc, num_kernels: u64, num_outputs: u64, - kernel_sum: Commitment, - utxo_sum: Commitment, + full_bitmap: Option, } impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { @@ -89,8 +88,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { prover, num_kernels: 0, num_outputs: 0, - kernel_sum: Default::default(), - utxo_sum: Default::default(), + full_bitmap: None, } } @@ -135,8 +133,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { client: &mut rpc::BaseNodeSyncRpcClient, to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { - // debug!(target: LOG_TARGET, "Initializing"); - // self.initialize().await?; + debug!(target: LOG_TARGET, "Initializing"); + self.initialize().await?; debug!(target: LOG_TARGET, "Synchronizing kernels"); self.synchronize_kernels(client, to_header).await?; debug!(target: LOG_TARGET, "Synchronizing outputs"); @@ -144,30 +142,20 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { Ok(()) } - // async fn initialize(&mut self) -> Result<(), HorizonSyncError> { - // let db = self.db(); - // let local_metadata = db.get_chain_metadata().await?; - // - // let new_prune_height = cmp::min(local_metadata.height_of_longest_chain(), self.horizon_sync_height); - // if local_metadata.pruned_height() < new_prune_height { - // debug!(target: LOG_TARGET, "Pruning block chain to height {}", new_prune_height); - // db.prune_to_height(new_prune_height).await?; - // } - // - // // prune_to_height updates horizon data - // let horizon_data = db.fetch_horizon_data().await?; - // - // debug!( - // target: LOG_TARGET, - // "Loaded from 
horizon data utxo_sum = {}, kernel_sum = {}", - // horizon_data.utxo_sum().to_hex(), - // horizon_data.kernel_sum().to_hex(), - // ); - // self.utxo_sum = horizon_data.utxo_sum().clone(); - // self.kernel_sum = horizon_data.kernel_sum().clone(); - // - // Ok(()) - // } + async fn initialize(&mut self) -> Result<(), HorizonSyncError> { + let db = self.db(); + let local_metadata = db.get_chain_metadata().await?; + + let new_prune_height = cmp::min(local_metadata.height_of_longest_chain(), self.horizon_sync_height); + if local_metadata.pruned_height() < new_prune_height { + debug!(target: LOG_TARGET, "Pruning block chain to height {}", new_prune_height); + db.prune_to_height(new_prune_height + 1).await?; + } + + self.full_bitmap = Some(db.fetch_deleted_bitmap_at_tip().await?.into_bitmap()); + + Ok(()) + } async fn synchronize_kernels( &mut self, @@ -175,7 +163,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { let local_num_kernels = self.db().fetch_mmr_size(MmrTree::Kernel).await?; - let metadata = self.db().get_chain_metadata().await?; let remote_num_kernels = to_header.kernel_mmr_size; self.num_kernels = remote_num_kernels; @@ -235,7 +222,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { .map_err(HorizonSyncError::InvalidKernelSignature)?; kernel_hashes.push(kernel.hash()); - self.kernel_sum = &self.kernel_sum + &kernel.excess; txn.insert_kernel_via_horizon_sync(kernel, current_header.hash().clone(), mmr_position as u32); if mmr_position == current_header.header().kernel_mmr_size - 1 { @@ -276,13 +262,10 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { txn.update_block_accumulated_data_via_horizon_sync( current_header.hash().clone(), UpdateBlockAccumulatedData { - kernel_sum: Some(self.kernel_sum.clone()), kernel_hash_set: Some(kernel_hash_set), ..Default::default() }, ); - debug!(target: LOG_TARGET, "Setting 
kernel sum = {}", self.kernel_sum.to_hex()); - txn.set_pruned_height(metadata.pruned_height(), self.kernel_sum.clone(), self.utxo_sum.clone()); txn.commit().await?; debug!( @@ -323,8 +306,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { ) -> Result<(), HorizonSyncError> { let local_num_outputs = self.db().fetch_mmr_size(MmrTree::Utxo).await?; - let metadata = self.db().get_chain_metadata().await?; - let remote_num_outputs = to_header.output_mmr_size; self.num_outputs = remote_num_outputs; @@ -390,7 +371,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { .fetch_block_accumulated_data(current_header.header().prev_hash.clone()) .await?; let (_, output_pruned_set, witness_pruned_set, _) = block_data.dissolve(); - let mut full_bitmap = self.db().fetch_deleted_bitmap_at_tip().await?.into_bitmap(); let mut output_mmr = MerkleMountainRange::::new(output_pruned_set); let mut witness_mmr = MerkleMountainRange::::new(witness_pruned_set); @@ -425,7 +405,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { output_mmr.push(output.hash())?; witness_mmr.push(output.witness_hash())?; - self.utxo_sum = &self.utxo_sum + &output.commitment; txn.insert_output_via_horizon_sync( output, @@ -489,11 +468,12 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { // Merge the differences into the final bitmap so that we can commit to the entire spend state // in the output MMR - full_bitmap.or_inplace(&diff_bitmap); - full_bitmap.run_optimize(); + let bitmap = self.full_bitmap_mut(); + bitmap.or_inplace(&diff_bitmap); + bitmap.run_optimize(); let pruned_output_set = output_mmr.get_pruned_hash_set()?; - let output_mmr = MutableMmr::::new(pruned_output_set.clone(), full_bitmap.clone())?; + let output_mmr = MutableMmr::::new(pruned_output_set.clone(), bitmap.clone())?; let mmr_root = output_mmr.get_merkle_root()?; if mmr_root != current_header.header().output_mr { @@ -523,14 
+503,12 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { txn.update_block_accumulated_data_via_horizon_sync( current_header.hash().clone(), UpdateBlockAccumulatedData { - utxo_sum: Some(self.utxo_sum.clone()), utxo_hash_set: Some(pruned_output_set), witness_hash_set: Some(witness_hash_set), deleted_diff: Some(diff_bitmap.into()), ..Default::default() }, ); - txn.set_pruned_height(metadata.pruned_height(), self.kernel_sum.clone(), self.utxo_sum.clone()); txn.commit().await?; debug!( @@ -588,6 +566,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { "Sync node did not send all utxos requested".to_string(), )); } + Ok(()) } @@ -632,7 +611,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { ))); let header = self.db().fetch_chain_header(self.horizon_sync_height).await?; - // TODO: Use cumulative kernel and utxo sums let (calc_utxo_sum, calc_kernel_sum) = self.calculate_commitment_sums(&header).await?; self.shared @@ -659,16 +637,29 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { header.accumulated_data().total_accumulated_difficulty, metadata.best_block().clone(), ) - .set_pruned_height(header.height(), self.kernel_sum.clone(), self.utxo_sum.clone()) + .set_pruned_height(header.height(), calc_kernel_sum, calc_utxo_sum) .commit() .await?; Ok(()) } + fn take_final_bitmap(&mut self) -> Arc { + self.full_bitmap + .take() + .map(Arc::new) + .expect("take_final_bitmap called before initialize") + } + + fn full_bitmap_mut(&mut self) -> &mut Bitmap { + self.full_bitmap + .as_mut() + .expect("full_bitmap_mut called before initialize") + } + /// (UTXO sum, Kernel sum) async fn calculate_commitment_sums( - &self, + &mut self, header: &ChainHeader, ) -> Result<(Commitment, Commitment), HorizonSyncError> { let mut pruned_utxo_sum = HomomorphicCommitment::default(); @@ -676,12 +667,12 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, 
B> { let mut prev_mmr = 0; let mut prev_kernel_mmr = 0; - let bitmap = Arc::new( - self.db() - .fetch_complete_deleted_bitmap_at(header.hash().clone()) - .await? - .into_bitmap(), - ); + + let bitmap = self.take_final_bitmap(); + let mut txn = self.db().write_transaction(); + let mut utxo_mmr_position = 0; + let mut prune_positions = vec![]; + for h in 0..=header.height() { let curr_header = self.db().fetch_chain_header(h).await?; @@ -693,10 +684,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { prev_mmr, curr_header.header().output_mmr_size - 1 ); - let (utxos, _) = self - .db() - .fetch_utxos_in_block(curr_header.hash().clone(), bitmap.clone()) - .await?; + let (utxos, _) = self.db().fetch_utxos_in_block(curr_header.hash().clone(), None).await?; trace!( target: LOG_TARGET, "Fetching kernels from db: height:{}, header.kernel_mmr:{}, prev_mmr:{}, end:{}", @@ -711,12 +699,22 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { for u in utxos { match u { PrunedOutput::NotPruned { output } => { - pruned_utxo_sum = &output.commitment + &pruned_utxo_sum; + if bitmap.contains(utxo_mmr_position) { + debug!( + target: LOG_TARGET, + "Found output that needs pruning at height: {} position: {}", h, utxo_mmr_position + ); + prune_positions.push(utxo_mmr_position); + prune_counter += 1; + } else { + pruned_utxo_sum = &output.commitment + &pruned_utxo_sum; + } }, _ => { prune_counter += 1; }, } + utxo_mmr_position += 1; } if prune_counter > 0 { trace!(target: LOG_TARGET, "Pruned {} outputs", prune_counter); @@ -730,14 +728,23 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { } prev_kernel_mmr = curr_header.header().kernel_mmr_size; - trace!( - target: LOG_TARGET, - "Height: {} Kernel sum:{:?} Pruned UTXO sum: {:?}", - h, - pruned_kernel_sum, - pruned_utxo_sum - ); + if h % 1000 == 0 { + debug!( + target: LOG_TARGET, + "Final Validation: {:.2}% complete. 
Height: {}, mmr_position: {} ", + (h as f32 / header.height() as f32) * 100.0, + h, + utxo_mmr_position, + ); + } } + + if !prune_positions.is_empty() { + debug!(target: LOG_TARGET, "Pruning {} spent outputs", prune_positions.len()); + txn.prune_output_at_positions(prune_positions); + txn.commit().await?; + } + Ok((pruned_utxo_sum, pruned_kernel_sum)) } diff --git a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs index eb13ab4941..aa27f064f0 100644 --- a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs +++ b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs @@ -166,7 +166,7 @@ where B: BlockchainBackend + 'static let (utxos, deleted_diff) = self .db - .fetch_utxos_in_block(current_header.hash(), bitmap.clone()) + .fetch_utxos_in_block(current_header.hash(), Some(bitmap.clone())) .await .map_err(RpcStatus::log_internal_error(LOG_TARGET))?; debug!( diff --git a/base_layer/core/src/blocks/accumulated_data.rs b/base_layer/core/src/blocks/accumulated_data.rs index 7ba435dfd0..88c45f9504 100644 --- a/base_layer/core/src/blocks/accumulated_data.rs +++ b/base_layer/core/src/blocks/accumulated_data.rs @@ -49,14 +49,13 @@ use tari_mmr::{pruned_hashset::PrunedHashSet, ArrayLike}; const LOG_TARGET: &str = "c::bn::acc_data"; -#[derive(Debug, Serialize, Deserialize, Clone)] +#[derive(Debug, Serialize, Deserialize)] pub struct BlockAccumulatedData { pub(crate) kernels: PrunedHashSet, pub(crate) outputs: PrunedHashSet, pub(crate) witness: PrunedHashSet, pub(crate) deleted: DeletedBitmap, - pub(crate) cumulative_kernel_sum: Commitment, - pub(crate) cumulative_utxo_sum: Commitment, + pub(crate) kernel_sum: Commitment, } impl BlockAccumulatedData { @@ -65,16 +64,14 @@ impl BlockAccumulatedData { outputs: PrunedHashSet, witness: PrunedHashSet, deleted: Bitmap, - cumulative_kernel_sum: Commitment, - cumulative_utxo_sum: Commitment, + total_kernel_sum: Commitment, ) -> Self { Self { kernels, outputs, 
witness, deleted: DeletedBitmap { deleted }, - cumulative_kernel_sum, - cumulative_utxo_sum, + kernel_sum: total_kernel_sum, } } @@ -91,12 +88,8 @@ impl BlockAccumulatedData { (self.kernels, self.outputs, self.witness, self.deleted.deleted) } - pub fn cumulative_kernel_sum(&self) -> &Commitment { - &self.cumulative_kernel_sum - } - - pub fn cumulative_utxo_sum(&self) -> &Commitment { - &self.cumulative_utxo_sum + pub fn kernel_sum(&self) -> &Commitment { + &self.kernel_sum } } @@ -109,8 +102,7 @@ impl Default for BlockAccumulatedData { deleted: Bitmap::create(), }, witness: Default::default(), - cumulative_kernel_sum: Default::default(), - cumulative_utxo_sum: Default::default(), + kernel_sum: Default::default(), } } } @@ -134,7 +126,6 @@ pub struct UpdateBlockAccumulatedData { pub utxo_hash_set: Option, pub witness_hash_set: Option, pub deleted_diff: Option, - pub utxo_sum: Option, pub kernel_sum: Option, } diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index 3d5c61d3c2..4f4de6678b 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -166,7 +166,7 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_utxos_and_mined_info(hashes: Vec) -> Vec>, "fetch_utxos_and_mined_info"); - make_async_fn!(fetch_utxos_in_block(hash: HashOutput, deleted: Arc) -> (Vec, Bitmap), "fetch_utxos_in_block"); + make_async_fn!(fetch_utxos_in_block(hash: HashOutput, deleted: Option>) -> (Vec, Bitmap), "fetch_utxos_in_block"); //---------------------------------- Kernel --------------------------------------------// make_async_fn!(fetch_kernel_by_excess_sig(excess_sig: Signature) -> Option<(TransactionKernel, HashOutput)>, "fetch_kernel_by_excess_sig"); @@ -367,6 +367,11 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> { self } + pub fn prune_output_at_positions(&mut self, positions: Vec) -> &mut Self { + 
self.transaction.prune_outputs_at_positions(positions); + self + } + pub async fn commit(&mut self) -> Result<(), ChainStorageError> { let transaction = mem::take(&mut self.transaction); self.db.write(transaction).await diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index e89b8220f2..ede1ca87c2 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -104,7 +104,7 @@ pub trait BlockchainBackend: Send + Sync { fn fetch_utxos_in_block( &self, header_hash: &HashOutput, - deleted: &Bitmap, + deleted: Option<&Bitmap>, ) -> Result<(Vec, Bitmap), ChainStorageError>; /// Fetch a specific output. Returns the output and the leaf index in the output MMR diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 116231350e..07c0212feb 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -215,22 +215,10 @@ where B: BlockchainBackend let genesis_block = Arc::new(blockchain_db.consensus_manager.get_genesis_block()); blockchain_db.insert_block(genesis_block.clone())?; let mut txn = DbTransaction::new(); - let utxo_sum = genesis_block - .block() - .body - .outputs() - .iter() - .map(|k| &k.commitment) - .sum::(); - let kernel_sum = genesis_block - .block() - .body - .kernels() - .iter() - .map(|k| &k.excess) - .sum::(); + let body = &genesis_block.block().body; + let utxo_sum = body.outputs().iter().map(|k| &k.commitment).sum::(); + let kernel_sum = body.kernels().iter().map(|k| &k.excess).sum::(); txn.update_block_accumulated_data(genesis_block.hash().clone(), UpdateBlockAccumulatedData { - utxo_sum: Some(utxo_sum.clone()), kernel_sum: Some(kernel_sum.clone()), ..Default::default() }); @@ -392,10 +380,10 @@ where B: BlockchainBackend pub fn fetch_utxos_in_block( &self, 
hash: HashOutput, - deleted: Arc, + deleted: Option>, ) -> Result<(Vec, Bitmap), ChainStorageError> { let db = self.db_read_access()?; - db.fetch_utxos_in_block(&hash, &deleted) + db.fetch_utxos_in_block(&hash, deleted.as_deref()) } /// Returns the block header at the given block height. @@ -594,10 +582,7 @@ where B: BlockchainBackend /// Returns the sum of all kernels pub fn fetch_kernel_commitment_sum(&self, at_hash: &HashOutput) -> Result { - Ok(self - .fetch_block_accumulated_data(at_hash.clone())? - .cumulative_kernel_sum() - .clone()) + Ok(self.fetch_block_accumulated_data(at_hash.clone())?.kernel_sum().clone()) } /// Returns `n` hashes from height _h - offset_ where _h_ is the tip header height back to `h - n - offset`. @@ -2152,22 +2137,6 @@ fn prune_to_height(db: &mut T, target_horizon_height: u64) txn.delete_all_inputs_in_block(header.hash().clone()); } - txn.set_pruned_height( - target_horizon_height, - last_block.cumulative_kernel_sum().clone(), - last_block.cumulative_utxo_sum().clone(), - ); - // If we prune to the tip, we cannot provide any full blocks - // if metadata.height_of_longest_chain() == target_horizon_height - 1 { - // let genesis = db.fetch_chain_header_by_height(0)?; - // txn.set_best_block( - // 0, - // genesis.hash().clone(), - // genesis.accumulated_data().total_accumulated_difficulty, - // vec![0; BLOCK_HASH_LENGTH], - // ); - // } - db.write(txn)?; Ok(()) } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 291714c4de..2e5d545379 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -1041,19 +1041,18 @@ impl LMDBDatabase { })? }; + let mut total_kernel_sum = Commitment::default(); let BlockAccumulatedData { kernels: pruned_kernel_set, outputs: pruned_output_set, witness: pruned_proof_set, - cumulative_kernel_sum: mut kernel_sum, - cumulative_utxo_sum: mut utxo_sum, .. 
} = data; let mut kernel_mmr = MerkleMountainRange::::new(pruned_kernel_set); for kernel in kernels { - kernel_sum = &kernel_sum + &kernel.excess; + total_kernel_sum = &total_kernel_sum + &kernel.excess; let pos = kernel_mmr.push(kernel.hash())?; trace!( target: LOG_TARGET, @@ -1066,7 +1065,6 @@ impl LMDBDatabase { let mut output_mmr = MutableMmr::::new(pruned_output_set, Bitmap::create())?; let mut witness_mmr = MerkleMountainRange::::new(pruned_proof_set); for output in outputs { - utxo_sum = &utxo_sum + &output.commitment; output_mmr.push(output.hash())?; witness_mmr.push(output.witness_hash())?; debug!(target: LOG_TARGET, "Inserting output `{}`", output.commitment.to_hex()); @@ -1080,7 +1078,6 @@ impl LMDBDatabase { } for input in inputs { - utxo_sum = &utxo_sum - &input.commitment; let index = self .fetch_mmr_leaf_index(&**txn, MmrTree::Utxo, &input.output_hash())? .ok_or(ChainStorageError::UnspendableInput)?; @@ -1115,8 +1112,7 @@ impl LMDBDatabase { output_mmr.mmr().get_pruned_hash_set()?, witness_mmr.get_pruned_hash_set()?, deleted_at_current_height, - kernel_sum, - utxo_sum, + total_kernel_sum, ), )?; @@ -1159,11 +1155,8 @@ impl LMDBDatabase { if let Some(deleted_diff) = values.deleted_diff { block_accum_data.deleted = deleted_diff; } - if let Some(utxo_sum) = values.utxo_sum { - block_accum_data.cumulative_utxo_sum = utxo_sum; - } if let Some(kernel_sum) = values.kernel_sum { - block_accum_data.cumulative_kernel_sum = kernel_sum; + block_accum_data.kernel_sum = kernel_sum; } if let Some(kernel_hash_set) = values.kernel_hash_set { block_accum_data.kernels = kernel_hash_set; @@ -1639,7 +1632,7 @@ impl BlockchainBackend for LMDBDatabase { fn fetch_utxos_in_block( &self, header_hash: &HashOutput, - deleted: &Bitmap, + deleted: Option<&Bitmap>, ) -> Result<(Vec, Bitmap), ChainStorageError> { let txn = self.read_transaction()?; @@ -1650,7 +1643,7 @@ impl BlockchainBackend for LMDBDatabase { )? 
.into_iter() .map(|row| { - if deleted.contains(row.mmr_position) { + if deleted.map(|b| b.contains(row.mmr_position)).unwrap_or(false) { return PrunedOutput::Pruned { output_hash: row.hash, witness_hash: row.witness_hash, diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index 3673f7e950..f18b55ee65 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -271,7 +271,7 @@ impl BlockchainBackend for TempDatabase { fn fetch_utxos_in_block( &self, header_hash: &HashOutput, - deleted: &Bitmap, + deleted: Option<&Bitmap>, ) -> Result<(Vec, Bitmap), ChainStorageError> { self.db.as_ref().unwrap().fetch_utxos_in_block(header_hash, deleted) } diff --git a/comms/Cargo.toml b/comms/Cargo.toml index 861ab55179..76b6ce3fa0 100644 --- a/comms/Cargo.toml +++ b/comms/Cargo.toml @@ -69,4 +69,4 @@ tari_common = { version = "^0.21", path = "../common", features = ["build"] } c_integration = [] avx2 = ["tari_crypto/avx2"] metrics = [] -rpc = ["tower-make"] +rpc = ["tower-make", "tower/util"] From bde5ffdbfa586df4d1ec483aa43604aee69a6dae Mon Sep 17 00:00:00 2001 From: Stanimal Date: Fri, 26 Nov 2021 12:46:16 +0400 Subject: [PATCH 10/11] handle edge case --- .../horizon_state_synchronization.rs | 5 +++-- .../src/base_node/sync/rpc/sync_utxos_task.rs | 6 +++++- base_layer/core/src/chain_storage/async_db.rs | 9 +++++++-- .../src/chain_storage/blockchain_database.rs | 5 ++++- .../core/src/chain_storage/db_transaction.rs | 20 ++++++++++++------- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 10 ++++------ .../transactions/transaction_entities/mod.rs | 2 +- .../chain_storage_tests/chain_storage.rs | 2 +- .../features/Propagation.feature | 2 +- 9 files changed, 39 insertions(+), 22 deletions(-) diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs 
b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs index 287d562c91..61f477643f 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs @@ -149,7 +149,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let new_prune_height = cmp::min(local_metadata.height_of_longest_chain(), self.horizon_sync_height); if local_metadata.pruned_height() < new_prune_height { debug!(target: LOG_TARGET, "Pruning block chain to height {}", new_prune_height); - db.prune_to_height(new_prune_height + 1).await?; + db.prune_to_height(new_prune_height).await?; } self.full_bitmap = Some(db.fetch_deleted_bitmap_at_tip().await?.into_bitmap()); @@ -637,7 +637,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { header.accumulated_data().total_accumulated_difficulty, metadata.best_block().clone(), ) - .set_pruned_height(header.height(), calc_kernel_sum, calc_utxo_sum) + .set_pruned_height(header.height()) + .set_horizon_data(calc_kernel_sum, calc_utxo_sum) .commit() .await?; diff --git a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs index aa27f064f0..48da40edc3 100644 --- a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs +++ b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs @@ -145,7 +145,7 @@ where B: BlockchainBackend + 'static include_pruned_utxos, include_deleted_bitmaps ); - while current_header.height <= end_header.height { + loop { let timer = Instant::now(); let current_header_hash = current_header.hash(); @@ -223,6 +223,10 @@ where B: BlockchainBackend + 'static ); prev_utxo_mmr_size = current_header.output_mmr_size; + if current_header.height + 1 > end_header.height { + break; + } + 
current_header = self .db .fetch_header(current_header.height + 1) diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index 4f4de6678b..116e87e8dc 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -302,8 +302,13 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> { self } - pub fn set_pruned_height(&mut self, height: u64, kernel_sum: Commitment, utxo_sum: Commitment) -> &mut Self { - self.transaction.set_pruned_height(height, kernel_sum, utxo_sum); + pub fn set_pruned_height(&mut self, height: u64) -> &mut Self { + self.transaction.set_pruned_height(height); + self + } + + pub fn set_horizon_data(&mut self, kernel_sum: Commitment, utxo_sum: Commitment) -> &mut Self { + self.transaction.set_horizon_data(kernel_sum, utxo_sum); self } diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 07c0212feb..07cd9c20b3 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -222,7 +222,8 @@ where B: BlockchainBackend kernel_sum: Some(kernel_sum.clone()), ..Default::default() }); - txn.set_pruned_height(0, kernel_sum, utxo_sum); + txn.set_pruned_height(0); + txn.set_horizon_data(kernel_sum, utxo_sum); blockchain_db.write(txn)?; blockchain_db.store_pruning_horizon(config.pruning_horizon)?; } @@ -2137,6 +2138,8 @@ fn prune_to_height(db: &mut T, target_horizon_height: u64) txn.delete_all_inputs_in_block(header.hash().clone()); } + txn.set_pruned_height(target_horizon_height); + db.write(txn)?; Ok(()) } diff --git a/base_layer/core/src/chain_storage/db_transaction.rs b/base_layer/core/src/chain_storage/db_transaction.rs index c86dc21d77..bfdd480033 100644 --- a/base_layer/core/src/chain_storage/db_transaction.rs +++ b/base_layer/core/src/chain_storage/db_transaction.rs @@ -34,6 +34,7 @@ 
use std::{ sync::Arc, }; +use crate::chain_storage::HorizonData; use croaring::Bitmap; use tari_common_types::types::{BlockHash, Commitment, HashOutput}; use tari_crypto::tari_utilities::{ @@ -241,11 +242,14 @@ impl DbTransaction { self } - pub fn set_pruned_height(&mut self, height: u64, kernel_sum: Commitment, utxo_sum: Commitment) -> &mut Self { - self.operations.push(WriteOperation::SetPrunedHeight { - height, - kernel_sum, - utxo_sum, + pub fn set_pruned_height(&mut self, height: u64) -> &mut Self { + self.operations.push(WriteOperation::SetPrunedHeight { height }); + self + } + + pub fn set_horizon_data(&mut self, kernel_sum: Commitment, utxo_sum: Commitment) -> &mut Self { + self.operations.push(WriteOperation::SetHorizonData { + horizon_data: HorizonData::new(kernel_sum, utxo_sum), }); self } @@ -320,8 +324,9 @@ pub enum WriteOperation { SetPruningHorizonConfig(u64), SetPrunedHeight { height: u64, - kernel_sum: Commitment, - utxo_sum: Commitment, + }, + SetHorizonData { + horizon_data: HorizonData, }, } @@ -409,6 +414,7 @@ impl fmt::Display for WriteOperation { SetPrunedHeight { height, .. } => write!(f, "Set pruned height to {}", height), DeleteHeader(height) => write!(f, "Delete header at height: {}", height), DeleteOrphan(hash) => write!(f, "Delete orphan with hash: {}", hash.to_hex()), + SetHorizonData { .. 
} => write!(f, "Set horizon data"), } } } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 2e5d545379..7fd3b421cd 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -383,20 +383,18 @@ impl LMDBDatabase { MetadataValue::PruningHorizon(*pruning_horizon), )?; }, - SetPrunedHeight { - height, - kernel_sum, - utxo_sum, - } => { + SetPrunedHeight { height } => { self.set_metadata( &write_txn, MetadataKey::PrunedHeight, MetadataValue::PrunedHeight(*height), )?; + }, + SetHorizonData { horizon_data } => { self.set_metadata( &write_txn, MetadataKey::HorizonData, - MetadataValue::HorizonData(HorizonData::new(kernel_sum.clone(), utxo_sum.clone())), + MetadataValue::HorizonData(horizon_data.clone()), )?; }, } diff --git a/base_layer/core/src/transactions/transaction_entities/mod.rs b/base_layer/core/src/transactions/transaction_entities/mod.rs index d5f2201eaa..396f9cb1c7 100644 --- a/base_layer/core/src/transactions/transaction_entities/mod.rs +++ b/base_layer/core/src/transactions/transaction_entities/mod.rs @@ -196,7 +196,7 @@ mod test { ) .unwrap(), ); - assert!(tx_output3.verify_range_proof(&factories.range_proof).is_ok()); + tx_output3.verify_range_proof(&factories.range_proof).unwrap_err(); } #[test] diff --git a/base_layer/core/tests/chain_storage_tests/chain_storage.rs b/base_layer/core/tests/chain_storage_tests/chain_storage.rs index ef6a257488..b58bba6766 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_storage.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_storage.rs @@ -1718,7 +1718,7 @@ fn pruned_mode_cleanup_and_fetch_block() { let _block5 = append_block(&store, &block4, vec![], &consensus_manager, 1.into()).unwrap(); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.pruned_height(), 1); + assert_eq!(metadata.pruned_height(), 2); 
assert_eq!(metadata.height_of_longest_chain(), 5); assert_eq!(metadata.pruning_horizon(), 3); } diff --git a/integration_tests/features/Propagation.feature b/integration_tests/features/Propagation.feature index 3c1f5ef8fb..54f7cfaab5 100644 --- a/integration_tests/features/Propagation.feature +++ b/integration_tests/features/Propagation.feature @@ -101,4 +101,4 @@ Feature: Block Propagation Then TX1 is in the MINED of all nodes When I mine 17 blocks on SENDER Then all nodes are on the same chain at height 21 - Then node PNODE1 has a pruned height of 15 + Then node PNODE1 has a pruned height of 16 From ce99674f0b8d8e7e1c91459f1b5fdb923f123b8d Mon Sep 17 00:00:00 2001 From: Stanimal Date: Fri, 26 Nov 2021 16:43:40 +0400 Subject: [PATCH 11/11] minor debug code cleanup --- .../state_machine_service/state_machine.rs | 17 +++-------------- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 2 -- 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/base_layer/core/src/base_node/state_machine_service/state_machine.rs b/base_layer/core/src/base_node/state_machine_service/state_machine.rs index 35a176a4ca..fb364fc797 100644 --- a/base_layer/core/src/base_node/state_machine_service/state_machine.rs +++ b/base_layer/core/src/base_node/state_machine_service/state_machine.rs @@ -36,13 +36,10 @@ use crate::{ use futures::{future, future::Either}; use log::*; use randomx_rs::RandomXFlag; -use std::{future::Future, sync::Arc, time::Duration}; +use std::{future::Future, sync::Arc}; use tari_comms::{connectivity::ConnectivityRequester, PeerManager}; use tari_shutdown::ShutdownSignal; -use tokio::{ - sync::{broadcast, watch}, - time, -}; +use tokio::sync::{broadcast, watch}; const LOG_TARGET: &str = "c::bn::base_node"; @@ -223,7 +220,7 @@ impl BaseNodeStateMachine { // Get the next `StateEvent`, returning a `UserQuit` state event if the interrupt signal is triggered let mut mdc = vec![]; log_mdc::iter(|k, v| mdc.push((k.to_owned(), v.to_owned()))); - let next_event = 
select_next_state_event(delayed(interrupt_signal), next_state_future).await; + let next_event = select_next_state_event(interrupt_signal, next_state_future).await; log_mdc::extend(mdc); // Publish the event on the event bus let _ = self.event_publisher.send(Arc::new(next_event.clone())); @@ -275,11 +272,3 @@ where Either::Right((state, _)) => state, } } - -async fn delayed(fut: F) -> R -where F: Future { - let ret = fut.await; - error!(target: LOG_TARGET, "SLEEEPIN",); - time::sleep(Duration::from_secs(100)).await; - ret -} diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 7fd3b421cd..6a8908ad75 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -838,7 +838,6 @@ impl LMDBDatabase { bitmap.remove(block_accum_data.deleted())?; bitmap.finish()?; - info!(target: LOG_TARGET, "delete accum {}", height); lmdb_delete( write_txn, &self.block_accumulated_data_db, @@ -1124,7 +1123,6 @@ impl LMDBDatabase { header_height: u64, data: &BlockAccumulatedData, ) -> Result<(), ChainStorageError> { - info!(target: LOG_TARGET, "insert accum {}", data); lmdb_insert( txn, &self.block_accumulated_data_db,