From fd1692cfa72e3ac50e66e9cdc3c97c705880ca1d Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Tue, 1 Feb 2022 13:41:49 +0200 Subject: [PATCH 01/20] chore: remove debug info from get-mempool-state (#3782) Description --- Cleans up some debug info in `get-mempool-state` command Motivation and Context --- Debug info made `get-mempool-state` basically unreadable when txns had many inputs How Has This Been Tested? --- Manually --- base_layer/core/src/mempool/mod.rs | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/base_layer/core/src/mempool/mod.rs b/base_layer/core/src/mempool/mod.rs index 286cf119d0..9b00310186 100644 --- a/base_layer/core/src/mempool/mod.rs +++ b/base_layer/core/src/mempool/mod.rs @@ -110,7 +110,7 @@ impl Display for StateResponse { for tx in &self.unconfirmed_pool { writeln!( fmt, - " {} Fee: {}, Outputs: {}, Kernels: {}, Inputs: {}, metadata: {} bytes, {}", + " {} Fee: {}, Outputs: {}, Kernels: {}, Inputs: {}, metadata: {} bytes", tx.first_kernel_excess_sig() .map(|sig| sig.get_signature().to_hex()) .unwrap_or_else(|| "N/A".to_string()), @@ -119,22 +119,6 @@ impl Display for StateResponse { tx.body.kernels().len(), tx.body.inputs().len(), tx.body.sum_metadata_size(), - tx.body - .inputs() - .iter() - .map(|i| { - let sopk = i - .sender_offset_public_key() - .map(|s| s.to_hex()) - .unwrap_or_else(|_| "".to_string()); - let commit = i - .commitment() - .map(|s| s.to_hex()) - .unwrap_or_else(|_| "".to_string()); - format!("c: {}, sopk: {}", commit, sopk) - }) - .collect::<Vec<_>>() - .join(", "), )?; } writeln!(fmt, "--- Reorg Pool ---")?; From 96a1e4ec144dc17190f396f94ec25c62fb142ce3 Mon Sep 17 00:00:00 2001 From: Martin Stefcek <35243812+Cifko@users.noreply.github.com> Date: Tue, 1 Feb 2022 16:18:47 +0100 Subject: [PATCH 02/20] feat: add logging and config to collectibles (#3781) Description --- Added config and logging to tari collectibles. Motivation and Context --- Unite the behavior (configs, logging) of collectibles with other apps. How Has This Been Tested? --- Manually.
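For context on how the new `[collectibles]` preset is consumed: each field of the `CollectiblesConfig` struct introduced below is a `SocketAddr` with a serde default, so any key left commented out in the preset falls back to a hard-coded localhost address. A minimal sketch of that mapping, deserializing a TOML snippet directly with the `toml` crate rather than through tari_common's `config::Config` loader that the patch actually uses (assumes `serde` with the derive feature and `toml` as dependencies):

// Sketch only: mirrors two of the CollectiblesConfig fields and their serde
// defaults; the real struct lives in common/src/configuration/collectibles_config.rs.
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct CollectiblesConfig {
    #[serde(default = "default_wallet_grpc_address")]
    wallet_grpc_address: SocketAddr,
    #[serde(default = "default_base_node_grpc_address")]
    base_node_grpc_address: SocketAddr,
}

fn default_wallet_grpc_address() -> SocketAddr {
    SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 18143)
}

fn default_base_node_grpc_address() -> SocketAddr {
    SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 18142)
}

fn main() {
    // Only the wallet address is set; the base node address falls back to its default.
    let cfg: CollectiblesConfig =
        toml::from_str(r#"wallet_grpc_address = "127.0.0.1:18143""#).unwrap();
    assert_eq!(cfg.base_node_grpc_address.port(), 18142);
    println!("{:?}", cfg);
}

If this sketch is accurate, uncommenting a single address in the preset is enough; the remaining endpoints keep their defaults.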
--- Cargo.lock | 4 + .../tari_collectibles/src-tauri/Cargo.toml | 4 + .../src-tauri/src/app_state.rs | 31 ++-- .../src-tauri/src/clients/base_node_client.rs | 11 +- .../src/clients/validator_node_client.rs | 15 +- .../src-tauri/src/clients/wallet_client.rs | 9 +- .../src/commands/asset_wallets/mod.rs | 18 +- .../src-tauri/src/commands/assets/mod.rs | 14 +- .../src-tauri/src/commands/tip004/mod.rs | 7 +- .../src-tauri/src/commands/tip721/mod.rs | 5 +- .../tari_collectibles/src-tauri/src/main.rs | 8 +- .../src-tauri/src/settings.rs | 58 ------- common/config/presets/collectibles.toml | 15 ++ common/config/presets/validator_node.toml | 3 +- common/logging/log4rs_collectibles.yml | 154 ++++++++++++++++++ common/src/configuration/bootstrap.rs | 10 ++ .../src/configuration/collectibles_config.rs | 63 +++++++ common/src/configuration/global.rs | 5 +- common/src/configuration/mod.rs | 2 + common/src/configuration/utils.rs | 1 + common/src/lib.rs | 1 + common/src/logging.rs | 1 + config/config.toml | 150 ----------------- package-lock.json | 7 +- package.json | 1 + 25 files changed, 347 insertions(+), 250 deletions(-) delete mode 100644 applications/tari_collectibles/src-tauri/src/settings.rs create mode 100644 common/config/presets/collectibles.toml create mode 100644 common/logging/log4rs_collectibles.yml create mode 100644 common/src/configuration/collectibles_config.rs delete mode 100644 config/config.toml create mode 100644 package.json diff --git a/Cargo.lock b/Cargo.lock index 6f4f884bfc..8980b3efc3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6472,6 +6472,7 @@ dependencies = [ "diesel", "diesel_migrations", "futures 0.3.19", + "log", "prost", "prost-types", "rand 0.8.4", @@ -6479,6 +6480,8 @@ dependencies = [ "serde_json", "structopt", "tari_app_grpc", + "tari_app_utilities", + "tari_common", "tari_common_types", "tari_crypto", "tari_dan_common_types", @@ -6488,6 +6491,7 @@ dependencies = [ "tauri", "tauri-build", "thiserror", + "tokio 1.15.0", "tonic", "uuid", ] diff --git a/applications/tari_collectibles/src-tauri/Cargo.toml b/applications/tari_collectibles/src-tauri/Cargo.toml index 6f1d659d3e..383ddf166f 100644 --- a/applications/tari_collectibles/src-tauri/Cargo.toml +++ b/applications/tari_collectibles/src-tauri/Cargo.toml @@ -17,12 +17,15 @@ tauri-build = { version = "1.0.0-beta.4" } [dependencies] tari_app_grpc = { path = "../../tari_app_grpc" } +tari_app_utilities = { path = "../../tari_app_utilities" } +tari_common = { path = "../../../common" } tari_common_types = { path = "../../../base_layer/common_types" } tari_crypto = { git = "https://github.com/tari-project/tari-crypto.git", branch = "main" } tari_key_manager = { path = "../../../base_layer/key_manager" } tari_mmr = { path = "../../../base_layer/mmr"} tari_utilities = "*" tari_dan_common_types = { path = "../../../dan_layer/common_types"} +log = { version = "0.4.8", features = ["std"] } blake2 = "^0.9.0" futures = "0.3.17" @@ -38,6 +41,7 @@ uuid = { version = "0.8.2", features = ["serde"] } prost = "0.9" prost-types = "0.9" structopt = "0.3.25" +tokio = { version = "1.11", features = ["signal"] } [features] default = [ "custom-protocol" ] diff --git a/applications/tari_collectibles/src-tauri/src/app_state.rs b/applications/tari_collectibles/src-tauri/src/app_state.rs index f2f81edef4..8f2fa32163 100644 --- a/applications/tari_collectibles/src-tauri/src/app_state.rs +++ b/applications/tari_collectibles/src-tauri/src/app_state.rs @@ -24,19 +24,19 @@ use crate::{ clients::{BaseNodeClient, GrpcValidatorNodeClient, 
WalletClient}, error::CollectiblesError, providers::ConcreteKeyManagerProvider, - settings::Settings, storage::{ sqlite::{SqliteCollectiblesStorage, SqliteDbFactory}, StorageError, }, }; -use std::sync::Arc; +use std::{path::PathBuf, sync::Arc}; +use tari_common::configuration::CollectiblesConfig; use tauri::async_runtime::RwLock; use uuid::Uuid; pub struct AppState { - config: Settings, + config: CollectiblesConfig, db_factory: SqliteDbFactory, current_wallet_id: Option<Uuid>, } @@ -47,27 +47,38 @@ pub struct ConcurrentAppState { } impl ConcurrentAppState { - pub fn new() -> Self { - let settings = Settings::new(); - let db_factory = SqliteDbFactory::new(settings.data_dir.as_path()); + pub fn new(base_path: PathBuf, config: CollectiblesConfig) -> Self { + let db_factory = SqliteDbFactory::new(base_path.as_path()); Self { inner: Arc::new(RwLock::new(AppState { + config, db_factory, - config: settings, current_wallet_id: None, })), } } pub async fn create_wallet_client(&self) -> WalletClient { - WalletClient::new(self.inner.read().await.config.wallet_grpc_address.clone()) + WalletClient::new( + self + .inner + .read() + .await + .config + .wallet_grpc_address + .clone() + .to_string(), + ) } pub async fn connect_base_node_client(&self) -> Result<BaseNodeClient, CollectiblesError> { let lock = self.inner.read().await; - let client = - BaseNodeClient::connect(format!("http://{}", lock.config.base_node_grpc_address)).await?; + let client = BaseNodeClient::connect(format!( + "http://{}", + lock.config.base_node_grpc_address.to_string() + )) + .await?; Ok(client) } diff --git a/applications/tari_collectibles/src-tauri/src/clients/base_node_client.rs b/applications/tari_collectibles/src-tauri/src/clients/base_node_client.rs index 19147a1748..f620a27fef 100644 --- a/applications/tari_collectibles/src-tauri/src/clients/base_node_client.rs +++ b/applications/tari_collectibles/src-tauri/src/clients/base_node_client.rs @@ -22,10 +22,13 @@ use crate::error::CollectiblesError; use futures::StreamExt; +use log::debug; use tari_app_grpc::tari_rpc as grpc; use tari_common_types::types::PublicKey; use tari_utilities::ByteArray; +const LOG_TARGET: &str = "collectibles::base"; + pub struct BaseNodeClient { client: grpc::base_node_client::BaseNodeClient<tonic::transport::Channel>, } @@ -79,7 +82,7 @@ impl BaseNodeClient { let request = grpc::GetAssetMetadataRequest { asset_public_key: Vec::from(asset_public_key.as_bytes()), }; - dbg!(&request); + debug!(target: LOG_TARGET, "request {:?}", request); let response = client .get_asset_metadata(request) .await .map_err(|s| CollectiblesError::ClientRequestError { request: "get_asset_metadata".to_string(), source: s, })?; - dbg!(&response); + debug!(target: LOG_TARGET, "response {:?}", response); Ok(response) } @@ -103,7 +106,7 @@ impl BaseNodeClient { unique_ids: vec![vec![3u8; 32]], }; - dbg!(&request); + debug!(target: LOG_TARGET, "request {:?}", request); let mut stream = client .get_tokens(request) .await @@ -117,7 +120,7 @@ impl BaseNodeClient { if i > 10 { break; } - dbg!(&response); + debug!(target: LOG_TARGET, "response {:?}", response); let features = response .map_err(|status| format!("Got an error status from GRPC:{}", status))?
.features; diff --git a/applications/tari_collectibles/src-tauri/src/clients/validator_node_client.rs b/applications/tari_collectibles/src-tauri/src/clients/validator_node_client.rs index c55deefc2f..7f91653cc4 100644 --- a/applications/tari_collectibles/src-tauri/src/clients/validator_node_client.rs +++ b/applications/tari_collectibles/src-tauri/src/clients/validator_node_client.rs @@ -20,10 +20,13 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use crate::error::CollectiblesError; +use log::{debug, error}; use tari_app_grpc::tari_rpc as grpc; use tari_common_types::types::PublicKey; use tari_utilities::ByteArray; +const LOG_TARGET: &str = "collectibles::validator_node"; + pub trait ValidatorNodeClient {} pub struct GrpcValidatorNodeClient { @@ -57,21 +60,21 @@ impl GrpcValidatorNodeClient { method, args, }; - dbg!(&req); + debug!(target: LOG_TARGET, "req {:?}", req); let response = self .client .invoke_read_method(req) .await .map(|resp| resp.into_inner()) .map_err(|e| { - dbg!(&e); + error!(target: LOG_TARGET, "{}", e); CollectiblesError::ClientRequestError { source: e, request: "invoke_read_method".to_string(), } })?; - dbg!(&response); + debug!(target: LOG_TARGET, "response {:?}", response); Ok(response.result) } @@ -88,21 +91,21 @@ impl GrpcValidatorNodeClient { method, args, }; - dbg!(&req); + debug!(target: LOG_TARGET, "req {:?}", req); let response = self .client .invoke_method(req) .await .map(|resp| resp.into_inner()) .map_err(|e| { - dbg!(&e); + error!(target: LOG_TARGET, "{}", e); CollectiblesError::ClientRequestError { source: e, request: "invoke_method".to_string(), } })?; - dbg!(&response); + debug!(target: LOG_TARGET, "response {:?}", response); Ok(response.result) } } diff --git a/applications/tari_collectibles/src-tauri/src/clients/wallet_client.rs b/applications/tari_collectibles/src-tauri/src/clients/wallet_client.rs index ca4ffc16cf..4b416c67ed 100644 --- a/applications/tari_collectibles/src-tauri/src/clients/wallet_client.rs +++ b/applications/tari_collectibles/src-tauri/src/clients/wallet_client.rs @@ -21,10 +21,13 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use crate::error::CollectiblesError; +use log::debug; use tari_app_grpc::{tari_rpc as grpc, tari_rpc::RegisterAssetRequest}; use tari_common_types::types::PublicKey; use tari_utilities::{hex::Hex, ByteArray}; +const LOG_TARGET: &str = "collectibles::wallet"; + pub struct WalletClient { endpoint: String, inner: Option<grpc::wallet_client::WalletClient<tonic::transport::Channel>>, } @@ -76,7 +79,7 @@ impl WalletClient { source: error, } })?; - dbg!(&result); + debug!(target: LOG_TARGET, "result {:?}", result); Ok(result.into_inner().public_key.to_hex()) } @@ -91,7 +94,7 @@ impl WalletClient { source, } })?; - dbg!(&result); + debug!(target: LOG_TARGET, "result {:?}", result); Ok(result.into_inner()) } @@ -117,7 +120,7 @@ impl WalletClient { request: "create_initial_asset_checkpoint".to_string(), source, })?; - dbg!(&result); + debug!(target: LOG_TARGET, "result {:?}", result); Ok(result.into_inner()) } } diff --git a/applications/tari_collectibles/src-tauri/src/commands/asset_wallets/mod.rs b/applications/tari_collectibles/src-tauri/src/commands/asset_wallets/mod.rs index 72afc38481..a9537bb58a 100644 --- a/applications/tari_collectibles/src-tauri/src/commands/asset_wallets/mod.rs +++ b/applications/tari_collectibles/src-tauri/src/commands/asset_wallets/mod.rs @@ -30,6 +30,7 @@ use crate::{ StorageTransaction, }, }; +use log::{debug, error}; use prost::Message; use tari_common_types::types::PublicKey; use tari_dan_common_types::proto::tips::tip002; @@ -37,6 +38,8 @@ use tari_utilities::{hex::Hex, ByteArray}; use tauri::Manager; use uuid::Uuid; +const LOG_TARGET: &str = "collectibles::asset_wallets"; + #[tauri::command] pub(crate) async fn asset_wallets_create( asset_public_key: String, @@ -84,7 +87,7 @@ pub(crate) async fn asset_wallets_create( } } Err(e) => { - dbg!(e); + error!(target: LOG_TARGET, "{}", e); None } }; @@ -121,7 +124,10 @@ pub(crate) async fn asset_wallets_get_balance( asset_public_key: String, state: tauri::State<'_, ConcurrentAppState>, ) -> Result<u64, Status> { - dbg!(&asset_public_key); + debug!( + target: LOG_TARGET, + "asset_public_key {:?}", asset_public_key + ); let asset_public_key = PublicKey::from_hex(&asset_public_key)?; let wallet_id = state @@ -143,7 +149,7 @@ pub(crate) async fn asset_wallets_get_balance( let args = tip002::BalanceOfRequest { owner: Vec::from(owner.public_key.as_bytes()), }; - dbg!(&args); + debug!(target: LOG_TARGET, "args {:?}", args); let mut args_bytes = vec![]; args.encode(&mut args_bytes)?; // let req = grpc::InvokeReadMethodRequest{ @@ -162,7 +168,7 @@ pub(crate) async fn asset_wallets_get_balance( ) .await?; - dbg!(&resp); + debug!(target: LOG_TARGET, "resp {:?}", resp); let proto_resp: tip002::BalanceOfResponse = Message::decode(&*resp)?; total += proto_resp.balance; } @@ -219,7 +225,7 @@ pub(crate) async fn asset_wallets_create_address( public_key: address_public_key, key_manager_path, }; - dbg!(&address); + debug!(target: LOG_TARGET, "address {:?}", address); db.addresses().insert(&address, &transaction)?; transaction.commit()?; Ok(address) @@ -295,6 +301,6 @@ pub(crate) async fn asset_wallets_send_to( .invoke_method(asset_public_key, 2, "transfer".to_string(), args_bytes) .await?; - dbg!(&resp); + debug!(target: LOG_TARGET, "resp {:?}", resp); Ok(()) } diff --git a/applications/tari_collectibles/src-tauri/src/commands/assets/mod.rs b/applications/tari_collectibles/src-tauri/src/commands/assets/mod.rs index d40d94c056..0871aec587 100644 --- a/applications/tari_collectibles/src-tauri/src/commands/assets/mod.rs +++ b/applications/tari_collectibles/src-tauri/src/commands/assets/mod.rs @@ -35,6 +35,7 @@ use
crate::{ }, }; +use log::debug; use tari_app_grpc::tari_rpc::{self}; use tari_common_types::types::{Commitment, PublicKey}; use tari_crypto::{hash::blake2::Blake256, ristretto::RistrettoPublicKey}; @@ -42,6 +43,8 @@ use tari_mmr::{MemBackendVec, MerkleMountainRange}; use tari_utilities::{hex::Hex, ByteArray}; use uuid::Uuid; +const LOG_TARGET: &str = "collectibles::assets"; + #[tauri::command] pub(crate) async fn assets_create( name: String, @@ -96,14 +99,17 @@ pub(crate) async fn assets_create( image: Some(image), committee: None, }; - dbg!(&asset_row); + debug!(target: LOG_TARGET, "asset_row {:?}", asset_row); db.assets().insert(&asset_row, &transaction)?; let asset_wallet_row = AssetWalletRow { id: Uuid::new_v4(), asset_id, wallet_id, }; - dbg!(&asset_wallet_row); + debug!( + target: LOG_TARGET, + "asset_wallet_row {:?}", asset_wallet_row + ); db.asset_wallets().insert(&asset_wallet_row, &transaction)?; let address = AddressRow { id: Uuid::new_v4(), @@ -112,7 +118,7 @@ pub(crate) async fn assets_create( public_key: asset_public_key, key_manager_path: key_manager_path.clone(), }; - dbg!(&address); + debug!(target: LOG_TARGET, "address {:?}", address); db.addresses().insert(&address, &transaction)?; if template_ids.contains(&2) { let row = Tip002AddressRow { @@ -259,7 +265,7 @@ pub(crate) async fn assets_get_registration( let asset_pub_key = PublicKey::from_hex(&asset_pub_key)?; let asset = client.get_asset_metadata(&asset_pub_key).await?; - dbg!(&asset); + debug!(target: LOG_TARGET, "asset {:?}", asset); let features = asset.features.unwrap(); let serializer = V1AssetMetadataSerializer {}; let metadata = serializer.deserialize(&features.metadata[1..]); diff --git a/applications/tari_collectibles/src-tauri/src/commands/tip004/mod.rs b/applications/tari_collectibles/src-tauri/src/commands/tip004/mod.rs index 3a8e603985..4f36ed68b9 100644 --- a/applications/tari_collectibles/src-tauri/src/commands/tip004/mod.rs +++ b/applications/tari_collectibles/src-tauri/src/commands/tip004/mod.rs @@ -29,12 +29,15 @@ use crate::{ Tip721TokensTableGateway, }, }; +use log::debug; use prost::Message; use tari_common_types::types::PublicKey; use tari_dan_common_types::proto::tips::tip004; use tari_utilities::{hex::Hex, ByteArray}; use uuid::Uuid; +const LOG_TARGET: &str = "collectibles::tip004"; + #[tauri::command] pub(crate) async fn tip004_mint_token( asset_public_key: String, @@ -62,7 +65,7 @@ pub(crate) async fn tip004_mint_token( let result = client .invoke_method(asset_public_key, 4, "mint".to_string(), bytes) .await?; - dbg!(&result); + debug!(target: LOG_TARGET, "result {:?}", result); Ok(()) } @@ -96,7 +99,7 @@ pub(crate) async fn tip004_list_tokens( args.encode_to_vec(), ) .await?; - dbg!(&result); + debug!(target: LOG_TARGET, "result {:?}", result); db.tip721_tokens().delete_all_for_address(address.id, &tx)?; if !result.is_empty() { let balance_of: tip004::BalanceOfResponse = Message::decode(&*result)?; diff --git a/applications/tari_collectibles/src-tauri/src/commands/tip721/mod.rs b/applications/tari_collectibles/src-tauri/src/commands/tip721/mod.rs index 856ae6a7b1..927fa900bf 100644 --- a/applications/tari_collectibles/src-tauri/src/commands/tip721/mod.rs +++ b/applications/tari_collectibles/src-tauri/src/commands/tip721/mod.rs @@ -25,12 +25,15 @@ use crate::{ status::Status, storage::{AddressesTableGateway, AssetsTableGateway, CollectiblesStorage}, }; +use log::debug; use prost::Message; use tari_common_types::types::PublicKey; use tari_dan_common_types::proto::tips::tip721; use 
tari_utilities::{hex::Hex, ByteArray}; use uuid::Uuid; +const LOG_TARGET: &str = "collectibles::tip721"; + #[tauri::command] pub(crate) async fn tip721_transfer_from( asset_public_key: String, @@ -72,6 +75,6 @@ pub(crate) async fn tip721_transfer_from( transfer_request, ) .await?; - dbg!(&res); + debug!(target: LOG_TARGET, "res {:?}", res); Ok(()) } diff --git a/applications/tari_collectibles/src-tauri/src/main.rs b/applications/tari_collectibles/src-tauri/src/main.rs index 974a325e0e..85c77fb3b4 100644 --- a/applications/tari_collectibles/src-tauri/src/main.rs +++ b/applications/tari_collectibles/src-tauri/src/main.rs @@ -4,6 +4,9 @@ windows_subsystem = "windows" )] +use tari_app_utilities::initialization::init_configuration; +use tari_common::configuration::bootstrap::ApplicationType; + use crate::app_state::ConcurrentAppState; #[macro_use] @@ -19,12 +22,13 @@ mod error; mod models; mod providers; mod schema; -mod settings; mod status; mod storage; fn main() { - let state = ConcurrentAppState::new(); + #[allow(unused_mut)] // config isn't mutated on windows + let (bootstrap, mut config, _) = init_configuration(ApplicationType::Collectibles).unwrap(); + let state = ConcurrentAppState::new(bootstrap.base_path, config.collectibles_config.unwrap()); tauri::Builder::default() .manage(state) diff --git a/applications/tari_collectibles/src-tauri/src/settings.rs b/applications/tari_collectibles/src-tauri/src/settings.rs deleted file mode 100644 index f5dab2a675..0000000000 --- a/applications/tari_collectibles/src-tauri/src/settings.rs +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2021. The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -use std::path::PathBuf; - -use structopt::StructOpt; - -#[derive(Debug, StructOpt)] -pub struct Settings { - #[structopt(short, long, aliases = &["base_path", "base_dir", "base-dir"], env="DATA_DIR", default_value = "data")] - pub(crate) data_dir: PathBuf, - #[structopt( - short, - long, - env = "WALLET_GRPC_ADDRESS", - default_value = "localhost:18143" - )] - pub(crate) wallet_grpc_address: String, - #[structopt( - short, - long, - env = "BASE_NODE_GRPC_ADDRESS", - default_value = "localhost:18142" - )] - pub(crate) base_node_grpc_address: String, - #[structopt( - short, - long, - env = "VALIDATOR_NODE_GRPC_ADDRESS", - default_value = "localhost:18144" - )] - pub(crate) validator_node_grpc_address: String, -} - -impl Settings { - pub fn new() -> Self { - Self::from_args() - } -} diff --git a/common/config/presets/collectibles.toml b/common/config/presets/collectibles.toml new file mode 100644 index 0000000000..8d4910cc96 --- /dev/null +++ b/common/config/presets/collectibles.toml @@ -0,0 +1,15 @@ +######################################################################################################################## +# # +# Collectibles Configuration Options # +# # +######################################################################################################################## + +[collectibles] +# GRPC address of validator node +#validator_node_grpc_address = 127.0.0.1:18144 + +# GRPC address of base node +#base_node_grpc_address = 127.0.0.1:18142 + +# GRPC address of wallet +#wallet_grpc_address = 127.0.0.1:18143 diff --git a/common/config/presets/validator_node.toml b/common/config/presets/validator_node.toml index 68dc1b424e..f6af4517a9 100644 --- a/common/config/presets/validator_node.toml +++ b/common/config/presets/validator_node.toml @@ -1,10 +1,11 @@ -[validator_node] ######################################################################################################################## # # # Validator Node Configuration Options # # # ######################################################################################################################## +[validator_node] + committee = ["2ea0df3059caf4411624d6bf5b9c02238d607d2798c586b3e6c2a054da3f205a"] # cannot be of zero size phase_timeout = 30 template_id = "EditableMetadata" diff --git a/common/logging/log4rs_collectibles.yml b/common/logging/log4rs_collectibles.yml new file mode 100644 index 0000000000..eb71151aca --- /dev/null +++ b/common/logging/log4rs_collectibles.yml @@ -0,0 +1,154 @@ +# A sample log configuration file for running in release mode. By default, this configuration splits up log messages to +# three destinations: +# * Console: For log messages with level INFO and higher +# * log/collectibles/network.log: INFO-level logs related to the comms crate. This file will be quite busy since there +# are lots of P2P debug messages, and so this traffic is segregated from the application log messages +# * log/collectibles/base_layer.log: Non-comms related INFO-level messages and higher are logged into this file +# * log/collectibles/other.log: Third-party crates' messages will be logged here at an ERROR level +# +# See https://docs.rs/log4rs/0.8.3/log4rs/encode/pattern/index.html for deciphering the log pattern. 
The log format +# used in this sample configuration prints messages as: +# timestamp [target] LEVEL message +refresh_rate: 30 seconds +appenders: + # An appender named "stdout" that writes to stdout + stdout: + kind: console + + encoder: + pattern: "{d(%H:%M)} {h({l}):5} {m}{n}" + filters: + - kind: threshold + level: warn + + # An appender named "network" that writes to a file with a custom pattern encoder + network: + kind: rolling_file + path: "log/collectibles/network.log" + policy: + kind: compound + trigger: + kind: size + limit: 10mb + roller: + kind: fixed_window + base: 1 + count: 5 + pattern: "log/collectibles/network.{}.log" + encoder: + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] [Thread:{I}] {l:5} {m}{n}" + + # An appender named "base_layer" that writes to a file with a custom pattern encoder + base_layer: + kind: rolling_file + path: "log/collectibles/base_layer.log" + policy: + kind: compound + trigger: + kind: size + limit: 10mb + roller: + kind: fixed_window + base: 1 + count: 5 + pattern: "log/collectibles/base_layer.{}.log" + encoder: + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] [Thread:{I}] [{X(node-public-key)},{X(node-id)}] {l:5} {m}{n}" + + # An appender named "other" that writes to a file with a custom pattern encoder + other: + kind: rolling_file + path: "log/collectibles/other.log" + policy: + kind: compound + trigger: + kind: size + limit: 10mb + roller: + kind: fixed_window + base: 1 + count: 5 + pattern: "log/collectibles/other.{}.log" + encoder: + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] [Thread:{I}] {l:5} {m}{n}" + +# Set the default logging level to "info" +root: + level: warn + appenders: + - stdout + +loggers: + # Route log events common to every application to all appenders + tari::application: + level: info + appenders: + - base_layer + - network + - other + additive: false + + # Route log events sent to the "core" logger to the "base_layer" appender + c: + level: info + appenders: + - base_layer + tari: + level: info + appenders: + - base_layer + + # Route log events sent to the "wallet" logger to the "base_layer" appender + wallet: + level: info + appenders: + - base_layer + # Route log events sent to the "comms" logger to the "network" appender + comms: + level: info + appenders: + - network + # Route log events sent to the "p2p" logger to the "network" appender + p2p: + level: info + appenders: + - network + + # Route log events sent to the "yamux" logger to the "network" appender + yamux: + level: info + appenders: + - network + # Route log events sent to the "mio" logger to the "network" appender + mio: + level: error + appenders: + - network + # Route log events sent to the "rustyline" logger to the "other" appender + rustyline: + level: error + appenders: + - other + additive: false + + # Route log events sent to the "tokio_util" logger to the "other" appender + tokio_util: + level: error + appenders: + - other + # Route PGP log events + pgp: + level: warn + appenders: + - other + # Route log events sent to the "tari_mm_proxy" logger to the "base_layer" appender + tari_mm_proxy: + level: info + appenders: + - base_layer + # Route R2D2 log events + r2d2: + level: warn + appenders: + - other + additive: false diff --git a/common/src/configuration/bootstrap.rs b/common/src/configuration/bootstrap.rs index 1c105db748..f1e1b7e3e2 100644 --- a/common/src/configuration/bootstrap.rs +++ b/common/src/configuration/bootstrap.rs @@ -67,6 +67,7 @@ use crate::{ initialize_logging, logging, DEFAULT_BASE_NODE_LOG_CONFIG, + DEFAULT_COLLECTIBLES_LOG_CONFIG, 
DEFAULT_CONFIG, DEFAULT_MERGE_MINING_PROXY_LOG_CONFIG, DEFAULT_MINING_NODE_LOG_CONFIG, @@ -275,6 +276,12 @@ impl ConfigBootstrap { Some(&self.base_path), )) }, + ApplicationType::Collectibles => { + self.log_config = normalize_path(dir_utils::default_path( + DEFAULT_COLLECTIBLES_LOG_CONFIG, + Some(&self.base_path), + )) + }, } } @@ -356,6 +363,7 @@ pub enum ApplicationType { MiningNode, StratumTranscoder, ValidatorNode, + Collectibles, } impl ApplicationType { @@ -368,6 +376,7 @@ impl ApplicationType { MiningNode => "Tari Mining Node", ValidatorNode => "Digital Assets Network Validator Node", StratumTranscoder => "Tari Stratum Transcoder", + Collectibles => "Tari Collectibles", } } @@ -380,6 +389,7 @@ impl ApplicationType { MiningNode => "miner", StratumTranscoder => "stratum-transcoder", ValidatorNode => "validator-node", + Collectibles => "collectibles", } } } diff --git a/common/src/configuration/collectibles_config.rs b/common/src/configuration/collectibles_config.rs new file mode 100644 index 0000000000..3851cc1a74 --- /dev/null +++ b/common/src/configuration/collectibles_config.rs @@ -0,0 +1,63 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use config::Config; +use serde::Deserialize; + +use crate::ConfigurationError; + +#[derive(Debug, Clone, Deserialize)] +pub struct CollectiblesConfig { + #[serde(default = "default_validator_node_grpc_address")] + pub validator_node_grpc_address: SocketAddr, + #[serde(default = "default_base_node_grpc_address")] + pub base_node_grpc_address: SocketAddr, + #[serde(default = "default_wallet_grpc_address")] + pub wallet_grpc_address: SocketAddr, +} + +fn default_validator_node_grpc_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 18144) +} + +fn default_base_node_grpc_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 18142) +} + +fn default_wallet_grpc_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 18143) +} + +impl CollectiblesConfig { + pub fn convert_if_present(cfg: Config) -> Result<Option<Self>, ConfigurationError> { + let section: Self = match cfg.get("collectibles") { + Ok(s) => s, + Err(_e) => { + // dbg!(e); + return Ok(None); + }, + }; + Ok(Some(section)) + } +} diff --git a/common/src/configuration/global.rs b/common/src/configuration/global.rs index c2dbc1e2e7..545ceb83e6 100644 --- a/common/src/configuration/global.rs +++ b/common/src/configuration/global.rs @@ -43,6 +43,7 @@ use crate::{ bootstrap::ApplicationType, name_server::DnsNameServer, BaseNodeConfig, + CollectiblesConfig, MergeMiningConfig, Network, ValidatorNodeConfig, @@ -148,6 +149,7 @@ pub struct GlobalConfig { pub console_wallet_use_libtor: bool, pub merge_mining_config: Option<MergeMiningConfig>, pub blockchain_track_reorgs: bool, + pub collectibles_config: Option<CollectiblesConfig>, } impl GlobalConfig { @@ -870,7 +872,7 @@ fn convert_node_config( flood_ban_max_msg_count, mine_on_tip_only, validate_tip_timeout_sec, - validator_node: ValidatorNodeConfig::convert_if_present(cfg)?, + validator_node: ValidatorNodeConfig::convert_if_present(cfg.clone())?, mining_pool_address, mining_wallet_address, mining_worker_name, @@ -880,6 +882,7 @@ fn convert_node_config( console_wallet_use_libtor, merge_mining_config, blockchain_track_reorgs, + collectibles_config: CollectiblesConfig::convert_if_present(cfg)?, }) } diff --git a/common/src/configuration/mod.rs b/common/src/configuration/mod.rs index a8139c09e7..b9530707a1 100644 --- a/common/src/configuration/mod.rs +++ b/common/src/configuration/mod.rs @@ -41,6 +41,7 @@ pub mod loader; mod network; pub use network::Network; mod base_node_config; +mod collectibles_config; mod merge_mining_config; pub mod name_server; pub mod seconds; @@ -50,6 +51,7 @@ mod wallet_config; pub mod writer; pub use base_node_config::BaseNodeConfig; +pub use collectibles_config::CollectiblesConfig; pub use merge_mining_config::MergeMiningConfig; pub use validator_node_config::ValidatorNodeConfig; pub use wallet_config::WalletConfig; diff --git a/common/src/configuration/utils.rs b/common/src/configuration/utils.rs index 4d40de1cb2..c3ba451f18 100644 --- a/common/src/configuration/utils.rs +++ b/common/src/configuration/utils.rs @@ -49,6 +49,7 @@ pub fn config_installer(_app_type: ApplicationType, path: &Path) -> Result<(), s include_str!("../../config/presets/merge_mining_proxy.toml"), include_str!("../../config/presets/stratum_transcoder.toml"), include_str!("../../config/presets/validator_node.toml"), + include_str!("../../config/presets/collectibles.toml"), ] .join("\n"); diff --git a/common/src/lib.rs b/common/src/lib.rs index 03adac789d..10ed3efad0 100644 --- a/common/src/lib.rs +++
b/common/src/lib.rs @@ -102,5 +102,6 @@ pub const DEFAULT_WALLET_LOG_CONFIG: &str = "config/log4rs_console_wallet.yml"; pub const DEFAULT_MERGE_MINING_PROXY_LOG_CONFIG: &str = "config/log4rs_merge_mining_proxy.yml"; pub const DEFAULT_STRATUM_TRANSCODER_LOG_CONFIG: &str = "config/log4rs_miningcore_transcoder.yml"; pub const DEFAULT_MINING_NODE_LOG_CONFIG: &str = "config/log4rs_mining_node.yml"; +pub const DEFAULT_COLLECTIBLES_LOG_CONFIG: &str = "config/log4rs_collectibles.yml"; pub(crate) const LOG_TARGET: &str = "common::config"; diff --git a/common/src/logging.rs b/common/src/logging.rs index e08e20551a..e87cc95538 100644 --- a/common/src/logging.rs +++ b/common/src/logging.rs @@ -69,6 +69,7 @@ pub fn log_config_installer(application_type: ApplicationType, path: &Path) -> R MergeMiningProxy => include_str!("../logging/log4rs_sample_proxy.yml"), StratumTranscoder => include_str!("../logging/log4rs_sample_transcoder.yml"), ValidatorNode => include_str!("../logging/log4rs_sample_validator_node.yml"), + Collectibles => include_str!("../logging/log4rs_collectibles.yml"), }; if let Some(d) = path.parent() { diff --git a/config/config.toml b/config/config.toml deleted file mode 100644 index 7cf031df49..0000000000 --- a/config/config.toml +++ /dev/null @@ -1,150 +0,0 @@ -######################################################################################################################## -# # -# Common Configuration Options # -# # -######################################################################################################################## - -[common] -# Select the network to connect to. Valid options are: -# mainnet - the "real" Tari network (default) -# weatherwax - the Tari testnet -network = "dibbler" - -# Tari is a 100% peer-to-peer network, so there are no servers to hold messages for you while you're offline. -# Instead, we rely on our peers to hold messages for us while we're offline. This settings sets maximum size of the -# message cache that for holding our peers' messages, in MB. -#message_cache_size = 10 - -# When storing messages for peers, hold onto them for at most this long before discarding them. The default is 1440 -# minutes = or 24 hrs. -#message_cache_ttl = 1440 - -# If peer nodes spam you with messages, or are otherwise badly behaved, they will be added to your denylist and banned -# You can set a time limit to release that ban (in minutes), or otherwise ban them for life (-1). The default is to -# ban them for 10 days. -#denylist_ban_period = 1440 - -# The number of liveness sessions to allow. Liveness sessions can be established by liveness monitors over TCP by -# sending a 0x50 (P) as the first byte. Any messages sent must be followed by newline message no longer than -# 50 characters. That message will be echoed back. -#liveness_max_sessions = 0 -#liveness_allowlist_cidrs = ["127.0.0.1/32"] - -# The buffer size constants for the publish/subscribe connector channel, connecting comms messages to the domain layer: -# - Buffer size for the base node (min value = 30, default value = 1500). -#buffer_size_base_node = 1500 -# - Buffer size for the console wallet (min value = 300, default value = 50000). -#buffer_size_console_wallet = 50000 -# The rate limit constants for the publish/subscribe connector channel, i.e. maximum amount of inbound messages to -# accept - any rate attemting to exceed this limit will be throttled. -# - Rate limit for the base node (min value = 5, default value = 1000). 
-#buffer_rate_limit_base_node = 1000 -# - Rate limit for the console wallet (min value = 5, default value = 1000). -buffer_rate_limit_console_wallet = 1000 -# The message deduplication persistent cache size - messages with these hashes in the cache will only be processed once. -# The cache will also be trimmed down to size periodically (min value = 0, default value = 2500). -dedup_cache_capacity = 25000 - -# The timeout (s) for requesting blocks from a peer during blockchain sync (min value = 10 s, default value = 150 s). -#fetch_blocks_timeout = 150 - -# The timeout (s) for requesting UTXOs from a base node (min value = 10 s, default value = 600 s). -#fetch_utxos_timeout = 600 - -# The timeout (s) for requesting other base node services (min value = 10 s, default value = 180 s). -#service_request_timeout = 180 - -# The maximum simultaneous comms RPC sessions allowed (default value = 1000). Setting this to -1 will allow unlimited -# sessions. -rpc_max_simultaneous_sessions = 10000 - -[common.weatherwax] -# When first logging onto the Tari network, you need to find a few peers to bootstrap the process. In the absence of -# any servers, this is a little more challenging than usual. Our best strategy is just to try and connect to the peers -# you knew about last time you ran the software. But what about when you run the software for the first time? That's -# where this allowlist comes in. It's a list of known Tari nodes that are likely to be around for a long time and that -# new nodes can use to introduce themselves to the network. -# peer_seeds = ["public_key1::address1", "public_key2::address2",... ] -peer_seeds = [ - # weatherwax - "98bc76afc1c35ad4651bdc9ef57bbe0655a2ea3cd86c0e19b5fd5890546eb040::/onion3/33izgtjkrlxhxybj6luqowkpiy2wvte43osejnbqyieqtdfhovzghxad:18141", #jozi - "9a26e910288213d649b26f9a7a7ee51fe2b2a67ff7d42334523463bf4be94312::/onion3/56kq54ylttnbl5ikotqex3oqvtzlxdpn7zlx4v56rvzf4kq7eezlclid:18141", #london - "6afd5b3c7772ad7d4bb26e0c19668fe04f2d68f99de9e132bee50a6c1846946d::/onion3/may4ajbmcn4dlnzf6fanvqlklxzqiw6qwu6ywqwkjc3bb354rc2i5wid:18141", #ncal - "8e7beec9becdc44fe6015a00d97a77fa3dbafe65127dcc988df6326bd9fd040d::/onion3/3pise36l4imoopsbjic5rtw67adx7rms6w5pgjmccpdwiqx66j7oqcqd:18141", #nvir - "80bb590d943a46e63ae79af5dc2c7d35a3dcd7922c182b28f619dc4cfc366f44::/onion3/oaxwahri7r3h5qjlcdbveyjmg4jsttausik66bicmhixft73nmvecdad:18141", #oregon - "981cc8cd1e4fe2f99ea1bd3e0ab1e7821ca0bfab336a4967cfec053fee86254c::/onion3/7hxpnxrxycdfevirddau7ybofwedaamjrg2ijm57k2kevh5q46ixamid:18141", #seoul - "f2ce179fb733725961a5f7e1e45dacdd443dd43ba6237438d6abe344fb717058::/onion3/nvgdmjf4wucgatz7vemzvi2u4sw5o4gyzwuikagpepoj4w7mkii47zid:18141", #stockholm - "909c0160f4d8e815aba5c2bbccfcceb448877e7b38759fb160f3e9494484d515::/onion3/qw5uxv533sqdn2qoncfyqo35dgecy4rt4x27rexi2her6q6pcpxbm4qd:18141", #sydney -] - -# DNS seeds -# The DNS records in these hostnames should provide TXT records as per https://github.com/tari-project/tari/pull/2319 -# Enter a domain name for the TXT records: -dns_seeds =["seeds.weatherwax.tari.com"] -# The name server used to resolve DNS seeds format: {socket address}/{tls sni dns name} (Default: cloudflare) -# dns_seeds_name_server = "1.1.1.1:853/cloudfare-dns.com" -# Servers addresses, majority of them have to agree. -# autoupdate_dns_hosts = [#server1, #server2, ...] -# Set to true to only accept DNS records that pass DNSSEC validation (Default: true) -dns_seeds_use_dnssec = false - -# Auto Update -# -# This interval in seconds to check for software updates. 
Setting this to 0 disables checking. -# auto_update.check_interval = 300 -# Customize the hosts that are used to check for updates. These hosts must contain update information in DNS TXT records. -# "auto_update.dns_hosts" = ["updates.weatherwax.taripulse.com"] -# Customize the location of the update SHA hashes and maintainer-signed signature. -# "auto_update.hashes_url" = "https://
/hashes.txt" -# "auto_update.hashes_sig_url" = "https://
/hashes.txt.sig" - -[common.igor] -peer_seeds = [ - # igor - "8e7eb81e512f3d6347bf9b1ca9cd67d2c8e29f2836fc5bd608206505cc72af34::/onion3/l4wouomx42nezhzexjdzfh7pcou5l7df24ggmwgekuih7tkv2rsaokqd:18141", - "00b35047a341401bcd336b2a3d564280a72f6dc72ec4c739d30c502acce4e803::/onion3/ojhxd7z6ga7qrvjlr3px66u7eiwasmffnuklscbh5o7g6wrbysj45vid:18141", - "40a9d8573745072534bce7d0ecafe882b1c79570375a69841c08a98dee9ecb5f::/onion3/io37fylc2pupg4cte4siqlsmuszkeythgjsxs2i3prm6jyz2dtophaad:18141", - "126c7ee64f71aca36398b977dd31fbbe9f9dad615df96473fb655bef5709c540::/onion3/6ilmgndocop7ybgmcvivbdsetzr5ggj4hhsivievoa2dx2b43wqlrlid:18141", -] - -dns_seeds =["seeds.igor.tari.com"] -# dns_seeds_name_server = "1.1.1.1:853/cloudfare-dns.com" -dns_seeds_use_dnssec = false - -# auto_update.check_interval = 300 -# "auto_update.dns_hosts" = ["updates.igor.taripulse.com"] -# "auto_update.hashes_url" = "https://
/hashes.txt" -# "auto_update.hashes_sig_url" = "https://
/hashes.txt.sig" - -[common.dibbler] -dns_seeds =["seeds.dibbler.tari.com"] -peer_seeds = [ - "721e9da488302e69523bca1a9cdcbd2419dddda11698a1e8c6c7bd619659ff21::/onion3/qw4ymrzbanbcr3wwlesxbot72iayd7xdjcrtnacbwgk637vfon47hqad:18141", - # 333388d1cbe3e2bd17453d052f - "c2eca9cf32261a1343e21ed718e79f25bfc74386e9305350b06f62047f519347::/onion3/6yxqk2ybo43u73ukfhyc42qn25echn4zegjpod2ccxzr2jd5atipwzqd:18141", - # 555575715a49fc242d756e52ca - "42fcde82b44af1de95a505d858cb31a422c56c4ac4747fbf3da47d648d4fc346::/onion3/2l3e7ysmihc23zybapdrsbcfg6omtjtfkvwj65dstnfxkwtai2fawtyd:18141", - # 77771f53be07fab4be5f1e1ff7 - "50e6aa8f6c50f1b9d9b3d438dfd2a29cfe1f3e3a650bd9e6b1e10f96b6c38f4d::/onion3/7s6y3cz5bnewlj5ypm7sekhgvqjyrq4bpaj5dyvvo7vxydj7hsmyf5ad:18141", - # 9999016f1f3a6162dddf5a45aa - "36a9df45e1423b5315ffa7a91521924210c8e1d1537ad0968450f20f21e5200d::/onion3/v24qfheti2rztlwzgk6v4kdbes3ra7mo3i2fobacqkbfrk656e3uvnid:18141", - # bbbb8358387d81c388fadb4649 - "be128d570e8ec7b15c101ee1a56d6c56dd7d109199f0bd02f182b71142b8675f::/onion3/ha422qsy743ayblgolui5pg226u42wfcklhc5p7nbhiytlsp4ir2syqd:18141", - # eeeeb0a943ed143e613a135392 - "3e0321c0928ca559ab3c0a396272dfaea705efce88440611a38ff3898b097217::/onion3/sl5ledjoaisst6d4fh7kde746dwweuge4m4mf5nkzdhmy57uwgtb7qqd:18141" -] -[validator_node] -######################################################################################################################## -# # -# Validator Node Configuration Options # -# # -######################################################################################################################## - -# If you are not , you can simply leave everything in this section commented out. Base nodes -# help maintain the security of the Tari token and are the surest way to preserve your privacy and be 100% sure that -# no-one is cheating you out of your money.o - -committee = ["2ea0df3059caf4411624d6bf5b9c02238d607d2798c586b3e6c2a054da3f205a"] # cannot be of zero size -phase_timeout = 30 -template_id = "EditableMetadata" - diff --git a/package-lock.json b/package-lock.json index 7abb96a64d..d3287a6b0b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,4 +1,7 @@ { - "lockfileVersion": 1, - "version": "0.27.2" + "name": "tari", + "version": "0.27.2", + "lockfileVersion": 2, + "requires": true, + "packages": {} } diff --git a/package.json b/package.json new file mode 100644 index 0000000000..0967ef424b --- /dev/null +++ b/package.json @@ -0,0 +1 @@ +{} From c19db9257d2f98b2d1a456816f6ef50018bdcbfe Mon Sep 17 00:00:00 2001 From: Byron Hambly Date: Wed, 2 Feb 2022 08:51:42 +0200 Subject: [PATCH 03/20] feat: ability to compile on stable rust (#3759) Description --- - **keeps** the nightly tool chain, necessary for our cargo fmt settings and avx2, wasm features - but adds ability to compile with default features on stable and CI to check that Closes #3035 --- .github/workflows/ci.yml | 61 +++++++++++++++++-- .../src-tauri/src/app_state.rs | 7 +-- .../tari_collectibles/src-tauri/src/main.rs | 1 - base_layer/key_manager/Cargo.toml | 3 +- base_layer/key_manager/Makefile | 20 +++--- base_layer/key_manager/src/lib.rs | 1 + base_layer/key_manager/src/wasm.rs | 2 +- comms/dht/src/lib.rs | 1 - comms/dht/src/store_forward/local_state.rs | 8 +-- 9 files changed, 75 insertions(+), 29 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ef6183d199..56d5a2bd43 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -61,7 +61,7 @@ jobs: - name: Compile NPM run: | cd applications/launchpad/gui-vue - 
npm install + npm ci npm run build - name: toolchain uses: actions-rs/toolchain@v1 @@ -80,7 +80,7 @@ jobs: command: clippy args: --all-targets -- -D warnings build: - name: build + name: check nightly runs-on: ubuntu-18.04 steps: - name: checkout @@ -114,7 +114,7 @@ jobs: - name: Compile NPM run: | cd applications/launchpad/gui-vue - npm install + npm ci npm run build - name: toolchain uses: actions-rs/toolchain@v1 @@ -122,6 +122,57 @@ jobs: toolchain: ${{ env.toolchain }} components: clippy, rustfmt override: true + - name: cargo check + uses: actions-rs/cargo@v1 + with: + command: check + args: --release --all-targets + build-stable: + name: check stable + runs-on: ubuntu-18.04 + steps: + - name: checkout + uses: actions/checkout@v2 + - name: caching + uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-${{ runner.cpu-model }}-stable-build-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-${{ runner.cpu-model }}-stable-build- + ${{ runner.os }}-${{ runner.cpu-model }}-stable- + - name: ubuntu dependencies + run: | + sudo apt-get update && \ + sudo apt-get -y install \ + build-essential \ + libgtk-3-dev \ + libwebkit2gtk-4.0-dev \ + libsoup2.4-dev \ + curl \ + wget \ + libappindicator3-dev \ + patchelf \ + librsvg2-dev \ + libprotobuf-dev \ + protobuf-compiler + - name: Compile NPM + run: | + cd applications/launchpad/gui-vue + npm ci + npm run build + - name: toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + profile: minimal + override: true + - name: rustup show + run: | + rustup show - name: cargo build uses: actions-rs/cargo@v1 with: @@ -163,7 +214,7 @@ jobs: - name: Compile NPM run: | cd applications/launchpad/gui-vue - npm install + npm ci npm run build - name: toolchain uses: actions-rs/toolchain@v1 @@ -173,4 +224,4 @@ jobs: uses: actions-rs/cargo@v1 with: command: test - args: --release + args: --release --all-targets diff --git a/applications/tari_collectibles/src-tauri/src/app_state.rs b/applications/tari_collectibles/src-tauri/src/app_state.rs index 8f2fa32163..b13a8bf9d3 100644 --- a/applications/tari_collectibles/src-tauri/src/app_state.rs +++ b/applications/tari_collectibles/src-tauri/src/app_state.rs @@ -74,11 +74,8 @@ impl ConcurrentAppState { pub async fn connect_base_node_client(&self) -> Result { let lock = self.inner.read().await; - let client = BaseNodeClient::connect(format!( - "http://{}", - lock.config.base_node_grpc_address.to_string() - )) - .await?; + let client = + BaseNodeClient::connect(format!("http://{}", lock.config.base_node_grpc_address)).await?; Ok(client) } diff --git a/applications/tari_collectibles/src-tauri/src/main.rs b/applications/tari_collectibles/src-tauri/src/main.rs index 85c77fb3b4..dabc4efb8b 100644 --- a/applications/tari_collectibles/src-tauri/src/main.rs +++ b/applications/tari_collectibles/src-tauri/src/main.rs @@ -1,4 +1,3 @@ -#![feature(array_methods)] #![cfg_attr( all(not(debug_assertions), target_os = "windows"), windows_subsystem = "windows" diff --git a/base_layer/key_manager/Cargo.toml b/base_layer/key_manager/Cargo.toml index cb8d4b9a87..cd281d6ecf 100644 --- a/base_layer/key_manager/Cargo.toml +++ b/base_layer/key_manager/Cargo.toml @@ -32,7 +32,7 @@ serde_json = "1.0.39" thiserror = "1.0.26" strum_macros = "0.22" strum = { version = "0.22", features = ["derive"] } -wasm-bindgen = { version = "0.2", features = ["serde-serialize", "nightly"] } +wasm-bindgen = { version = "0.2", features = ["serde-serialize", "nightly"], optional = 
true } wasm-bindgen-test = "0.3.28" [dev-dependencies] @@ -41,3 +41,4 @@ sha2 = "0.9.8" [features] avx2 = ["tari_crypto/avx2"] js = ["getrandom/js", "js-sys"] +wasm = ["wasm-bindgen", "js"] diff --git a/base_layer/key_manager/Makefile b/base_layer/key_manager/Makefile index efa926462c..41689d327c 100644 --- a/base_layer/key_manager/Makefile +++ b/base_layer/key_manager/Makefile @@ -1,19 +1,17 @@ -.PHONY: test +toolchain=nightly-2021-11-20 +.phony: test test: - wasm-pack test --node --features js - -.PHONY: build + rustup run $(toolchain) wasm-pack test --node --features wasm +.phony: build build: - wasm-pack build --target bundler . -- --features js - -.PHONY: install + rustup run $(toolchain) wasm-pack build --target bundler . -- --features wasm +.phony: install install: - wasm-pack build --out-dir=../../applications/tari_web_extension/src/key_manager/ --target bundler . -- --features js - -.PHONY: web + rustup run $(toolchain) wasm-pack build --out-dir=../../applications/tari_web_extension/src/key_manager/ --target bundler . -- --features wasm +.phony: web web: - wasm-pack build --target web . -- --features js \ No newline at end of file + rustup run $(toolchain) wasm-pack build --target web . -- --features wasm \ No newline at end of file diff --git a/base_layer/key_manager/src/lib.rs b/base_layer/key_manager/src/lib.rs index e9985c70de..ef4b1825e0 100644 --- a/base_layer/key_manager/src/lib.rs +++ b/base_layer/key_manager/src/lib.rs @@ -13,4 +13,5 @@ pub mod mnemonic; pub mod mnemonic_wordlists; // https://github.com/rustwasm/wasm-bindgen/issues/2774 #[allow(clippy::unused_unit)] +#[cfg(feature = "wasm")] pub mod wasm; diff --git a/base_layer/key_manager/src/wasm.rs b/base_layer/key_manager/src/wasm.rs index aed0835ddb..9534ed1722 100644 --- a/base_layer/key_manager/src/wasm.rs +++ b/base_layer/key_manager/src/wasm.rs @@ -190,7 +190,7 @@ mod test { let next_key = response.key_manager.next_key().unwrap(); assert_eq!( next_key.k.to_hex(), - "5a14f1205cfeb10d53af46e82b70e8832a544206f524b404e7a346148532910a".to_string() + "5c06999ed20e18bbb76245826141f8ae8700a648d87ec4da5a2a7507ce4b5f0e".to_string() ) } diff --git a/comms/dht/src/lib.rs b/comms/dht/src/lib.rs index cb75344910..d2cd185154 100644 --- a/comms/dht/src/lib.rs +++ b/comms/dht/src/lib.rs @@ -1,4 +1,3 @@ -#![feature(map_entry_replace)] #![doc(html_root_url = "https://docs.rs/tower-filter/0.3.0-alpha.2")] #![cfg_attr(not(debug_assertions), deny(unused_variables))] #![cfg_attr(not(debug_assertions), deny(unused_imports))] diff --git a/comms/dht/src/store_forward/local_state.rs b/comms/dht/src/store_forward/local_state.rs index 50965dbdad..3dc064b80c 100644 --- a/comms/dht/src/store_forward/local_state.rs +++ b/comms/dht/src/store_forward/local_state.rs @@ -41,9 +41,9 @@ impl SafLocalState { pub fn register_inflight_request(&mut self, peer: NodeId) { match self.inflight_saf_requests.entry(peer) { - Entry::Occupied(entry) => { + Entry::Occupied(mut entry) => { let (count, _) = *entry.get(); - entry.replace_entry((count + 1, Instant::now())); + *entry.get_mut() = (count + 1, Instant::now()); }, Entry::Vacant(entry) => { entry.insert((1, Instant::now())); @@ -53,11 +53,11 @@ impl SafLocalState { pub fn mark_infight_response_received(&mut self, peer: NodeId) -> Option<Duration> { match self.inflight_saf_requests.entry(peer) { - Entry::Occupied(entry) => { + Entry::Occupied(mut entry) => { let (count, ts) = *entry.get(); let reduced_count = count - 1; if reduced_count > 0 { - entry.replace_entry((reduced_count, ts)); + *entry.get_mut() =
(reduced_count, ts); } else { entry.remove(); } From ffd502d61a709d41723e67c8ec6b2d5004a87edc Mon Sep 17 00:00:00 2001 From: Hansie Odendaal <39146854+hansieodendaal@users.noreply.github.com> Date: Wed, 2 Feb 2022 09:49:56 +0200 Subject: [PATCH 04/20] feat: re-use scanned range proofs (#3764) Description --- - Re-used scanned range proofs for one-sided payments and wallet recovery instead of re-calculating them every time - Consistent creation of rewindable outputs in the wallet database Motivation and Context --- - Scanning of one-sided payments and wallet recovery was inefficient due to wasteful re-calculating of the range proof before adding the output to the wallet database, which in any case would never correspond to the range proof on the blockchain. - A mixture of rewindable and non-rewindable outputs were created in certain transaction protocols; all outputs need to be rewindable. How Has This Been Tested? --- - Unit tests - Cucumber tests (`npm test -- --tags "@critical"`) --- .../core/src/transactions/coinbase_builder.rs | 2 +- .../core/src/transactions/transaction/test.rs | 2 +- .../transaction/unblinded_output.rs | 24 ++-- .../transaction_initializer.rs | 4 +- .../src/output_manager_service/handle.rs | 26 +++- .../recovery/standard_outputs_recoverer.rs | 23 +++- .../src/output_manager_service/service.rs | 125 +++++++++++++++--- .../output_manager_service/storage/models.rs | 5 +- .../wallet/src/transaction_service/service.rs | 7 +- integration_tests/package-lock.json | 100 ++++++-------- integration_tests/package.json | 2 +- 11 files changed, 211 insertions(+), 109 deletions(-) diff --git a/base_layer/core/src/transactions/coinbase_builder.rs b/base_layer/core/src/transactions/coinbase_builder.rs index e73b3e35f4..724b07ef88 100644 --- a/base_layer/core/src/transactions/coinbase_builder.rs +++ b/base_layer/core/src/transactions/coinbase_builder.rs @@ -225,7 +225,7 @@ impl CoinbaseBuilder { // TODO: Verify bullet proof? let output = if let Some(rewind_data) = self.rewind_data.as_ref() { unblinded_output - .as_rewindable_transaction_output(&self.factories, rewind_data) + .as_rewindable_transaction_output(&self.factories, rewind_data, None) .map_err(|e| CoinbaseBuildError::BuildError(e.to_string()))? 
} else { unblinded_output diff --git a/base_layer/core/src/transactions/transaction/test.rs b/base_layer/core/src/transactions/transaction/test.rs index 087d21a590..2c513bbab6 100644 --- a/base_layer/core/src/transactions/transaction/test.rs +++ b/base_layer/core/src/transactions/transaction/test.rs @@ -398,7 +398,7 @@ fn test_output_rewinding() { ..Default::default() }); let output = unblinded_output - .as_rewindable_transaction_output(&factories, &rewind_data) + .as_rewindable_transaction_output(&factories, &rewind_data, None) .unwrap(); assert!(matches!( diff --git a/base_layer/core/src/transactions/transaction/unblinded_output.rs b/base_layer/core/src/transactions/transaction/unblinded_output.rs index 16f4769cce..8887fb8b54 100644 --- a/base_layer/core/src/transactions/transaction/unblinded_output.rs +++ b/base_layer/core/src/transactions/transaction/unblinded_output.rs @@ -218,6 +218,7 @@ impl UnblindedOutput { &self, factories: &CryptoFactories, rewind_data: &RewindData, + range_proof: Option<&RangeProof>, ) -> Result<TransactionOutput, TransactionError> { if factories.range_proof.range() < 64 && self.value >= MicroTari::from(1u64.shl(&factories.range_proof.range())) { @@ -227,16 +228,19 @@ impl UnblindedOutput { } let commitment = factories.commitment.commit(&self.spending_key, &self.value.into()); - let proof_bytes = factories.range_proof.construct_proof_with_rewind_key( - &self.spending_key, - self.value.into(), - &rewind_data.rewind_key, - &rewind_data.rewind_blinding_key, - &rewind_data.proof_message, - )?; - - let proof = RangeProof::from_bytes(&proof_bytes) - .map_err(|_| TransactionError::RangeProofError(RangeProofError::ProofConstructionError))?; + let proof = if let Some(proof) = range_proof { + proof.clone() + } else { + let proof_bytes = factories.range_proof.construct_proof_with_rewind_key( + &self.spending_key, + self.value.into(), + &rewind_data.rewind_key, + &rewind_data.rewind_blinding_key, + &rewind_data.proof_message, + )?; + RangeProof::from_bytes(&proof_bytes) + .map_err(|_| TransactionError::RangeProofError(RangeProofError::ProofConstructionError))? + }; let output = TransactionOutput::new_current_version( self.features.clone(), diff --git a/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs b/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs index 4b7f34bf3e..e01e580904 100644 --- a/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs +++ b/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs @@ -503,7 +503,7 @@ impl SenderTransactionInitializer { .iter() .map(|o| { if let Some(rewind_data) = self.rewind_data.as_ref() { - o.as_rewindable_transaction_output(factories, rewind_data) + o.as_rewindable_transaction_output(factories, rewind_data, None) } else { o.as_transaction_output(factories) } @@ -527,7 +527,7 @@ impl SenderTransactionInitializer { // If rewind data is present we produce a rewindable output, else a standard output let change_output = if let Some(rewind_data) = self.rewind_data.as_ref() { // TODO: Should proof be verified?
- match change_unblinded_output.as_rewindable_transaction_output(factories, rewind_data) { + match change_unblinded_output.as_rewindable_transaction_output(factories, rewind_data, None) { Ok(o) => o, Err(e) => { return self.build_err(e.to_string().as_str()); diff --git a/base_layer/wallet/src/output_manager_service/handle.rs b/base_layer/wallet/src/output_manager_service/handle.rs index 99631df820..315c931a6a 100644 --- a/base_layer/wallet/src/output_manager_service/handle.rs +++ b/base_layer/wallet/src/output_manager_service/handle.rs @@ -32,7 +32,7 @@ use tari_core::{ transactions::{ tari_amount::MicroTari, transaction::{OutputFeatures, Transaction, TransactionOutput, UnblindedOutput, UnblindedOutputBuilder}, - transaction_protocol::sender::TransactionSenderMessage, + transaction_protocol::{sender::TransactionSenderMessage, RewindData}, ReceiverTransactionProtocol, SenderTransactionProtocol, }, @@ -57,6 +57,7 @@ pub enum OutputManagerRequest { GetBalance, AddOutput((Box, Option)), AddOutputWithTxId((TxId, Box, Option)), + AddRewindableOutputWithTxId((TxId, Box, Option, Option)), AddUnvalidatedOutput((TxId, Box, Option)), UpdateOutputMetadataSignature(Box), GetRecipientTransaction(TransactionSenderMessage), @@ -127,6 +128,7 @@ impl fmt::Display for OutputManagerRequest { GetBalance => write!(f, "GetBalance"), AddOutput((v, _)) => write!(f, "AddOutput ({})", v.value), AddOutputWithTxId((t, v, _)) => write!(f, "AddOutputWithTxId ({}: {})", t, v.value), + AddRewindableOutputWithTxId((t, v, _, _)) => write!(f, "AddRewindableOutputWithTxId ({}: {})", t, v.value), AddUnvalidatedOutput((t, v, _)) => { write!(f, "AddUnvalidatedOutput ({}: {})", t, v.value) }, @@ -315,6 +317,28 @@ impl OutputManagerHandle { } } + pub async fn add_rewindable_output_with_tx_id( + &mut self, + tx_id: TxId, + output: UnblindedOutput, + spend_priority: Option, + custom_rewind_data: Option, + ) -> Result<(), OutputManagerError> { + match self + .handle + .call(OutputManagerRequest::AddRewindableOutputWithTxId(( + tx_id, + Box::new(output), + spend_priority, + custom_rewind_data, + ))) + .await?? + { + OutputManagerResponse::OutputAdded => Ok(()), + _ => Err(OutputManagerError::UnexpectedApiResponse), + } + } + pub async fn add_unvalidated_output( &mut self, tx_id: TxId, diff --git a/base_layer/wallet/src/output_manager_service/recovery/standard_outputs_recoverer.rs b/base_layer/wallet/src/output_manager_service/recovery/standard_outputs_recoverer.rs index 9052a10d6c..f1f7c73f83 100644 --- a/base_layer/wallet/src/output_manager_service/recovery/standard_outputs_recoverer.rs +++ b/base_layer/wallet/src/output_manager_service/recovery/standard_outputs_recoverer.rs @@ -24,7 +24,7 @@ use std::{sync::Arc, time::Instant}; use log::*; use rand::rngs::OsRng; -use tari_common_types::types::{PrivateKey, PublicKey}; +use tari_common_types::types::{PrivateKey, PublicKey, RangeProof}; use tari_core::transactions::{ transaction::{TransactionOutput, UnblindedOutput}, CryptoFactories, @@ -75,7 +75,7 @@ where TBackend: OutputManagerBackend + 'static ) -> Result, OutputManagerError> { let start = Instant::now(); let outputs_length = outputs.len(); - let mut rewound_outputs: Vec = outputs + let mut rewound_outputs: Vec<(UnblindedOutput, RangeProof)> = outputs .into_iter() .filter_map(|output| { output @@ -87,14 +87,15 @@ where TBackend: OutputManagerBackend + 'static .ok() .map(|v| ( v, output ) ) }) - //Todo this needs some investigation. 
We assume Nop script here and recovery here might create an unspendable output if the script does not equal Nop. + //TODO: This needs some investigation. We assume Nop script here and recovery here might create an + //TODO: unspendable output if the script does not equal Nop. .map( |(rewind_result, output)| { // Todo we need to look here that we might want to fail a specific output and not recover it as this // will only work if the script is a Nop script. If this is not a Nop script the recovered input // will not be spendable. let script_key = PrivateKey::random(&mut OsRng); - UnblindedOutput::new( + (UnblindedOutput::new( output.version, rewind_result.committed_value, rewind_result.blinding_factor, @@ -106,7 +107,8 @@ where TBackend: OutputManagerBackend + 'static output.metadata_signature, 0, output.covenant - ) + ), + output.proof) }, ) .collect(); @@ -118,11 +120,17 @@ where TBackend: OutputManagerBackend + 'static rewind_time.as_millis(), ); - for output in rewound_outputs.iter_mut() { + for (output, proof) in rewound_outputs.iter_mut() { self.update_outputs_script_private_key_and_update_key_manager_index(output) .await?; - let db_output = DbUnblindedOutput::from_unblinded_output(output.clone(), &self.factories, None)?; + let db_output = DbUnblindedOutput::rewindable_from_unblinded_output( + output.clone(), + &self.factories, + self.master_key_manager.rewind_data(), + None, + Some(proof), + )?; let output_hex = db_output.commitment.to_hex(); if let Err(e) = self.db.add_unspent_output(db_output).await { match e { @@ -145,6 +153,7 @@ where TBackend: OutputManagerBackend + 'static ); } + let rewound_outputs = rewound_outputs.iter().map(|(ro, _)| ro.clone()).collect(); Ok(rewound_outputs) } diff --git a/base_layer/wallet/src/output_manager_service/service.rs b/base_layer/wallet/src/output_manager_service/service.rs index 3f51a32f8f..3dfb07ecc1 100644 --- a/base_layer/wallet/src/output_manager_service/service.rs +++ b/base_layer/wallet/src/output_manager_service/service.rs @@ -47,7 +47,7 @@ use tari_core::{ UnblindedOutput, UnblindedOutputBuilder, }, - transaction_protocol::sender::TransactionSenderMessage, + transaction_protocol::{sender::TransactionSenderMessage, RewindData}, CoinbaseBuilder, CryptoFactories, ReceiverTransactionProtocol, @@ -206,6 +206,10 @@ where .add_output(Some(tx_id), *uo, spend_priority) .await .map(|_| OutputManagerResponse::OutputAdded), + OutputManagerRequest::AddRewindableOutputWithTxId((tx_id, uo, spend_priority, custom_rewind_data)) => self + .add_rewindable_output(Some(tx_id), *uo, spend_priority, custom_rewind_data) + .await + .map(|_| OutputManagerResponse::OutputAdded), OutputManagerRequest::AddUnvalidatedOutput((tx_id, uo, spend_priority)) => self .add_unvalidated_output(tx_id, *uo, spend_priority) .await @@ -530,6 +534,43 @@ where Ok(()) } + /// Add an unblinded rewindable output to the outputs table and marks is as `Unspent`. 
+ pub async fn add_rewindable_output( + &mut self, + tx_id: Option, + output: UnblindedOutput, + spend_priority: Option, + custom_rewind_data: Option, + ) -> Result<(), OutputManagerError> { + debug!( + target: LOG_TARGET, + "Add output of value {} to Output Manager", output.value + ); + + let rewind_data = if let Some(value) = custom_rewind_data { + value + } else { + self.resources.master_key_manager.rewind_data().clone() + }; + let output = DbUnblindedOutput::rewindable_from_unblinded_output( + output, + &self.resources.factories, + &rewind_data, + spend_priority, + None, + )?; + debug!( + target: LOG_TARGET, + "saving output of hash {} to Output Manager", + output.hash.to_hex() + ); + match tx_id { + None => self.resources.db.add_unspent_output(output).await?, + Some(t) => self.resources.db.add_unspent_output_with_tx_id(t, output).await?, + } + Ok(()) + } + /// Add an unblinded output to the outputs table and marks is as `EncumberedToBeReceived`. This is so that it will /// require a successful validation to confirm that it indeed spendable. pub async fn add_unvalidated_output( @@ -610,7 +651,7 @@ where .get_next_spend_and_script_key() .await?; - let output = DbUnblindedOutput::from_unblinded_output( + let output = DbUnblindedOutput::rewindable_from_unblinded_output( UnblindedOutput::new_current_version( single_round_sender_data.amount, spending_key.clone(), @@ -634,6 +675,8 @@ where single_round_sender_data.covenant.clone(), ), &self.resources.factories, + self.resources.master_key_manager.rewind_data(), + None, None, )?; @@ -828,9 +871,11 @@ where "There should be a change output metadata signature available".to_string(), ) })?; - change_output.push(DbUnblindedOutput::from_unblinded_output( + change_output.push(DbUnblindedOutput::rewindable_from_unblinded_output( unblinded_output, &self.resources.factories, + &self.resources.master_key_manager.rewind_data().clone(), + None, None, )?); } @@ -880,7 +925,13 @@ where .with_rewind_data(self.resources.master_key_manager.rewind_data().clone()) .build_with_reward(&self.resources.consensus_constants, reward)?; - let output = DbUnblindedOutput::from_unblinded_output(unblinded_output, &self.resources.factories, None)?; + let output = DbUnblindedOutput::rewindable_from_unblinded_output( + unblinded_output, + &self.resources.factories, + self.resources.master_key_manager.rewind_data(), + None, + None, + )?; // Clear any existing pending coinbase transactions for this blockheight if they exist if let Err(e) = self @@ -1005,9 +1056,11 @@ where builder .with_output(ub.clone(), sender_offset_private_key.clone()) .map_err(|e| OutputManagerError::BuildError(e.message))?; - db_outputs.push(DbUnblindedOutput::from_unblinded_output( + db_outputs.push(DbUnblindedOutput::rewindable_from_unblinded_output( ub, &self.resources.factories, + self.resources.master_key_manager.rewind_data(), + None, None, )?) } @@ -1064,9 +1117,11 @@ where // } if let Some(unblinded_output) = stp.get_change_unblinded_output()? 
{ - db_outputs.push(DbUnblindedOutput::from_unblinded_output( + db_outputs.push(DbUnblindedOutput::rewindable_from_unblinded_output( unblinded_output, &self.resources.factories, + self.resources.master_key_manager.rewind_data(), + None, None, )?); } @@ -1155,7 +1210,7 @@ where &sender_offset_private_key, &covenant, )?; - let utxo = DbUnblindedOutput::from_unblinded_output( + let utxo = DbUnblindedOutput::rewindable_from_unblinded_output( UnblindedOutput::new_current_version( amount, spending_key.clone(), @@ -1169,6 +1224,8 @@ where covenant, ), &self.resources.factories, + self.resources.master_key_manager.rewind_data(), + None, None, )?; builder @@ -1207,8 +1264,13 @@ where "There should be a change output metadata signature available".to_string(), ) })?; - let change_output = - DbUnblindedOutput::from_unblinded_output(unblinded_output, &self.resources.factories, None)?; + let change_output = DbUnblindedOutput::rewindable_from_unblinded_output( + unblinded_output, + &self.resources.factories, + self.resources.master_key_manager.rewind_data(), + None, + None, + )?; outputs.push(change_output); } @@ -1500,7 +1562,7 @@ where &sender_offset_private_key, &covenant, )?; - let utxo = DbUnblindedOutput::from_unblinded_output( + let utxo = DbUnblindedOutput::rewindable_from_unblinded_output( UnblindedOutput::new_current_version( output_amount, spending_key.clone(), @@ -1514,6 +1576,8 @@ where covenant.clone(), ), &self.resources.factories, + &self.resources.master_key_manager.rewind_data().clone(), + None, None, )?; builder @@ -1560,9 +1624,11 @@ where "There should be a change output metadata signature available".to_string(), ) })?; - outputs.push(DbUnblindedOutput::from_unblinded_output( + outputs.push(DbUnblindedOutput::rewindable_from_unblinded_output( unblinded_output, &self.resources.factories, + &self.resources.master_key_manager.rewind_data().clone(), + None, None, )?); } @@ -1691,8 +1757,13 @@ where let unblinded_output = stp.get_change_unblinded_output()?.ok_or_else(|| { OutputManagerError::BuildError("There should be a change output metadata signature available".to_string()) })?; - let change_output = - DbUnblindedOutput::from_unblinded_output(unblinded_output, &self.resources.factories, None)?; + let change_output = DbUnblindedOutput::rewindable_from_unblinded_output( + unblinded_output, + &self.resources.factories, + self.resources.master_key_manager.rewind_data(), + None, + None, + )?; outputs.push(change_output); trace!(target: LOG_TARGET, "Claiming HTLC with transaction ({}).", tx_id); @@ -1774,8 +1845,13 @@ where OutputManagerError::BuildError("There should be a change output metadata signature available".to_string()) })?; - let change_output = - DbUnblindedOutput::from_unblinded_output(unblinded_output, &self.resources.factories, None)?; + let change_output = DbUnblindedOutput::rewindable_from_unblinded_output( + unblinded_output, + &self.resources.factories, + self.resources.master_key_manager.rewind_data(), + None, + None, + )?; outputs.push(change_output); trace!(target: LOG_TARGET, "Claiming HTLC refund with transaction ({}).", tx_id); @@ -1834,10 +1910,13 @@ where ) .as_bytes(), )?; - let blinding_key = PrivateKey::from_bytes(&hash_secret_key(&spending_key))?; - let rewind_key = PrivateKey::from_bytes(&hash_secret_key(&blinding_key))?; - let rewound = - output.full_rewind_range_proof(&self.resources.factories.range_proof, &rewind_key, &blinding_key); + let rewind_blinding_key = PrivateKey::from_bytes(&hash_secret_key(&spending_key))?; + let rewind_key = 
PrivateKey::from_bytes(&hash_secret_key(&rewind_blinding_key))?; + let rewound = output.full_rewind_range_proof( + &self.resources.factories.range_proof, + &rewind_key, + &rewind_blinding_key, + ); if let Ok(rewound_result) = rewound { let rewound_output = UnblindedOutput::new( @@ -1853,10 +1932,16 @@ where known_one_sided_payment_scripts[i].script_lock_height, output.covenant, ); - let db_output = DbUnblindedOutput::from_unblinded_output( + let db_output = DbUnblindedOutput::rewindable_from_unblinded_output( rewound_output.clone(), &self.resources.factories, + &RewindData { + rewind_key, + rewind_blinding_key, + proof_message: [0u8; 21], + }, None, + Some(&output.proof), )?; let output_hex = output.commitment.to_hex(); diff --git a/base_layer/wallet/src/output_manager_service/storage/models.rs b/base_layer/wallet/src/output_manager_service/storage/models.rs index 977e9c9584..a013e9aa51 100644 --- a/base_layer/wallet/src/output_manager_service/storage/models.rs +++ b/base_layer/wallet/src/output_manager_service/storage/models.rs @@ -22,7 +22,7 @@ use std::cmp::Ordering; -use tari_common_types::types::{BlockHash, Commitment, HashOutput, PrivateKey}; +use tari_common_types::types::{BlockHash, Commitment, HashOutput, PrivateKey, RangeProof}; use tari_core::transactions::{transaction::UnblindedOutput, transaction_protocol::RewindData, CryptoFactories}; use tari_crypto::script::{ExecutionStack, TariScript}; use tari_utilities::hash::Hashable; @@ -69,8 +69,9 @@ impl DbUnblindedOutput { factory: &CryptoFactories, rewind_data: &RewindData, spending_priority: Option, + proof: Option<&RangeProof>, ) -> Result { - let tx_out = output.as_rewindable_transaction_output(factory, rewind_data)?; + let tx_out = output.as_rewindable_transaction_output(factory, rewind_data, proof)?; Ok(DbUnblindedOutput { hash: tx_out.hash(), commitment: tx_out.commitment, diff --git a/base_layer/wallet/src/transaction_service/service.rs b/base_layer/wallet/src/transaction_service/service.rs index e83538c4f6..7ce9c6a77b 100644 --- a/base_layer/wallet/src/transaction_service/service.rs +++ b/base_layer/wallet/src/transaction_service/service.rs @@ -995,7 +995,12 @@ where .get_fee_amount() .map_err(|e| TransactionServiceProtocolError::new(tx_id, e.into()))?; self.output_manager_service - .add_output_with_tx_id(tx_id, unblinded_output, Some(SpendingPriority::HtlcSpendAsap)) + .add_rewindable_output_with_tx_id( + tx_id, + unblinded_output, + Some(SpendingPriority::HtlcSpendAsap), + Some(rewind_data), + ) .await?; self.submit_transaction( transaction_broadcast_join_handles, diff --git a/integration_tests/package-lock.json b/integration_tests/package-lock.json index 0ff556688c..27e1c628b1 100644 --- a/integration_tests/package-lock.json +++ b/integration_tests/package-lock.json @@ -753,7 +753,7 @@ }, "assertion-error": { "version": "1.1.0", - "resolved": false, + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", "dev": true }, @@ -908,15 +908,16 @@ } }, "chai": { - "version": "4.3.4", - "resolved": false, - "integrity": "sha512-yS5H68VYOCtN1cjfwumDSuzn/9c+yza4f3reKXlE5rUg7SFcCEy90gJvydNgOYtblyf4Zi6jIWRnXOgErta0KA==", + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/chai/-/chai-4.3.6.tgz", + "integrity": "sha512-bbcp3YfHCUzMOvKqsztczerVgBKSsEijCySNlHHbX3VG1nskvqjz5Rfso1gGwD6w6oOV3eI60pKuMOV5MV7p3Q==", "dev": true, "requires": { "assertion-error": "^1.1.0", "check-error": 
"^1.0.2", "deep-eql": "^3.0.1", "get-func-name": "^2.0.0", + "loupe": "^2.3.1", "pathval": "^1.1.1", "type-detect": "^4.0.5" } @@ -934,7 +935,7 @@ }, "check-error": { "version": "1.0.2", - "resolved": false, + "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", "integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII=", "dev": true }, @@ -1127,7 +1128,7 @@ }, "deep-eql": { "version": "3.0.1", - "resolved": false, + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz", "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==", "dev": true, "requires": { @@ -1881,7 +1882,7 @@ }, "get-func-name": { "version": "2.0.0", - "resolved": false, + "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.0.tgz", "integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE=", "dev": true }, @@ -1945,7 +1946,7 @@ }, "globals": { "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "resolved": false, "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", "dev": true }, @@ -2251,7 +2252,7 @@ }, "jsesc": { "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "resolved": false, "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", "dev": true }, @@ -2455,6 +2456,15 @@ "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==", "dev": true }, + "loupe": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-2.3.1.tgz", + "integrity": "sha512-EN1D3jyVmaX4tnajVlfbREU4axL647hLec1h/PXAb8CPDMJiYitcWF2UeLVNttRqaIqQs4x+mRvXf+d+TlDrCA==", + "dev": true, + "requires": { + "get-func-name": "^2.0.0" + } + }, "lower-case": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", @@ -2705,7 +2715,7 @@ }, "pathval": { "version": "1.1.1", - "resolved": false, + "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.1.tgz", "integrity": "sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==", "dev": true }, @@ -3028,7 +3038,7 @@ }, "source-map": { "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "resolved": false, "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", "dev": true }, @@ -3280,7 +3290,7 @@ }, "to-fast-properties": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "resolved": false, "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=", "dev": true }, @@ -3341,7 +3351,7 @@ }, "type-detect": { "version": "4.0.8", - "resolved": false, + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", "dev": true }, @@ -3430,104 +3440,68 @@ "dependencies": { "@grpc/grpc-js": { "version": "1.3.6", - "resolved": false, - "integrity": "sha512-v7+LQFbqZKmd/Tvf5/j1Xlbq6jXL/4d+gUtm2TNX4QiEC3ELWADmGr2dGlUyLl6aKTuYfsN72vAsO5zmavYkEg==", "requires": { "@types/node": ">=12.12.47" } }, "@grpc/proto-loader": { "version": "0.5.6", - "resolved": false, - "integrity": "sha512-DT14xgw3PSzPxwS13auTEwxhMMOoz33DPUKNtmYK/QYbBSpLXJy78FGGs5yVoxVobEqPm4iW9MOIoz0A3bLTRQ==", "requires": { "lodash.camelcase": "^4.3.0", "protobufjs": "^6.8.6" } }, 
"@protobufjs/aspromise": { - "version": "1.1.2", - "resolved": false, - "integrity": "sha1-m4sMxmPWaafY9vXQiToU00jzD78=" + "version": "1.1.2" }, "@protobufjs/base64": { - "version": "1.1.2", - "resolved": false, - "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" + "version": "1.1.2" }, "@protobufjs/codegen": { - "version": "2.0.4", - "resolved": false, - "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" + "version": "2.0.4" }, "@protobufjs/eventemitter": { - "version": "1.1.0", - "resolved": false, - "integrity": "sha1-NVy8mLr61ZePntCV85diHx0Ga3A=" + "version": "1.1.0" }, "@protobufjs/fetch": { "version": "1.1.0", - "resolved": false, - "integrity": "sha1-upn7WYYUr2VwDBYZ/wbUVLDYTEU=", "requires": { "@protobufjs/aspromise": "^1.1.1", "@protobufjs/inquire": "^1.1.0" } }, "@protobufjs/float": { - "version": "1.0.2", - "resolved": false, - "integrity": "sha1-Xp4avctz/Ap8uLKR33jIy9l7h9E=" + "version": "1.0.2" }, "@protobufjs/inquire": { - "version": "1.1.0", - "resolved": false, - "integrity": "sha1-/yAOPnzyQp4tyvwRQIKOjMY48Ik=" + "version": "1.1.0" }, "@protobufjs/path": { - "version": "1.1.2", - "resolved": false, - "integrity": "sha1-bMKyDFya1q0NzP0hynZz2Nf79o0=" + "version": "1.1.2" }, "@protobufjs/pool": { - "version": "1.1.0", - "resolved": false, - "integrity": "sha1-Cf0V8tbTq/qbZbw2ZQbWrXhG/1Q=" + "version": "1.1.0" }, "@protobufjs/utf8": { - "version": "1.1.0", - "resolved": false, - "integrity": "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=" + "version": "1.1.0" }, "@types/long": { - "version": "4.0.1", - "resolved": false, - "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" + "version": "4.0.1" }, "@types/node": { - "version": "16.3.2", - "resolved": false, - "integrity": "sha512-jJs9ErFLP403I+hMLGnqDRWT0RYKSvArxuBVh2veudHV7ifEC1WAmjJADacZ7mRbA2nWgHtn8xyECMAot0SkAw==" + "version": "16.3.2" }, "grpc-promise": { - "version": "1.4.0", - "resolved": false, - "integrity": "sha512-4BBXHXb5OjjBh7luylu8vFqL6H6aPn/LeqpQaSBeRzO/Xv95wHW/WkU9TJRqaCTMZ5wq9jTSvlJWp0vRJy1pVA==" + "version": "1.4.0" }, "lodash.camelcase": { - "version": "4.3.0", - "resolved": false, - "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=" + "version": "4.3.0" }, "long": { - "version": "4.0.0", - "resolved": false, - "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" + "version": "4.0.0" }, "protobufjs": { "version": "6.11.2", - "resolved": false, - "integrity": "sha512-4BQJoPooKJl2G9j3XftkIXjoC9C0Av2NOrWmbLWT1vH32GcSUHjM0Arra6UfTsVyfMAuFzaLucXn1sadxJydAw==", "requires": { "@protobufjs/aspromise": "^1.1.2", "@protobufjs/base64": "^1.1.2", diff --git a/integration_tests/package.json b/integration_tests/package.json index 539b4771e8..e072c74b85 100644 --- a/integration_tests/package.json +++ b/integration_tests/package.json @@ -22,7 +22,7 @@ "@grpc/grpc-js": "^1.4.4", "@grpc/proto-loader": "^0.5.5", "blakejs": "^1.1.0", - "chai": "^4.2.0", + "chai": "^4.3.6", "cucumber-html-reporter": "^5.5.0", "eslint": "^7.32.0", "eslint-config-prettier": "^8.3.0", From 3ee13207de37774415bf28ab03419c7843d858c7 Mon Sep 17 00:00:00 2001 From: Martin Stefcek <35243812+Cifko@users.noreply.github.com> Date: Wed, 2 Feb 2022 10:02:55 +0100 Subject: [PATCH 05/20] chore: script to keep .gitkeep file (#3787) Description --- Added a step after build, that will recreate the `.gitkeep` file in the build directory. 
Motivation and Context --- We need to keep the `.gitkeep` file after the `npm run build` How Has This Been Tested? --- `npm run build` windows and wsl2(ubuntu) on windows --- applications/tari_collectibles/web-app/package.json | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/tari_collectibles/web-app/package.json b/applications/tari_collectibles/web-app/package.json index 378a9aeb79..a75aea0d10 100644 --- a/applications/tari_collectibles/web-app/package.json +++ b/applications/tari_collectibles/web-app/package.json @@ -23,6 +23,7 @@ "scripts": { "start": "react-scripts start", "build": "react-scripts build", + "postbuild": "touch ./build/.gitkeep || type NUL > ./build/.gitkeep", "test": "react-scripts test", "eject": "react-scripts eject", "lint": "eslint src/**/*.js" From 3495e85707f3ffba622feeab42e17276181654c2 Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Wed, 2 Feb 2022 13:12:02 +0200 Subject: [PATCH 06/20] feat(base-node): add number of active sync peers metric (#3784) Description --- - Adds `tari_base_node::sync::active_peers` gauge that tracks number of peers syncing from the base node Motivation and Context --- Visibility into number of peers syncing from the base node How Has This Been Tested? --- Manually --- base_layer/core/src/base_node/metrics.rs | 12 ++++++++ .../core/src/base_node/sync/rpc/service.rs | 19 +++++++++++-- .../src/base_node/sync/rpc/sync_utxos_task.rs | 28 +++++++++++++------ 3 files changed, 47 insertions(+), 12 deletions(-) diff --git a/base_layer/core/src/base_node/metrics.rs b/base_layer/core/src/base_node/metrics.rs index 2129e8fb03..b78b5b8dc7 100644 --- a/base_layer/core/src/base_node/metrics.rs +++ b/base_layer/core/src/base_node/metrics.rs @@ -104,3 +104,15 @@ pub fn rejected_blocks(height: u64, hash: &[u8]) -> IntCounter { METER.with_label_values(&[&height.to_string(), &to_hex(hash)]) } + +pub fn active_sync_peers() -> IntGauge { + static METER: Lazy = Lazy::new(|| { + tari_metrics::register_int_gauge( + "base_node::sync::active_peers", + "Number of active peers syncing from this node", + ) + .unwrap() + }); + + METER.clone() +} diff --git a/base_layer/core/src/base_node/sync/rpc/service.rs b/base_layer/core/src/base_node/sync/rpc/service.rs index 891775ebc2..0ebac3f626 100644 --- a/base_layer/core/src/base_node/sync/rpc/service.rs +++ b/base_layer/core/src/base_node/sync/rpc/service.rs @@ -43,6 +43,7 @@ use tracing::{instrument, span, Instrument, Level}; use crate::{ base_node::{ comms_interface::BlockEvent, + metrics, sync::rpc::{sync_utxos_task::SyncUtxosTask, BaseNodeSyncService}, LocalNodeCommsInterface, }, @@ -95,6 +96,7 @@ impl BaseNodeSyncRpcService { let token = Arc::new(peer); lock.push(Arc::downgrade(&token)); + metrics::active_sync_peers().set(lock.len() as i64); Ok(token) } } @@ -237,6 +239,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ } } + metrics::active_sync_peers().dec(); debug!( target: LOG_TARGET, "Block sync round complete for peer `{}`.", peer_node_id, @@ -325,6 +328,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ } } + metrics::active_sync_peers().dec(); debug!( target: LOG_TARGET, "Header sync round complete for peer `{}`.", session_token, @@ -430,6 +434,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ &self, request: Request, ) -> Result, RpcStatus> { + let peer_node_id = request.context().peer_node_id().clone(); let req = request.into_message(); let (tx, rx) = mpsc::channel(100); let db = self.db(); @@ -455,6 +460,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ return 
Err(RpcStatus::bad_request("start header height is after end header")); } + let session_token = self.try_add_exclusive_session(peer_node_id).await?; task::spawn(async move { while current_height <= end_height { if tx.is_closed() { @@ -524,6 +530,12 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ } } } + + metrics::active_sync_peers().dec(); + debug!( + target: LOG_TARGET, + "Kernel sync round complete for peer `{}`.", session_token, + ); }); Ok(Streaming::new(rx)) } @@ -531,21 +543,22 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ #[instrument(skip(self), err)] async fn sync_utxos(&self, request: Request) -> Result, RpcStatus> { let req = request.message(); - let peer = request.context().peer_node_id(); + let peer_node_id = request.context().peer_node_id(); debug!( target: LOG_TARGET, "Received sync_utxos request from header {} to {} (start = {}, include_pruned_utxos = {}, \ include_deleted_bitmaps = {})", - peer, + peer_node_id, req.start, req.end_header_hash.to_hex(), req.include_pruned_utxos, req.include_deleted_bitmaps ); + let _session_token = self.try_add_exclusive_session(peer_node_id.clone()).await?; let (tx, rx) = mpsc::channel(200); let task = SyncUtxosTask::new(self.db()); - task.run(request.into_message(), tx).await?; + task.run(request, tx).await?; Ok(Streaming::new(rx)) } diff --git a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs index db1e46f94b..22326ae14f 100644 --- a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs +++ b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs @@ -23,11 +23,15 @@ use std::{sync::Arc, time::Instant}; use log::*; -use tari_comms::{protocol::rpc::RpcStatus, utils}; +use tari_comms::{ + protocol::rpc::{Request, RpcStatus}, + utils, +}; use tari_crypto::tari_utilities::{hex::Hex, Hashable}; use tokio::{sync::mpsc, task}; use crate::{ + base_node::metrics, blocks::BlockHeader, chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend}, proto, @@ -49,12 +53,14 @@ where B: BlockchainBackend + 'static pub(crate) async fn run( self, - request: SyncUtxosRequest, + request: Request, mut tx: mpsc::Sender>, ) -> Result<(), RpcStatus> { + let peer = request.context().peer_node_id().clone(); + let msg = request.into_message(); let start_header = self .db - .fetch_header_containing_utxo_mmr(request.start) + .fetch_header_containing_utxo_mmr(msg.start) .await .map_err(|err| { error!(target: LOG_TARGET, "{}", err); @@ -67,7 +73,7 @@ where B: BlockchainBackend + 'static let end_header = self .db - .fetch_header_by_block_hash(request.end_header_hash.clone()) + .fetch_header_by_block_hash(msg.end_header_hash.clone()) .await .map_err(RpcStatus::log_internal_error(LOG_TARGET))? .ok_or_else(|| RpcStatus::not_found("End header hash is was not found"))?; @@ -81,7 +87,7 @@ where B: BlockchainBackend + 'static } let (skip_outputs, prev_utxo_mmr_size) = if start_header.height() == 0 { - (request.start, 0) + (msg.start, 0) } else { let prev_header = self .db @@ -90,15 +96,16 @@ where B: BlockchainBackend + 'static .map_err(RpcStatus::log_internal_error(LOG_TARGET))? 
.ok_or_else(|| RpcStatus::not_found("Previous start header hash is was not found"))?; - let skip = request.start.checked_sub(prev_header.output_mmr_size) // This is a data inconsistency because fetch_header_containing_utxo_mmr returned the header we are basing this on - .ok_or_else(|| RpcStatus::general(format!("Data inconsistency: output mmr size of header at {} was more than the start index {}", prev_header.height, request.start)))?; + let skip = msg.start.checked_sub(prev_header.output_mmr_size) // This is a data inconsistency because fetch_header_containing_utxo_mmr returned the header we are basing this on + .ok_or_else(|| RpcStatus::general(format!("Data inconsistency: output mmr size of header at {} was more than the start index {}", prev_header.height, msg.start)))?; (skip, prev_header.output_mmr_size) }; - let include_pruned_utxos = request.include_pruned_utxos; - let include_deleted_bitmaps = request.include_deleted_bitmaps; + let include_pruned_utxos = msg.include_pruned_utxos; + let include_deleted_bitmaps = msg.include_deleted_bitmaps; task::spawn(async move { + debug!(target: LOG_TARGET, "Starting UTXO stream for peer '{}'", peer); if let Err(err) = self .start_streaming( &mut tx, start_header, end_header, skip_outputs, prev_utxo_mmr_size, include_pruned_utxos, include_deleted_bitmaps, ) .await { + debug!(target: LOG_TARGET, "UTXO stream errored for peer '{}': {}", peer, err); let _ = tx.send(Err(err)).await; } + debug!(target: LOG_TARGET, "UTXO stream completed for peer '{}'", peer); + metrics::active_sync_peers().dec(); }); Ok(()) From beb299e69ee1af7ec4e46889191051ce49dd1d50 Mon Sep 17 00:00:00 2001 From: Philip Robinson Date: Thu, 3 Feb 2022 12:27:25 +0200 Subject: [PATCH 07/20] fix: coinbase output recovery bug (#3789) Description --- A bug in recovering Coinbase outputs was revealed when the Key Manager was fixed to actually use branch seeds. Coinbase output keys are derived on a separate key manager branch, so when the UTXO scanner tried to update the key manager index it would not find the coinbase key in the main spending key branch, which caused an error. This PR updates the UTXO scanner to check whether a recovered output has the coinbase flag set, and to search the Coinbase branch when looking for that output's key index and script key. How Has This Been Tested? --- The PR updates the `Wallet recovery with connected base node staying online` cucumber test to include recovering some coinbase outputs. The PR also updates the `Multiple Wallet recovery from seed node` cucumber test to work now that the coinbase issue is fixed. The test is also updated so that it recovers N distinct wallets against the same seed node, whereas before it would create N wallets with the same seed words and thus the same network identity.
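As a rough illustration of the branch dispatch this PR introduces, here is a minimal, self-contained sketch with toy stand-ins (these are not the wallet's real key manager types; the actual `KeyManagerBranch` and `find_key_index` are in `master_key_manager.rs`, diff below):

    // Toy model of the fix: coinbase-flagged outputs are searched on the
    // Coinbase key branch instead of always on the main spending-key branch.
    #[derive(Clone, Copy)]
    enum KeyManagerBranch {
        Spend,
        Coinbase,
    }

    // Stand-in for a deterministic key chain per branch.
    fn derive_key(branch: KeyManagerBranch, index: u64) -> u64 {
        match branch {
            KeyManagerBranch::Spend => 1_000 + index,
            KeyManagerBranch::Coinbase => 2_000 + index,
        }
    }

    // Search the chosen branch for the index that derives the given key.
    fn find_key_index(key: u64, branch: KeyManagerBranch, max_depth: u64) -> Option<u64> {
        (0..max_depth).find(|&i| derive_key(branch, i) == key)
    }

    fn main() {
        let is_coinbase = true; // stand-in for output.features.is_coinbase()
        let recovered_key = derive_key(KeyManagerBranch::Coinbase, 3);
        let branch = if is_coinbase {
            KeyManagerBranch::Coinbase
        } else {
            KeyManagerBranch::Spend
        };
        assert_eq!(find_key_index(recovered_key, branch, 1_000), Some(3));
    }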
--- .../transaction/output_features.rs | 4 + .../src/output_manager_service/error.rs | 2 + .../master_key_manager.rs | 69 ++++++++++---- .../recovery/standard_outputs_recoverer.rs | 37 +++++--- .../features/WalletRecovery.feature | 30 +++---- .../features/support/mining_node_steps.js | 30 +++++++ .../features/support/node_steps.js | 31 ++----- .../features/support/wallet_steps.js | 89 ++++++++++++++----- integration_tests/features/support/world.js | 26 +++++- integration_tests/package-lock.json | 84 ++++++++++++----- 10 files changed, 288 insertions(+), 114 deletions(-) diff --git a/base_layer/core/src/transactions/transaction/output_features.rs b/base_layer/core/src/transactions/transaction/output_features.rs index 9effe8a30e..f7f4fc0793 100644 --- a/base_layer/core/src/transactions/transaction/output_features.rs +++ b/base_layer/core/src/transactions/transaction/output_features.rs @@ -209,6 +209,10 @@ impl OutputFeatures { pub fn is_non_fungible_burn(&self) -> bool { self.flags.contains(OutputFlags::BURN_NON_FUNGIBLE) } + + pub fn is_coinbase(&self) -> bool { + self.flags.contains(OutputFlags::COINBASE_OUTPUT) + } } impl ConsensusEncoding for OutputFeatures { diff --git a/base_layer/wallet/src/output_manager_service/error.rs b/base_layer/wallet/src/output_manager_service/error.rs index b1175ebd6e..8c0390566b 100644 --- a/base_layer/wallet/src/output_manager_service/error.rs +++ b/base_layer/wallet/src/output_manager_service/error.rs @@ -120,6 +120,8 @@ pub enum OutputManagerError { }, #[error("Invalid message received:{0}")] InvalidMessageError(String), + #[error("Operation not support on this Key Manager branch")] + KeyManagerBranchNotSupported, } #[derive(Debug, Error)] diff --git a/base_layer/wallet/src/output_manager_service/master_key_manager.rs b/base_layer/wallet/src/output_manager_service/master_key_manager.rs index eaaaaaa892..c796625896 100644 --- a/base_layer/wallet/src/output_manager_service/master_key_manager.rs +++ b/base_layer/wallet/src/output_manager_service/master_key_manager.rs @@ -20,6 +20,8 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+use std::fmt::{Display, Error, Formatter}; + use futures::lock::Mutex; use log::*; use tari_common_types::types::{PrivateKey, PublicKey}; @@ -41,14 +43,32 @@ use crate::{ }; const LOG_TARGET: &str = "wallet::output_manager_service::master_key_manager"; - -const KEY_MANAGER_COINBASE_BRANCH_KEY: &str = "coinbase"; -const KEY_MANAGER_COINBASE_SCRIPT_BRANCH_KEY: &str = "coinbase_script"; -const KEY_MANAGER_SCRIPT_BRANCH_KEY: &str = "script"; -const KEY_MANAGER_RECOVERY_VIEWONLY_BRANCH_KEY: &str = "recovery_viewonly"; -const KEY_MANAGER_RECOVERY_BLINDING_BRANCH_KEY: &str = "recovery_blinding"; const KEY_MANAGER_MAX_SEARCH_DEPTH: u64 = 1_000_000; +#[derive(Clone, Copy)] +pub enum KeyManagerBranch { + Spend, + SpendScript, + Coinbase, + CoinbaseScript, + RecoveryViewOnly, + RecoveryBlinding, +} + +impl Display for KeyManagerBranch { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + let response = match self { + KeyManagerBranch::Spend => "", + KeyManagerBranch::SpendScript => "script", + KeyManagerBranch::Coinbase => "coinbase", + KeyManagerBranch::CoinbaseScript => "coinbase_script", + KeyManagerBranch::RecoveryViewOnly => "recovery_viewonly", + KeyManagerBranch::RecoveryBlinding => "recovery_blinding", + }; + fmt.write_str(response) + } +} + pub(crate) struct MasterKeyManager { utxo_key_manager: Mutex>, utxo_script_key_manager: Mutex>, @@ -67,7 +87,7 @@ where TBackend: OutputManagerBackend + 'static None => { let starting_state = KeyManagerState { seed: master_seed, - branch_seed: "".to_string(), + branch_seed: KeyManagerBranch::Spend.to_string(), primary_key_index: 0, }; db.set_key_manager_state(starting_state.clone()).await?; @@ -89,32 +109,32 @@ where TBackend: OutputManagerBackend + 'static let utxo_script_key_manager = KeyManager::::from( key_manager_state.seed.clone(), - KEY_MANAGER_SCRIPT_BRANCH_KEY.to_string(), + KeyManagerBranch::SpendScript.to_string(), key_manager_state.primary_key_index, ); let coinbase_key_manager = KeyManager::::from( key_manager_state.seed.clone(), - KEY_MANAGER_COINBASE_BRANCH_KEY.to_string(), + KeyManagerBranch::Coinbase.to_string(), 0, ); let coinbase_script_key_manager = KeyManager::::from( key_manager_state.seed.clone(), - KEY_MANAGER_COINBASE_SCRIPT_BRANCH_KEY.to_string(), + KeyManagerBranch::CoinbaseScript.to_string(), 0, ); let rewind_key_manager = KeyManager::::from( key_manager_state.seed.clone(), - KEY_MANAGER_RECOVERY_VIEWONLY_BRANCH_KEY.to_string(), + KeyManagerBranch::RecoveryViewOnly.to_string(), 0, ); let rewind_key = rewind_key_manager.derive_key(0)?.k; let rewind_blinding_key_manager = KeyManager::::from( key_manager_state.seed, - KEY_MANAGER_RECOVERY_BLINDING_BRANCH_KEY.to_string(), + KeyManagerBranch::RecoveryBlinding.to_string(), 0, ); let rewind_blinding_key = rewind_blinding_key_manager.derive_key(0)?.k; @@ -158,6 +178,12 @@ where TBackend: OutputManagerBackend + 'static Ok(script_key.k) } + pub async fn get_coinbase_script_key_at_index(&self, index: u64) -> Result { + let skm = self.coinbase_script_key_manager.lock().await; + let script_key = skm.derive_key(index)?; + Ok(script_key.k) + } + pub async fn get_coinbase_spend_and_script_key_for_height( &self, height: u64, @@ -185,14 +211,19 @@ where TBackend: OutputManagerBackend + 'static } } - /// Search the current key manager key chain to find the index of the specified key. 
- pub async fn find_utxo_key_index(&self, key: PrivateKey) -> Result { - let utxo_key_manager = self.utxo_key_manager.lock().await; - let current_index = (*utxo_key_manager).key_index(); + /// Search the specified branch key manager key chain to find the index of the specified key. + pub async fn find_key_index(&self, key: PrivateKey, branch: KeyManagerBranch) -> Result { + let key_manager = match branch { + KeyManagerBranch::Spend => self.utxo_key_manager.lock().await, + KeyManagerBranch::Coinbase => self.coinbase_key_manager.lock().await, + _ => return Err(OutputManagerError::KeyManagerBranchNotSupported), + }; + + let current_index = (*key_manager).key_index(); for i in 0u64..current_index + KEY_MANAGER_MAX_SEARCH_DEPTH { - if (*utxo_key_manager).derive_key(i)?.k == key { - trace!(target: LOG_TARGET, "Key found in Key Chain at index {}", i); + if (*key_manager).derive_key(i)?.k == key { + trace!(target: LOG_TARGET, "Key found in {} Key Chain at index {}", branch, i); return Ok(i); } } @@ -201,7 +232,7 @@ where TBackend: OutputManagerBackend + 'static } /// If the supplied index is higher than the current UTXO key chain indices then they will be updated. - pub async fn update_current_index_if_higher(&self, index: u64) -> Result<(), OutputManagerError> { + pub async fn update_current_spend_key_index_if_higher(&self, index: u64) -> Result<(), OutputManagerError> { let mut utxo_key_manager = self.utxo_key_manager.lock().await; let mut utxo_script_key_manager = self.utxo_script_key_manager.lock().await; let current_index = (*utxo_key_manager).key_index(); diff --git a/base_layer/wallet/src/output_manager_service/recovery/standard_outputs_recoverer.rs b/base_layer/wallet/src/output_manager_service/recovery/standard_outputs_recoverer.rs index f1f7c73f83..d6f4200e0c 100644 --- a/base_layer/wallet/src/output_manager_service/recovery/standard_outputs_recoverer.rs +++ b/base_layer/wallet/src/output_manager_service/recovery/standard_outputs_recoverer.rs @@ -37,6 +37,7 @@ use tari_crypto::{ use crate::output_manager_service::{ error::{OutputManagerError, OutputManagerStorageError}, + master_key_manager::KeyManagerBranch, storage::{ database::{OutputManagerBackend, OutputManagerDatabase}, models::DbUnblindedOutput, @@ -164,18 +165,30 @@ where TBackend: OutputManagerBackend + 'static &mut self, output: &mut UnblindedOutput, ) -> Result<(), OutputManagerError> { - let found_index = self - .master_key_manager - .find_utxo_key_index(output.spending_key.clone()) - .await?; - - self.master_key_manager - .update_current_index_if_higher(found_index) - .await?; - - let script_private_key = self.master_key_manager.get_script_key_at_index(found_index).await?; - output.input_data = inputs!(PublicKey::from_secret_key(&script_private_key)); - output.script_private_key = script_private_key; + let script_key = if output.features.is_coinbase() { + let found_index = self + .master_key_manager + .find_key_index(output.spending_key.clone(), KeyManagerBranch::Coinbase) + .await?; + + self.master_key_manager + .get_coinbase_script_key_at_index(found_index) + .await? + } else { + let found_index = self + .master_key_manager + .find_key_index(output.spending_key.clone(), KeyManagerBranch::Spend) + .await?; + + self.master_key_manager + .update_current_spend_key_index_if_higher(found_index) + .await?; + + self.master_key_manager.get_script_key_at_index(found_index).await? 
+ }; + + output.input_data = inputs!(PublicKey::from_secret_key(&script_key)); + output.script_private_key = script_key; Ok(()) } } diff --git a/integration_tests/features/WalletRecovery.feature b/integration_tests/features/WalletRecovery.feature index bda987d481..55a7eb97a6 100644 --- a/integration_tests/features/WalletRecovery.feature +++ b/integration_tests/features/WalletRecovery.feature @@ -13,41 +13,39 @@ Feature: Wallet Recovery When I wait for wallet WALLET_A to have at least 55000000000 uT Then all nodes are at height 15 And I send 200000 uT from wallet WALLET_A to wallet WALLET_B at fee 100 + And I have mining node MINER_B connected to base node NODE and wallet WALLET_B + When mining node MINER_B mines 2 blocks When I mine 5 blocks on NODE - Then all nodes are at height 20 + Then all nodes are at height 22 Then I stop wallet WALLET_B When I recover wallet WALLET_B into wallet WALLET_C connected to all seed nodes - When I wait for wallet WALLET_C to have at least 200000 uT + When I wait for wallet WALLET_C to have at least 10000200000 uT And I have wallet WALLET_D connected to all seed nodes And I send 100000 uT from wallet WALLET_C to wallet WALLET_D at fee 100 When I mine 5 blocks on NODE - Then all nodes are at height 25 + Then all nodes are at height 27 Then I wait for wallet WALLET_D to have at least 100000 uT - @broken Scenario Outline: Multiple Wallet recovery from seed node Given I have a seed node NODE - And I have wallet WALLET_A connected to all seed nodes - And I have mining node MINER connected to base node NODE and wallet WALLET_A - When mining node MINER mines 15 blocks - When I wait for wallet WALLET_A to have at least 55000000000 uT - Then all nodes are at height 15 - Then I stop wallet WALLET_A - When I recover wallet WALLET_A into wallets connected to all seed nodes - When I wait for wallets to have at least 55000000000 uT - # TODO: having multiple wallet with the same network id is problematic, use N separate wallets or ensure that both are not trying to connect to the same base node - # Then Wallet WALLET_A and wallets have the same balance + And I have non-default wallets connected to all seed nodes using DirectAndStoreAndForward + And I have individual mining nodes connected to each wallet and base node NODE + Then I have each mining node mine 3 blocks + Then all nodes are at height 3* + Then I stop all wallets + When I recover all wallets connected to all seed nodes + Then I wait for recovered wallets to have at least 15000000000 uT @critical Examples: | NumWallets | - | 1 | + | 4 | @long-running Examples: | NumWallets | - | 2 | | 5 | | 10 | + | 20 | @critical diff --git a/integration_tests/features/support/mining_node_steps.js b/integration_tests/features/support/mining_node_steps.js index d1c82a05ef..add119e098 100644 --- a/integration_tests/features/support/mining_node_steps.js +++ b/integration_tests/features/support/mining_node_steps.js @@ -103,6 +103,36 @@ Given( } ); +Given( + /I have individual mining nodes connected to each wallet and (?:base|seed) node (.*)/, + async function (node) { + let walletNames = Object.keys(this.wallets); + const promises = []; + for (let i = 0; i < walletNames.length; i++) { + let name = "Miner_" + String(i).padStart(2, "0"); + promises.push(this.createMiningNode(name, node, walletNames[i])); + } + await Promise.all(promises); + } +); + +Given( + /I have each mining node mine (\d+) blocks?$/, + { timeout: 1200 * 1000 }, // Must allow many blocks to be mined; dynamic time out below limits actual time + async function 
(numBlocks) { + let miningNodes = Object.keys(this.miners); + for (let i = 0; i < miningNodes.length; i++) { + console.log("MN", miningNodes[i]); + const miningNode = this.getMiningNode(miningNodes[i]); + await miningNode.init(numBlocks, null, 1, i + 2, false, null); + await withTimeout( + (10 + parseInt(numBlocks * miningNodes.length) * 1) * 1000, + await miningNode.startNew() + ); + } + } +); + Given( /I have mine-before-tip mining node (.*) connected to base node (.*) and wallet (.*)/, function (miner, node, wallet) { diff --git a/integration_tests/features/support/node_steps.js b/integration_tests/features/support/node_steps.js index aa3cccaf72..bc99131ad8 100644 --- a/integration_tests/features/support/node_steps.js +++ b/integration_tests/features/support/node_steps.js @@ -354,28 +354,15 @@ Then( "all nodes are at height {int}", { timeout: 800 * 1000 }, async function (height) { - await waitFor( - async () => { - let result = true; - await this.forEachClientAsync(async (client, name) => { - await waitFor( - async () => await client.getTipHeight(), - height, - 5 * height * 1000 /* 5 seconds per block */ - ); - const currTip = await client.getTipHeight(); - console.log( - `Node ${name} is at tip: ${currTip} (should be ${height})` - ); - result = result && currTip == height; - }); - return result; - }, - true, - 600 * 1000, - 5 * 1000, - 5 - ); + await this.all_nodes_are_at_height(height); + } +); + +Then( + "all nodes are at height {int}*{int}", + { timeout: 800 * 1000 }, + async function (a, b) { + await this.all_nodes_are_at_height(a * b); } ); diff --git a/integration_tests/features/support/wallet_steps.js b/integration_tests/features/support/wallet_steps.js index ebf857f8d2..1828e32fc4 100644 --- a/integration_tests/features/support/wallet_steps.js +++ b/integration_tests/features/support/wallet_steps.js @@ -207,7 +207,7 @@ Given( // mechanism: DirectOnly, StoreAndForwardOnly, DirectAndStoreAndForward const promises = []; for (let i = 0; i < n; i++) { - let name = "Wallet_" + String(n).padStart(2, "0"); + let name = "Wallet_" + String(i).padStart(2, "0"); promises.push( this.createAndAddWallet(name, [this.seedAddresses()], { routingMechanism: mechanism, @@ -248,6 +248,42 @@ Given( } ); +Given( + /I recover all wallets connected to all seed nodes/, + { timeout: 120 * 1000 }, + async function () { + for (let walletName in this.wallets) { + let wallet = this.getWallet(walletName); + const seedWords = wallet.getSeedWords(); + let recoveredWalletName = "recovered_" + wallet.name; + console.log( + "Recover " + + wallet.name + + " into " + + recoveredWalletName + + ", seed words:\n " + + seedWords + ); + const walletB = new WalletProcess( + recoveredWalletName, + false, + {}, + this.logFilePathWallet, + seedWords + ); + + walletB.setPeerSeeds([this.seedAddresses()]); + await walletB.startNew(); + this.addWallet(recoveredWalletName, walletB); + let walletClient = await this.getWallet( + recoveredWalletName + ).connectClient(); + let walletInfo = await walletClient.identify(); + this.addWalletPubkey(recoveredWalletName, walletInfo.public_key); + } + } +); + Given( /I recover wallet (.*) into (\d+) wallets connected to all seed nodes/, { timeout: 30 * 1000 }, @@ -281,28 +317,34 @@ Given( ); Then( - /I wait for (\d+) wallets to have at least (\d+) uT/, + /I wait for recovered wallets to have at least (\d+) uT/, { timeout: 60 * 1000 }, - async function (numwallets, amount) { - for (let i = 1; i <= numwallets; i++) { - const walletClient = await this.getWallet(i.toString()).connectClient(); - 
console.log("\n"); - console.log( - "Waiting for wallet " + i + " balance to be at least " + amount + " uT" - ); + async function (amount) { + for (let walletName in this.wallets) { + if (walletName.split("_")[0] == "recovered") { + const walletClient = await this.getWallet(walletName).connectClient(); + console.log("\n"); + console.log( + "Waiting for wallet " + + walletName + + " balance to be at least " + + amount + + " uT" + ); - await waitFor( - async () => walletClient.isBalanceAtLeast(amount), - true, - 20 * 1000, - 5 * 1000, - 5 - ); - consoleLogBalance(await walletClient.getBalance()); - if (!(await walletClient.isBalanceAtLeast(amount))) { - console.log("Balance not adequate!"); + await waitFor( + async () => walletClient.isBalanceAtLeast(amount), + true, + 20 * 1000, + 5 * 1000, + 5 + ); + consoleLogBalance(await walletClient.getBalance()); + if (!(await walletClient.isBalanceAtLeast(amount))) { + console.log("Balance not adequate!"); + } + expect(await walletClient.isBalanceAtLeast(amount)).to.equal(true); } - expect(await walletClient.isBalanceAtLeast(amount)).to.equal(true); } } ); @@ -326,6 +368,13 @@ When(/I stop wallet ([^\s]+)/, async function (walletName) { await wallet.stop(); }); +When(/I stop all wallets/, async function () { + for (let walletName in this.wallets) { + let wallet = this.getWallet(walletName); + await wallet.stop(); + } +}); + When(/I start wallet (.*)/, async function (walletName) { let wallet = this.getWallet(walletName); await wallet.start(); diff --git a/integration_tests/features/support/world.js b/integration_tests/features/support/world.js index 39f3c6d00c..412e288f0f 100644 --- a/integration_tests/features/support/world.js +++ b/integration_tests/features/support/world.js @@ -20,7 +20,6 @@ const { waitFor, sleep, consoleLogBalance } = require("../../helpers/util"); const { PaymentType } = require("../../helpers/types"); const { expect } = require("chai"); const InterfaceFFI = require("../../helpers/ffi/ffiInterface"); -// const InterfaceFFI = require("../../helpers/ffi/ffiInterface"); class CustomWorld { constructor({ attach, parameters }) { @@ -593,6 +592,31 @@ class CustomWorld { } expect(await walletClient.isBalanceAtLeast(amount)).to.equal(true); } + + async all_nodes_are_at_height(height) { + await waitFor( + async () => { + let result = true; + await this.forEachClientAsync(async (client, name) => { + await waitFor( + async () => await client.getTipHeight(), + height, + 5 * height * 1000 /* 5 seconds per block */ + ); + const currTip = await client.getTipHeight(); + console.log( + `Node ${name} is at tip: ${currTip} (should be ${height})` + ); + result = result && currTip == height; + }); + return result; + }, + true, + 600 * 1000, + 5 * 1000, + 5 + ); + } } setWorldConstructor(CustomWorld); diff --git a/integration_tests/package-lock.json b/integration_tests/package-lock.json index 27e1c628b1..868ccd2cda 100644 --- a/integration_tests/package-lock.json +++ b/integration_tests/package-lock.json @@ -753,7 +753,7 @@ }, "assertion-error": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", + "resolved": false, "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", "dev": true }, @@ -935,7 +935,7 @@ }, "check-error": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", + "resolved": false, "integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII=", "dev": true }, @@ -1128,7 +1128,7 @@ }, 
"deep-eql": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz", + "resolved": false, "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==", "dev": true, "requires": { @@ -1882,7 +1882,7 @@ }, "get-func-name": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.0.tgz", + "resolved": false, "integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE=", "dev": true }, @@ -1946,7 +1946,7 @@ }, "globals": { "version": "11.12.0", - "resolved": false, + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", "dev": true }, @@ -2252,7 +2252,7 @@ }, "jsesc": { "version": "2.5.2", - "resolved": false, + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", "dev": true }, @@ -2715,7 +2715,7 @@ }, "pathval": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.1.tgz", + "resolved": false, "integrity": "sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==", "dev": true }, @@ -3038,7 +3038,7 @@ }, "source-map": { "version": "0.5.7", - "resolved": false, + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", "dev": true }, @@ -3290,7 +3290,7 @@ }, "to-fast-properties": { "version": "2.0.0", - "resolved": false, + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=", "dev": true }, @@ -3351,7 +3351,7 @@ }, "type-detect": { "version": "4.0.8", - "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "resolved": false, "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", "dev": true }, @@ -3440,68 +3440,104 @@ "dependencies": { "@grpc/grpc-js": { "version": "1.3.6", + "resolved": false, + "integrity": "sha512-v7+LQFbqZKmd/Tvf5/j1Xlbq6jXL/4d+gUtm2TNX4QiEC3ELWADmGr2dGlUyLl6aKTuYfsN72vAsO5zmavYkEg==", "requires": { "@types/node": ">=12.12.47" } }, "@grpc/proto-loader": { "version": "0.5.6", + "resolved": false, + "integrity": "sha512-DT14xgw3PSzPxwS13auTEwxhMMOoz33DPUKNtmYK/QYbBSpLXJy78FGGs5yVoxVobEqPm4iW9MOIoz0A3bLTRQ==", "requires": { "lodash.camelcase": "^4.3.0", "protobufjs": "^6.8.6" } }, "@protobufjs/aspromise": { - "version": "1.1.2" + "version": "1.1.2", + "resolved": false, + "integrity": "sha1-m4sMxmPWaafY9vXQiToU00jzD78=" }, "@protobufjs/base64": { - "version": "1.1.2" + "version": "1.1.2", + "resolved": false, + "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" }, "@protobufjs/codegen": { - "version": "2.0.4" + "version": "2.0.4", + "resolved": false, + "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" }, "@protobufjs/eventemitter": { - "version": "1.1.0" + "version": "1.1.0", + "resolved": false, + "integrity": "sha1-NVy8mLr61ZePntCV85diHx0Ga3A=" }, "@protobufjs/fetch": { "version": "1.1.0", + "resolved": false, + "integrity": "sha1-upn7WYYUr2VwDBYZ/wbUVLDYTEU=", "requires": { "@protobufjs/aspromise": "^1.1.1", "@protobufjs/inquire": "^1.1.0" } }, "@protobufjs/float": { - "version": 
"1.0.2" + "version": "1.0.2", + "resolved": false, + "integrity": "sha1-Xp4avctz/Ap8uLKR33jIy9l7h9E=" }, "@protobufjs/inquire": { - "version": "1.1.0" + "version": "1.1.0", + "resolved": false, + "integrity": "sha1-/yAOPnzyQp4tyvwRQIKOjMY48Ik=" }, "@protobufjs/path": { - "version": "1.1.2" + "version": "1.1.2", + "resolved": false, + "integrity": "sha1-bMKyDFya1q0NzP0hynZz2Nf79o0=" }, "@protobufjs/pool": { - "version": "1.1.0" + "version": "1.1.0", + "resolved": false, + "integrity": "sha1-Cf0V8tbTq/qbZbw2ZQbWrXhG/1Q=" }, "@protobufjs/utf8": { - "version": "1.1.0" + "version": "1.1.0", + "resolved": false, + "integrity": "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=" }, "@types/long": { - "version": "4.0.1" + "version": "4.0.1", + "resolved": false, + "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" }, "@types/node": { - "version": "16.3.2" + "version": "16.3.2", + "resolved": false, + "integrity": "sha512-jJs9ErFLP403I+hMLGnqDRWT0RYKSvArxuBVh2veudHV7ifEC1WAmjJADacZ7mRbA2nWgHtn8xyECMAot0SkAw==" }, "grpc-promise": { - "version": "1.4.0" + "version": "1.4.0", + "resolved": false, + "integrity": "sha512-4BBXHXb5OjjBh7luylu8vFqL6H6aPn/LeqpQaSBeRzO/Xv95wHW/WkU9TJRqaCTMZ5wq9jTSvlJWp0vRJy1pVA==" }, "lodash.camelcase": { - "version": "4.3.0" + "version": "4.3.0", + "resolved": false, + "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=" }, "long": { - "version": "4.0.0" + "version": "4.0.0", + "resolved": false, + "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" }, "protobufjs": { "version": "6.11.2", + "resolved": false, + "integrity": "sha512-4BQJoPooKJl2G9j3XftkIXjoC9C0Av2NOrWmbLWT1vH32GcSUHjM0Arra6UfTsVyfMAuFzaLucXn1sadxJydAw==", "requires": { "@protobufjs/aspromise": "^1.1.2", "@protobufjs/base64": "^1.1.2", From 5aa2a661cdae869a877dda5f3cadc3abb97c374a Mon Sep 17 00:00:00 2001 From: Philip Robinson Date: Thu, 3 Feb 2022 14:18:23 +0200 Subject: [PATCH 08/20] =?UTF-8?q?feat:=20add=20specific=20LibWallet=20erro?= =?UTF-8?q?r=20code=20for=20=E2=80=9CFee=20is=20greater=20than=20amount?= =?UTF-8?q?=E2=80=9D=20(#3793)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Description --- Surfacing a specific error code for the “Fee is greater than amount” Transaction Build error in the LibWalletError for the FFI interface. 
--- base_layer/wallet_ffi/src/error.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/base_layer/wallet_ffi/src/error.rs b/base_layer/wallet_ffi/src/error.rs index 6b17d8f552..7bb306a3cd 100644 --- a/base_layer/wallet_ffi/src/error.rs +++ b/base_layer/wallet_ffi/src/error.rs @@ -213,6 +213,12 @@ impl From for LibWalletError { code: 204, message: format!("{:?}", w), }, + WalletError::TransactionServiceError(TransactionServiceError::OutputManagerError( + OutputManagerError::BuildError(ref s), + )) if s == &"Fee is greater than amount".to_string() => Self { + code: 212, + message: format!("{:?}", w), + }, WalletError::TransactionServiceError(TransactionServiceError::OutputManagerError(_)) => Self { code: 206, message: format!("{:?}", w), @@ -229,6 +235,7 @@ impl From for LibWalletError { code: 211, message: format!("{:?}", w), }, + // Comms Stack errors WalletError::MultiaddrError(_) => Self { code: 301, From cc41f36b01a42a6f8d48b02d0ed6fe73c99f061d Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Fri, 4 Feb 2022 09:17:33 +0200 Subject: [PATCH 09/20] fix(core): reduce one block behind waiting period (#3798) Description --- Reduces the time taken to switch to sync when one block behind (was 60s, now 20s) Motivation and Context --- With the improvement of block propagation, the time between detecting that a peer/s has progressed and receiving a new block is reduced, so it follows the "one block behind" waiting period should be reduced. How Has This Been Tested? --- Manually, minor change --- .../src/base_node/state_machine_service/states/listening.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/base_layer/core/src/base_node/state_machine_service/states/listening.rs b/base_layer/core/src/base_node/state_machine_service/states/listening.rs index e319f6d7c8..abb86c322b 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/listening.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/listening.rs @@ -55,6 +55,9 @@ use crate::{ const LOG_TARGET: &str = "c::bn::state_machine_service::states::listening"; +/// The length of time to wait for a propagated block when one block behind before proceeding to sync +const ONE_BLOCK_BEHIND_WAIT_PERIOD: Duration = Duration::from_secs(20); + /// This struct contains the info of the peer, and is used to serialised and deserialised. #[derive(Serialize, Deserialize)] pub struct PeerMetadata { @@ -195,7 +198,7 @@ impl Listening { if self.is_synced && best_metadata.height_of_longest_chain() == local.height_of_longest_chain() + 1 && time_since_better_block - .map(|ts: Instant| ts.elapsed() < Duration::from_secs(60)) + .map(|ts: Instant| ts.elapsed() < ONE_BLOCK_BEHIND_WAIT_PERIOD) .unwrap_or(true) { if time_since_better_block.is_none() { From b3cc6f27359ad33fc1c3fdf49d00478f8e27994f Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Fri, 4 Feb 2022 15:33:09 +0200 Subject: [PATCH 10/20] fix(core)!: fix potential panic for sidechain merkle root with incorrect length (#3788) Description --- Fixes potential panic if a side-chain Merkle root of incorrect length is accepted by the blockchain. Motivation and Context --- Using a fixed 32-byte array forces any implementor to produce or validate the correct number of bytes. Esp. now that const generics are available, for performance and correctness, all hashes in the system should be fixed to 32 bytes and not held in a dynamic heap allocated vector. I've changed the merkle_root type in SideChainCheckpoint because this is a relatively minor change. 
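For illustration, a minimal sketch of the boundary check this relies on (hypothetical helper name; the real helper is copy_into_fixed_array, added to base_layer/common_types in this patch): the length is validated once at the boundary, after which the [u8; 32] type makes an incorrectly sized hash unrepresentable.

    // Hypothetical stand-in for the new helper; returns an error instead of
    // panicking when the input slice is not exactly N bytes.
    fn to_fixed_bytes<const N: usize>(bytes: &[u8]) -> Result<[u8; N], String> {
        if bytes.len() != N {
            return Err(format!("expected {} bytes, got {}", N, bytes.len()));
        }
        let mut buf = [0u8; N];
        buf.copy_from_slice(bytes);
        Ok(buf)
    }

    fn main() {
        // A 31-byte "merkle root" is rejected at the boundary instead of
        // panicking deeper in consensus encoding.
        assert!(to_fixed_bytes::<32>(&[1u8; 31]).is_err());
        assert!(to_fixed_bytes::<32>(&[1u8; 32]).is_ok());
    }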
This is a chain storage breaking change. How Has This Been Tested? --- Existing tests, manually --- Cargo.lock | 1 + .../src/conversions/output_features.rs | 11 ++++--- .../src/automation/commands.rs | 31 ++++++++---------- .../src/grpc/wallet_grpc_server.rs | 19 +++++++---- base_layer/common_types/Cargo.toml | 10 +++--- base_layer/common_types/src/array.rs | 32 +++++++++++++++++++ base_layer/common_types/src/lib.rs | 1 + base_layer/common_types/src/types/mod.rs | 2 ++ base_layer/core/src/proto/transaction.rs | 11 +++++-- .../transaction/output_features.rs | 6 ++-- .../side_chain_checkpoint_features.rs | 29 ++++------------- base_layer/wallet/src/assets/asset_manager.rs | 6 ++-- .../wallet/src/assets/asset_manager_handle.rs | 10 +++--- .../wallet/src/assets/infrastructure/mod.rs | 6 ++-- 14 files changed, 103 insertions(+), 72 deletions(-) create mode 100644 base_layer/common_types/src/array.rs diff --git a/Cargo.lock b/Cargo.lock index 8980b3efc3..b98ebe082e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6540,6 +6540,7 @@ dependencies = [ "rand 0.8.4", "serde 1.0.135", "tari_crypto", + "tari_utilities", "thiserror", "tokio 1.15.0", ] diff --git a/applications/tari_app_grpc/src/conversions/output_features.rs b/applications/tari_app_grpc/src/conversions/output_features.rs index fb6fffb921..969f62374d 100644 --- a/applications/tari_app_grpc/src/conversions/output_features.rs +++ b/applications/tari_app_grpc/src/conversions/output_features.rs @@ -22,7 +22,10 @@ use std::convert::{TryFrom, TryInto}; -use tari_common_types::types::{Commitment, PublicKey}; +use tari_common_types::{ + array::copy_into_fixed_array, + types::{Commitment, PublicKey}, +}; use tari_core::transactions::transaction::{ AssetOutputFeatures, MintNonFungibleFeatures, @@ -176,10 +179,8 @@ impl TryFrom for SideChainCheckpointFeatures PublicKey::from_bytes(c).map_err(|err| format!("committee member was not a valid public key: {}", err)) }) .collect::>()?; + let merkle_root = copy_into_fixed_array(&value.merkle_root).map_err(|_| "Invalid merkle_root length")?; - Ok(Self { - merkle_root: value.merkle_root.as_bytes().to_vec(), - committee, - }) + Ok(Self { merkle_root, committee }) } } diff --git a/applications/tari_console_wallet/src/automation/commands.rs b/applications/tari_console_wallet/src/automation/commands.rs index 2b05233ded..7404be6eaa 100644 --- a/applications/tari_console_wallet/src/automation/commands.rs +++ b/applications/tari_console_wallet/src/automation/commands.rs @@ -35,7 +35,7 @@ use log::*; use sha2::Sha256; use strum_macros::{Display, EnumIter, EnumString}; use tari_common::GlobalConfig; -use tari_common_types::{emoji::EmojiId, transaction::TxId, types::PublicKey}; +use tari_common_types::{array::copy_into_fixed_array, emoji::EmojiId, transaction::TxId, types::PublicKey}; use tari_comms::{ connectivity::{ConnectivityEvent, ConnectivityRequester}, multiaddr::Multiaddr, @@ -818,18 +818,17 @@ pub async fn command_runner( _ => Err(CommandError::Argument), }?; - let unique_ids: Vec> = parsed.args[1..] + let unique_ids = parsed.args[1..] 
.iter() .map(|arg| { let s = arg.to_string(); if let Some(s) = s.strip_prefix("0x") { - let r: Vec = Hex::from_hex(s).unwrap(); - r + Hex::from_hex(s).map_err(|_| CommandError::Argument) } else { - s.into_bytes() + Ok(s.into_bytes()) } }) - .collect(); + .collect::>, _>>()?; let mut asset_manager = wallet.asset_manager.clone(); let asset = asset_manager.get_owned_asset_by_pub_key(&public_key).await?; @@ -856,18 +855,14 @@ pub async fn command_runner( let merkle_root = match parsed.args[1] { ParsedArgument::Text(ref root) => { - let s = root.to_string(); - match &s[0..2] { - "0x" => { - let s = s[2..].to_string(); - let r: Vec = Hex::from_hex(&s).unwrap(); - Ok(r) - }, - _ => Ok(s.into_bytes()), - } + let bytes = match &root[0..2] { + "0x" => Vec::::from_hex(&root[2..]).map_err(|_| CommandError::Argument)?, + _ => Vec::::from_hex(root).map_err(|_| CommandError::Argument)?, + }; + copy_into_fixed_array(&bytes).map_err(|_| CommandError::Argument)? }, - _ => Err(CommandError::Argument), - }?; + _ => return Err(CommandError::Argument), + }; let committee: Vec = parsed.args[2..] .iter() @@ -879,7 +874,7 @@ pub async fn command_runner( let mut asset_manager = wallet.asset_manager.clone(); let (tx_id, transaction) = asset_manager - .create_initial_asset_checkpoint(&public_key, &merkle_root, &committee) + .create_initial_asset_checkpoint(&public_key, merkle_root, &committee) .await?; let _result = transaction_service .submit_transaction( diff --git a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs index f8faa5c431..4cc1bf39f3 100644 --- a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs +++ b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs @@ -71,7 +71,10 @@ use tari_app_grpc::{ TransferResult, }, }; -use tari_common_types::types::{BlockHash, PublicKey, Signature}; +use tari_common_types::{ + array::copy_into_fixed_array, + types::{BlockHash, PublicKey, Signature}, +}; use tari_comms::{types::CommsPublicKey, CommsNode}; use tari_core::transactions::{ tari_amount::MicroTari, @@ -655,12 +658,11 @@ impl wallet_server::Wallet for WalletGrpcServer { .collect::>() .map_err(|err| Status::invalid_argument(format!("Committee did not contain valid pub keys:{}", err)))?; + let merkle_root = copy_into_fixed_array(&message.merkle_root) + .map_err(|_| Status::invalid_argument("Merkle root has an incorrect length"))?; + let (tx_id, transaction) = asset_manager - .create_initial_asset_checkpoint( - &asset_public_key, - message.merkle_root.as_slice(), - committee_public_keys.as_slice(), - ) + .create_initial_asset_checkpoint(&asset_public_key, merkle_root, committee_public_keys.as_slice()) .await .map_err(|e| Status::internal(e.to_string()))?; @@ -691,11 +693,14 @@ impl wallet_server::Wallet for WalletGrpcServer { Status::invalid_argument(format!("Next committee did not contain valid pub keys:{}", err)) })?; + let merkle_root = copy_into_fixed_array(&message.merkle_root) + .map_err(|_| Status::invalid_argument("Incorrect merkle root length"))?; + let (tx_id, transaction) = asset_manager .create_follow_on_asset_checkpoint( &asset_public_key, message.unique_id.as_slice(), - message.merkle_root.as_slice(), + merkle_root, committee_public_keys.as_slice(), ) .await diff --git a/base_layer/common_types/Cargo.toml b/base_layer/common_types/Cargo.toml index b0c045a25f..1720efae0d 100644 --- a/base_layer/common_types/Cargo.toml +++ b/base_layer/common_types/Cargo.toml @@ -7,10 +7,12 @@ version = "0.27.3" edition 
= "2018" [dependencies] -rand = "0.8" tari_crypto = { git = "https://github.com/tari-project/tari-crypto.git", branch = "main" } -serde = { version = "1.0.106", features = ["derive"] } -tokio = { version = "1.11", features = ["time", "sync"] } -lazy_static = "1.4.0" +tari_utilities = "^0.3" + digest = "0.9.0" +lazy_static = "1.4.0" +rand = "0.8" +serde = { version = "1.0.106", features = ["derive"] } thiserror = "1.0.29" +tokio = { version = "1.11", features = ["time", "sync"] } diff --git a/base_layer/common_types/src/array.rs b/base_layer/common_types/src/array.rs new file mode 100644 index 0000000000..ed2fcbd47a --- /dev/null +++ b/base_layer/common_types/src/array.rs @@ -0,0 +1,32 @@ +// Copyright 2022, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use tari_utilities::ByteArrayError; + +pub fn copy_into_fixed_array(elems: &[T]) -> Result<[T; SZ], ByteArrayError> { + if elems.len() != SZ { + return Err(ByteArrayError::IncorrectLength); + } + let mut buf = [T::default(); SZ]; + buf.copy_from_slice(&elems[0..SZ]); + Ok(buf) +} diff --git a/base_layer/common_types/src/lib.rs b/base_layer/common_types/src/lib.rs index 728c50ede6..bafe0c8fda 100644 --- a/base_layer/common_types/src/lib.rs +++ b/base_layer/common_types/src/lib.rs @@ -20,6 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +pub mod array; pub mod chain_metadata; pub mod emoji; pub mod luhn; diff --git a/base_layer/common_types/src/types/mod.rs b/base_layer/common_types/src/types/mod.rs index 8aaf7c47de..21250d71be 100644 --- a/base_layer/common_types/src/types/mod.rs +++ b/base_layer/common_types/src/types/mod.rs @@ -38,6 +38,8 @@ use tari_crypto::{ pub const BLOCK_HASH_LENGTH: usize = 32; pub type BlockHash = Vec; +pub type FixedHash = [u8; BLOCK_HASH_LENGTH]; + /// Define the explicit Signature implementation for the Tari base layer. A different signature scheme can be /// employed by redefining this type. 
pub type Signature = RistrettoSchnorr; diff --git a/base_layer/core/src/proto/transaction.rs b/base_layer/core/src/proto/transaction.rs index a6ac8c1800..b212aa1d9a 100644 --- a/base_layer/core/src/proto/transaction.rs +++ b/base_layer/core/src/proto/transaction.rs @@ -27,7 +27,7 @@ use std::{ sync::Arc, }; -use tari_common_types::types::{BlindingFactor, BulletRangeProof, Commitment, PublicKey}; +use tari_common_types::types::{BlindingFactor, BulletRangeProof, Commitment, PublicKey, BLOCK_HASH_LENGTH}; use tari_crypto::{ script::{ExecutionStack, TariScript}, tari_utilities::{ByteArray, ByteArrayError}, @@ -400,7 +400,14 @@ impl TryFrom for SideChainCheckpointF type Error = String; fn try_from(value: proto::types::SideChainCheckpointFeatures) -> Result { - let merkle_root = value.merkle_root.as_bytes().to_vec(); + if value.merkle_root.len() != BLOCK_HASH_LENGTH { + return Err(format!( + "Invalid side chain checkpoint merkle length {}", + value.merkle_root.len() + )); + } + let mut merkle_root = [0u8; BLOCK_HASH_LENGTH]; + merkle_root.copy_from_slice(&value.merkle_root[0..BLOCK_HASH_LENGTH]); let committee = value .committee .into_iter() diff --git a/base_layer/core/src/transactions/transaction/output_features.rs b/base_layer/core/src/transactions/transaction/output_features.rs index f7f4fc0793..4547151fbb 100644 --- a/base_layer/core/src/transactions/transaction/output_features.rs +++ b/base_layer/core/src/transactions/transaction/output_features.rs @@ -29,7 +29,7 @@ use std::{ }; use serde::{Deserialize, Serialize}; -use tari_common_types::types::{Commitment, PublicKey}; +use tari_common_types::types::{Commitment, FixedHash, PublicKey}; use tari_utilities::ByteArray; use super::OutputFeaturesVersion; @@ -181,7 +181,7 @@ impl OutputFeatures { pub fn for_checkpoint( parent_public_key: PublicKey, unique_id: Vec, - merkle_root: Vec, + merkle_root: FixedHash, committee: Vec, is_initial: bool, ) -> OutputFeatures { @@ -321,7 +321,7 @@ mod test { asset_owner_commitment: Default::default(), }), sidechain_checkpoint: Some(SideChainCheckpointFeatures { - merkle_root: vec![1u8; 32], + merkle_root: [1u8; 32], committee: iter::repeat_with(PublicKey::default).take(50).collect(), }), } diff --git a/base_layer/core/src/transactions/transaction/side_chain_checkpoint_features.rs b/base_layer/core/src/transactions/transaction/side_chain_checkpoint_features.rs index 57c411b170..12e178beca 100644 --- a/base_layer/core/src/transactions/transaction/side_chain_checkpoint_features.rs +++ b/base_layer/core/src/transactions/transaction/side_chain_checkpoint_features.rs @@ -20,28 +20,24 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::io::{Error, ErrorKind, Read, Write}; +use std::io::{Error, Read, Write}; use integer_encoding::VarInt; use serde::{Deserialize, Serialize}; -use tari_common_types::types::PublicKey; +use tari_common_types::types::{FixedHash, PublicKey}; use tari_crypto::keys::PublicKey as PublicKeyTrait; use crate::consensus::{ConsensusDecoding, ConsensusEncoding, ConsensusEncodingSized, MaxSizeVec}; #[derive(Debug, Clone, Hash, PartialEq, Deserialize, Serialize, Eq)] pub struct SideChainCheckpointFeatures { - // TODO: This should be fixed size [u8;32] - pub merkle_root: Vec, + pub merkle_root: FixedHash, pub committee: Vec, } impl ConsensusEncoding for SideChainCheckpointFeatures { fn consensus_encode(&self, writer: &mut W) -> Result { - if self.merkle_root.len() != 32 { - return Err(Error::new(ErrorKind::InvalidInput, "merkle_root must be 32 bytes")); - } - writer.write_all(&self.merkle_root[0..32])?; + self.merkle_root.consensus_encode(writer)?; let mut written = 32; written += self.committee.consensus_encode(writer)?; Ok(written) @@ -56,7 +52,7 @@ impl ConsensusEncodingSized for SideChainCheckpointFeatures { impl ConsensusDecoding for SideChainCheckpointFeatures { fn consensus_decode(reader: &mut R) -> Result { - let merkle_root = <[u8; 32] as ConsensusDecoding>::consensus_decode(reader)?.to_vec(); + let merkle_root = FixedHash::consensus_decode(reader)?; const MAX_COMMITTEE_KEYS: usize = 50; let committee = MaxSizeVec::::consensus_decode(reader)?; @@ -78,7 +74,7 @@ mod test { #[test] fn it_encodes_and_decodes_correctly() { let subject = SideChainCheckpointFeatures { - merkle_root: vec![1u8; 32], + merkle_root: [1u8; 32], committee: iter::repeat_with(PublicKey::default).take(50).collect(), }; @@ -88,22 +84,11 @@ mod test { #[test] fn it_fails_for_too_many_committee_pks() { let subject = SideChainCheckpointFeatures { - merkle_root: vec![1u8; 32], + merkle_root: [1u8; 32], committee: iter::repeat_with(PublicKey::default).take(51).collect(), }; let err = check_consensus_encoding_correctness(subject).unwrap_err(); assert_eq!(err.kind(), ErrorKind::InvalidInput); } - - #[test] - fn it_fails_for_incorrect_merkle_root_length() { - let subject = SideChainCheckpointFeatures { - merkle_root: vec![1u8; 31], - committee: vec![], - }; - - let err = check_consensus_encoding_correctness(subject).unwrap_err(); - assert_eq!(err.kind(), ErrorKind::InvalidInput); - } } diff --git a/base_layer/wallet/src/assets/asset_manager.rs b/base_layer/wallet/src/assets/asset_manager.rs index 3577fcdfe9..711d15b6b6 100644 --- a/base_layer/wallet/src/assets/asset_manager.rs +++ b/base_layer/wallet/src/assets/asset_manager.rs @@ -23,7 +23,7 @@ use log::*; use tari_common_types::{ transaction::TxId, - types::{Commitment, PublicKey}, + types::{Commitment, FixedHash, PublicKey}, }; use tari_core::transactions::transaction::{OutputFeatures, OutputFlags, TemplateParameter, Transaction}; @@ -151,7 +151,7 @@ impl AssetManager { pub async fn create_initial_asset_checkpoint( &mut self, asset_pub_key: PublicKey, - merkle_root: Vec, + merkle_root: FixedHash, committee_pub_keys: Vec, ) -> Result<(TxId, Transaction), WalletError> { let output = self @@ -199,7 +199,7 @@ impl AssetManager { &mut self, asset_pub_key: PublicKey, unique_id: Vec, - merkle_root: Vec, + merkle_root: FixedHash, committee_pub_keys: Vec, ) -> Result<(TxId, Transaction), WalletError> { let output = self diff --git a/base_layer/wallet/src/assets/asset_manager_handle.rs b/base_layer/wallet/src/assets/asset_manager_handle.rs index b5ba5b0edf..904a97b980 100644 --- 
a/base_layer/wallet/src/assets/asset_manager_handle.rs
+++ b/base_layer/wallet/src/assets/asset_manager_handle.rs
@@ -22,7 +22,7 @@
 use tari_common_types::{
     transaction::TxId,
-    types::{Commitment, PublicKey},
+    types::{Commitment, FixedHash, PublicKey},
 };
 use tari_core::transactions::transaction::{OutputFeatures, TemplateParameter, Transaction};
 use tari_service_framework::{reply_channel::SenderService, Service};
@@ -74,14 +74,14 @@ impl AssetManagerHandle {
     pub async fn create_initial_asset_checkpoint(
         &mut self,
         public_key: &PublicKey,
-        merkle_root: &[u8],
+        merkle_root: FixedHash,
         committee_public_keys: &[PublicKey],
     ) -> Result<(TxId, Transaction), WalletError> {
         match self
             .handle
             .call(AssetManagerRequest::CreateInitialCheckpoint {
                 asset_public_key: Box::new(public_key.clone()),
-                merkle_root: merkle_root.to_vec(),
+                merkle_root,
                 committee_public_keys: committee_public_keys.to_vec(),
             })
             .await??
@@ -98,14 +98,14 @@ impl AssetManagerHandle {
         &mut self,
         public_key: &PublicKey,
         unique_id: &[u8],
-        merkle_root: &[u8],
+        merkle_root: FixedHash,
         committee_public_keys: &[PublicKey],
     ) -> Result<(TxId, Transaction), WalletError> {
         match self
             .handle
             .call(AssetManagerRequest::CreateFollowOnCheckpoint {
                 asset_public_key: Box::new(public_key.clone()),
-                merkle_root: merkle_root.to_vec(),
+                merkle_root,
                 unique_id: unique_id.to_vec(),
                 committee_public_keys: committee_public_keys.to_vec(),
             })
diff --git a/base_layer/wallet/src/assets/infrastructure/mod.rs b/base_layer/wallet/src/assets/infrastructure/mod.rs
index 0dea7e2c6f..be43bfeab0 100644
--- a/base_layer/wallet/src/assets/infrastructure/mod.rs
+++ b/base_layer/wallet/src/assets/infrastructure/mod.rs
@@ -24,7 +24,7 @@ mod asset_manager_service;
 pub use asset_manager_service::AssetManagerService;
 use tari_common_types::{
     transaction::TxId,
-    types::{Commitment, PublicKey},
+    types::{Commitment, FixedHash, PublicKey},
 };
 use tari_core::transactions::transaction::{OutputFeatures, TemplateParameter, Transaction};
@@ -53,13 +53,13 @@ pub enum AssetManagerRequest {
     },
     CreateInitialCheckpoint {
         asset_public_key: Box<PublicKey>,
-        merkle_root: Vec<u8>,
+        merkle_root: FixedHash,
         committee_public_keys: Vec<PublicKey>,
     },
     CreateFollowOnCheckpoint {
         asset_public_key: Box<PublicKey>,
         unique_id: Vec<u8>,
-        merkle_root: Vec<u8>,
+        merkle_root: FixedHash,
         committee_public_keys: Vec<PublicKey>,
     },
 }

From 2f9603b88a8db0064f1783df0b8f18be19a24497 Mon Sep 17 00:00:00 2001
From: Stan Bondi
Date: Fri, 4 Feb 2022 16:20:56 +0200
Subject: [PATCH 11/20] fix(comms): minor edge-case fix to handle inbound
 connection while dialing (#3785)

Description
---
- Notifies the dialler if an inbound connection has come in, so that it can notify the waiting diallers of the new connection

Motivation and Context
---
Noticed this edge case when a wallet starts up and dials a base node, and I then quickly dial the wallet manually from the same base node. The bn->wallet connection finishes first, but the wallet is still in the connecting state. The wallet does not immediately flip into the connected state because it is waiting for the dial to finish; however, this also triggers a tie-break resolution, which may take longer. This PR resolves this by notifying the dialler of all inbound connections so that it can notify any waiting dials of the new connection immediately.

How Has This Been Tested?
---
Partially by existing unit tests, manually
---
 comms/src/connection_manager/dialer.rs  | 29 ++++++++++++++++++-------
 comms/src/connection_manager/manager.rs |  7 ++++++
 2 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/comms/src/connection_manager/dialer.rs b/comms/src/connection_manager/dialer.rs
index fd39e41923..c0f6487f5e 100644
--- a/comms/src/connection_manager/dialer.rs
+++ b/comms/src/connection_manager/dialer.rs
@@ -73,6 +73,7 @@ pub(crate) enum DialerRequest {
         Option<oneshot::Sender<Result<PeerConnection, ConnectionManagerError>>>,
     ),
     CancelPendingDial(NodeId),
+    NotifyNewInboundConnection(PeerConnection),
 }

 pub struct Dialer<TTransport, TBackoff> {
@@ -168,13 +169,29 @@ where
                 self.handle_dial_peer_request(pending_dials, peer, reply_tx);
             },
             CancelPendingDial(peer_id) => {
-                if let Some(mut s) = self.cancel_signals.remove(&peer_id) {
-                    let _ = s.trigger();
+                self.cancel_dial(&peer_id);
+            },
+
+            NotifyNewInboundConnection(conn) => {
+                if conn.is_connected() {
+                    self.resolve_pending_dials(conn);
                 }
             },
         }
     }

+    fn cancel_dial(&mut self, peer_id: &NodeId) {
+        if let Some(mut s) = self.cancel_signals.remove(peer_id) {
+            let _ = s.trigger();
+        }
+    }
+
+    fn resolve_pending_dials(&mut self, conn: PeerConnection) {
+        let peer = conn.peer_node_id().clone();
+        self.reply_to_pending_requests(&peer, Ok(conn));
+        self.cancel_dial(&peer);
+    }
+
     fn is_pending_dial(&self, node_id: &NodeId) -> bool {
         self.cancel_signals.contains_key(node_id)
     }
@@ -223,12 +240,8 @@ where
             );
         }

-        if self.pending_dial_requests.contains_key(&node_id) {
-            self.reply_to_pending_requests(&node_id, dial_result);
-        }
-
-        // Drop cancel signal
-        let _ = self.cancel_signals.remove(&node_id);
+        self.reply_to_pending_requests(&node_id, dial_result);
+        self.cancel_dial(&node_id);
     }

     pub async fn notify_connection_manager(&mut self, event: ConnectionManagerEvent) {
diff --git a/comms/src/connection_manager/manager.rs b/comms/src/connection_manager/manager.rs
index bf027b2426..a61ae8c745 100644
--- a/comms/src/connection_manager/manager.rs
+++ b/comms/src/connection_manager/manager.rs
@@ -424,6 +424,13 @@ where
             },

             PeerConnected(conn) => {
+                if conn.direction().is_inbound() {
+                    // Notify the dialer that we have an inbound connection, so that it can resolve any pending dials.
+                    let _ = self
+                        .dialer_tx
+                        .send(DialerRequest::NotifyNewInboundConnection(conn.clone()))
+                        .await;
+                }
                 metrics::successful_connections(conn.peer_node_id(), conn.direction()).inc();
                 self.publish_event(PeerConnected(conn));
             },

From 51f2f91e9b2e6289b74cf9148b23335cccea5c40 Mon Sep 17 00:00:00 2001
From: Stan Bondi
Date: Fri, 4 Feb 2022 17:07:13 +0200
Subject: [PATCH 12/20] feat(collectibles): add delete committee member button
 (#3786)

Description
---
Adds a delete icon for committee members in the create screen

Motivation and Context
---
Allows the user to remove an added committee public key

How Has This Been Tested?
--- Manually --- .../tari_collectibles/web-app/src/App.js | 28 ++-- .../tari_collectibles/web-app/src/Create.js | 157 ++++++++++++------ .../tari_collectibles/web-app/src/Manage.js | 2 +- 3 files changed, 125 insertions(+), 62 deletions(-) diff --git a/applications/tari_collectibles/web-app/src/App.js b/applications/tari_collectibles/web-app/src/App.js index c11e37b3ce..e10288d406 100644 --- a/applications/tari_collectibles/web-app/src/App.js +++ b/applications/tari_collectibles/web-app/src/App.js @@ -30,6 +30,7 @@ import { } from "react-router-dom"; import { createTheme } from "@mui/material/styles"; import { + Alert, Box, CssBaseline, Divider, @@ -89,8 +90,8 @@ function IconButtonLink(props) { IconButtonLink.propTypes = { icon: PropTypes.element.isRequired, - to:PropTypes.string.isRequired -} + to: PropTypes.string.isRequired, +}; function ListItemLink(props) { const { icon, primary, to } = props; @@ -164,34 +165,37 @@ const AccountsMenu = (props) => { component="div" disableGutters={true} secondaryAction={ - } - to="/accounts/new" - > + } to="/accounts/new" /> } > My Assets - {error ? {error} : ""} {accounts.map((item) => { return ( - + /> ); })} + {error ? ( + setError(null)}> + {error} + + ) : ( + "" + )} ); }; - AccountsMenu.propTypes = { - walletId: PropTypes.string -} + walletId: PropTypes.string, +}; // only allow access to a Protected Route if the wallet is unlocked const ProtectedRoute = ({ authenticated, path, children }) => { diff --git a/applications/tari_collectibles/web-app/src/Create.js b/applications/tari_collectibles/web-app/src/Create.js index 9f3d8845d9..de95adf2da 100644 --- a/applications/tari_collectibles/web-app/src/Create.js +++ b/applications/tari_collectibles/web-app/src/Create.js @@ -29,12 +29,14 @@ import { FormGroup, List, ListItem, + ListItemIcon, ListItemText, Stack, Switch, TextField, Typography, } from "@mui/material"; +import { DeleteForever } from "@mui/icons-material"; import binding from "./binding"; import { withRouter } from "react-router-dom"; import { appWindow } from "@tauri-apps/api/window"; @@ -70,6 +72,7 @@ class Create extends React.Component { committee: [], }, newCommitteePubKey: "", + committeeEditorError: null, isValid: false, saveErrors: [], }; @@ -77,7 +80,6 @@ class Create extends React.Component { this.cleanup = null; } - componentDidMount() { this.cleanup = appWindow.listen("tauri://file-drop", (obj) => this.dropFile(obj) @@ -217,14 +219,28 @@ class Create extends React.Component { }; onAddCommitteeMember = () => { + let pubKey = this.state.newCommitteePubKey; + if (!pubKey) return; + pubKey = pubKey.trim(); + if (!pubKey) return; + if (this.state.tip003Data.committee.includes(pubKey)) { + this.setState({ committeeEditorError: "Public key already added!" }); + return; + } + let committee = [...this.state.tip003Data.committee]; - committee.push(this.state.newCommitteePubKey); - let tip003Data = { ...this.state.tip003Data, ...{ committee: committee } }; + committee.push(pubKey); + let tip003Data = { + ...this.state.tip003Data, + ...{ committee: committee }, + }; console.log(committee); + this.setState({ tip003Data, saveErrors: [], newCommitteePubKey: "", + committeeEditorError: null, }); }; @@ -376,7 +392,7 @@ class Create extends React.Component { value={this.state.publicKey} disabled style={{ "-webkit-text-fill-color": "#ddd" }} - > + /> + /> + />

Image

this.onTip002DataChanged("symbol", e)} disabled={this.state.isSaving || !this.state.tip002} - > + /> this.onTip002DataChanged("totalSupply", e)} disabled={this.state.isSaving || !this.state.tip002} - > + /> this.onTip002DataChanged("decimals", e)} disabled={this.state.isSaving || !this.state.tip002} - > + /> - - - {this.state.tip003Data.committee.map((item, index) => { - return ( - - - - ); - })} - - - - + 0 ? (
{this.state.saveErrors.map((e) => ( - {e.toString()} + + {e.toString()} + ))}
) : ( @@ -526,8 +529,8 @@ class Create extends React.Component { } Create.propTypes = { - history : PropTypes.object -} + history: PropTypes.object, +}; const ImageSwitch = ({ setMode }) => { return ( @@ -539,8 +542,8 @@ const ImageSwitch = ({ setMode }) => { }; ImageSwitch.propTypes = { - setMode: PropTypes.func -} + setMode: PropTypes.func, +}; const ImageUrl = ({ setImage }) => { const [url, setUrl] = useState(""); @@ -555,15 +558,15 @@ const ImageUrl = ({ setImage }) => { color="primary" value={url} onChange={(e) => setUrl(e.target.value)} - > + /> ); }; ImageUrl.propTypes = { - setImage : PropTypes.func -} + setImage: PropTypes.func, +}; const ImageUpload = ({ selectFile, error }) => { return ( @@ -577,9 +580,9 @@ const ImageUpload = ({ selectFile, error }) => { }; ImageUpload.propTypes = { - selectFile : PropTypes.func, - error: PropTypes.string -} + selectFile: PropTypes.func, + error: PropTypes.string, +}; const ImageSelector = ({ cid, image, selectFile, setImage, setCid, error }) => { const [mode, setMode] = useState(""); @@ -614,13 +617,13 @@ const ImageSelector = ({ cid, image, selectFile, setImage, setCid, error }) => { }; ImageSelector.propTypes = { - cid : PropTypes.string, + cid: PropTypes.string, image: PropTypes.string, selectFile: PropTypes.func, setImage: PropTypes.func, setCid: PropTypes.func, - error: PropTypes.string -} + error: PropTypes.string, +}; const IpfsImage = ({ cid, setCid, error }) => { const [src, setSrc] = useState(""); @@ -670,7 +673,63 @@ const IpfsImage = ({ cid, setCid, error }) => { IpfsImage.propTypes = { cid: PropTypes.string, setCid: PropTypes.func, - error: PropTypes.string -} + error: PropTypes.string, +}; + +const CommitteeEditor = ({ + members, + disabled, + onAddCommitteeMember, + onDeleteCommitteeMember, + onNewCommitteePubKeyChanged, + newCommitteePubKey, + error, +}) => { + return ( + + + {members.map((item, index) => { + return ( + + onDeleteCommitteeMember && onDeleteCommitteeMember(index) + } + disabled={disabled} + > + + + + + + ); + })} + + + + {error ? 
{error} : } + + ); +}; + +CommitteeEditor.propTypes = { + members: PropTypes.array.isRequired, + disabled: PropTypes.bool, + onAddCommitteeMember: PropTypes.func, + onDeleteCommitteeMember: PropTypes.func, + onNewCommitteePubKeyChanged: PropTypes.func, + newCommitteePubKey: PropTypes.string, + error: PropTypes.string, +}; export default withRouter(Create); diff --git a/applications/tari_collectibles/web-app/src/Manage.js b/applications/tari_collectibles/web-app/src/Manage.js index c5e377a7b2..49ea47d288 100644 --- a/applications/tari_collectibles/web-app/src/Manage.js +++ b/applications/tari_collectibles/web-app/src/Manage.js @@ -88,7 +88,7 @@ class Manage extends React.Component { sx={{ pb: "5%", height: "20vw", width: "20vw" }} image={asset.image} alt="random" - > + /> From 6f3842066cdd5809430544611e857714673d1488 Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Fri, 4 Feb 2022 17:53:27 +0200 Subject: [PATCH 13/20] refactor: outbound message pipeline, threads and mempool improvements (#3792) Description --- - Use bounded executor for outbound pipeline - Release chain storage read lock earlier in transaction validator - Reinstate blocking threads for mempool - Make some non-error comms logs messages look less like errors Motivation and Context --- - If the outbound pipeline is flooded (>100 messages), back-pressure will apply to callers - Release the read lock on the blockchain db before performing potentially multi-second transaction validations - For large transactions, mempool operations can hold up a tokio worker for multiple seconds How Has This Been Tested? --- Existing tests updated as needed, Manually (ping-peer, mini stress test and pruned sync) --- applications/tari_base_node/src/bootstrap.rs | 3 +- .../tari_base_node/src/command_handler.rs | 2 +- .../tari_console_wallet/src/init/mod.rs | 5 +- applications/tari_validator_node/src/comms.rs | 3 +- .../comms_interface/inbound_handlers.rs | 8 +- .../horizon_state_synchronization.rs | 154 +++++++++-------- base_layer/core/src/blocks/block_header.rs | 2 +- base_layer/core/src/chain_storage/async_db.rs | 2 +- .../src/chain_storage/blockchain_database.rs | 17 +- base_layer/core/src/mempool/error.rs | 5 + base_layer/core/src/mempool/mempool.rs | 89 ++++++---- .../core/src/mempool/mempool_storage.rs | 22 +-- .../src/mempool/service/inbound_handlers.rs | 21 +-- .../core/src/mempool/sync_protocol/mod.rs | 12 +- .../core/src/mempool/sync_protocol/test.rs | 13 +- .../unconfirmed_pool/unconfirmed_pool.rs | 14 +- .../src/validation/transaction_validators.rs | 16 +- base_layer/core/tests/mempool.rs | 161 +++++++++++++----- base_layer/p2p/src/initialization.rs | 4 + base_layer/wallet/tests/wallet.rs | 8 +- base_layer/wallet_ffi/src/lib.rs | 5 +- comms/src/bounded_executor.rs | 16 ++ comms/src/multiplexing/yamux.rs | 14 +- comms/src/pipeline/builder.rs | 11 ++ comms/src/pipeline/inbound.rs | 15 +- comms/src/pipeline/outbound.rs | 51 ++++-- comms/src/protocol/messaging/extension.rs | 6 +- comms/src/protocol/rpc/client/mod.rs | 12 +- 28 files changed, 461 insertions(+), 230 deletions(-) diff --git a/applications/tari_base_node/src/bootstrap.rs b/applications/tari_base_node/src/bootstrap.rs index d0a9bb0395..e86fbe0d7d 100644 --- a/applications/tari_base_node/src/bootstrap.rs +++ b/applications/tari_base_node/src/bootstrap.rs @@ -265,7 +265,8 @@ where B: BlockchainBackend + 'static auxilary_tcp_listener_address: self.config.auxilary_tcp_listener_address.clone(), datastore_path: self.config.peer_db_path.clone(), peer_database_name: "peers".to_string(), 
- max_concurrent_inbound_tasks: 100, + max_concurrent_inbound_tasks: 50, + max_concurrent_outbound_tasks: 100, outbound_buffer_size: 100, dht: DhtConfig { database_url: DbConnectionUrl::File(self.config.data_dir.join("dht.db")), diff --git a/applications/tari_base_node/src/command_handler.rs b/applications/tari_base_node/src/command_handler.rs index e6d8fd76f0..f1ac3905a0 100644 --- a/applications/tari_base_node/src/command_handler.rs +++ b/applications/tari_base_node/src/command_handler.rs @@ -183,7 +183,7 @@ impl CommandHandler { status_line.add_field( "Rpc", format!( - "{}/{} sessions", + "{}/{}", num_active_rpc_sessions, config .rpc_max_simultaneous_sessions diff --git a/applications/tari_console_wallet/src/init/mod.rs b/applications/tari_console_wallet/src/init/mod.rs index 19f7fe7e79..96644b0f04 100644 --- a/applications/tari_console_wallet/src/init/mod.rs +++ b/applications/tari_console_wallet/src/init/mod.rs @@ -374,8 +374,9 @@ pub async fn init_wallet( auxilary_tcp_listener_address: None, datastore_path: config.console_wallet_peer_db_path.clone(), peer_database_name: "peers".to_string(), - max_concurrent_inbound_tasks: 100, - outbound_buffer_size: 100, + max_concurrent_inbound_tasks: 10, + max_concurrent_outbound_tasks: 10, + outbound_buffer_size: 10, dht: DhtConfig { database_url: DbConnectionUrl::File(config.data_dir.join("dht-console-wallet.db")), auto_join: true, diff --git a/applications/tari_validator_node/src/comms.rs b/applications/tari_validator_node/src/comms.rs index 45f748d57d..4211d5a05a 100644 --- a/applications/tari_validator_node/src/comms.rs +++ b/applications/tari_validator_node/src/comms.rs @@ -133,7 +133,8 @@ fn create_comms_config(config: &GlobalConfig, node_identity: Arc) transport_type: create_transport_type(config), datastore_path: config.peer_db_path.clone(), peer_database_name: "peers".to_string(), - max_concurrent_inbound_tasks: 100, + max_concurrent_inbound_tasks: 50, + max_concurrent_outbound_tasks: 100, outbound_buffer_size: 100, dht: DhtConfig { database_url: DbConnectionUrl::File(config.data_dir.join("dht.db")), diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index 0e14200a9d..c800e6385a 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -479,7 +479,7 @@ where B: BlockchainBackend + 'static }) }, NodeCommsRequest::FetchMempoolTransactionsByExcessSigs { excess_sigs } => { - let (transactions, not_found) = self.mempool.retrieve_by_excess_sigs(&excess_sigs).await; + let (transactions, not_found) = self.mempool.retrieve_by_excess_sigs(excess_sigs).await?; Ok(NodeCommsResponse::FetchMempoolTransactionsByExcessSigsResponse( FetchMempoolTransactionsResponse { transactions, @@ -553,7 +553,7 @@ where B: BlockchainBackend + 'static kernel_excess_sigs: excess_sigs, } = new_block; - let (known_transactions, missing_excess_sigs) = self.mempool.retrieve_by_excess_sigs(&excess_sigs).await; + let (known_transactions, missing_excess_sigs) = self.mempool.retrieve_by_excess_sigs(excess_sigs).await?; let known_transactions = known_transactions.into_iter().map(|tx| (*tx).clone()).collect(); metrics::compact_block_tx_misses(header.height).set(missing_excess_sigs.len() as i64); @@ -587,7 +587,7 @@ where B: BlockchainBackend + 'static // Add returned transactions to unconfirmed pool if !transactions.is_empty() { - self.mempool.insert_all(&transactions).await?; + 
self.mempool.insert_all(transactions.clone()).await?; } if !not_found.is_empty() { @@ -708,8 +708,6 @@ where B: BlockchainBackend + 'static BlockAddResult::ChainReorg { .. } => true, }; - self.blockchain_db.cleanup_orphans().await?; - self.update_block_result_metrics(&block_add_result); self.publish_block_event(BlockEvent::ValidBlockAdded(block.clone(), block_add_result)); diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs index 80937f4d9b..50800cd876 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs @@ -49,7 +49,14 @@ use crate::{ sync::{rpc, SyncPeer}, }, blocks::{BlockHeader, ChainHeader, UpdateBlockAccumulatedData}, - chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, ChainStorageError, MmrTree, PrunedOutput}, + chain_storage::{ + async_db::AsyncBlockchainDb, + BlockchainBackend, + ChainStorageError, + DbTransaction, + MmrTree, + PrunedOutput, + }, proto::base_node::{ sync_utxo as proto_sync_utxo, sync_utxos_response::UtxoOrDeleted, @@ -697,84 +704,89 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let mut prev_mmr = 0; let mut prev_kernel_mmr = 0; + let height = header.height(); let bitmap = self.take_final_bitmap(); - let mut txn = self.db().write_transaction(); - let mut utxo_mmr_position = 0; - let mut prune_positions = vec![]; - - for h in 0..=header.height() { - let curr_header = self.db().fetch_chain_header(h).await?; - - trace!( - target: LOG_TARGET, - "Fetching utxos from db: height:{}, header.output_mmr:{}, prev_mmr:{}, end:{}", - curr_header.height(), - curr_header.header().output_mmr_size, - prev_mmr, - curr_header.header().output_mmr_size - 1 - ); - let (utxos, _) = self.db().fetch_utxos_in_block(curr_header.hash().clone(), None).await?; - trace!( - target: LOG_TARGET, - "Fetching kernels from db: height:{}, header.kernel_mmr:{}, prev_mmr:{}, end:{}", - curr_header.height(), - curr_header.header().kernel_mmr_size, - prev_kernel_mmr, - curr_header.header().kernel_mmr_size - 1 - ); - - trace!(target: LOG_TARGET, "Number of utxos returned: {}", utxos.len()); - let mut prune_counter = 0; - for u in utxos { - match u { - PrunedOutput::NotPruned { output } => { - if bitmap.contains(utxo_mmr_position) { - debug!( - target: LOG_TARGET, - "Found output that needs pruning at height: {} position: {}", h, utxo_mmr_position - ); - prune_positions.push(utxo_mmr_position); - prune_counter += 1; - } else { - pruned_utxo_sum = &output.commitment + &pruned_utxo_sum; - } - }, - _ => { - prune_counter += 1; - }, - } - utxo_mmr_position += 1; - } - if prune_counter > 0 { - trace!(target: LOG_TARGET, "Pruned {} outputs", prune_counter); - } - prev_mmr = curr_header.header().output_mmr_size; + let db = self.db().inner().clone(); + task::spawn_blocking(move || { + let mut txn = DbTransaction::new(); + let mut utxo_mmr_position = 0; + let mut prune_positions = vec![]; - let kernels = self.db().fetch_kernels_in_block(curr_header.hash().clone()).await?; - trace!(target: LOG_TARGET, "Number of kernels returned: {}", kernels.len()); - for k in kernels { - pruned_kernel_sum = &k.excess + &pruned_kernel_sum; - } - prev_kernel_mmr = curr_header.header().kernel_mmr_size; + for h in 
0..=height { + let curr_header = db.fetch_chain_header(h)?; - if h % 1000 == 0 { - debug!( + trace!( + target: LOG_TARGET, + "Fetching utxos from db: height:{}, header.output_mmr:{}, prev_mmr:{}, end:{}", + curr_header.height(), + curr_header.header().output_mmr_size, + prev_mmr, + curr_header.header().output_mmr_size - 1 + ); + let (utxos, _) = db.fetch_utxos_in_block(curr_header.hash().clone(), None)?; + trace!( target: LOG_TARGET, - "Final Validation: {:.2}% complete. Height: {}, mmr_position: {} ", - (h as f32 / header.height() as f32) * 100.0, - h, - utxo_mmr_position, + "Fetching kernels from db: height:{}, header.kernel_mmr:{}, prev_mmr:{}, end:{}", + curr_header.height(), + curr_header.header().kernel_mmr_size, + prev_kernel_mmr, + curr_header.header().kernel_mmr_size - 1 ); + + trace!(target: LOG_TARGET, "Number of utxos returned: {}", utxos.len()); + let mut prune_counter = 0; + for u in utxos { + match u { + PrunedOutput::NotPruned { output } => { + if bitmap.contains(utxo_mmr_position) { + debug!( + target: LOG_TARGET, + "Found output that needs pruning at height: {} position: {}", h, utxo_mmr_position + ); + prune_positions.push(utxo_mmr_position); + prune_counter += 1; + } else { + pruned_utxo_sum = &output.commitment + &pruned_utxo_sum; + } + }, + _ => { + prune_counter += 1; + }, + } + utxo_mmr_position += 1; + } + if prune_counter > 0 { + trace!(target: LOG_TARGET, "Pruned {} outputs", prune_counter); + } + prev_mmr = curr_header.header().output_mmr_size; + + let kernels = db.fetch_kernels_in_block(curr_header.hash().clone())?; + trace!(target: LOG_TARGET, "Number of kernels returned: {}", kernels.len()); + for k in kernels { + pruned_kernel_sum = &k.excess + &pruned_kernel_sum; + } + prev_kernel_mmr = curr_header.header().kernel_mmr_size; + + if h % 1000 == 0 { + debug!( + target: LOG_TARGET, + "Final Validation: {:.2}% complete. Height: {}, mmr_position: {} ", + (h as f32 / height as f32) * 100.0, + h, + utxo_mmr_position, + ); + } } - } - if !prune_positions.is_empty() { - debug!(target: LOG_TARGET, "Pruning {} spent outputs", prune_positions.len()); - txn.prune_output_at_positions(prune_positions); - txn.commit().await?; - } + if !prune_positions.is_empty() { + debug!(target: LOG_TARGET, "Pruning {} spent outputs", prune_positions.len()); + txn.prune_outputs_at_positions(prune_positions); + db.write(txn)?; + } - Ok((pruned_utxo_sum, pruned_kernel_sum)) + Ok((pruned_utxo_sum, pruned_kernel_sum)) + }) + .await? 
}

    #[inline]
diff --git a/base_layer/core/src/blocks/block_header.rs b/base_layer/core/src/blocks/block_header.rs
index 815a36f547..1a15a8666a 100644
--- a/base_layer/core/src/blocks/block_header.rs
+++ b/base_layer/core/src/blocks/block_header.rs
@@ -273,7 +273,7 @@ impl Display for BlockHeader {
         )?;
         writeln!(
             fmt,
-            "Merkle roots:\nInputs: {},\n Outputs: {} ({})\nWitness: {}\nKernels: {} ({})\n",
+            "Merkle roots:\nInputs: {},\nOutputs: {} ({})\nWitness: {}\nKernels: {} ({})\n",
             self.input_mr.to_hex(),
             self.output_mr.to_hex(),
             self.output_mmr_size,
diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs
index 6fc32f5955..621ef100b6 100644
--- a/base_layer/core/src/chain_storage/async_db.rs
+++ b/base_layer/core/src/chain_storage/async_db.rs
@@ -387,7 +387,7 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> {
         self
     }

-    pub fn prune_output_at_positions(&mut self, positions: Vec<u32>) -> &mut Self {
+    pub fn prune_outputs_at_positions(&mut self, positions: Vec<u32>) -> &mut Self {
         self.transaction.prune_outputs_at_positions(positions);
         self
     }
diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs
index d5f68a7df5..15bbafbcd2 100644
--- a/base_layer/core/src/chain_storage/blockchain_database.rs
+++ b/base_layer/core/src/chain_storage/blockchain_database.rs
@@ -888,13 +888,20 @@ where B: BlockchainBackend
             );
             return Err(e.into());
         }
-
         trace!(
             target: LOG_TARGET,
-            "[add_block] acquired write access db lock for block #{} ",
-            &new_height
+            "[add_block] waiting for write access to add block #{}",
+            new_height
         );
+        let timer = Instant::now();
         let mut db = self.db_write_access()?;
+
+        trace!(
+            target: LOG_TARGET,
+            "[add_block] acquired write access db lock for block #{} in {:.2?}",
+            new_height,
+            timer.elapsed()
+        );
         let block_add_result = add_block(
             &mut *db,
             &self.config,
@@ -915,6 +922,10 @@ where B: BlockchainBackend
             prune_database_if_needed(&mut *db, self.config.pruning_horizon, self.config.pruning_interval)?;
         }

+        if let Err(e) = cleanup_orphans(&mut *db, self.config.orphan_storage_capacity) {
+            warn!(target: LOG_TARGET, "Failed to clean up orphans: {}", e);
+        }
+
         debug!(
             target: LOG_TARGET,
             "Candidate block `add_block` result: {}", block_add_result
diff --git a/base_layer/core/src/mempool/error.rs b/base_layer/core/src/mempool/error.rs
index 953b20b8af..f0d09db1d7 100644
--- a/base_layer/core/src/mempool/error.rs
+++ b/base_layer/core/src/mempool/error.rs
@@ -22,6 +22,7 @@

 use tari_service_framework::reply_channel::TransportChannelError;
 use thiserror::Error;
+use tokio::task::JoinError;

 use crate::{mempool::unconfirmed_pool::UnconfirmedPoolError, transactions::transaction::TransactionError};

@@ -35,4 +36,8 @@ pub enum MempoolError {
     TransportChannelError(#[from] TransportChannelError),
     #[error("The transaction did not contain any kernels")]
     TransactionNoKernels,
+    #[error("Mempool lock poisoned.
This indicates that the mempool has panicked while holding a RwLockGuard.")]
+    RwLockPoisonError,
+    #[error(transparent)]
+    BlockingTaskError(#[from] JoinError),
 }
diff --git a/base_layer/core/src/mempool/mempool.rs b/base_layer/core/src/mempool/mempool.rs
index 25d597789e..079aa8e21a 100644
--- a/base_layer/core/src/mempool/mempool.rs
+++ b/base_layer/core/src/mempool/mempool.rs
@@ -20,10 +20,10 @@
 // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-use std::sync::Arc;
+use std::sync::{Arc, RwLock};

 use tari_common_types::types::{PrivateKey, Signature};
-use tokio::sync::RwLock;
+use tokio::task;

 use crate::{
     blocks::Block,
@@ -62,22 +62,25 @@ impl Mempool {
     /// Insert an unconfirmed transaction into the Mempool.
     pub async fn insert(&self, tx: Arc<Transaction>) -> Result<TxStorageResponse, MempoolError> {
-        self.pool_storage.write().await.insert(tx)
+        self.do_write_task(|storage| storage.insert(tx)).await
     }

     /// Inserts all transactions into the mempool.
-    pub async fn insert_all(&self, transactions: &[Arc<Transaction>]) -> Result<(), MempoolError> {
-        let mut mempool = self.pool_storage.write().await;
-        for tx in transactions {
-            mempool.insert(tx.clone())?;
-        }
-
-        Ok(())
+    pub async fn insert_all(&self, transactions: Vec<Arc<Transaction>>) -> Result<(), MempoolError> {
+        self.do_write_task(|storage| {
+            for tx in transactions {
+                storage.insert(tx)?;
+            }
+
+            Ok(())
+        })
+        .await
     }

     /// Update the Mempool based on the received published block.
-    pub async fn process_published_block(&self, published_block: &Block) -> Result<(), MempoolError> {
-        self.pool_storage.write().await.process_published_block(published_block)
+    pub async fn process_published_block(&self, published_block: Arc<Block>) -> Result<(), MempoolError> {
+        self.do_write_task(move |storage| storage.process_published_block(&published_block))
+            .await
     }

     /// In the event of a ReOrg, resubmit all ReOrged transactions into the Mempool and process each newly introduced
@@ -87,48 +90,74 @@
         removed_blocks: Vec<Arc<Block>>,
         new_blocks: Vec<Arc<Block>>,
     ) -> Result<(), MempoolError> {
-        self.pool_storage
-            .write()
+        self.do_write_task(move |storage| storage.process_reorg(&removed_blocks, &new_blocks))
             .await
-            .process_reorg(&removed_blocks, &new_blocks)
     }

     /// Returns all unconfirmed transaction stored in the Mempool, except the transactions stored in the ReOrgPool.
-    // TODO: Investigate returning an iterator rather than a large vector of transactions
-    pub async fn snapshot(&self) -> Vec<Arc<Transaction>> {
-        self.pool_storage.read().await.snapshot()
+    pub async fn snapshot(&self) -> Result<Vec<Arc<Transaction>>, MempoolError> {
+        self.do_read_task(|storage| Ok(storage.snapshot())).await
     }

     /// Returns a list of transaction ranked by transaction priority up to a given weight.
     /// Only transactions that fit into a block will be returned
     pub async fn retrieve(&self, total_weight: u64) -> Result<Vec<Arc<Transaction>>, MempoolError> {
-        self.pool_storage.write().await.retrieve(total_weight)
+        self.do_write_task(move |storage| storage.retrieve_and_revalidate(total_weight))
+            .await
     }

     pub async fn retrieve_by_excess_sigs(
         &self,
-        excess_sigs: &[PrivateKey],
-    ) -> (Vec<Arc<Transaction>>, Vec<PrivateKey>) {
-        self.pool_storage.read().await.retrieve_by_excess_sigs(excess_sigs)
+        excess_sigs: Vec<PrivateKey>,
+    ) -> Result<(Vec<Arc<Transaction>>, Vec<PrivateKey>), MempoolError> {
+        self.do_read_task(move |storage| Ok(storage.retrieve_by_excess_sigs(&excess_sigs)))
+            .await
     }

     /// Check if the specified excess signature is found in the Mempool.
-    pub async fn has_tx_with_excess_sig(&self, excess_sig: &Signature) -> TxStorageResponse {
-        self.pool_storage.read().await.has_tx_with_excess_sig(excess_sig)
+    pub async fn has_tx_with_excess_sig(&self, excess_sig: Signature) -> Result<TxStorageResponse, MempoolError> {
+        self.do_read_task(move |storage| Ok(storage.has_tx_with_excess_sig(&excess_sig)))
+            .await
     }

     /// Check if the specified transaction is stored in the Mempool.
-    pub async fn has_transaction(&self, tx: &Transaction) -> Result<TxStorageResponse, MempoolError> {
-        self.pool_storage.read().await.has_transaction(tx)
+    pub async fn has_transaction(&self, tx: Arc<Transaction>) -> Result<TxStorageResponse, MempoolError> {
+        self.do_read_task(move |storage| storage.has_transaction(&tx)).await
     }

     /// Gathers and returns the stats of the Mempool.
-    pub async fn stats(&self) -> StatsResponse {
-        self.pool_storage.read().await.stats()
+    pub async fn stats(&self) -> Result<StatsResponse, MempoolError> {
+        self.do_read_task(|storage| Ok(storage.stats())).await
     }

     /// Gathers and returns a breakdown of all the transaction in the Mempool.
-    pub async fn state(&self) -> StateResponse {
-        self.pool_storage.read().await.state()
+    pub async fn state(&self) -> Result<StateResponse, MempoolError> {
+        self.do_read_task(|storage| Ok(storage.state())).await
+    }
+
+    async fn do_read_task<F, T>(&self, callback: F) -> Result<T, MempoolError>
+    where
+        F: FnOnce(&MempoolStorage) -> Result<T, MempoolError> + Send + 'static,
+        T: Send + 'static,
+    {
+        let storage = self.pool_storage.clone();
+        task::spawn_blocking(move || {
+            let lock = storage.read().map_err(|_| MempoolError::RwLockPoisonError)?;
+            callback(&*lock)
+        })
+        .await?
+    }
+
+    async fn do_write_task<F, T>(&self, callback: F) -> Result<T, MempoolError>
+    where
+        F: FnOnce(&mut MempoolStorage) -> Result<T, MempoolError> + Send + 'static,
+        T: Send + 'static,
+    {
+        let storage = self.pool_storage.clone();
+        task::spawn_blocking(move || {
+            let mut lock = storage.write().map_err(|_| MempoolError::RwLockPoisonError)?;
+            callback(&mut *lock)
+        })
+        .await?
     }
 }
diff --git a/base_layer/core/src/mempool/mempool_storage.rs b/base_layer/core/src/mempool/mempool_storage.rs
index 9ad4cd8170..dc6a6a2c17 100644
--- a/base_layer/core/src/mempool/mempool_storage.rs
+++ b/base_layer/core/src/mempool/mempool_storage.rs
@@ -72,17 +72,19 @@ impl MempoolStorage {
     /// Insert an unconfirmed transaction into the Mempool. The transaction *MUST* have passed through the validation
     /// pipeline already and will thus always be internally consistent by this stage
     pub fn insert(&mut self, tx: Arc<Transaction>) -> Result<TxStorageResponse, MempoolError> {
-        debug!(
-            target: LOG_TARGET,
-            "Inserting tx into mempool: {}",
-            tx.body
-                .kernels()
-                .first()
-                .map(|k| k.excess_sig.get_signature().to_hex())
-                .unwrap_or_else(|| "None?!".into())
-        );
+        let tx_id = tx
+            .body
+            .kernels()
+            .first()
+            .map(|k| k.excess_sig.get_signature().to_hex())
+            .unwrap_or_else(|| "None?!".into());
+        debug!(target: LOG_TARGET, "Inserting tx into mempool: {}", tx_id);
         match self.validator.validate(&tx) {
             Ok(()) => {
+                debug!(
+                    target: LOG_TARGET,
+                    "Transaction {} is VALID, inserting in unconfirmed pool", tx_id
+                );
                 let weight = self.get_transaction_weighting(0);
                 self.unconfirmed_pool.insert(tx, None, &weight)?;
                 Ok(TxStorageResponse::UnconfirmedPool)
@@ -209,7 +211,7 @@ impl MempoolStorage {
     /// Returns a list of transaction ranked by transaction priority up to a given weight.
/// Will only return transactions that will fit into the given weight - pub fn retrieve(&mut self, total_weight: u64) -> Result>, MempoolError> { + pub fn retrieve_and_revalidate(&mut self, total_weight: u64) -> Result>, MempoolError> { let results = self.unconfirmed_pool.fetch_highest_priority_txs(total_weight)?; self.insert_txs(results.transactions_to_insert)?; Ok(results.retrieved_transactions) diff --git a/base_layer/core/src/mempool/service/inbound_handlers.rs b/base_layer/core/src/mempool/service/inbound_handlers.rs index 054f6a5d9d..eeecc00ebc 100644 --- a/base_layer/core/src/mempool/service/inbound_handlers.rs +++ b/base_layer/core/src/mempool/service/inbound_handlers.rs @@ -59,10 +59,10 @@ impl MempoolInboundHandlers { debug!(target: LOG_TARGET, "Handling remote request: {}", request); use MempoolRequest::*; match request { - GetStats => Ok(MempoolResponse::Stats(self.mempool.stats().await)), - GetState => Ok(MempoolResponse::State(self.mempool.state().await)), + GetStats => Ok(MempoolResponse::Stats(self.mempool.stats().await?)), + GetState => Ok(MempoolResponse::State(self.mempool.state().await?)), GetTxStateByExcessSig(excess_sig) => Ok(MempoolResponse::TxStorage( - self.mempool.has_tx_with_excess_sig(&excess_sig).await, + self.mempool.has_tx_with_excess_sig(excess_sig).await?, )), SubmitTransaction(tx) => { debug!( @@ -102,7 +102,8 @@ impl MempoolInboundHandlers { ) -> Result { trace!(target: LOG_TARGET, "submit_transaction: {}.", tx); - let tx_storage = self.mempool.has_transaction(&tx).await?; + let tx = Arc::new(tx); + let tx_storage = self.mempool.has_transaction(tx.clone()).await?; let kernel_excess_sig = tx .first_kernel_excess_sig() .ok_or(MempoolServiceError::TransactionNoKernels)? @@ -115,7 +116,6 @@ impl MempoolInboundHandlers { ); return Ok(tx_storage); } - let tx = Arc::new(tx); match self.mempool.insert(tx.clone()).await { Ok(tx_storage) => { if tx_storage.is_stored() { @@ -146,9 +146,10 @@ impl MempoolInboundHandlers { } async fn update_pool_size_metrics(&self) { - let stats = self.mempool.stats().await; - metrics::unconfirmed_pool_size().set(stats.unconfirmed_txs as i64); - metrics::reorg_pool_size().set(stats.reorg_txs as i64); + if let Ok(stats) = self.mempool.stats().await { + metrics::unconfirmed_pool_size().set(stats.unconfirmed_txs as i64); + metrics::reorg_pool_size().set(stats.reorg_txs as i64); + } } /// Handle inbound block events from the local base node service. 
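A minimal, self-contained sketch of the locking pattern the mempool methods above adopt (simplified types, not the actual mempool API): a std::sync::RwLock accessed inside tokio::task::spawn_blocking, so multi-second operations on large transactions no longer tie up a tokio worker, and a poisoned lock surfaces as an error rather than a panic.

    use std::sync::{Arc, RwLock};
    use tokio::task;

    async fn with_read_lock<T, F, R>(storage: Arc<RwLock<T>>, f: F) -> Result<R, String>
    where
        T: Send + Sync + 'static,
        F: FnOnce(&T) -> R + Send + 'static,
        R: Send + 'static,
    {
        task::spawn_blocking(move || {
            // A poisoned lock becomes an error instead of propagating a panic.
            let guard = storage.read().map_err(|_| "lock poisoned".to_string())?;
            Ok(f(&guard))
        })
        .await
        .map_err(|e| e.to_string())?
    }

    #[tokio::main]
    async fn main() {
        let storage = Arc::new(RwLock::new(vec![1u64, 2, 3]));
        let len = with_read_lock(storage, |v| v.len()).await.unwrap();
        assert_eq!(len, 3);
    }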
@@ -156,7 +157,7 @@ impl MempoolInboundHandlers { use BlockEvent::*; match block_event { ValidBlockAdded(block, BlockAddResult::Ok(_)) => { - self.mempool.process_published_block(block).await?; + self.mempool.process_published_block(block.clone()).await?; }, ValidBlockAdded(_, BlockAddResult::ChainReorg { added, removed }) => { self.mempool @@ -173,7 +174,7 @@ impl MempoolInboundHandlers { .await?; }, BlockSyncComplete(tip_block) => { - self.mempool.process_published_block(tip_block.block()).await?; + self.mempool.process_published_block(tip_block.to_arc_block()).await?; }, AddBlockFailed(_) => {}, } diff --git a/base_layer/core/src/mempool/sync_protocol/mod.rs b/base_layer/core/src/mempool/sync_protocol/mod.rs index f0f0900492..488fcd28e0 100644 --- a/base_layer/core/src/mempool/sync_protocol/mod.rs +++ b/base_layer/core/src/mempool/sync_protocol/mod.rs @@ -306,7 +306,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin self.peer_node_id.short_str() ); - let transactions = self.mempool.snapshot().await; + let transactions = self.mempool.snapshot().await?; let items = transactions .iter() .take(self.config.initial_sync_max_transactions) @@ -392,7 +392,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin inventory.items.len() ); - let transactions = self.mempool.snapshot().await; + let transactions = self.mempool.snapshot().await?; let mut duplicate_inventory_items = Vec::new(); let (transactions, _) = transactions.into_iter().partition::, _>(|transaction| { @@ -483,7 +483,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin } } - let stats = self.mempool.stats().await; + let stats = self.mempool.stats().await?; metrics::unconfirmed_pool_size().set(stats.unconfirmed_txs as i64); metrics::reorg_pool_size().set(stats.reorg_txs as i64); @@ -509,13 +509,13 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin excess_sig_hex, self.peer_node_id.short_str() ); - - let store_state = self.mempool.has_transaction(&txn).await?; + let txn = Arc::new(txn); + let store_state = self.mempool.has_transaction(txn.clone()).await?; if store_state.is_stored() { return Ok(()); } - let stored_result = self.mempool.insert(Arc::new(txn)).await?; + let stored_result = self.mempool.insert(txn).await?; if stored_result.is_stored() { metrics::inbound_transactions(Some(&self.peer_node_id)).inc(); debug!( diff --git a/base_layer/core/src/mempool/sync_protocol/test.rs b/base_layer/core/src/mempool/sync_protocol/test.rs index e1aea8d650..0e268df30e 100644 --- a/base_layer/core/src/mempool/sync_protocol/test.rs +++ b/base_layer/core/src/mempool/sync_protocol/test.rs @@ -122,10 +122,10 @@ async fn empty_set() { .await .unwrap(); - let transactions = mempool2.snapshot().await; + let transactions = mempool2.snapshot().await.unwrap(); assert_eq!(transactions.len(), 0); - let transactions = mempool1.snapshot().await; + let transactions = mempool1.snapshot().await.unwrap(); assert_eq!(transactions.len(), 0); } @@ -319,7 +319,14 @@ async fn responder_messages() { } async fn get_snapshot(mempool: &Mempool) -> Vec { - mempool.snapshot().await.iter().map(|t| &**t).cloned().collect() + mempool + .snapshot() + .await + .unwrap() + .iter() + .map(|t| &**t) + .cloned() + .collect() } async fn read_message(reader: &mut S) -> T diff --git a/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs b/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs index 72d7bac4e0..3450d20368 100644 --- a/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs +++ 
b/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs @@ -223,12 +223,14 @@ impl UnconfirmedPool { } } } - // we need to remove all transactions that need to be rechecked. - debug!( - target: LOG_TARGET, - "Removing {} transaction(s) from unconfirmed pool because they need re-evaluation", - transactions_to_remove_and_recheck.len() - ); + if !transactions_to_remove_and_recheck.is_empty() { + // we need to remove all transactions that need to be rechecked. + debug!( + target: LOG_TARGET, + "Removing {} transaction(s) from unconfirmed pool because they need re-evaluation", + transactions_to_remove_and_recheck.len() + ); + } for (tx_key, _) in &transactions_to_remove_and_recheck { self.remove_transaction(*tx_key); } diff --git a/base_layer/core/src/validation/transaction_validators.rs b/base_layer/core/src/validation/transaction_validators.rs index d1a0b0cd9e..1676ced387 100644 --- a/base_layer/core/src/validation/transaction_validators.rs +++ b/base_layer/core/src/validation/transaction_validators.rs @@ -63,8 +63,10 @@ impl TxInternalConsistencyValidator { impl MempoolTransactionValidation for TxInternalConsistencyValidator { fn validate(&self, tx: &Transaction) -> Result<(), ValidationError> { - let db = self.db.db_read_access()?; - let tip = db.fetch_chain_metadata()?; + let tip = { + let db = self.db.db_read_access()?; + db.fetch_chain_metadata() + }?; tx.validate_internal_consistency( self.bypass_range_proof_verification, @@ -181,11 +183,13 @@ impl TxInputAndMaturityValidator { impl MempoolTransactionValidation for TxInputAndMaturityValidator { fn validate(&self, tx: &Transaction) -> Result<(), ValidationError> { let constants = self.db.consensus_constants()?; - let db = self.db.db_read_access()?; - check_inputs_are_utxos(&*db, tx.body())?; - check_outputs(&*db, constants, tx.body())?; + let tip_height = { + let db = self.db.db_read_access()?; + check_inputs_are_utxos(&*db, tx.body())?; + check_outputs(&*db, constants, tx.body())?; + db.fetch_chain_metadata()?.height_of_longest_chain() + }; - let tip_height = db.fetch_chain_metadata()?.height_of_longest_chain(); verify_timelocks(tx, tip_height)?; verify_no_duplicated_inputs_outputs(tx)?; Ok(()) diff --git a/base_layer/core/tests/mempool.rs b/base_layer/core/tests/mempool.rs index fdf3de1496..51d9fddcf5 100644 --- a/base_layer/core/tests/mempool.rs +++ b/base_layer/core/tests/mempool.rs @@ -115,37 +115,50 @@ async fn test_insert_and_process_published_block() { mempool.insert(tx2.clone()).await.unwrap(); mempool.insert(tx3.clone()).await.unwrap(); mempool.insert(tx5.clone()).await.unwrap(); - mempool.process_published_block(blocks[1].block()).await.unwrap(); + mempool.process_published_block(blocks[1].to_arc_block()).await.unwrap(); assert_eq!( mempool - .has_tx_with_excess_sig(&orphan.body.kernels()[0].excess_sig) - .await, + .has_tx_with_excess_sig(orphan.body.kernels()[0].excess_sig.clone()) + .await + .unwrap(), TxStorageResponse::NotStored ); assert_eq!( - mempool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig).await, + mempool + .has_tx_with_excess_sig(tx2.body.kernels()[0].excess_sig.clone()) + .await + .unwrap(), TxStorageResponse::UnconfirmedPool ); assert_eq!( - mempool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig).await, + mempool + .has_tx_with_excess_sig(tx3.body.kernels()[0].excess_sig.clone()) + .await + .unwrap(), TxStorageResponse::NotStored ); assert_eq!( - mempool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig).await, + mempool + 
.has_tx_with_excess_sig(tx5.body.kernels()[0].excess_sig.clone()) + .await + .unwrap(), TxStorageResponse::NotStored ); assert_eq!( - mempool.has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig).await, + mempool + .has_tx_with_excess_sig(tx6.body.kernels()[0].excess_sig.clone()) + .await + .unwrap(), TxStorageResponse::NotStored ); - let snapshot_txs = mempool.snapshot().await; + let snapshot_txs = mempool.snapshot().await.unwrap(); assert_eq!(snapshot_txs.len(), 1); assert!(snapshot_txs.contains(&tx2)); - let stats = mempool.stats().await; + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.total_txs, 1); assert_eq!(stats.unconfirmed_txs, 1); assert_eq!(stats.reorg_txs, 0); @@ -159,35 +172,48 @@ async fn test_insert_and_process_published_block() { // Spend tx2, so it goes in Reorg pool generate_block(&store, &mut blocks, vec![tx2.deref().clone()], &consensus_manager).unwrap(); - mempool.process_published_block(blocks[2].block()).await.unwrap(); + mempool.process_published_block(blocks[2].to_arc_block()).await.unwrap(); assert_eq!( mempool - .has_tx_with_excess_sig(&orphan.body.kernels()[0].excess_sig) - .await, + .has_tx_with_excess_sig(orphan.body.kernels()[0].excess_sig.clone()) + .await + .unwrap(), TxStorageResponse::NotStored ); assert_eq!( - mempool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig).await, + mempool + .has_tx_with_excess_sig(tx2.body.kernels()[0].excess_sig.clone()) + .await + .unwrap(), TxStorageResponse::ReorgPool ); assert_eq!( - mempool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig).await, + mempool + .has_tx_with_excess_sig(tx3.body.kernels()[0].excess_sig.clone()) + .await + .unwrap(), TxStorageResponse::NotStored ); assert_eq!( - mempool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig).await, + mempool + .has_tx_with_excess_sig(tx5.body.kernels()[0].excess_sig.clone()) + .await + .unwrap(), TxStorageResponse::NotStored ); assert_eq!( - mempool.has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig).await, + mempool + .has_tx_with_excess_sig(tx6.body.kernels()[0].excess_sig.clone()) + .await + .unwrap(), TxStorageResponse::NotStored ); - let snapshot_txs = mempool.snapshot().await; + let snapshot_txs = mempool.snapshot().await.unwrap(); assert_eq!(snapshot_txs.len(), 0); - let stats = mempool.stats().await; + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.total_txs, 1); assert_eq!(stats.unconfirmed_txs, 0); assert_eq!(stats.reorg_txs, 1); @@ -211,7 +237,7 @@ async fn test_time_locked() { to: vec![2 * T, 2 * T, 2 * T, 2 * T], fee: 5*uT, lock: 0, features: OutputFeatures::default() )]; generate_new_block(&mut store, &mut blocks, &mut outputs, txs, &consensus_manager).unwrap(); - mempool.process_published_block(blocks[1].block()).await.unwrap(); + mempool.process_published_block(blocks[1].to_arc_block()).await.unwrap(); // Block height should be 1 let mut tx2 = txn_schema!(from: vec![outputs[1][0].clone()], to: vec![1*T], fee: 20*uT, lock: 0, features: OutputFeatures::default()); tx2.lock_height = 3; @@ -239,7 +265,7 @@ async fn test_time_locked() { // Spend tx3, so that the height of the chain will increase generate_block(&store, &mut blocks, vec![tx3.deref().clone()], &consensus_manager).unwrap(); - mempool.process_published_block(blocks[2].block()).await.unwrap(); + mempool.process_published_block(blocks[2].to_arc_block()).await.unwrap(); // Block height increased, so tx2 should no go in. 
assert_eq!(mempool.insert(tx2).await.unwrap(), TxStorageResponse::UnconfirmedPool); @@ -262,7 +288,7 @@ async fn test_retrieve() { )]; // "Mine" Block 1 generate_new_block(&mut store, &mut blocks, &mut outputs, txs, &consensus_manager).unwrap(); - mempool.process_published_block(blocks[1].block()).await.unwrap(); + mempool.process_published_block(blocks[1].to_arc_block()).await.unwrap(); // 1-Block, 8 UTXOs, empty mempool let txs = vec![ txn_schema!(from: vec![outputs[1][0].clone()], to: vec![], fee: 30*uT, lock: 0, features: OutputFeatures::default()), @@ -291,7 +317,7 @@ async fn test_retrieve() { assert!(retrieved_txs.contains(&tx[6])); assert!(retrieved_txs.contains(&tx[2])); assert!(retrieved_txs.contains(&tx[3])); - let stats = mempool.stats().await; + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.unconfirmed_txs, 7); // assert_eq!(stats.timelocked_txs, 1); assert_eq!(stats.reorg_txs, 0); @@ -306,9 +332,9 @@ async fn test_retrieve() { // "Mine" block 2 generate_block(&store, &mut blocks, block2_txns, &consensus_manager).unwrap(); outputs.push(utxos); - mempool.process_published_block(blocks[2].block()).await.unwrap(); + mempool.process_published_block(blocks[2].to_arc_block()).await.unwrap(); // 2-blocks, 2 unconfirmed txs in mempool - let stats = mempool.stats().await; + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.unconfirmed_txs, 2); // assert_eq!(stats.timelocked_txs, 0); assert_eq!(stats.reorg_txs, 5); @@ -327,7 +353,7 @@ async fn test_retrieve() { // Top 2 txs are tx[3] (fee/g = 50) and tx2[1] (fee/g = 40). tx2[0] (fee/g = 80) is still not matured. let weight = tx[3].calculate_weight(weighting) + tx2[1].calculate_weight(weighting); let retrieved_txs = mempool.retrieve(weight).await.unwrap(); - let stats = mempool.stats().await; + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.unconfirmed_txs, 3); // assert_eq!(stats.timelocked_txs, 1); @@ -354,7 +380,7 @@ async fn test_zero_conf() { )]; // "Mine" Block 1 generate_new_block(&mut store, &mut blocks, &mut outputs, txs, &consensus_manager).unwrap(); - mempool.process_published_block(blocks[1].block()).await.unwrap(); + mempool.process_published_block(blocks[1].to_arc_block()).await.unwrap(); // This transaction graph will be created, containing 3 levels of zero-conf transactions, inheriting dependent // outputs from multiple parents @@ -551,7 +577,10 @@ async fn test_zero_conf() { ); // Try to retrieve all transactions in the mempool (a couple of our transactions should be missing from retrieved) - let retrieved_txs = mempool.retrieve(mempool.stats().await.total_weight).await.unwrap(); + let retrieved_txs = mempool + .retrieve(mempool.stats().await.unwrap().total_weight) + .await + .unwrap(); assert_eq!(retrieved_txs.len(), 10); assert!(retrieved_txs.contains(&Arc::new(tx01.clone()))); assert!(!retrieved_txs.contains(&Arc::new(tx02.clone()))); // Missing @@ -600,7 +629,10 @@ async fn test_zero_conf() { ); // Try to retrieve all transactions in the mempool (all transactions should be retrieved) - let retrieved_txs = mempool.retrieve(mempool.stats().await.total_weight).await.unwrap(); + let retrieved_txs = mempool + .retrieve(mempool.stats().await.unwrap().total_weight) + .await + .unwrap(); assert_eq!(retrieved_txs.len(), 16); assert!(retrieved_txs.contains(&Arc::new(tx01.clone()))); assert!(retrieved_txs.contains(&Arc::new(tx02.clone()))); @@ -621,7 +653,7 @@ async fn test_zero_conf() { // Verify that a higher priority transaction is not retrieved due to its zero-conf dependency instead 
of the lowest // priority transaction - let weight = mempool.stats().await.total_weight - 1; + let weight = mempool.stats().await.unwrap().total_weight - 1; let retrieved_txs = mempool.retrieve(weight).await.unwrap(); assert_eq!(retrieved_txs.len(), 15); assert!(retrieved_txs.contains(&Arc::new(tx01))); @@ -659,7 +691,7 @@ async fn test_reorg() { txn_schema!(from: vec![outputs[0][0].clone()], to: vec![1 * T, 1 * T], fee: 25*uT, lock: 0, features: OutputFeatures::default()), ]; generate_new_block(&mut db, &mut blocks, &mut outputs, txs, &consensus_manager).unwrap(); - mempool.process_published_block(blocks[1].block()).await.unwrap(); + mempool.process_published_block(blocks[1].to_arc_block()).await.unwrap(); // "Mine" block 2 let schemas = vec![ @@ -672,11 +704,11 @@ async fn test_reorg() { for tx in &txns2 { mempool.insert(tx.clone()).await.unwrap(); } - let stats = mempool.stats().await; + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.unconfirmed_txs, 3); let txns2 = txns2.iter().map(|t| t.deref().clone()).collect(); generate_block(&db, &mut blocks, txns2, &consensus_manager).unwrap(); - mempool.process_published_block(blocks[2].block()).await.unwrap(); + mempool.process_published_block(blocks[2].to_arc_block()).await.unwrap(); // "Mine" block 3 let schemas = vec![ @@ -698,9 +730,9 @@ async fn test_reorg() { &consensus_manager, ) .unwrap(); - mempool.process_published_block(blocks[3].block()).await.unwrap(); + mempool.process_published_block(blocks[3].to_arc_block()).await.unwrap(); - let stats = mempool.stats().await; + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.unconfirmed_txs, 0); // assert_eq!(stats.timelocked_txs, 1); assert_eq!(stats.reorg_txs, 5); @@ -714,7 +746,7 @@ async fn test_reorg() { .process_reorg(vec![blocks[3].to_arc_block()], vec![reorg_block3.into()]) .await .unwrap(); - let stats = mempool.stats().await; + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.unconfirmed_txs, 2); // assert_eq!(stats.timelocked_txs, 1); assert_eq!(stats.reorg_txs, 3); @@ -803,20 +835,32 @@ async fn receive_and_propagate_transaction() { .unwrap(); async_assert_eventually!( - bob_node.mempool.has_tx_with_excess_sig(&tx_excess_sig).await, + bob_node + .mempool + .has_tx_with_excess_sig(tx_excess_sig.clone()) + .await + .unwrap(), expect = TxStorageResponse::NotStored, max_attempts = 20, interval = Duration::from_millis(1000) ); async_assert_eventually!( - carol_node.mempool.has_tx_with_excess_sig(&tx_excess_sig).await, + carol_node + .mempool + .has_tx_with_excess_sig(tx_excess_sig.clone()) + .await + .unwrap(), expect = TxStorageResponse::NotStored, max_attempts = 10, interval = Duration::from_millis(1000) ); // Carol got sent the orphan tx directly, so it will be in her mempool async_assert_eventually!( - carol_node.mempool.has_tx_with_excess_sig(&orphan_excess_sig).await, + carol_node + .mempool + .has_tx_with_excess_sig(orphan_excess_sig.clone()) + .await + .unwrap(), expect = TxStorageResponse::NotStored, max_attempts = 10, interval = Duration::from_millis(1000) @@ -824,7 +868,11 @@ async fn receive_and_propagate_transaction() { // It's difficult to test a negative here, but let's at least make sure that the orphan TX was not propagated // by the time we check it async_assert_eventually!( - bob_node.mempool.has_tx_with_excess_sig(&orphan_excess_sig).await, + bob_node + .mempool + .has_tx_with_excess_sig(orphan_excess_sig.clone()) + .await + .unwrap(), expect = TxStorageResponse::NotStored, ); } @@ -1134,7 +1182,11 @@ async fn 
block_event_and_reorg_event_handling() { // Add Block1 - tx1 will be moved to the ReorgPool. assert!(bob.local_nci.submit_block(block1.clone(),).await.is_ok()); async_assert_eventually!( - alice.mempool.has_tx_with_excess_sig(&tx1_excess_sig).await, + alice + .mempool + .has_tx_with_excess_sig(tx1_excess_sig.clone()) + .await + .unwrap(), expect = TxStorageResponse::ReorgPool, max_attempts = 20, interval = Duration::from_millis(1000) @@ -1164,27 +1216,46 @@ async fn block_event_and_reorg_event_handling() { assert!(bob.local_nci.submit_block(block2a.clone(),).await.is_ok()); async_assert_eventually!( - bob.mempool.has_tx_with_excess_sig(&tx2a_excess_sig).await, + bob.mempool + .has_tx_with_excess_sig(tx2a_excess_sig.clone()) + .await + .unwrap(), expect = TxStorageResponse::ReorgPool, max_attempts = 20, interval = Duration::from_millis(1000) ); async_assert_eventually!( - alice.mempool.has_tx_with_excess_sig(&tx2a_excess_sig).await, + alice + .mempool + .has_tx_with_excess_sig(tx2a_excess_sig.clone()) + .await + .unwrap(), expect = TxStorageResponse::ReorgPool, max_attempts = 20, interval = Duration::from_millis(1000) ); assert_eq!( - alice.mempool.has_tx_with_excess_sig(&tx3a_excess_sig).await, + alice + .mempool + .has_tx_with_excess_sig(tx3a_excess_sig.clone()) + .await + .unwrap(), TxStorageResponse::ReorgPool ); assert_eq!( - alice.mempool.has_tx_with_excess_sig(&tx2b_excess_sig).await, + alice + .mempool + .has_tx_with_excess_sig(tx2b_excess_sig.clone()) + .await + .unwrap(), TxStorageResponse::ReorgPool ); assert_eq!( - alice.mempool.has_tx_with_excess_sig(&tx3b_excess_sig).await, + alice + .mempool + .has_tx_with_excess_sig(tx3b_excess_sig.clone()) + .await + .unwrap(), TxStorageResponse::ReorgPool ); } diff --git a/base_layer/p2p/src/initialization.rs b/base_layer/p2p/src/initialization.rs index 29e12178a4..565f9a34ad 100644 --- a/base_layer/p2p/src/initialization.rs +++ b/base_layer/p2p/src/initialization.rs @@ -121,6 +121,9 @@ pub struct P2pConfig { pub peer_database_name: String, /// The maximum number of concurrent Inbound tasks allowed before back-pressure is applied to peers pub max_concurrent_inbound_tasks: usize, + /// The maximum number of concurrent outbound tasks allowed before back-pressure is applied to outbound messaging + /// queue + pub max_concurrent_outbound_tasks: usize, /// The size of the buffer (channel) which holds pending outbound message requests pub outbound_buffer_size: usize, /// Configuration for DHT @@ -386,6 +389,7 @@ async fn configure_comms_and_dht( ServiceBuilder::new().layer(dht_outbound_layer).service(sink) }) .max_concurrent_inbound_tasks(config.max_concurrent_inbound_tasks) + .max_concurrent_outbound_tasks(config.max_concurrent_outbound_tasks) .with_inbound_pipeline( ServiceBuilder::new() .layer(dht.inbound_middleware_layer()) diff --git a/base_layer/wallet/tests/wallet.rs b/base_layer/wallet/tests/wallet.rs index 6565bfc990..5adbc50215 100644 --- a/base_layer/wallet/tests/wallet.rs +++ b/base_layer/wallet/tests/wallet.rs @@ -133,7 +133,8 @@ async fn create_wallet( auxilary_tcp_listener_address: None, datastore_path: data_path.to_path_buf(), peer_database_name: random::string(8), - max_concurrent_inbound_tasks: 100, + max_concurrent_inbound_tasks: 10, + max_concurrent_outbound_tasks: 10, outbound_buffer_size: 100, dht: DhtConfig { discovery_request_timeout: Duration::from_secs(1), @@ -708,8 +709,9 @@ async fn test_import_utxo() { auxilary_tcp_listener_address: None, datastore_path: temp_dir.path().to_path_buf(), peer_database_name: 
random::string(8), - max_concurrent_inbound_tasks: 100, - outbound_buffer_size: 100, + max_concurrent_inbound_tasks: 10, + max_concurrent_outbound_tasks: 10, + outbound_buffer_size: 10, dht: Default::default(), allow_test_addresses: true, listener_liveness_allowlist_cidrs: Vec::new(), diff --git a/base_layer/wallet_ffi/src/lib.rs b/base_layer/wallet_ffi/src/lib.rs index 7265e7229b..91fc6d8e16 100644 --- a/base_layer/wallet_ffi/src/lib.rs +++ b/base_layer/wallet_ffi/src/lib.rs @@ -3010,8 +3010,9 @@ pub unsafe extern "C" fn comms_config_create( auxilary_tcp_listener_address: None, datastore_path, peer_database_name: database_name_string, - max_concurrent_inbound_tasks: 100, - outbound_buffer_size: 100, + max_concurrent_inbound_tasks: 25, + max_concurrent_outbound_tasks: 50, + outbound_buffer_size: 50, dht: DhtConfig { discovery_request_timeout: Duration::from_secs(discovery_timeout_in_secs), database_url: DbConnectionUrl::File(dht_database_path), diff --git a/comms/src/bounded_executor.rs b/comms/src/bounded_executor.rs index e1aa1a44f6..7558bfd610 100644 --- a/comms/src/bounded_executor.rs +++ b/comms/src/bounded_executor.rs @@ -227,6 +227,22 @@ impl OptionallyBoundedExecutor { Either::Right(exec) => exec.num_available(), } } + + /// Returns the max number tasks that can be performed concurrenly + pub fn max_available(&self) -> Option { + match &self.inner { + Either::Left(_) => None, + Either::Right(exec) => Some(exec.max_available()), + } + } +} + +impl From for OptionallyBoundedExecutor { + fn from(handle: runtime::Handle) -> Self { + Self { + inner: Either::Left(handle), + } + } } #[cfg(test)] diff --git a/comms/src/multiplexing/yamux.rs b/comms/src/multiplexing/yamux.rs index f17b48a9e4..f11da2172a 100644 --- a/comms/src/multiplexing/yamux.rs +++ b/comms/src/multiplexing/yamux.rs @@ -282,14 +282,14 @@ where TSocket: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static biased; _ = self.shutdown_signal.wait() => { - debug!( - target: LOG_TARGET, - "{} Yamux connection shutdown", self.connection - ); let mut control = self.connection.control(); if let Err(err) = control.close().await { error!(target: LOG_TARGET, "Failed to close yamux connection: {}", err); } + debug!( + target: LOG_TARGET, + "{} Yamux connection has closed", self.connection + ); break } @@ -300,7 +300,7 @@ where TSocket: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static if self.sender.send(stream).await.is_err() { debug!( target: LOG_TARGET, - "{} Incoming peer substream task is shutting down because the internal stream sender channel \ + "{} Incoming peer substream task is stopping because the internal stream sender channel \ was closed", self.connection ); @@ -310,7 +310,7 @@ where TSocket: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static Ok(None) =>{ debug!( target: LOG_TARGET, - "{} Incoming peer substream completed. 
IncomingWorker exiting", + "{} Incoming peer substream ended.", self.connection ); break; @@ -334,8 +334,6 @@ where TSocket: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static } } } - - debug!(target: LOG_TARGET, "Incoming peer substream task is shutting down"); } } diff --git a/comms/src/pipeline/builder.rs b/comms/src/pipeline/builder.rs index 66c23661fc..c4963a5c71 100644 --- a/comms/src/pipeline/builder.rs +++ b/comms/src/pipeline/builder.rs @@ -37,6 +37,7 @@ type OutboundMessageSinkService = SinkService>; #[derive(Default)] pub struct Builder { max_concurrent_inbound_tasks: usize, + max_concurrent_outbound_tasks: Option, outbound_buffer_size: usize, inbound: Option, outbound_rx: Option>, @@ -47,6 +48,7 @@ impl Builder<(), (), ()> { pub fn new() -> Self { Self { max_concurrent_inbound_tasks: DEFAULT_MAX_CONCURRENT_TASKS, + max_concurrent_outbound_tasks: None, outbound_buffer_size: DEFAULT_OUTBOUND_BUFFER_SIZE, inbound: None, outbound_rx: None, @@ -61,6 +63,11 @@ impl Builder { self } + pub fn max_concurrent_outbound_tasks(mut self, max_tasks: usize) -> Self { + self.max_concurrent_outbound_tasks = Some(max_tasks); + self + } + pub fn outbound_buffer_size(mut self, buf_size: usize) -> Self { self.outbound_buffer_size = buf_size; self @@ -77,6 +84,7 @@ impl Builder { outbound_pipeline_factory: Some(Box::new(factory)), max_concurrent_inbound_tasks: self.max_concurrent_inbound_tasks, + max_concurrent_outbound_tasks: self.max_concurrent_outbound_tasks, inbound: self.inbound, outbound_buffer_size: self.outbound_buffer_size, } @@ -88,6 +96,7 @@ impl Builder { inbound: Some(inbound), max_concurrent_inbound_tasks: self.max_concurrent_inbound_tasks, + max_concurrent_outbound_tasks: self.max_concurrent_outbound_tasks, outbound_rx: self.outbound_rx, outbound_pipeline_factory: self.outbound_pipeline_factory, outbound_buffer_size: self.outbound_buffer_size, @@ -126,6 +135,7 @@ where Ok(Config { max_concurrent_inbound_tasks: self.max_concurrent_inbound_tasks, + max_concurrent_outbound_tasks: self.max_concurrent_outbound_tasks, inbound, outbound, }) @@ -147,6 +157,7 @@ pub struct OutboundPipelineConfig { pub struct Config { pub max_concurrent_inbound_tasks: usize, + pub max_concurrent_outbound_tasks: Option, pub inbound: TInSvc, pub outbound: OutboundPipelineConfig, } diff --git a/comms/src/pipeline/inbound.rs b/comms/src/pipeline/inbound.rs index 2e762a52e8..35d910c8a1 100644 --- a/comms/src/pipeline/inbound.rs +++ b/comms/src/pipeline/inbound.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::fmt::Display; +use std::{fmt::Display, time::Instant}; use futures::future::FusedFuture; use log::*; @@ -66,6 +66,7 @@ where } pub async fn run(mut self) { + let mut current_id = 0; while let Some(item) = self.stream.recv().await { // Check if the shutdown signal has been triggered. // If there are messages in the stream, drop them. 
Otherwise the stream is empty, @@ -90,12 +91,24 @@ where max_available ); } + + let id = current_id; + current_id = (current_id + 1) % u64::MAX; + // Call the service in it's own spawned task self.executor .spawn(async move { + let timer = Instant::now(); + trace!(target: LOG_TARGET, "Start inbound pipeline {}", id); if let Err(err) = service.oneshot(item).await { warn!(target: LOG_TARGET, "Inbound pipeline returned an error: '{}'", err); } + trace!( + target: LOG_TARGET, + "Finished inbound pipeline {} in {:.2?}", + id, + timer.elapsed() + ); }) .await; } diff --git a/comms/src/pipeline/outbound.rs b/comms/src/pipeline/outbound.rs index 53de72191e..37fe074ec2 100644 --- a/comms/src/pipeline/outbound.rs +++ b/comms/src/pipeline/outbound.rs @@ -20,14 +20,15 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::fmt::Display; +use std::{fmt::Display, time::Instant}; use futures::future::Either; use log::*; -use tokio::{runtime, sync::mpsc}; +use tokio::sync::mpsc; use tower::{Service, ServiceExt}; use crate::{ + bounded_executor::OptionallyBoundedExecutor, message::OutboundMessage, pipeline::builder::OutboundPipelineConfig, protocol::messaging::MessagingRequest, @@ -37,7 +38,7 @@ const LOG_TARGET: &str = "comms::pipeline::outbound"; pub struct Outbound { /// Executor used to spawn a pipeline for each received item on the stream - executor: runtime::Handle, + executor: OptionallyBoundedExecutor, /// Outbound pipeline configuration containing the pipeline and it's in and out streams config: OutboundPipelineConfig, /// Request sender for Messaging @@ -52,7 +53,7 @@ where TPipeline::Future: Send, { pub fn new( - executor: runtime::Handle, + executor: OptionallyBoundedExecutor, config: OutboundPipelineConfig, messaging_request_tx: mpsc::Sender, ) -> Self { @@ -64,6 +65,7 @@ where } pub async fn run(mut self) { + let mut current_id = 0; loop { let either = tokio::select! { next = self.config.in_receiver.recv() => Either::Left(next), @@ -72,12 +74,41 @@ where match either { // Pipeline IN received a message. 
Spawn a new task for the pipeline Either::Left(Some(msg)) => { - let pipeline = self.config.pipeline.clone(); - self.executor.spawn(async move { - if let Err(err) = pipeline.oneshot(msg).await { - error!(target: LOG_TARGET, "Outbound pipeline returned an error: '{}'", err); + let num_available = self.executor.num_available(); + if let Some(max_available) = self.executor.max_available() { + // Only emit this message if there is any concurrent usage + if num_available < max_available { + debug!( + target: LOG_TARGET, + "Outbound pipeline usage: {}/{}", + max_available - num_available, + max_available + ); } - }); + } + let pipeline = self.config.pipeline.clone(); + let id = current_id; + current_id = (current_id + 1) % u64::MAX; + + self.executor + .spawn(async move { + let timer = Instant::now(); + trace!(target: LOG_TARGET, "Start outbound pipeline {}", id); + if let Err(err) = pipeline.oneshot(msg).await { + error!( + target: LOG_TARGET, + "Outbound pipeline {} returned an error: '{}'", id, err + ); + } + + trace!( + target: LOG_TARGET, + "Finished outbound pipeline {} in {:.2?}", + id, + timer.elapsed() + ); + }) + .await; }, // Pipeline IN channel closed Either::Left(None) => { @@ -144,7 +175,7 @@ mod test { let executor = Handle::current(); let pipeline = Outbound::new( - executor.clone(), + executor.clone().into(), OutboundPipelineConfig { in_receiver, out_receiver: out_rx, diff --git a/comms/src/protocol/messaging/extension.rs b/comms/src/protocol/messaging/extension.rs index fe656e8de3..f3c625c6f6 100644 --- a/comms/src/protocol/messaging/extension.rs +++ b/comms/src/protocol/messaging/extension.rs @@ -27,7 +27,7 @@ use tower::Service; use super::MessagingProtocol; use crate::{ - bounded_executor::BoundedExecutor, + bounded_executor::{BoundedExecutor, OptionallyBoundedExecutor}, message::InboundMessage, pipeline, protocol::{ @@ -36,7 +36,6 @@ use crate::{ ProtocolExtensionContext, ProtocolExtensionError, }, - runtime, runtime::task, }; @@ -104,8 +103,9 @@ where ); task::spawn(inbound.run()); + let executor = OptionallyBoundedExecutor::from_current(self.pipeline.max_concurrent_outbound_tasks); // Spawn outbound pipeline - let outbound = pipeline::Outbound::new(runtime::current(), self.pipeline.outbound, messaging_request_tx); + let outbound = pipeline::Outbound::new(executor, self.pipeline.outbound, messaging_request_tx); task::spawn(outbound.run()); Ok(()) diff --git a/comms/src/protocol/rpc/client/mod.rs b/comms/src/protocol/rpc/client/mod.rs index b4ae9ac68a..f62e8b9597 100644 --- a/comms/src/protocol/rpc/client/mod.rs +++ b/comms/src/protocol/rpc/client/mod.rs @@ -781,12 +781,21 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + StreamId } async fn read_response(&mut self, request_id: u16) -> Result { - let mut reader = RpcResponseReader::new(&mut self.framed, self.config, request_id); + let stream_id = self.stream_id(); + let protocol_name = self.protocol_name().to_string(); + let mut reader = RpcResponseReader::new(&mut self.framed, self.config, request_id); let mut num_ignored = 0; let resp = loop { match reader.read_response().await { Ok(resp) => { + debug!( + target: LOG_TARGET, + "(stream: {}, {}) Received body len = {}", + stream_id, + protocol_name, + reader.bytes_read() + ); metrics::inbound_response_bytes(&self.node_id, &self.protocol_id) .observe(reader.bytes_read() as f64); break resp; @@ -879,6 +888,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin let mut chunk_count = 1; let mut last_chunk_flags = RpcMessageFlags::from_bits_truncate(resp.flags as u8); 
let mut last_chunk_size = resp.payload.len(); + self.bytes_read += last_chunk_size; loop { trace!( target: LOG_TARGET, From cfc42ddcc5d6fd96d05922662eea43929b46c81a Mon Sep 17 00:00:00 2001 From: Byron Hambly Date: Sat, 5 Feb 2022 10:14:54 +0200 Subject: [PATCH 14/20] fix: minor fixes on collectibles (#3795) Description --- - bumps anyhow crate, wanted to use it for collectibles but the tauri error doesn't implement sync for some reason - adds a default impl for CollectiblesConfig, since trying to run it with npm start from the folder would error - fix asset class image --- applications/tari_base_node/Cargo.toml | 2 +- .../tari_collectibles/src-tauri/src/main.rs | 19 ++++++++++++------- .../web-app/src/Dashboard.js | 2 +- .../tari_merge_mining_proxy/Cargo.toml | 2 +- applications/tari_validator_node/Cargo.toml | 2 +- base_layer/p2p/Cargo.toml | 2 +- base_layer/service_framework/Cargo.toml | 2 +- common/Cargo.toml | 4 ++-- .../src/configuration/collectibles_config.rs | 10 ++++++++++ comms/Cargo.toml | 2 +- comms/dht/Cargo.toml | 2 +- dan_layer/core/Cargo.toml | 2 +- infrastructure/metrics/Cargo.toml | 2 +- 13 files changed, 34 insertions(+), 19 deletions(-) diff --git a/applications/tari_base_node/Cargo.toml b/applications/tari_base_node/Cargo.toml index b05e262890..1f6582cef5 100644 --- a/applications/tari_base_node/Cargo.toml +++ b/applications/tari_base_node/Cargo.toml @@ -23,7 +23,7 @@ tari_service_framework = { path = "../../base_layer/service_framework" } tari_shutdown = { path = "../../infrastructure/shutdown" } tari_utilities = "0.3.0" -anyhow = "1.0.32" +anyhow = "1.0.53" bincode = "1.3.1" chrono = { version = "0.4.19", default-features = false } config = { version = "0.9.3" } diff --git a/applications/tari_collectibles/src-tauri/src/main.rs b/applications/tari_collectibles/src-tauri/src/main.rs index dabc4efb8b..1e080a08d5 100644 --- a/applications/tari_collectibles/src-tauri/src/main.rs +++ b/applications/tari_collectibles/src-tauri/src/main.rs @@ -3,6 +3,8 @@ windows_subsystem = "windows" )] +use std::error::Error; + use tari_app_utilities::initialization::init_configuration; use tari_common::configuration::bootstrap::ApplicationType; @@ -24,12 +26,14 @@ mod schema; mod status; mod storage; -fn main() { - #[allow(unused_mut)] // config isn't mutated on windows - let (bootstrap, mut config, _) = init_configuration(ApplicationType::Collectibles).unwrap(); - let state = ConcurrentAppState::new(bootstrap.base_path, config.collectibles_config.unwrap()); +fn main() -> Result<(), Box> { + let (bootstrap, config, _) = init_configuration(ApplicationType::Collectibles)?; + let state = ConcurrentAppState::new( + bootstrap.base_path, + config.collectibles_config.unwrap_or_default(), + ); - tauri::Builder::default() + let result = tauri::Builder::default() .manage(state) .invoke_handler(tauri::generate_handler![ commands::create_db, @@ -53,6 +57,7 @@ fn main() { commands::wallets::wallets_unlock, commands::wallets::wallets_seed_words, ]) - .run(tauri::generate_context!()) - .expect("error while running tauri application"); + .run(tauri::generate_context!())?; + + Ok(result) } diff --git a/applications/tari_collectibles/web-app/src/Dashboard.js b/applications/tari_collectibles/web-app/src/Dashboard.js index 1ef251b04d..19ecfc881a 100644 --- a/applications/tari_collectibles/web-app/src/Dashboard.js +++ b/applications/tari_collectibles/web-app/src/Dashboard.js @@ -54,7 +54,7 @@ class DashboardContent extends React.Component { name: o.name, description: o.description, public_key: 
toHexString(o.unique_id), - image_url: o.image_url || "asset-no-img.png", + image_url: o.image || "asset-no-img.png", })), isLoading: false, }); diff --git a/applications/tari_merge_mining_proxy/Cargo.toml b/applications/tari_merge_mining_proxy/Cargo.toml index 7c0149dd2a..7cdd0751dc 100644 --- a/applications/tari_merge_mining_proxy/Cargo.toml +++ b/applications/tari_merge_mining_proxy/Cargo.toml @@ -20,7 +20,7 @@ tari_app_utilities = { path = "../tari_app_utilities" } tari_crypto = { git = "https://github.com/tari-project/tari-crypto.git", branch = "main" } tari_utilities = "^0.3" -anyhow = "1.0.40" +anyhow = "1.0.53" bincode = "1.3.1" bytes = "1.1" chrono = { version = "0.4.6", default-features = false } diff --git a/applications/tari_validator_node/Cargo.toml b/applications/tari_validator_node/Cargo.toml index 19c2c7bb0f..5911551612 100644 --- a/applications/tari_validator_node/Cargo.toml +++ b/applications/tari_validator_node/Cargo.toml @@ -25,7 +25,7 @@ tari_dan_core = {path = "../../dan_layer/core"} tari_dan_storage_sqlite = {path = "../../dan_layer/storage_sqlite"} tari_common_types = {path = "../../base_layer/common_types"} -anyhow = "1.0.32" +anyhow = "1.0.53" async-trait = "0.1.50" blake2 = "0.9.2" clap = "2.33.3" diff --git a/base_layer/p2p/Cargo.toml b/base_layer/p2p/Cargo.toml index a5759c2ea1..900913dfb8 100644 --- a/base_layer/p2p/Cargo.toml +++ b/base_layer/p2p/Cargo.toml @@ -19,7 +19,7 @@ tari_shutdown = { version = "^0.27", path = "../../infrastructure/shutdown" } tari_storage = { version = "^0.27", path = "../../infrastructure/storage" } tari_utilities = "^0.3" -anyhow = "1.0.32" +anyhow = "1.0.53" bytes = "0.5" chrono = { version = "0.4.19", default-features = false, features = ["serde"] } fs2 = "0.3.0" diff --git a/base_layer/service_framework/Cargo.toml b/base_layer/service_framework/Cargo.toml index baa264e3de..1380a58851 100644 --- a/base_layer/service_framework/Cargo.toml +++ b/base_layer/service_framework/Cargo.toml @@ -12,7 +12,7 @@ license = "BSD-3-Clause" [dependencies] tari_shutdown = { version = "^0.27", path = "../../infrastructure/shutdown" } -anyhow = "1.0.32" +anyhow = "1.0.53" async-trait = "0.1.50" futures = { version = "^0.3.16", features = ["async-await"] } log = "0.4.8" diff --git a/common/Cargo.toml b/common/Cargo.toml index 527261d940..b8cdb837e1 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -27,7 +27,7 @@ sha2 = "0.9.5" path-clean = "0.1.0" tari_storage = { version = "^0.27", path = "../infrastructure/storage"} -anyhow = { version = "1.0", optional = true } +anyhow = { version = "1.0.53", optional = true } git2 = { version = "0.8", optional = true } prost-build = { version = "0.9.0", optional = true } toml = { version = "0.5", optional = true } @@ -37,4 +37,4 @@ tempfile = "3.1.0" [dev-dependencies] tari_test_utils = { version = "^0.27", path = "../infrastructure/test_utils"} -anyhow = "1.0" +anyhow = "1.0.53" diff --git a/common/src/configuration/collectibles_config.rs b/common/src/configuration/collectibles_config.rs index 3851cc1a74..f1a1a5e7e9 100644 --- a/common/src/configuration/collectibles_config.rs +++ b/common/src/configuration/collectibles_config.rs @@ -37,6 +37,16 @@ pub struct CollectiblesConfig { pub wallet_grpc_address: SocketAddr, } +impl Default for CollectiblesConfig { + fn default() -> Self { + Self { + validator_node_grpc_address: default_validator_node_grpc_address(), + base_node_grpc_address: default_base_node_grpc_address(), + wallet_grpc_address: default_wallet_grpc_address(), + } + } +} + fn 
default_validator_node_grpc_address() -> SocketAddr { SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 18144) } diff --git a/comms/Cargo.toml b/comms/Cargo.toml index dfd1835f7b..8210503733 100644 --- a/comms/Cargo.toml +++ b/comms/Cargo.toml @@ -14,7 +14,7 @@ tari_crypto = { git = "https://github.com/tari-project/tari-crypto.git", branch tari_storage = { version = "^0.27", path = "../infrastructure/storage" } tari_shutdown = { version = "^0.27", path = "../infrastructure/shutdown" } -anyhow = "1.0.32" +anyhow = "1.0.53" async-trait = "0.1.36" bitflags = "1.0.4" blake2 = "0.9.0" diff --git a/comms/dht/Cargo.toml b/comms/dht/Cargo.toml index feafaad61d..1b0ed31c25 100644 --- a/comms/dht/Cargo.toml +++ b/comms/dht/Cargo.toml @@ -18,7 +18,7 @@ tari_shutdown = { version = "^0.27", path = "../../infrastructure/shutdown" } tari_storage = { version = "^0.27", path = "../../infrastructure/storage" } tari_common_sqlite = { path = "../../common_sqlite" } -anyhow = "1.0.32" +anyhow = "1.0.53" bitflags = "1.2.0" bytes = "0.5" chacha20 = "0.7.1" diff --git a/dan_layer/core/Cargo.toml b/dan_layer/core/Cargo.toml index 23a666afa7..f3b44c3f6e 100644 --- a/dan_layer/core/Cargo.toml +++ b/dan_layer/core/Cargo.toml @@ -20,7 +20,7 @@ tari_core = {path = "../../base_layer/core"} tari_dan_common_types = {path = "../common_types"} tari_common_types = {path = "../../base_layer/common_types"} -anyhow = "1.0.32" +anyhow = "1.0.53" async-trait = "0.1.50" blake2 = "0.9.2" clap = "2.33.3" diff --git a/infrastructure/metrics/Cargo.toml b/infrastructure/metrics/Cargo.toml index 3753e4c02c..6766e36492 100644 --- a/infrastructure/metrics/Cargo.toml +++ b/infrastructure/metrics/Cargo.toml @@ -19,7 +19,7 @@ reqwest = { version = "0.11.4", default-features = false, optional = true } tokio = { version = "1.7.1", optional = true, features = ["time", "rt-multi-thread"] } warp = { version = "0.3.1", optional = true, default-features = false } thiserror = "1.0.25" -anyhow = "1.0.41" +anyhow = "1.0.53" [features] pull = ["warp"] From 66ea697395286ca89b34c77f3d857f1c3f16b421 Mon Sep 17 00:00:00 2001 From: Philip Robinson Date: Sat, 5 Feb 2022 11:39:54 +0200 Subject: [PATCH 15/20] feat(wallet_ffi)!: add base node connectivity callback to wallet ffi (#3796) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Description --- This PR adds a new callback to the FFI interface that fires when there is a change in the wallet’s connectivity to the set base node. This information was already available from the Wallet Connectivity Service via a watch channel and this PR just hooks up this watch channel to the Wallet FFI callback handler. How Has This Been Tested? --- The unit test for the callback handler is updated to test the addition. 
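Before the diff, a sketch of the mechanism being hooked up: the connectivity service publishes its status on a `tokio::sync::watch` channel, and the callback handler forwards each change across the FFI boundary as an integer. The enum and function below are simplified stand-ins for illustration (the real type lives in `tari_wallet::connectivity_service`), but the forwarding loop has the same shape as the handler added in this patch:

```rust
use tokio::sync::watch;

// Simplified stand-in for the wallet's connectivity status enum; the
// discriminants mirror the values documented for the callback below.
#[derive(Clone, Copy, Debug)]
enum OnlineStatus {
    Connecting = 0,
    Online = 1,
    Offline = 2,
}

// Illustrative forwarding loop: wait for a change on the watch channel, then
// hand the new status to the registered C callback as a u64.
async fn forward_connectivity(
    mut rx: watch::Receiver<OnlineStatus>,
    callback: unsafe extern "C" fn(u64),
) {
    while rx.changed().await.is_ok() {
        let status = *rx.borrow();
        unsafe { callback(status as u64) };
    }
}

#[tokio::main]
async fn main() {
    unsafe extern "C" fn print_status(status: u64) {
        println!("connectivity status changed: {}", status);
    }

    let (tx, rx) = watch::channel(OnlineStatus::Offline);
    let forwarder = tokio::spawn(forward_connectivity(rx, print_status));

    tx.send(OnlineStatus::Connecting).unwrap();
    tx.send(OnlineStatus::Online).unwrap();
    drop(tx); // closing the channel ends the forwarding loop
    forwarder.await.unwrap();
}
```

Note that a `watch` channel retains only the latest value, so rapid successive updates may be coalesced — which is why the unit test in this patch sleeps between status changes.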
--- base_layer/wallet_ffi/src/callback_handler.rs | 27 ++++++++++++++++++ .../wallet_ffi/src/callback_handler_tests.rs | 28 ++++++++++++++++++- base_layer/wallet_ffi/src/lib.rs | 25 +++++++++++++++++ base_layer/wallet_ffi/wallet.h | 8 ++++++ integration_tests/helpers/ffi/ffiInterface.js | 8 +++++- integration_tests/helpers/ffi/wallet.js | 13 ++++++++- 6 files changed, 106 insertions(+), 3 deletions(-) diff --git a/base_layer/wallet_ffi/src/callback_handler.rs b/base_layer/wallet_ffi/src/callback_handler.rs index b4ec645952..f3a4c7d3b5 100644 --- a/base_layer/wallet_ffi/src/callback_handler.rs +++ b/base_layer/wallet_ffi/src/callback_handler.rs @@ -54,6 +54,7 @@ use tari_comms::types::CommsPublicKey; use tari_comms_dht::event::{DhtEvent, DhtEventReceiver}; use tari_shutdown::ShutdownSignal; use tari_wallet::{ + connectivity_service::OnlineStatus, output_manager_service::{ handle::{OutputManagerEvent, OutputManagerEventReceiver, OutputManagerHandle}, service::Balance, @@ -66,6 +67,7 @@ use tari_wallet::{ }, }, }; +use tokio::sync::watch; const LOG_TARGET: &str = "wallet::transaction_service::callback_handler"; @@ -85,6 +87,7 @@ where TBackend: TransactionBackend + 'static callback_balance_updated: unsafe extern "C" fn(*mut Balance), callback_transaction_validation_complete: unsafe extern "C" fn(u64, bool), callback_saf_messages_received: unsafe extern "C" fn(), + callback_connectivity_status: unsafe extern "C" fn(u64), db: TransactionDatabase, transaction_service_event_stream: TransactionEventReceiver, output_manager_service_event_stream: OutputManagerEventReceiver, @@ -93,6 +96,7 @@ where TBackend: TransactionBackend + 'static shutdown_signal: Option, comms_public_key: CommsPublicKey, balance_cache: Balance, + connectivity_status_watch: watch::Receiver, } #[allow(clippy::too_many_arguments)] @@ -107,6 +111,7 @@ where TBackend: TransactionBackend + 'static dht_event_stream: DhtEventReceiver, shutdown_signal: ShutdownSignal, comms_public_key: CommsPublicKey, + connectivity_status_watch: watch::Receiver, callback_received_transaction: unsafe extern "C" fn(*mut InboundTransaction), callback_received_transaction_reply: unsafe extern "C" fn(*mut CompletedTransaction), callback_received_finalized_transaction: unsafe extern "C" fn(*mut CompletedTransaction), @@ -120,6 +125,7 @@ where TBackend: TransactionBackend + 'static callback_balance_updated: unsafe extern "C" fn(*mut Balance), callback_transaction_validation_complete: unsafe extern "C" fn(u64, bool), callback_saf_messages_received: unsafe extern "C" fn(), + callback_connectivity_status: unsafe extern "C" fn(u64), ) -> Self { info!( target: LOG_TARGET, @@ -173,6 +179,10 @@ where TBackend: TransactionBackend + 'static target: LOG_TARGET, "SafMessagesReceivedCallback -> Assigning Fn: {:?}", callback_saf_messages_received ); + info!( + target: LOG_TARGET, + "ConnectivityStatusCallback -> Assigning Fn: {:?}", callback_connectivity_status + ); Self { callback_received_transaction, @@ -188,6 +198,7 @@ where TBackend: TransactionBackend + 'static callback_balance_updated, callback_transaction_validation_complete, callback_saf_messages_received, + callback_connectivity_status, db, transaction_service_event_stream, output_manager_service_event_stream, @@ -196,6 +207,7 @@ where TBackend: TransactionBackend + 'static shutdown_signal: Some(shutdown_signal), comms_public_key, balance_cache: Balance::zero(), + connectivity_status_watch, } } @@ -302,6 +314,11 @@ where TBackend: TransactionBackend + 'static Err(_e) => error!(target: LOG_TARGET, "Error reading 
from DHT event broadcast channel"), } } + Ok(_) = self.connectivity_status_watch.changed() => { + let status = *self.connectivity_status_watch.borrow(); + trace!(target: LOG_TARGET, "Connectivity status change detected: {:?}", status); + self.connectivity_status_changed(status); + }, _ = shutdown_signal.wait() => { info!(target: LOG_TARGET, "Transaction Callback Handler shutting down because the shutdown signal was received"); break; @@ -516,4 +533,14 @@ where TBackend: TransactionBackend + 'static (self.callback_saf_messages_received)(); } } + + fn connectivity_status_changed(&mut self, status: OnlineStatus) { + debug!( + target: LOG_TARGET, + "Calling Connectivity Status changed callback function" + ); + unsafe { + (self.callback_connectivity_status)(status as u64); + } + } } diff --git a/base_layer/wallet_ffi/src/callback_handler_tests.rs b/base_layer/wallet_ffi/src/callback_handler_tests.rs index e01b1c0410..64732fc9b1 100644 --- a/base_layer/wallet_ffi/src/callback_handler_tests.rs +++ b/base_layer/wallet_ffi/src/callback_handler_tests.rs @@ -45,6 +45,7 @@ mod test { use tari_service_framework::reply_channel; use tari_shutdown::Shutdown; use tari_wallet::{ + connectivity_service::OnlineStatus, output_manager_service::{ handle::{OutputManagerEvent, OutputManagerHandle}, service::Balance, @@ -60,7 +61,11 @@ mod test { }, }, }; - use tokio::{runtime::Runtime, sync::broadcast, time::Instant}; + use tokio::{ + runtime::Runtime, + sync::{broadcast, watch}, + time::Instant, + }; use crate::{callback_handler::CallbackHandler, output_manager_service_mock::MockOutputManagerService}; @@ -81,6 +86,7 @@ mod test { pub callback_balance_updated: u32, pub callback_transaction_validation_complete: u32, pub saf_messages_received: bool, + pub connectivity_status_callback_called: u64, } impl CallbackState { @@ -101,6 +107,7 @@ mod test { tx_cancellation_callback_called_inbound: false, tx_cancellation_callback_called_outbound: false, saf_messages_received: false, + connectivity_status_callback_called: 0, } } } @@ -200,6 +207,12 @@ mod test { drop(lock); } + unsafe extern "C" fn connectivity_status_callback(status: u64) { + let mut lock = CALLBACK_STATE.lock().unwrap(); + lock.connectivity_status_callback_called += status + 1; + drop(lock); + } + #[test] fn test_callback_handler() { let runtime = Runtime::new().unwrap(); @@ -300,6 +313,8 @@ mod test { runtime.spawn(mock_output_manager_service.run()); assert_eq!(balance, runtime.block_on(oms_handle.get_balance()).unwrap()); + let (connectivity_tx, connectivity_rx) = watch::channel(OnlineStatus::Offline); + let callback_handler = CallbackHandler::new( db, transaction_event_receiver, @@ -308,6 +323,7 @@ mod test { dht_event_receiver, shutdown_signal.to_signal(), PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), + connectivity_rx, received_tx_callback, received_tx_reply_callback, received_tx_finalized_callback, @@ -321,6 +337,7 @@ mod test { balance_updated_callback, transaction_validation_complete_callback, saf_messages_received_callback, + connectivity_status_callback, ); runtime.spawn(callback_handler.start()); @@ -506,6 +523,14 @@ mod test { dht_event_sender .send(Arc::new(DhtEvent::StoreAndForwardMessagesReceived)) .unwrap(); + thread::sleep(Duration::from_secs(2)); + connectivity_tx.send(OnlineStatus::Offline).unwrap(); + thread::sleep(Duration::from_secs(2)); + connectivity_tx.send(OnlineStatus::Connecting).unwrap(); + thread::sleep(Duration::from_secs(2)); + connectivity_tx.send(OnlineStatus::Online).unwrap(); + 
thread::sleep(Duration::from_secs(2)); + connectivity_tx.send(OnlineStatus::Connecting).unwrap(); thread::sleep(Duration::from_secs(10)); @@ -525,6 +550,7 @@ mod test { assert_eq!(lock.callback_txo_validation_complete, 3); assert_eq!(lock.callback_balance_updated, 5); assert_eq!(lock.callback_transaction_validation_complete, 7); + assert_eq!(lock.connectivity_status_callback_called, 7); drop(lock); } diff --git a/base_layer/wallet_ffi/src/lib.rs b/base_layer/wallet_ffi/src/lib.rs index 91fc6d8e16..5041dd5084 100644 --- a/base_layer/wallet_ffi/src/lib.rs +++ b/base_layer/wallet_ffi/src/lib.rs @@ -139,6 +139,7 @@ use tari_p2p::{ use tari_shutdown::Shutdown; use tari_utilities::{hex, hex::Hex}; use tari_wallet::{ + connectivity_service::WalletConnectivityInterface, contacts_service::storage::database::Contact, error::{WalletError, WalletStorageError}, storage::{ @@ -3260,6 +3261,13 @@ unsafe fn init_logging( /// `callback_saf_message_received` - The callback function pointer that will be called when the Dht has determined that /// is has connected to enough of its neighbours to be confident that it has received any SAF messages that were waiting /// for it. +/// `callback_connectivity_status` - This callback is called when the status of connection to the set base node +/// changes. it will return an enum encoded as an integer as follows: +/// pub enum OnlineStatus { +/// Connecting, // 0 +/// Online, // 1 +/// Offline, // 2 +/// } /// `recovery_in_progress` - Pointer to an bool which will be modified to indicate if there is an outstanding recovery /// that should be completed or not to an error code should one occur, may not be null. Functions as an out parameter. /// `error_out` - Pointer to an int which will be modified @@ -3292,6 +3300,7 @@ pub unsafe extern "C" fn wallet_create( callback_balance_updated: unsafe extern "C" fn(*mut TariBalance), callback_transaction_validation_complete: unsafe extern "C" fn(u64, bool), callback_saf_messages_received: unsafe extern "C" fn(), + callback_connectivity_status: unsafe extern "C" fn(u64), recovery_in_progress: *mut bool, error_out: *mut c_int, ) -> *mut TariWallet { @@ -3483,6 +3492,7 @@ pub unsafe extern "C" fn wallet_create( w.dht_service.subscribe_dht_events(), w.comms.shutdown_signal(), w.comms.node_identity().public_key().clone(), + w.wallet_connectivity.get_connectivity_status_watch(), callback_received_transaction, callback_received_transaction_reply, callback_received_finalized_transaction, @@ -3496,6 +3506,7 @@ pub unsafe extern "C" fn wallet_create( callback_balance_updated, callback_transaction_validation_complete, callback_saf_messages_received, + callback_connectivity_status, ); runtime.spawn(callback_handler.start()); @@ -6199,6 +6210,10 @@ mod test { // assert!(true); //optimized out by compiler } + unsafe extern "C" fn connectivity_status_callback(_status: u64) { + // assert!(true); //optimized out by compiler + } + const NETWORK_STRING: &str = "dibbler"; #[test] @@ -6571,6 +6586,7 @@ mod test { balance_updated_callback, transaction_validation_complete_callback, saf_messages_received_callback, + connectivity_status_callback, recovery_in_progress_ptr, error_ptr, ); @@ -6607,6 +6623,7 @@ mod test { balance_updated_callback, transaction_validation_complete_callback, saf_messages_received_callback, + connectivity_status_callback, recovery_in_progress_ptr, error_ptr, ); @@ -6709,6 +6726,7 @@ mod test { balance_updated_callback, transaction_validation_complete_callback, saf_messages_received_callback, + connectivity_status_callback, 
recovery_in_progress_ptr, error_ptr, ); @@ -6756,6 +6774,7 @@ mod test { balance_updated_callback, transaction_validation_complete_callback, saf_messages_received_callback, + connectivity_status_callback, recovery_in_progress_ptr, error_ptr, ); @@ -6786,6 +6805,7 @@ mod test { balance_updated_callback, transaction_validation_complete_callback, saf_messages_received_callback, + connectivity_status_callback, recovery_in_progress_ptr, error_ptr, ); @@ -6811,6 +6831,7 @@ mod test { balance_updated_callback, transaction_validation_complete_callback, saf_messages_received_callback, + connectivity_status_callback, recovery_in_progress_ptr, error_ptr, ); @@ -6857,6 +6878,7 @@ mod test { balance_updated_callback, transaction_validation_complete_callback, saf_messages_received_callback, + connectivity_status_callback, recovery_in_progress_ptr, error_ptr, ); @@ -6932,6 +6954,7 @@ mod test { balance_updated_callback, transaction_validation_complete_callback, saf_messages_received_callback, + connectivity_status_callback, recovery_in_progress_ptr, error_ptr, ); @@ -7138,6 +7161,7 @@ mod test { balance_updated_callback, transaction_validation_complete_callback, saf_messages_received_callback, + connectivity_status_callback, recovery_in_progress_ptr, error_ptr, ); @@ -7192,6 +7216,7 @@ mod test { balance_updated_callback, transaction_validation_complete_callback, saf_messages_received_callback, + connectivity_status_callback, recovery_in_progress_ptr, error_ptr, ); diff --git a/base_layer/wallet_ffi/wallet.h b/base_layer/wallet_ffi/wallet.h index 742f3971f8..889aaea741 100644 --- a/base_layer/wallet_ffi/wallet.h +++ b/base_layer/wallet_ffi/wallet.h @@ -478,6 +478,13 @@ struct TariPublicKeys *comms_list_connected_public_keys(struct TariWallet *walle /// `callback_saf_message_received` - The callback function pointer that will be called when the Dht has determined that /// is has connected to enough of its neighbours to be confident that it has received any SAF messages that were waiting /// for it. +/// `callback_connectivity_status` - This callback is called when the status of connection to the set base node changes. +/// it will return an enum encoded as an integer as follows: +/// pub enum OnlineStatus { +/// Connecting, // 0 +/// Online, // 1 +/// Offline, // 2 +/// } /// `recovery_in_progress` - Pointer to an bool which will be modified to indicate if there is an outstanding recovery /// that should be completed or not to an error code should one occur, may not be null. Functions as an out parameter. 
/// `error_out` - Pointer to an int which will be modified @@ -515,6 +522,7 @@ struct TariWallet *wallet_create(struct TariCommsConfig *config, void (*callback_balance_updated)(struct TariBalance *), void (*callback_transaction_validation_complete)(unsigned long long, bool), void (*callback_saf_message_received)(), + void (*callback_connectivity_status)(unsigned long long), bool *recovery_in_progress, int *error_out); diff --git a/integration_tests/helpers/ffi/ffiInterface.js b/integration_tests/helpers/ffi/ffiInterface.js index 048041480a..2f2f455fa2 100644 --- a/integration_tests/helpers/ffi/ffiInterface.js +++ b/integration_tests/helpers/ffi/ffiInterface.js @@ -291,6 +291,7 @@ class InterfaceFFI { this.ptr, this.ptr, this.ptr, + this.ptr, this.boolPtr, this.intPtr, ], @@ -1165,6 +1166,9 @@ class InterfaceFFI { fn ); } + static createCallbackConnectivityStatus(fn) { + return ffi.Callback(this.void, [this.ulonglong], fn); + } //endregion static walletCreate( @@ -1186,7 +1190,8 @@ class InterfaceFFI { callback_txo_validation_complete, callback_balance_updated, callback_transaction_validation_complete, - callback_saf_message_received + callback_saf_message_received, + callback_connectivity_status ) { let error = this.initError(); let recovery_in_progress = this.initBool(); @@ -1211,6 +1216,7 @@ class InterfaceFFI { callback_balance_updated, callback_transaction_validation_complete, callback_saf_message_received, + callback_connectivity_status, recovery_in_progress, error ); diff --git a/integration_tests/helpers/ffi/wallet.js b/integration_tests/helpers/ffi/wallet.js index d48a4e3cda..d307660dd2 100644 --- a/integration_tests/helpers/ffi/wallet.js +++ b/integration_tests/helpers/ffi/wallet.js @@ -43,6 +43,7 @@ class Wallet { callback_balance_updated; callback_transaction_validation_complete; callback_saf_message_received; + callback_connectivity_status; recoveryProgressCallback; getTxoValidationStatus() { @@ -141,6 +142,10 @@ class Wallet { this.recoveryProgressCallback = InterfaceFFI.createRecoveryProgressCallback( this.onRecoveryProgress ); + this.callback_connectivity_status = + InterfaceFFI.createCallbackConnectivityStatus( + this.onConnectivityStatusChange + ); //endregion this.receivedTransaction = 0; @@ -180,7 +185,8 @@ class Wallet { this.callback_txo_validation_complete, this.callback_balance_updated, this.callback_transaction_validation_complete, - this.callback_saf_message_received + this.callback_saf_message_received, + this.callback_connectivity_status ); } @@ -328,6 +334,10 @@ class Wallet { return InterfaceFFI.walletIsRecoveryInProgress(this.ptr); } + onConnectivityStatusChange = (status) => { + console.log("Connectivity Status Changed to ", status); + }; + getPublicKey() { let ptr = InterfaceFFI.walletGetPublicKey(this.ptr); let pk = new PublicKey(); @@ -463,6 +473,7 @@ class Wallet { this.callback_transaction_validation_complete = this.callback_saf_message_received = this.recoveryProgressCallback = + this.callback_connectivity_status = undefined; // clear callback function pointers } } From 10f6f5130a3a53e647a4b709cc08e04775b08a41 Mon Sep 17 00:00:00 2001 From: Aaron Feickert <66188213+AaronFeickert@users.noreply.github.com> Date: Sun, 6 Feb 2022 13:10:56 -0600 Subject: [PATCH 16/20] docs: readme fixes (#3800) - Fixes dead links. - Updates binary hash instructions. - Fixes a minor typo. 
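As an aside to the hash-checking instructions this patch cleans up (diff below), the same verification can be done programmatically. A small sketch using the `sha2` crate — already a dependency elsewhere in this workspace — with the file name as a placeholder for whichever binary was downloaded:

```rust
use sha2::{Digest, Sha256};
use std::{fs, io};

fn main() -> io::Result<()> {
    // Placeholder path: point this at the downloaded binary.
    let bytes = fs::read("tari_base_node")?;
    let digest = Sha256::digest(&bytes);

    // Render as lowercase hex, matching `shasum -a 256` output, then compare
    // with the hash published alongside the download.
    let hex: String = digest.iter().map(|b| format!("{:02x}", b)).collect();
    println!("{}  tari_base_node", hex);
    Ok(())
}
```

As the README text notes, this only confirms the download's integrity; it cannot establish the provenance of the binary itself.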
--- README.md | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 9fcda7013d..6c1048d5ee 100644 --- a/README.md +++ b/README.md @@ -17,11 +17,11 @@ Only the first four applications will be discussed in this README (see [wallet-a ### Download -[Download binaries](https://tari.com/downloads) from . This is the easiest way to run a Tari node, but you're +[Download binaries](https://tari.com/downloads/) from [tari.com](https://www.tari.com/). This is the easiest way to run a Tari node, but you're essentially trusting the person that built and uploaded them that nothing untoward has happened. -We've limited the risks by publishing hashes of the binaries alongside the binaries on our website. You can check -that the binaries match the hash by opening a terminal (or command prompt) and by running +Hashes of the binaries are available alongside the downloads. +You can get the hash of your download by opening a terminal or command prompt and running the following: (\*nix) @@ -31,6 +31,10 @@ that the binaries match the hash by opening a terminal (or command prompt) and b certUtil -hashfile SHA256 +If the result doesn't match the published hash, don't run the binary. +Note that this only checks that your binary was downloaded correctly; it cannot detect if the binary was replaced by a bad actor. +If you need to ensure that your binary matches the source, see [Building from source](#building-from-source) below. + ### Install After you have downloaded the binaries, you need to install them. This is easy to do, and works as follows: @@ -422,7 +426,7 @@ The Tari Base Node, Tari Console Wallet, Tari Stratum Transcoder and Tari Mining default installation as described in [Installing using binaries](#installing-using-binaries), all these applications will be available. -For MiningCore see [here](https://github.com/tari-project/miningcore/master/tari#runtime-requirements-on-linux) and [here](https://github.com/tari-project/miningcore/tree/master#runtime-requirements-on-windows). +For MiningCore see the [Linux](https://github.com/tari-project/miningcore/#building-on-debianubuntu) and [Windows](https://github.com/tari-project/miningcore/#building-on-windows) build instructions. #### Configuration prerequisites @@ -667,7 +671,7 @@ they are, choose different ports. You will need to update the ports in the steps The `monerod_url` set must contain valid addresses (`host:port`) for `monerod` that is running Monero mainnet (e.g. `["http://18.132.124.81:18081"]`) or stagenet (e.g. `["http://monero-stagenet.exan.tech:38081"]`), which can be a -[public node hosted by XMR.to](https://community.xmr.to/nodes.html), or to a local instance. To test if the +public node or local instance. To test if the `monerod_url` address is working properly, try to paste `host:port/get_height` in an internet browser, for example: ``` @@ -959,7 +963,7 @@ your wallet identity under `Username:` to see your shares, or try `taritest` if ## RFC documents -The RFCs are long-form technical documents proposing changes and features to the Tari network and ecosystem. They are hosted at https://rfc.tari.com, but you can easily build and serve alocal version yourself. +The RFCs are long-form technical documents proposing changes and features to the Tari network and ecosystem. They are hosted at https://rfc.tari.com, but you can easily build and serve a local version yourself. Firstly, install `mdbook`. 
Assuming you have Rust and cargo installed, run From bbd0e1e54e3eded861b004fd2d4aeba41bc6e423 Mon Sep 17 00:00:00 2001 From: Byron Hambly Date: Mon, 7 Feb 2022 11:33:48 +0200 Subject: [PATCH 17/20] fix: bump flood ban messages config (#3799) Description --- - bump the allowable max message rate to 100_000 per 100 seconds (up from 10_000) - add a short ban duration for max message rate ban (60 mins instead of 6 hours) - update ban message to include the configured values and received rate --- applications/launchpad/backend/assets/config.toml | 4 ++-- applications/launchpad/docker_rig/config.toml | 4 ++-- common/config/presets/base_node.toml | 12 ++++++------ common/src/configuration/utils.rs | 8 ++++---- comms/dht/src/config.rs | 10 +++++++--- comms/dht/src/connectivity/mod.rs | 7 +++++-- 6 files changed, 26 insertions(+), 19 deletions(-) diff --git a/applications/launchpad/backend/assets/config.toml b/applications/launchpad/backend/assets/config.toml index c0219f3e6d..dc360f5ae5 100644 --- a/applications/launchpad/backend/assets/config.toml +++ b/applications/launchpad/backend/assets/config.toml @@ -46,7 +46,7 @@ track_reorgs = true [base_node.dibbler] db_type = "lmdb" -flood_ban_max_msg_count = 10000 +flood_ban_max_msg_count = 100_000 allow_test_addresses = false use_libtor = false base_node_identity_file = "/var/tari/base_node/config/dibbler/tari_base_node_id.json" @@ -65,7 +65,7 @@ grpc_base_node_address = "0.0.0.0:18142" [base_node.igor] db_type = "lmdb" -flood_ban_max_msg_count = 10000 +flood_ban_max_msg_count = 100_000 allow_test_addresses = false use_libtor = false base_node_identity_file = "/var/tari/base_node/config/igor/base_node_id.json" diff --git a/applications/launchpad/docker_rig/config.toml b/applications/launchpad/docker_rig/config.toml index aeca39bbbf..b53f2ec91e 100644 --- a/applications/launchpad/docker_rig/config.toml +++ b/applications/launchpad/docker_rig/config.toml @@ -42,7 +42,7 @@ track_reorgs = true [base_node.dibbler] db_type = "lmdb" -flood_ban_max_msg_count = 10000 +flood_ban_max_msg_count = 100_000 data_dir = "/blockchain/dibbler" force_sync_peers = [] allow_test_addresses = false @@ -64,7 +64,7 @@ console_wallet_tor_identity_file = "config/dibbler/console_wallet_tor.json" [base_node.igor] db_type = "lmdb" -flood_ban_max_msg_count = 10000 +flood_ban_max_msg_count = 100_000 data_dir = "/blockchain/igor" force_sync_peers = [] allow_test_addresses = false diff --git a/common/config/presets/base_node.toml b/common/config/presets/base_node.toml index 4748f37d7d..0efbabd79d 100644 --- a/common/config/presets/base_node.toml +++ b/common/config/presets/base_node.toml @@ -53,9 +53,9 @@ db_type = "lmdb" # is "0", which indicates an archival node without any pruning. #pruning_horizon = 0 -# The amount of messages that will be permitted in the flood ban timespan of 100s (Default dibbler = 1000, -# default mainnet = 10000) -flood_ban_max_msg_count = 10000 +# The amount of messages that will be permitted in the flood ban timespan of 100s (Default dibbler = 100000, +# default mainnet = 100000) +flood_ban_max_msg_count = 100_000 # The relative path to store persistent data data_dir = "dibbler" @@ -223,9 +223,9 @@ db_type = "lmdb" # is "0", which indicates an archival node without any pruning. 
 #pruning_horizon = 0
 
-# The amount of messages that will be permitted in the flood ban timespan of 100s (Default weatherwax = 1000,
-# default mainnet = 10000)
-flood_ban_max_msg_count = 10000
+# The amount of messages that will be permitted in the flood ban timespan of 100s (Default igor = 100000,
+# default mainnet = 100000)
+flood_ban_max_msg_count = 100_000
 
 # The relative path to store persistent data
 data_dir = "igor"
diff --git a/common/src/configuration/utils.rs b/common/src/configuration/utils.rs
index c3ba451f18..8d15322bbd 100644
--- a/common/src/configuration/utils.rs
+++ b/common/src/configuration/utils.rs
@@ -162,7 +162,7 @@ pub fn default_config(bootstrap: &ConfigBootstrap) -> Config {
     cfg.set_default("base_node.mainnet.grpc_base_node_address", "127.0.0.1:18142")
         .unwrap();
     cfg.set_default("wallet.grpc_address", "127.0.0.1:18143").unwrap();
-    cfg.set_default("base_node.mainnet.flood_ban_max_msg_count", 10000)
+    cfg.set_default("base_node.mainnet.flood_ban_max_msg_count", 100_000)
         .unwrap();
 
     //---------------------------------- Weatherwax Defaults --------------------------------------------//
@@ -175,7 +175,7 @@ pub fn default_config(bootstrap: &ConfigBootstrap) -> Config {
     cfg.set_default("base_node.weatherwax.pruning_horizon", 0).unwrap();
     cfg.set_default("base_node.weatherwax.pruned_mode_cleanup_interval", 50)
         .unwrap();
-    cfg.set_default("base_node.weatherwax.flood_ban_max_msg_count", 10000)
+    cfg.set_default("base_node.weatherwax.flood_ban_max_msg_count", 100_000)
         .unwrap();
     cfg.set_default("base_node.weatherwax.peer_seeds", Vec::<String>::new())
         .unwrap();
@@ -220,7 +220,7 @@ pub fn default_config(bootstrap: &ConfigBootstrap) -> Config {
     cfg.set_default("base_node.igor.pruning_horizon", 0).unwrap();
     cfg.set_default("base_node.igor.pruned_mode_cleanup_interval", 50)
         .unwrap();
-    cfg.set_default("base_node.igor.flood_ban_max_msg_count", 10000)
+    cfg.set_default("base_node.igor.flood_ban_max_msg_count", 100_000)
         .unwrap();
     cfg.set_default("base_node.igor.grpc_enabled", false).unwrap();
     cfg.set_default("base_node.igor.grpc_base_node_address", "127.0.0.1:18142")
@@ -236,7 +236,7 @@
 }
 
 fn set_common_network_defaults(cfg: &mut Config) {
-    for network in ["mainnet", "weatherwax", "igor", "localnet"] {
+    for network in ["mainnet", "dibbler", "igor", "localnet"] {
         let key = format!("base_node.{}.dns_seeds_name_server", network);
         cfg.set_default(&key, "1.1.1.1:853/cloudflare-dns.com").unwrap();
 
diff --git a/comms/dht/src/config.rs b/comms/dht/src/config.rs
index 8340d7cd13..6993756ba3 100644
--- a/comms/dht/src/config.rs
+++ b/comms/dht/src/config.rs
@@ -83,11 +83,14 @@ pub struct DhtConfig {
     /// Length of time to ban a peer if the peer misbehaves at the DHT-level.
     /// Default: 6 hrs
     pub ban_duration: Duration,
+    /// Length of time to ban a peer for a "short" duration.
+    /// Default: 60 mins
+    pub ban_duration_short: Duration,
     /// This allows the use of test addresses in the network.
     /// Default: false
     pub allow_test_addresses: bool,
-    /// The maximum number of messages over `flood_ban_timespan` to allow before banning the peer (for `ban_duration`)
-    /// Default: 1000 messages
+    /// The maximum number of messages over `flood_ban_timespan` to allow before banning the peer (for
+    /// `ban_duration_short`) Default: 100_000 messages
     pub flood_ban_max_msg_count: usize,
     /// The timespan over which to calculate the max message rate.
     /// `flood_ban_max_count / flood_ban_timespan (as seconds) = avg. messages per second over the timespan`
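To make the new threshold concrete: with `flood_ban_max_msg_count = 100_000` and a `flood_ban_timespan` of 100 s, a peer is banned (now only for `ban_duration_short`) once it averages more than 100_000 / 100 = 1_000 messages per second over the window. A rough sketch of that check, using illustrative names rather than the actual `DhtConnectivity` internals:

```rust
use std::time::Duration;

/// Returns the observed rate (msg/s) if the flood threshold was breached.
fn flood_rate_if_exceeded(msg_count: usize, max_msg_count: usize, timespan: Duration) -> Option<f64> {
    // e.g. 150_000 messages over 100 s -> Some(1500.0), well above the
    // 1_000 msg/s average allowed by the new defaults
    let mps = msg_count as f64 / timespan.as_secs_f64();
    (msg_count > max_msg_count).then(|| mps)
}
```

The real implementation also feeds the computed rate into the ban reason string, as the connectivity diff below shows.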
@@ -151,8 +154,9 @@ impl Default for DhtConfig {
             join_cooldown_interval: Duration::from_secs(10 * 60),
             network_discovery: Default::default(),
             ban_duration: Duration::from_secs(6 * 60 * 60),
+            ban_duration_short: Duration::from_secs(60 * 60),
             allow_test_addresses: false,
-            flood_ban_max_msg_count: 10000,
+            flood_ban_max_msg_count: 100_000,
             flood_ban_timespan: Duration::from_secs(100),
             offline_peer_cooldown: Duration::from_secs(2 * 60 * 60),
         }
diff --git a/comms/dht/src/connectivity/mod.rs b/comms/dht/src/connectivity/mod.rs
index a3d8042521..4afc7dab30 100644
--- a/comms/dht/src/connectivity/mod.rs
+++ b/comms/dht/src/connectivity/mod.rs
@@ -266,8 +266,11 @@ impl DhtConnectivity {
             self.connectivity
                 .ban_peer_until(
                     peer,
-                    self.config.ban_duration,
-                    "Exceeded maximum message rate".to_string(),
+                    self.config.ban_duration_short,
+                    format!(
+                        "Exceeded maximum message rate. Config: {}/{:#?}. Rate: {:.2} m/s",
+                        self.config.flood_ban_max_msg_count, self.config.flood_ban_timespan, mps
+                    ),
                 )
                 .await?;
         }

From 86de08baa5e7648f68efcbec150d7b8652437ca9 Mon Sep 17 00:00:00 2001
From: Martin Stefcek <35243812+Cifko@users.noreply.github.com>
Date: Mon, 7 Feb 2022 17:43:28 +0100
Subject: [PATCH 18/20] feat: read asset definitions from base layer (#3802)

Description
---
Load assets from the base layer, and periodically check for new assets.

How Has This Been Tested?
---
Manually.

---
 applications/tari_app_grpc/proto/wallet.proto |   6 +
 .../src-tauri/src/clients/wallet_client.rs    |  15 ++
 .../src/commands/asset_wallets/mod.rs         |  10 ++
 .../tari_collectibles/src-tauri/src/main.rs   |   1 +
 .../tari_collectibles/web-app/src/Create.js   |   5 +
 .../tari_collectibles/web-app/src/binding.js  |   5 +
 .../src/grpc/wallet_grpc_server.rs            |  20 +++
 .../tari_validator_node/src/dan_node.rs       | 141 ++++++++----------
 .../src/grpc/services/base_node_client.rs     |  46 +++++-
 common/config/presets/validator_node.toml     |   7 +
 .../configuration/validator_node_config.rs    |   3 +
 .../core/src/services/base_node_client.rs     |   7 +-
 dan_layer/core/src/services/mocks/mod.rs      |   7 +
 13 files changed, 190 insertions(+), 83 deletions(-)

diff --git a/applications/tari_app_grpc/proto/wallet.proto b/applications/tari_app_grpc/proto/wallet.proto
index 6c051b5cad..3ba81bbf0f 100644
--- a/applications/tari_app_grpc/proto/wallet.proto
+++ b/applications/tari_app_grpc/proto/wallet.proto
@@ -44,6 +44,8 @@ service Wallet {
     rpc GetCompletedTransactions (GetCompletedTransactionsRequest) returns (stream GetCompletedTransactionsResponse);
     // Returns the balance
     rpc GetBalance (GetBalanceRequest) returns (GetBalanceResponse);
+    // Returns unspent amounts
+    rpc GetUnspentAmounts (Empty) returns (GetUnspentAmountsResponse);
     // Request the wallet perform a coinsplit
     rpc CoinSplit (CoinSplitRequest) returns (CoinSplitResponse);
     // Import Utxo to wallet
@@ -206,6 +208,10 @@ message GetBalanceResponse {
     uint64 pending_outgoing_balance = 3;
 }
 
+message GetUnspentAmountsResponse {
+    repeated uint64 amount = 1;
+}
+
 message GetCoinbaseRequest {
     uint64 reward = 1;
     uint64 fee = 2;
diff --git a/applications/tari_collectibles/src-tauri/src/clients/wallet_client.rs b/applications/tari_collectibles/src-tauri/src/clients/wallet_client.rs
index 4b416c67ed..6e50e615b6 100644
--- a/applications/tari_collectibles/src-tauri/src/clients/wallet_client.rs
+++ b/applications/tari_collectibles/src-tauri/src/clients/wallet_client.rs
@@ -123,4 +123,19 @@ impl WalletClient {
     debug!(target: LOG_TARGET, "result {:?}", result);
     Ok(result.into_inner())
   }
+
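+  /// Fetches the wallet's unspent output amounts over gRPC. The collectibles
+  /// Create flow uses this to verify that at least two unspent outputs exist
+  /// before attempting to create a new asset.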
+  pub async fn get_unspent_amounts(
+    &mut self,
+  ) -> Result<grpc::GetUnspentAmountsResponse, CollectiblesError> {
+    let inner = self.inner.as_mut().unwrap();
+    let request = grpc::Empty {};
+    let result = inner.get_unspent_amounts(request).await.map_err(|source| {
+      CollectiblesError::ClientRequestError {
+        request: "get_unspent_amounts".to_string(),
+        source,
+      }
+    })?;
+    debug!(target: LOG_TARGET, "result {:?}", result);
+    Ok(result.into_inner())
+  }
 }
diff --git a/applications/tari_collectibles/src-tauri/src/commands/asset_wallets/mod.rs b/applications/tari_collectibles/src-tauri/src/commands/asset_wallets/mod.rs
index a9537bb58a..0963975423 100644
--- a/applications/tari_collectibles/src-tauri/src/commands/asset_wallets/mod.rs
+++ b/applications/tari_collectibles/src-tauri/src/commands/asset_wallets/mod.rs
@@ -175,6 +175,16 @@ pub(crate) async fn asset_wallets_get_balance(
   Ok(total)
 }
 
+#[tauri::command]
+pub(crate) async fn asset_wallets_get_unspent_amounts(
+  state: tauri::State<'_, ConcurrentAppState>,
+) -> Result<Vec<u64>, Status> {
+  let mut client = state.create_wallet_client().await;
+  client.connect().await?;
+  let result = client.get_unspent_amounts().await?;
+  Ok(result.amount)
+}
+
 #[tauri::command]
 pub(crate) async fn asset_wallets_list(
   state: tauri::State<'_, ConcurrentAppState>,
diff --git a/applications/tari_collectibles/src-tauri/src/main.rs b/applications/tari_collectibles/src-tauri/src/main.rs
index 1e080a08d5..0254de8b01 100644
--- a/applications/tari_collectibles/src-tauri/src/main.rs
+++ b/applications/tari_collectibles/src-tauri/src/main.rs
@@ -45,6 +45,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
       commands::asset_wallets::asset_wallets_create,
       commands::asset_wallets::asset_wallets_list,
       commands::asset_wallets::asset_wallets_get_balance,
+      commands::asset_wallets::asset_wallets_get_unspent_amounts,
       commands::asset_wallets::asset_wallets_get_latest_address,
       commands::asset_wallets::asset_wallets_create_address,
       commands::asset_wallets::asset_wallets_send_to,
diff --git a/applications/tari_collectibles/web-app/src/Create.js b/applications/tari_collectibles/web-app/src/Create.js
index de95adf2da..93c5a496d3 100644
--- a/applications/tari_collectibles/web-app/src/Create.js
+++ b/applications/tari_collectibles/web-app/src/Create.js
@@ -156,6 +156,11 @@ class Create extends React.Component {
       templateIds.push(721);
     }
 
+    let outputs = await binding.command_asset_wallets_get_unspent_amounts();
+
+    if (outputs.length <= 1) {
+      throw { message: "You need at least two unspent outputs" };
+    }
     let publicKey = await binding.command_assets_create(
       name,
       description,
diff --git a/applications/tari_collectibles/web-app/src/binding.js b/applications/tari_collectibles/web-app/src/binding.js
index e45b3fc584..b774099e8a 100644
--- a/applications/tari_collectibles/web-app/src/binding.js
+++ b/applications/tari_collectibles/web-app/src/binding.js
@@ -137,6 +137,10 @@ async function command_asset_wallets_get_balance(assetPublicKey) {
   return await invoke("asset_wallets_get_balance", { assetPublicKey });
 }
 
+async function command_asset_wallets_get_unspent_amounts() {
+  return await invoke("asset_wallets_get_unspent_amounts", {});
+}
+
 const commands = {
   command_create_db,
   command_assets_create,
@@ -147,6 +151,7 @@ const commands = {
   command_next_asset_public_key,
   command_asset_wallets_create,
   command_asset_wallets_get_balance,
+  command_asset_wallets_get_unspent_amounts,
   command_asset_wallets_list,
   command_asset_wallets_get_latest_address,
   command_asset_wallets_create_address,
diff --git a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs
b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs index 4cc1bf39f3..d394bcb338 100644 --- a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs +++ b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs @@ -51,6 +51,7 @@ use tari_app_grpc::{ GetOwnedAssetsResponse, GetTransactionInfoRequest, GetTransactionInfoResponse, + GetUnspentAmountsResponse, GetVersionRequest, GetVersionResponse, ImportUtxosRequest, @@ -163,6 +164,25 @@ impl wallet_server::Wallet for WalletGrpcServer { })) } + async fn get_unspent_amounts( + &self, + _: Request, + ) -> Result, Status> { + let mut output_service = self.get_output_manager_service(); + let unspent_amounts; + match output_service.get_unspent_outputs().await { + Ok(uo) => unspent_amounts = uo, + Err(e) => return Err(Status::not_found(format!("GetUnspentAmounts error! {}", e))), + } + Ok(Response::new(GetUnspentAmountsResponse { + amount: unspent_amounts + .into_iter() + .map(|o| o.value.as_u64()) + .filter(|&a| a > 0) + .collect(), + })) + } + async fn revalidate_all_transactions( &self, _request: Request, diff --git a/applications/tari_validator_node/src/dan_node.rs b/applications/tari_validator_node/src/dan_node.rs index bc737d6883..1ab749cefc 100644 --- a/applications/tari_validator_node/src/dan_node.rs +++ b/applications/tari_validator_node/src/dan_node.rs @@ -20,10 +20,9 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{fs, fs::File, io::BufReader, path::Path, sync::Arc, time::Duration}; +use std::{collections::HashMap, sync::Arc, time::Duration}; -use futures::future::try_join_all; -use log::*; +use log::info; use tari_common::{configuration::ValidatorNodeConfig, GlobalConfig}; use tari_comms::{types::CommsPublicKey, NodeIdentity}; use tari_comms_dht::Dht; @@ -31,6 +30,7 @@ use tari_crypto::tari_utilities::hex::Hex; use tari_dan_core::{ models::{AssetDefinition, Committee}, services::{ + BaseNodeClient, ConcreteAssetProcessor, ConcreteCheckpointManager, ConcreteCommitteeManager, @@ -46,7 +46,7 @@ use tari_dan_storage_sqlite::{SqliteDbFactory, SqliteStorageService}; use tari_p2p::{comms_connector::SubscriptionFactory, tari_message::TariMessageType}; use tari_service_framework::ServiceHandles; use tari_shutdown::ShutdownSignal; -use tokio::task; +use tokio::{task, time}; use crate::{ default_service_specification::DefaultServiceSpecification, @@ -58,7 +58,7 @@ use crate::{ ExitCodes, }; -const LOG_TARGET: &str = "tari::dan::dan_node"; +const LOG_TARGET: &str = "tari::validator_node::app"; pub struct DanNode { config: GlobalConfig, @@ -84,84 +84,63 @@ impl DanNode { .as_ref() .ok_or_else(|| ExitCodes::ConfigError("Missing dan section".to_string()))?; - let asset_definitions = self.read_asset_definitions(&dan_config.asset_config_directory)?; - if asset_definitions.is_empty() { - warn!( - target: LOG_TARGET, - "No assets to process. Add assets by putting definitions in the `assets` folder with a `.asset` \ - extension." 
- ); - } - - let mut tasks = vec![]; - for asset in asset_definitions { - let node_identitiy = node_identity.as_ref().clone(); - let mempool = mempool_service.clone(); - let handles = handles.clone(); - let subscription_factory = subscription_factory.clone(); - let shutdown = shutdown.clone(); - let dan_config = dan_config.clone(); - let db_factory = db_factory.clone(); - - tasks.push(task::spawn(async move { - DanNode::start_asset_worker( - asset, - node_identitiy, - mempool, - handles, - subscription_factory, - shutdown, - dan_config, - db_factory, - ) - .await - })); - } - - if tasks.is_empty() { - // If there are no assets to process, work in proxy mode - tasks.push(task::spawn(DanNode::wait_for_exit())); - } - try_join_all(tasks) - .await - .map_err(|err| ExitCodes::UnknownError(format!("Join error occurred. {}", err)))? - .into_iter() - .collect::>()?; - - Ok(()) - } - - fn read_asset_definitions(&self, path: &Path) -> Result, ExitCodes> { - if !path.exists() { - fs::create_dir_all(path).expect("Could not create dir"); - } - let paths = fs::read_dir(path).expect("Could not read asset definitions"); - - let mut result = vec![]; - for path in paths { - let path = path.expect("Not a valid file").path(); - - if !path.is_dir() && path.extension().unwrap_or_default() == "asset" { - let file = File::open(path).expect("could not open file"); - let reader = BufReader::new(file); - - let def: AssetDefinition = serde_json::from_reader(reader).expect("lol not a valid json"); - result.push(def); - } - } - Ok(result) - } - - async fn wait_for_exit() -> Result<(), ExitCodes> { - println!("Type `exit` to exit"); + let mut base_node_client = GrpcBaseNodeClient::new(dan_config.base_node_grpc_address); + let mut tasks = HashMap::new(); + let mut next_scanned_height = 0u64; loop { - let mut line = String::new(); - let _ = std::io::stdin().read_line(&mut line).expect("Failed to read line"); - if line.to_lowercase().trim() == "exit" { - return Err(ExitCodes::UnknownError("User cancelled".to_string())); - } else { - println!("Type `exit` to exit"); + let tip = base_node_client.get_tip_info().await.unwrap(); + if tip.height_of_longest_chain >= next_scanned_height { + info!( + target: LOG_TARGET, + "Scanning base layer (tip : {}) for new assets", tip.height_of_longest_chain + ); + if dan_config.scan_for_assets { + next_scanned_height = tip.height_of_longest_chain + dan_config.new_asset_scanning_interval; + info!(target: LOG_TARGET, "Next scanning height {}", next_scanned_height); + } else { + next_scanned_height = u64::MAX; // Never run again. 
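+                    // Note: even with scanning disabled, the asset list below is
+                    // still fetched once at startup; it is just never refreshed.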
+                }
+
+                let assets = base_node_client
+                    .get_assets_for_dan_node(node_identity.public_key().clone())
+                    .await
+                    .unwrap();
+                for asset in assets {
+                    if tasks.contains_key(&asset.public_key) {
+                        continue;
+                    }
+                    if let Some(allow_list) = &dan_config.assets_allow_list {
+                        if !allow_list.contains(&asset.public_key.to_hex()) {
+                            continue;
+                        }
+                    }
+                    info!(target: LOG_TARGET, "Adding asset {:?}", asset.public_key);
+                    let node_identitiy = node_identity.as_ref().clone();
+                    let mempool = mempool_service.clone();
+                    let handles = handles.clone();
+                    let subscription_factory = subscription_factory.clone();
+                    let shutdown = shutdown.clone();
+                    let dan_config = dan_config.clone();
+                    let db_factory = db_factory.clone();
+                    tasks.insert(
+                        asset.public_key.clone(),
+                        task::spawn(async move {
+                            DanNode::start_asset_worker(
+                                asset.clone(),
+                                node_identitiy,
+                                mempool,
+                                handles,
+                                subscription_factory,
+                                shutdown,
+                                dan_config,
+                                db_factory,
+                            )
+                            .await
+                        }),
+                    );
+                }
+            }
+            time::sleep(Duration::from_secs(120)).await;
         }
     }
diff --git a/applications/tari_validator_node/src/grpc/services/base_node_client.rs b/applications/tari_validator_node/src/grpc/services/base_node_client.rs
index 8654288a24..236627f353 100644
--- a/applications/tari_validator_node/src/grpc/services/base_node_client.rs
+++ b/applications/tari_validator_node/src/grpc/services/base_node_client.rs
@@ -27,7 +27,7 @@ use tari_app_grpc::tari_rpc as grpc;
 use tari_common_types::types::PublicKey;
 use tari_crypto::tari_utilities::ByteArray;
 use tari_dan_core::{
-    models::{BaseLayerMetadata, BaseLayerOutput},
+    models::{AssetDefinition, BaseLayerMetadata, BaseLayerOutput},
     services::BaseNodeClient,
     DigitalAssetError,
 };
@@ -100,4 +100,48 @@ impl BaseNodeClient for GrpcBaseNodeClient {
             .transpose()?;
         Ok(output)
     }
+
+    async fn get_assets_for_dan_node(
+        &mut self,
+        dan_node_public_key: PublicKey,
+    ) -> Result<Vec<AssetDefinition>, DigitalAssetError> {
+        let inner = match self.inner.as_mut() {
+            Some(i) => i,
+            None => {
+                self.connect().await?;
+                self.inner.as_mut().unwrap()
+            },
+        };
+        let request = grpc::ListAssetRegistrationsRequest { offset: 0, count: 0 };
+        let mut result = inner.list_asset_registrations(request).await.unwrap().into_inner();
+        let mut assets: Vec<AssetDefinition> = vec![];
+        let tip = self.get_tip_info().await?;
+        while let Some(r) = result.message().await.unwrap() {
+            if let Ok(asset_public_key) = PublicKey::from_bytes(r.unique_id.as_bytes()) {
+                if let Some(checkpoint) = self
+                    .get_current_checkpoint(tip.height_of_longest_chain, asset_public_key.clone(), vec![3u8; 32])
+                    .await?
+                {
+                    if let Some(committee) = checkpoint.get_side_chain_committee() {
+                        if committee.contains(&dan_node_public_key) {
+                            assets.push(AssetDefinition {
+                                public_key: asset_public_key,
+                                template_parameters: r
+                                    .features
+                                    .unwrap()
+                                    .asset
+                                    .unwrap()
+                                    .template_parameters
+                                    .into_iter()
+                                    .map(|tp| tp.into())
+                                    .collect(),
+                                ..Default::default()
+                            });
+                        }
+                    }
+                }
+            }
+        }
+        Ok(assets)
+    }
 }
diff --git a/common/config/presets/validator_node.toml b/common/config/presets/validator_node.toml
index f6af4517a9..735cd0def8 100644
--- a/common/config/presets/validator_node.toml
+++ b/common/config/presets/validator_node.toml
@@ -9,3 +9,10 @@ committee = ["2ea0df3059caf4411624d6bf5b9c02238d607d2798c586b3e6c2a054da3f205a"]
 # cannot be of zero size
 phase_timeout = 30
 template_id = "EditableMetadata"
+
+# If set to false, there will be no scanning at all.
+scan_for_assets = true
+# How often we want to scan the base layer for changes.
+new_asset_scanning_interval = 10
+# If set, then only the specified assets will be checked.
+# assets_allow_list = [""]
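Note that `new_asset_scanning_interval` above is denominated in blocks of tip advance rather than seconds: the node polls the base node every 120 s, but only rescans for registrations once the chain tip has passed the height recorded on the previous scan. A compact sketch of that gating logic, with hypothetical names (the actual code lives inline in `DanNode::run`):

```rust
/// Illustrative rescan gating, mirroring the loop in dan_node.rs above.
fn next_scan_height(tip_height: u64, scan_for_assets: bool, interval_blocks: u64) -> u64 {
    if scan_for_assets {
        // rescan once the chain tip has advanced `interval_blocks` further
        tip_height + interval_blocks
    } else {
        u64::MAX // assets are loaded once at startup and never rescanned
    }
}
```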
diff --git a/common/src/configuration/validator_node_config.rs b/common/src/configuration/validator_node_config.rs
index 243b42aa16..79c9eba215 100644
--- a/common/src/configuration/validator_node_config.rs
+++ b/common/src/configuration/validator_node_config.rs
@@ -41,6 +41,9 @@ pub struct ValidatorNodeConfig {
     pub base_node_grpc_address: SocketAddr,
     #[serde(default = "default_wallet_grpc_address")]
     pub wallet_grpc_address: SocketAddr,
+    pub scan_for_assets: bool,
+    pub new_asset_scanning_interval: u64,
+    pub assets_allow_list: Option<Vec<String>>,
 }
 
 fn default_asset_config_directory() -> PathBuf {
diff --git a/dan_layer/core/src/services/base_node_client.rs b/dan_layer/core/src/services/base_node_client.rs
index 99cacf2a4b..ae77144a8d 100644
--- a/dan_layer/core/src/services/base_node_client.rs
+++ b/dan_layer/core/src/services/base_node_client.rs
@@ -25,7 +25,7 @@ use tari_common_types::types::PublicKey;
 
 use crate::{
     digital_assets_error::DigitalAssetError,
-    models::{BaseLayerMetadata, BaseLayerOutput},
+    models::{AssetDefinition, BaseLayerMetadata, BaseLayerOutput},
 };
 
 #[async_trait]
@@ -38,4 +38,9 @@ pub trait BaseNodeClient {
         asset_public_key: PublicKey,
         checkpoint_unique_id: Vec<u8>,
     ) -> Result<Option<BaseLayerOutput>, DigitalAssetError>;
+
+    async fn get_assets_for_dan_node(
+        &mut self,
+        dan_node_public_key: PublicKey,
+    ) -> Result<Vec<AssetDefinition>, DigitalAssetError>;
 }
diff --git a/dan_layer/core/src/services/mocks/mod.rs b/dan_layer/core/src/services/mocks/mod.rs
index 67e520f52e..b5989823bd 100644
--- a/dan_layer/core/src/services/mocks/mod.rs
+++ b/dan_layer/core/src/services/mocks/mod.rs
@@ -197,6 +197,13 @@ impl BaseNodeClient for MockBaseNodeClient {
     ) -> Result<Option<BaseLayerOutput>, DigitalAssetError> {
         todo!();
     }
+
+    async fn get_assets_for_dan_node(
+        &mut self,
+        _dan_node_public_key: PublicKey,
+    ) -> Result<Vec<AssetDefinition>, DigitalAssetError> {
+        todo!();
+    }
 }
 
 pub fn mock_base_node_client() -> MockBaseNodeClient {

From e6f6423cc77782487d6e3cd10cdbe441a7908198 Mon Sep 17 00:00:00 2001
From: "C.Lee Taylor" <47312074+leet4tari@users.noreply.github.com>
Date: Mon, 7 Feb 2022 18:44:48 +0200
Subject: [PATCH 19/20] ci: move Apple IDs into envs (#3797)

Description
---
Simply move the text into envs.
Motivation and Context Future forks would not have tari ID's hard coded into GHA --- .github/workflows/base_node_binaries.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/base_node_binaries.yml b/.github/workflows/base_node_binaries.yml index 805dd66d67..aec488f199 100644 --- a/.github/workflows/base_node_binaries.yml +++ b/.github/workflows/base_node_binaries.yml @@ -189,8 +189,10 @@ jobs: if: startsWith(runner.os,'macOS') env: MACOS_KEYCHAIN_PASS: ${{ secrets.MACOS_KEYCHAIN_PASS }} + MACOS_APPLICATION_ID: ${{ secrets.MACOS_APPLICATION_ID }} MACOS_APPLICATION_CERT: ${{ secrets.MACOS_APPLICATION_CERT }} MACOS_APPLICATION_PASS: ${{ secrets.MACOS_APPLICATION_PASS }} + MACOS_INSTALLER_ID: ${{ secrets.MACOS_INSTALLER_ID }} MACOS_INSTALLER_CERT: ${{ secrets.MACOS_INSTALLER_CERT }} MACOS_INSTALLER_PASS: ${{ secrets.MACOS_INSTALLER_PASS }} run: | @@ -206,7 +208,7 @@ jobs: ./create_osx_install_zip.sh unused nozip FILES=("tari_base_node" "tari_console_wallet" "tari_mining_node" "tari_merge_mining_proxy") for FILE in "${FILES[@]}"; do - codesign --force -s "Developer ID Application: Tari Labs, LLC (8XGMD9X2H2)" "/tmp/tari_testnet/runtime/$FILE" -v + codesign --force -s "Developer ID Application: $MACOS_APPLICATION_ID" "/tmp/tari_testnet/runtime/$FILE" -v codesign --verify --deep --display --verbose=4 "/tmp/tari_testnet/runtime/$FILE" done pkgbuild --root /tmp/tari_testnet \ @@ -214,7 +216,7 @@ jobs: --version "$VERSION" \ --install-location "/tmp/tari" \ --scripts "/tmp/tari_testnet/scripts" \ - --sign "Developer ID Installer: Tari Labs, LLC (8XGMD9X2H2)" \ + --sign "Developer ID Installer: $MACOS_INSTALLER_ID" \ "${{ github.workspace }}${{ env.TBN_DIST }}/tari-${{ env.VERSION }}.pkg" - name: Artifact macos pkg if: startsWith(runner.os,'macOS') From 0754ac5b1a416f4a94727b58aabb03e939270fbf Mon Sep 17 00:00:00 2001 From: Mike the Tike Date: Mon, 7 Feb 2022 19:32:19 +0200 Subject: [PATCH 20/20] refactor: reduce log level of some messages (#3804) Co-authored-by: Byron Hambly --- .../tasks/txo_validation_task.rs | 14 +++++++------- .../transaction_validation_protocol.rs | 18 +++++++++--------- comms/src/connection_manager/common.rs | 9 +-------- comms/src/connection_manager/dialer.rs | 1 - comms/src/connection_manager/listener.rs | 1 - comms/src/protocol/identity.rs | 9 --------- comms/src/protocol/rpc/client/mod.rs | 4 ++-- comms/src/protocol/rpc/handshake.rs | 4 ++-- 8 files changed, 21 insertions(+), 39 deletions(-) diff --git a/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs b/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs index f4938eeca6..a30420fdef 100644 --- a/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs +++ b/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs @@ -84,7 +84,7 @@ where .ok_or(OutputManagerError::Shutdown) .for_protocol(self.operation_id)?; - info!( + debug!( target: LOG_TARGET, "Starting TXO validation protocol (Id: {})", self.operation_id, ); @@ -96,7 +96,7 @@ where self.update_spent_outputs(&mut base_node_client, last_mined_header) .await?; self.publish_event(OutputManagerEvent::TxoValidationSuccess(self.operation_id)); - info!( + debug!( target: LOG_TARGET, "Finished TXO validation protocol (Id: {})", self.operation_id, ); @@ -227,7 +227,7 @@ where .for_protocol(self.operation_id)?; for batch in unconfirmed_outputs.chunks(self.config.tx_validator_batch_size) { - info!( + debug!( target: LOG_TARGET, "Asking base node for location 
of {} unconfirmed outputs by hash (Operation ID: {})", batch.len(), @@ -268,7 +268,7 @@ where client: &mut BaseNodeWalletRpcClient, ) -> Result, OutputManagerProtocolError> { let mut last_mined_header_hash = None; - info!( + debug!( target: LOG_TARGET, "Checking last mined TXO to see if the base node has re-orged (Operation ID: {})", self.operation_id ); @@ -306,7 +306,7 @@ where .await .for_protocol(self.operation_id)?; } else { - info!( + debug!( target: LOG_TARGET, "Last mined transaction is still in the block chain according to base node. (Operation ID: {})", self.operation_id @@ -344,7 +344,7 @@ where .await .for_protocol(self.operation_id)?; } else { - info!( + debug!( target: LOG_TARGET, "Last mined transaction is still in the block chain according to base node (Operation ID: {}).", self.operation_id @@ -366,7 +366,7 @@ where let result = match client.get_header_by_height(height).await { Ok(r) => r, Err(rpc_error) => { - info!( + warn!( target: LOG_TARGET, "Error asking base node for header:{} (Operation ID: {})", rpc_error, self.operation_id ); diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs index f346cfb87a..b348c29758 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs @@ -104,7 +104,7 @@ where .for_protocol(self.operation_id.as_u64())?; self.check_for_reorgs(&mut *base_node_wallet_client).await?; - info!( + debug!( target: LOG_TARGET, "Checking if transactions have been mined since last we checked (Operation ID: {})", self.operation_id ); @@ -122,7 +122,7 @@ where .query_base_node_for_transactions(batch, &mut *base_node_wallet_client) .await .for_protocol(self.operation_id.as_u64())?; - info!( + debug!( target: LOG_TARGET, "Base node returned {} as mined and {} as unmined (Operation ID: {})", mined.len(), @@ -130,7 +130,7 @@ where self.operation_id ); for (mined_tx, mined_height, mined_in_block, num_confirmations) in &mined { - info!( + debug!( target: LOG_TARGET, "Updating transaction {} as mined and confirmed '{}' (Operation ID: {})", mined_tx.tx_id, @@ -152,7 +152,7 @@ where // Treat coinbases separately if unmined_tx.is_coinbase() { if unmined_tx.coinbase_block_height.unwrap_or_default() <= tip_height { - info!( + debug!( target: LOG_TARGET, "Updated coinbase {} as abandoned (Operation ID: {})", unmined_tx.tx_id, @@ -167,7 +167,7 @@ where .await?; state_changed = true; } else { - info!( + debug!( target: LOG_TARGET, "Coinbase not found, but it is for a block that is not yet in the chain. 
Coinbase \ height: {}, tip height:{} (Operation ID: {})", @@ -177,7 +177,7 @@ where ); } } else { - info!( + debug!( target: LOG_TARGET, "Updated transaction {} as unmined (Operation ID: {})", unmined_tx.tx_id, self.operation_id ); @@ -208,7 +208,7 @@ where &mut self, client: &mut BaseNodeWalletRpcClient, ) -> Result<(), TransactionServiceProtocolError> { - info!( + debug!( target: LOG_TARGET, "Checking last mined transactions to see if the base node has re-orged (Operation ID: {})", self.operation_id @@ -259,7 +259,7 @@ where .await?; self.publish_event(TransactionEvent::TransactionValidationStateChanged(op_id)); } else { - info!( + debug!( target: LOG_TARGET, "Last mined transaction is still in the block chain according to base node (Operation ID: {}).", self.operation_id @@ -294,7 +294,7 @@ where } if batch_signatures.is_empty() { - info!( + debug!( target: LOG_TARGET, "No transactions needed to query with the base node (Operation ID: {})", self.operation_id ); diff --git a/comms/src/connection_manager/common.rs b/comms/src/connection_manager/common.rs index f7caef7203..44e19e42ed 100644 --- a/comms/src/connection_manager/common.rs +++ b/comms/src/connection_manager/common.rs @@ -25,7 +25,6 @@ use std::{convert::TryFrom, net::Ipv6Addr}; use log::*; use tokio::io::{AsyncRead, AsyncWrite}; -use super::types::ConnectionDirection; use crate::{ connection_manager::error::ConnectionManagerError, multiaddr::{Multiaddr, Protocol}, @@ -50,17 +49,11 @@ pub async fn perform_identity_exchange< >( socket: &mut TSocket, node_identity: &NodeIdentity, - direction: ConnectionDirection, our_supported_protocols: P, network_info: NodeNetworkInfo, ) -> Result { - debug!( - target: LOG_TARGET, - "{} socket opened to peer. Performing identity exchange.", direction - ); - let peer_identity = - protocol::identity_exchange(node_identity, direction, our_supported_protocols, network_info, socket).await?; + protocol::identity_exchange(node_identity, our_supported_protocols, network_info, socket).await?; Ok(peer_identity) } diff --git a/comms/src/connection_manager/dialer.rs b/comms/src/connection_manager/dialer.rs index c0f6487f5e..08b1bc9c45 100644 --- a/comms/src/connection_manager/dialer.rs +++ b/comms/src/connection_manager/dialer.rs @@ -389,7 +389,6 @@ where let peer_identity = common::perform_identity_exchange( &mut socket, &node_identity, - CONNECTION_DIRECTION, &our_supported_protocols, config.network_info.clone(), ) diff --git a/comms/src/connection_manager/listener.rs b/comms/src/connection_manager/listener.rs index 3f50376ba0..fb2221ed08 100644 --- a/comms/src/connection_manager/listener.rs +++ b/comms/src/connection_manager/listener.rs @@ -381,7 +381,6 @@ where let peer_identity = common::perform_identity_exchange( &mut noise_socket, &node_identity, - CONNECTION_DIRECTION, &our_supported_protocols, config.network_info.clone(), ) diff --git a/comms/src/protocol/identity.rs b/comms/src/protocol/identity.rs index 3c96b36c06..7cb1797ec9 100644 --- a/comms/src/protocol/identity.rs +++ b/comms/src/protocol/identity.rs @@ -29,10 +29,8 @@ use tokio::{ io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, time, }; -use tracing; use crate::{ - connection_manager::ConnectionDirection, message::MessageExt, peer_manager::NodeIdentity, proto::identity::PeerIdentityMsg, @@ -43,10 +41,8 @@ const LOG_TARGET: &str = "comms::protocol::identity"; const MAX_IDENTITY_PROTOCOL_MSG_SIZE: u16 = 1024; -#[tracing::instrument(skip(socket, our_supported_protocols))] pub async fn identity_exchange<'p, TSocket, P>( node_identity: 
&NodeIdentity, - direction: ConnectionDirection, our_supported_protocols: P, network_info: NodeNetworkInfo, socket: &mut TSocket, @@ -186,7 +182,6 @@ mod test { use futures::{future, StreamExt}; use crate::{ - connection_manager::ConnectionDirection, peer_manager::PeerFeatures, protocol::{IdentityProtocolError, NodeNetworkInfo}, runtime, @@ -211,7 +206,6 @@ mod test { let (result1, result2) = future::join( super::identity_exchange( &node_identity1, - ConnectionDirection::Inbound, &[], NodeNetworkInfo { minor_version: 1, @@ -221,7 +215,6 @@ mod test { ), super::identity_exchange( &node_identity2, - ConnectionDirection::Outbound, &[], NodeNetworkInfo { minor_version: 2, @@ -260,7 +253,6 @@ mod test { let (result1, result2) = future::join( super::identity_exchange( &node_identity1, - ConnectionDirection::Inbound, &[], NodeNetworkInfo { major_version: 0, @@ -270,7 +262,6 @@ mod test { ), super::identity_exchange( &node_identity2, - ConnectionDirection::Outbound, &[], NodeNetworkInfo { major_version: 1, diff --git a/comms/src/protocol/rpc/client/mod.rs b/comms/src/protocol/rpc/client/mod.rs index f62e8b9597..cba5a9b1a1 100644 --- a/comms/src/protocol/rpc/client/mod.rs +++ b/comms/src/protocol/rpc/client/mod.rs @@ -440,7 +440,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + StreamId self.framed.stream_id() } - #[tracing::instrument(name = "rpc_client_worker run", skip(self), fields(next_request_id = self.next_request_id))] + #[tracing::instrument(level="trace", name = "rpc_client_worker run", skip(self), fields(next_request_id = self.next_request_id))] async fn run(mut self) { debug!( target: LOG_TARGET, @@ -590,7 +590,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + StreamId Ok(()) } - #[tracing::instrument(name = "rpc_do_request_response", skip(self, reply, request), fields(request_method = ?request.method, request_body_size = request.message.len()))] + #[tracing::instrument(level="trace", name = "rpc_do_request_response", skip(self, reply, request), fields(request_method = ?request.method, request_body_size = request.message.len()))] async fn do_request_response( &mut self, request: BaseRequest, diff --git a/comms/src/protocol/rpc/handshake.rs b/comms/src/protocol/rpc/handshake.rs index e3f558536f..b131214ef3 100644 --- a/comms/src/protocol/rpc/handshake.rs +++ b/comms/src/protocol/rpc/handshake.rs @@ -78,7 +78,7 @@ where T: AsyncRead + AsyncWrite + Unpin } /// Server-side handshake protocol - #[tracing::instrument(name = "rpc::server::perform_server_handshake", skip(self), err, fields(comms.direction="inbound"))] + #[tracing::instrument(level="trace", name = "rpc::server::perform_server_handshake", skip(self), err, fields(comms.direction="inbound"))] pub async fn perform_server_handshake(&mut self) -> Result { match self.recv_next_frame().await { Ok(Some(Ok(msg))) => { @@ -88,7 +88,7 @@ where T: AsyncRead + AsyncWrite + Unpin .iter() .find(|v| msg.supported_versions.contains(v)); if let Some(version) = version { - event!(Level::INFO, version = version, "Server accepted version"); + event!(Level::DEBUG, version = version, "Server accepted version"); debug!(target: LOG_TARGET, "Server accepted version: {}", version); let reply = proto::rpc::RpcSessionReply { session_result: Some(proto::rpc::rpc_session_reply::SessionResult::AcceptedVersion(*version)),