diff --git a/.circleci/config.yml b/.circleci/config.yml index 41bbab02f5b..3161fe743a9 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -31,6 +31,9 @@ commands: - run: name: Build miner command: cargo build --release --bin tari_miner + - run: + name: Build wallet FFI + command: cargo build --release --package tari_wallet_ffi - run: name: Run cucumber scenarios no_output_timeout: 20m diff --git a/.deepsource.toml b/.deepsource.toml new file mode 100644 index 00000000000..7219beb3ba7 --- /dev/null +++ b/.deepsource.toml @@ -0,0 +1,10 @@ +version = 1 + + +[[analyzers]] +name = "rust" +enabled = true + + [analyzers.meta] + msrv = "stable" + diff --git a/Cargo.lock b/Cargo.lock index 85179a2a518..ebdb2384a18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4594,7 +4594,7 @@ dependencies = [ [[package]] name = "tari_app_grpc" -version = "0.38.5" +version = "0.38.6" dependencies = [ "argon2 0.4.1", "base64 0.13.0", @@ -4619,7 +4619,7 @@ dependencies = [ [[package]] name = "tari_app_utilities" -version = "0.38.5" +version = "0.38.6" dependencies = [ "clap 3.2.22", "config", @@ -4641,7 +4641,7 @@ dependencies = [ [[package]] name = "tari_base_node" -version = "0.38.5" +version = "0.38.6" dependencies = [ "anyhow", "async-trait", @@ -4742,7 +4742,7 @@ dependencies = [ [[package]] name = "tari_common" -version = "0.38.5" +version = "0.38.6" dependencies = [ "anyhow", "blake2 0.9.2", @@ -4770,7 +4770,7 @@ dependencies = [ [[package]] name = "tari_common_sqlite" -version = "0.38.5" +version = "0.38.6" dependencies = [ "diesel", "log", @@ -4779,7 +4779,7 @@ dependencies = [ [[package]] name = "tari_common_types" -version = "0.38.5" +version = "0.38.6" dependencies = [ "base64 0.13.0", "digest 0.9.0", @@ -4795,7 +4795,7 @@ dependencies = [ [[package]] name = "tari_comms" -version = "0.38.5" +version = "0.38.6" dependencies = [ "anyhow", "async-trait", @@ -4845,7 +4845,7 @@ dependencies = [ [[package]] name = "tari_comms_dht" -version = "0.38.5" +version = "0.38.6" dependencies = [ "anyhow", "bitflags 1.3.2", @@ -4891,7 +4891,7 @@ dependencies = [ [[package]] name = "tari_comms_rpc_macros" -version = "0.38.5" +version = "0.38.6" dependencies = [ "futures 0.3.24", "proc-macro2", @@ -4906,7 +4906,7 @@ dependencies = [ [[package]] name = "tari_console_wallet" -version = "0.38.5" +version = "0.38.6" dependencies = [ "base64 0.13.0", "bitflags 1.3.2", @@ -4956,7 +4956,7 @@ dependencies = [ [[package]] name = "tari_core" -version = "0.38.5" +version = "0.38.6" dependencies = [ "async-trait", "bincode", @@ -5044,7 +5044,7 @@ dependencies = [ [[package]] name = "tari_key_manager" -version = "0.38.5" +version = "0.38.6" dependencies = [ "argon2 0.2.4", "arrayvec 0.7.2", @@ -5091,7 +5091,7 @@ dependencies = [ [[package]] name = "tari_merge_mining_proxy" -version = "0.38.5" +version = "0.38.6" dependencies = [ "anyhow", "bincode", @@ -5143,7 +5143,7 @@ dependencies = [ [[package]] name = "tari_miner" -version = "0.38.5" +version = "0.38.6" dependencies = [ "base64 0.13.0", "bufstream", @@ -5179,7 +5179,7 @@ dependencies = [ [[package]] name = "tari_mining_helper_ffi" -version = "0.38.5" +version = "0.38.6" dependencies = [ "hex", "libc", @@ -5196,7 +5196,7 @@ dependencies = [ [[package]] name = "tari_mmr" -version = "0.38.5" +version = "0.38.6" dependencies = [ "bincode", "blake2 0.9.2", @@ -5215,7 +5215,7 @@ dependencies = [ [[package]] name = "tari_p2p" -version = "0.38.5" +version = "0.38.6" dependencies = [ "anyhow", "bytes 0.5.6", @@ -5272,7 +5272,7 @@ dependencies = [ [[package]] name = 
"tari_service_framework" -version = "0.38.5" +version = "0.38.6" dependencies = [ "anyhow", "async-trait", @@ -5289,7 +5289,7 @@ dependencies = [ [[package]] name = "tari_shutdown" -version = "0.38.5" +version = "0.38.6" dependencies = [ "futures 0.3.24", "tokio", @@ -5297,7 +5297,7 @@ dependencies = [ [[package]] name = "tari_storage" -version = "0.38.5" +version = "0.38.6" dependencies = [ "bincode", "lmdb-zero", @@ -5311,7 +5311,7 @@ dependencies = [ [[package]] name = "tari_test_utils" -version = "0.38.5" +version = "0.38.6" dependencies = [ "futures 0.3.24", "futures-test", @@ -5338,7 +5338,7 @@ dependencies = [ [[package]] name = "tari_wallet" -version = "0.38.5" +version = "0.38.6" dependencies = [ "argon2 0.2.4", "async-trait", @@ -5389,7 +5389,7 @@ dependencies = [ [[package]] name = "tari_wallet_ffi" -version = "0.38.5" +version = "0.38.6" dependencies = [ "cbindgen 0.24.3", "chrono", diff --git a/applications/tari_app_grpc/Cargo.toml b/applications/tari_app_grpc/Cargo.toml index 002eefa59cd..25fbb05cbdc 100644 --- a/applications/tari_app_grpc/Cargo.toml +++ b/applications/tari_app_grpc/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "This crate is to provide a single source for all cross application grpc files and conversions to and from tari::core" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/applications/tari_app_grpc/src/conversions/transaction_input.rs b/applications/tari_app_grpc/src/conversions/transaction_input.rs index 0a42d1d61d2..a4e346728f5 100644 --- a/applications/tari_app_grpc/src/conversions/transaction_input.rs +++ b/applications/tari_app_grpc/src/conversions/transaction_input.rs @@ -119,8 +119,8 @@ impl TryFrom for grpc::TransactionInput { script: input .script() .map_err(|_| "Non-compact Transaction input should contain script".to_string())? 
- .as_bytes(), - input_data: input.input_data.as_bytes(), + .to_bytes(), + input_data: input.input_data.to_bytes(), script_signature, sender_offset_public_key: input .sender_offset_public_key() diff --git a/applications/tari_app_grpc/src/conversions/transaction_output.rs b/applications/tari_app_grpc/src/conversions/transaction_output.rs index af9afd989c8..8a037d8ef68 100644 --- a/applications/tari_app_grpc/src/conversions/transaction_output.rs +++ b/applications/tari_app_grpc/src/conversions/transaction_output.rs @@ -85,7 +85,7 @@ impl From<TransactionOutput> for grpc::TransactionOutput { features: Some(output.features.into()), commitment: Vec::from(output.commitment.as_bytes()), range_proof: Vec::from(output.proof.as_bytes()), - script: output.script.as_bytes(), + script: output.script.to_bytes(), sender_offset_public_key: output.sender_offset_public_key.as_bytes().to_vec(), metadata_signature: Some(grpc::ComSignature { public_nonce_commitment: Vec::from(output.metadata_signature.public_nonce().as_bytes()), diff --git a/applications/tari_app_grpc/src/conversions/unblinded_output.rs b/applications/tari_app_grpc/src/conversions/unblinded_output.rs index d49153c35b8..18c8dab78c5 100644 --- a/applications/tari_app_grpc/src/conversions/unblinded_output.rs +++ b/applications/tari_app_grpc/src/conversions/unblinded_output.rs @@ -41,8 +41,8 @@ impl From<UnblindedOutput> for grpc::UnblindedOutput { value: u64::from(output.value), spending_key: output.spending_key.as_bytes().to_vec(), features: Some(output.features.into()), - script: output.script.as_bytes(), - input_data: output.input_data.as_bytes(), + script: output.script.to_bytes(), + input_data: output.input_data.to_bytes(), script_private_key: output.script_private_key.as_bytes().to_vec(), sender_offset_public_key: output.sender_offset_public_key.as_bytes().to_vec(), metadata_signature: Some(grpc::ComSignature { diff --git a/applications/tari_app_utilities/Cargo.toml b/applications/tari_app_utilities/Cargo.toml index 3ada64c6467..4eca3252b4d 100644 --- a/applications/tari_app_utilities/Cargo.toml +++ b/applications/tari_app_utilities/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_app_utilities" -version = "0.38.5" +version = "0.38.7" authors = ["The Tari Development Community"] edition = "2018" license = "BSD-3-Clause" diff --git a/applications/tari_base_node/Cargo.toml b/applications/tari_base_node/Cargo.toml index 2ff991ef587..cd717a4cc0c 100644 --- a/applications/tari_base_node/Cargo.toml +++ b/applications/tari_base_node/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "The tari full base node implementation" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/applications/tari_console_wallet/Cargo.toml b/applications/tari_console_wallet/Cargo.toml index 8f78a9c42b3..6b303259770 100644 --- a/applications/tari_console_wallet/Cargo.toml +++ b/applications/tari_console_wallet/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_console_wallet" -version = "0.38.5" +version = "0.38.7" authors = ["The Tari Development Community"] edition = "2018" license = "BSD-3-Clause" diff --git a/applications/tari_merge_mining_proxy/Cargo.toml b/applications/tari_merge_mining_proxy/Cargo.toml index d0bac487679..6fab0f47653 100644 --- a/applications/tari_merge_mining_proxy/Cargo.toml +++ b/applications/tari_merge_mining_proxy/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "The Tari merge mining proxy for
xmrig" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [features] diff --git a/applications/tari_miner/Cargo.toml b/applications/tari_miner/Cargo.toml index f63f4f72917..3dffbc295d0 100644 --- a/applications/tari_miner/Cargo.toml +++ b/applications/tari_miner/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "The tari miner implementation" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/applications/tari_miner/src/difficulty.rs b/applications/tari_miner/src/difficulty.rs index d4ef569167c..8e283d83481 100644 --- a/applications/tari_miner/src/difficulty.rs +++ b/applications/tari_miner/src/difficulty.rs @@ -22,9 +22,8 @@ use std::convert::TryInto; -use sha3::{Digest, Sha3_256}; use tari_app_grpc::tari_rpc::BlockHeader as grpc_header; -use tari_core::{blocks::BlockHeader, large_ints::U256}; +use tari_core::{blocks::BlockHeader, proof_of_work::sha3_difficulty}; use tari_utilities::epoch_time::EpochTime; use crate::errors::MinerError; @@ -34,7 +33,6 @@ pub type Difficulty = u64; #[derive(Clone)] pub struct BlockHeaderSha3 { pub header: BlockHeader, - hash_merge_mining: Sha3_256, pub hashes: u64, } @@ -43,19 +41,7 @@ impl BlockHeaderSha3 { #[allow(clippy::cast_sign_loss)] pub fn new(header: grpc_header) -> Result { let header: BlockHeader = header.try_into().map_err(MinerError::BlockHeader)?; - - let hash_merge_mining = Sha3_256::new().chain(header.mining_hash()); - - Ok(Self { - hash_merge_mining, - header, - hashes: 0, - }) - } - - #[inline] - fn get_hash_before_nonce(&self) -> Sha3_256 { - self.hash_merge_mining.clone() + Ok(Self { header, hashes: 0 }) } /// This function will update the timestamp of the header, but only if the new timestamp is greater than the current @@ -65,7 +51,6 @@ impl BlockHeaderSha3 { // should only change the timestamp if we move it forward. 
if timestamp > self.header.timestamp.as_u64() { self.header.timestamp = EpochTime::from(timestamp); - self.hash_merge_mining = Sha3_256::new().chain(self.header.mining_hash()); } } @@ -82,13 +67,7 @@ impl BlockHeaderSha3 { #[inline] pub fn difficulty(&mut self) -> Difficulty { self.hashes = self.hashes.saturating_add(1); - let hash = self - .get_hash_before_nonce() - .chain(self.header.nonce.to_le_bytes()) - .chain(self.header.pow.to_bytes()) - .finalize(); - let hash = Sha3_256::digest(&hash); - big_endian_difficulty(&hash) + sha3_difficulty(&self.header).into() } #[allow(clippy::cast_possible_wrap)] @@ -102,13 +81,6 @@ impl BlockHeaderSha3 { } } -/// This will provide the difficulty of the hash assuming the hash is big_endian -fn big_endian_difficulty(hash: &[u8]) -> Difficulty { - let scalar = U256::from_big_endian(hash); // Big endian so the hash has leading zeroes - let result = U256::MAX / scalar; - result.low_u64() -} - #[cfg(test)] pub mod test { use chrono::{DateTime, NaiveDate, Utc}; diff --git a/base_layer/common_types/Cargo.toml b/base_layer/common_types/Cargo.toml index e22cf81ef78..b19366d2b5c 100644 --- a/base_layer/common_types/Cargo.toml +++ b/base_layer/common_types/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_common_types" authors = ["The Tari Development Community"] description = "Tari cryptocurrency common types" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/base_layer/core/Cargo.toml b/base_layer/core/Cargo.toml index d97ccee92c7..bd4ddd669b8 100644 --- a/base_layer/core/Cargo.toml +++ b/base_layer/core/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [features] diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index ca893c34cad..40483e2c7c7 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -1693,18 +1693,12 @@ fn check_for_valid_height<T: BlockchainBackend>(db: &T, height: u64) -> Result<( /// Removes blocks from the db from current tip to specified height. /// Returns the blocks removed, ordered from tip to height. -fn rewind_to_height<T: BlockchainBackend>( - db: &mut T, - mut height: u64, -) -> Result<Vec<Arc<ChainBlock>>, ChainStorageError> { +fn rewind_to_height<T: BlockchainBackend>(db: &mut T, height: u64) -> Result<Vec<Arc<ChainBlock>>, ChainStorageError> { let last_header = db.fetch_last_header()?; - let mut txn = DbTransaction::new(); - // Delete headers let last_header_height = last_header.height; let metadata = db.fetch_chain_metadata()?; - let expected_block_hash = *metadata.best_block(); let last_block_height = metadata.height_of_longest_chain(); // We use the cmp::max value here because we'll only delete headers here and leave remaining headers to be deleted // with the whole block @@ -1727,20 +1721,20 @@ fn rewind_to_height<T: BlockchainBackend>( ); } // We might have more headers than blocks, so we first see if we need to delete the extra headers.
- (0..steps_back).for_each(|h| { + for h in 0..steps_back { + let mut txn = DbTransaction::new(); info!( target: LOG_TARGET, "Rewinding headers at height {}", last_header_height - h ); txn.delete_header(last_header_height - h); - }); - + db.write(txn)?; + } // Delete blocks let mut steps_back = last_block_height.saturating_sub(height); // No blocks to remove, no need to update the best block if steps_back == 0 { - db.write(txn)?; return Ok(vec![]); } @@ -1761,22 +1755,45 @@ fn rewind_to_height( effective_pruning_horizon ); steps_back = effective_pruning_horizon; - height = 0; } - for h in 0..steps_back { + let mut txn = DbTransaction::new(); info!(target: LOG_TARGET, "Deleting block {}", last_block_height - h,); let block = fetch_block(db, last_block_height - h, false)?; let block = Arc::new(block.try_into_chain_block()?); txn.delete_block(*block.hash()); txn.delete_header(last_block_height - h); if !prune_past_horizon && !db.contains(&DbKey::OrphanBlock(*block.hash()))? { - // Because we know we will remove blocks we can't recover, this will be a destructive rewind, so we can't - // recover from this apart from resync from another peer. Failure here should not be common as - // this chain has a valid proof of work that has been tested at this point in time. + // Because we know we will remove blocks we can't recover, this will be a destructive rewind, so we + // can't recover from this apart from resync from another peer. Failure here + // should not be common as this chain has a valid proof of work that has been + // tested at this point in time. txn.insert_chained_orphan(block.clone()); } removed_blocks.push(block); + // Set best block to one before, to keep DB consistent. Or if we reached pruned horizon, set best block to 0. + let chain_header = db.fetch_chain_header_by_height(if prune_past_horizon && h + 1 == steps_back { + 0 + } else { + last_block_height - h - 1 + })?; + let metadata = db.fetch_chain_metadata()?; + let expected_block_hash = *metadata.best_block(); + txn.set_best_block( + chain_header.height(), + chain_header.accumulated_data().hash, + chain_header.accumulated_data().total_accumulated_difficulty, + expected_block_hash, + chain_header.timestamp(), + ); + // Update metadata + debug!( + target: LOG_TARGET, + "Updating best block to height (#{}), total accumulated difficulty: {}", + chain_header.height(), + chain_header.accumulated_data().total_accumulated_difficulty + ); + db.write(txn)?; } if prune_past_horizon { @@ -1785,6 +1802,7 @@ fn rewind_to_height( // We don't have these complete blocks, so we don't push them to the channel for further processing such as the // mempool add reorg'ed tx. 
for h in 0..(last_block_height - steps_back) { + let mut txn = DbTransaction::new(); debug!( target: LOG_TARGET, "Deleting blocks and utxos {}", @@ -1792,27 +1810,10 @@ fn rewind_to_height<T: BlockchainBackend>( ); let header = fetch_header(db, last_block_height - h - steps_back)?; txn.delete_block(header.hash()); + db.write(txn)?; } } - let chain_header = db.fetch_chain_header_by_height(height)?; - // Update metadata - debug!( - target: LOG_TARGET, - "Updating best block to height (#{}), total accumulated difficulty: {}", - chain_header.height(), - chain_header.accumulated_data().total_accumulated_difficulty - ); - - txn.set_best_block( - chain_header.height(), - chain_header.accumulated_data().hash, - chain_header.accumulated_data().total_accumulated_difficulty, - expected_block_hash, - chain_header.timestamp(), - ); - db.write(txn)?; - Ok(removed_blocks) } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs index 75e238b088d..a1bc2dcd112 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs @@ -445,3 +445,51 @@ pub fn lmdb_clear(txn: &WriteTransaction<'_>, db: &Database) -> Result<usize, ChainStorageError> +pub fn lmdb_map_inplace<F, V, R>( + txn: &WriteTransaction<'_>, + db: &Database, + f: F, +) -> Result<(), ChainStorageError> +where + F: Fn(V) -> Option<R>, + V: DeserializeOwned, + R: Serialize, +{ + let mut access = txn.access(); + let mut cursor = txn.cursor(db).map_err(|e| { + error!(target: LOG_TARGET, "Could not get read cursor from lmdb: {:?}", e); + ChainStorageError::AccessError(e.to_string()) + })?; + let iter = CursorIter::new( + MaybeOwned::Borrowed(&mut cursor), + &access, + |c, a| c.first(a), + Cursor::next::<[u8], [u8]>, + )?; + let items = iter + .map(|r| r.map(|(k, v)| (k.to_vec(), v.to_vec()))) + .collect::<Result<Vec<_>, _>>()?; + + for (key, val) in items { + // let (key, val) = row?; + let val = deserialize::<V>(&val)?; + if let Some(ret) = f(val) { + let ret_bytes = serialize(&ret)?; + access.put(db, &key, &ret_bytes, put::Flags::empty()).map_err(|e| { + if let lmdb_zero::Error::Code(code) = &e { + if *code == lmdb_zero::error::MAP_FULL { + return ChainStorageError::DbResizeRequired; + } + } + error!( + target: LOG_TARGET, + "Could not replace value in lmdb transaction: {:?}", e + ); + ChainStorageError::AccessError(e.to_string()) + })?; + } + } + Ok(()) +} diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 7cbb3f1ac34..d3b7f9cc27e 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -313,6 +313,8 @@ impl LMDBDatabase { consensus_manager, }; + run_migrations(&db)?; + Ok(db) } @@ -2751,6 +2753,7 @@ enum MetadataKey { HorizonData, DeletedBitmap, BestBlockTimestamp, + MigrationVersion, } impl MetadataKey { @@ -2763,14 +2766,15 @@ impl fmt::Display for MetadataKey { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - MetadataKey::ChainHeight => f.write_str("Current chain height"), - MetadataKey::AccumulatedWork => f.write_str("Total accumulated work"), - MetadataKey::PruningHorizon => f.write_str("Pruning horizon"), - MetadataKey::PrunedHeight => f.write_str("Effective pruned height"), - MetadataKey::BestBlock => f.write_str("Chain tip block hash"), - MetadataKey::HorizonData => f.write_str("Database info"), - MetadataKey::DeletedBitmap => f.write_str("Deleted bitmap"), - MetadataKey::BestBlockTimestamp => f.write_str("Chain tip block timestamp"), +
MetadataKey::ChainHeight => write!(f, "Current chain height"), + MetadataKey::AccumulatedWork => write!(f, "Total accumulated work"), + MetadataKey::PruningHorizon => write!(f, "Pruning horizon"), + MetadataKey::PrunedHeight => write!(f, "Effective pruned height"), + MetadataKey::BestBlock => write!(f, "Chain tip block hash"), + MetadataKey::HorizonData => write!(f, "Database info"), + MetadataKey::DeletedBitmap => write!(f, "Deleted bitmap"), + MetadataKey::BestBlockTimestamp => write!(f, "Chain tip block timestamp"), + MetadataKey::MigrationVersion => write!(f, "Migration version"), } } } @@ -2786,6 +2790,7 @@ enum MetadataValue { HorizonData(HorizonData), DeletedBitmap(DeletedBitmap), BestBlockTimestamp(u64), + MigrationVersion(u64), } impl fmt::Display for MetadataValue { @@ -2801,6 +2806,7 @@ impl fmt::Display for MetadataValue { write!(f, "Deleted Bitmap ({} indexes)", deleted.bitmap().cardinality()) }, MetadataValue::BestBlockTimestamp(timestamp) => write!(f, "Chain tip block timestamp is {}", timestamp), + MetadataValue::MigrationVersion(n) => write!(f, "Migration version {}", n), } } } @@ -2867,3 +2873,110 @@ impl<'a, 'b> DeletedBitmapModel<'a, WriteTransaction<'b>> { Ok(()) } } + +fn run_migrations(db: &LMDBDatabase) -> Result<(), ChainStorageError> { + const MIGRATION_VERSION: u64 = 1; + let txn = db.read_transaction()?; + + let k = MetadataKey::MigrationVersion; + let val = lmdb_get::<_, MetadataValue>(&*txn, &db.metadata_db, &k.as_u32())?; + let n = match val { + Some(MetadataValue::MigrationVersion(n)) => n, + Some(_) | None => 0, + }; + info!( + target: LOG_TARGET, + "Blockchain database is at v{} (required version: {})", n, MIGRATION_VERSION + ); + drop(txn); + + if n < MIGRATION_VERSION { + tari_script_execution_stack_bug_migration::migrate(db)?; + info!(target: LOG_TARGET, "Migrated database to version {}", MIGRATION_VERSION); + let txn = db.write_transaction()?; + lmdb_replace( + &txn, + &db.metadata_db, + &k.as_u32(), + &MetadataValue::MigrationVersion(MIGRATION_VERSION), + )?; + txn.commit()?; + } + + Ok(()) +} + +// TODO: this is a temporary fix, remove +mod tari_script_execution_stack_bug_migration { + use serde::{Deserialize, Serialize}; + use tari_common_types::types::{ComSignature, PublicKey}; + use tari_crypto::ristretto::{pedersen::PedersenCommitment, RistrettoPublicKey, RistrettoSchnorr}; + use tari_script::{ExecutionStack, HashValue, ScalarValue, StackItem}; + + use super::*; + use crate::{ + chain_storage::lmdb_db::lmdb::lmdb_map_inplace, + transactions::transaction_components::{SpentOutput, TransactionInputVersion}, + }; + + pub fn migrate(db: &LMDBDatabase) -> Result<(), ChainStorageError> { + { + let txn = db.read_transaction()?; + // Only perform migration if necessary + if lmdb_len(&txn, &db.inputs_db)? 
== 0 { + return Ok(()); + } + } + unsafe { + LMDBStore::resize(&db.env, &LMDBConfig::new(0, 1024 * 1024 * 1024, 0))?; + } + let txn = db.write_transaction()?; + lmdb_map_inplace(&txn, &db.inputs_db, |mut v: TransactionInputRowDataV0| { + let mut items = Vec::with_capacity(v.input.input_data.items.len()); + while let Some(item) = v.input.input_data.items.pop() { + if let StackItemV0::Commitment(ref commitment) = item { + let pk = PublicKey::from_bytes(commitment.as_bytes()).unwrap(); + items.push(StackItem::PublicKey(pk)); + } else { + items.push(unsafe { mem::transmute(item) }); + } + } + let mut v = unsafe { mem::transmute::<_, TransactionInputRowData>(v) }; + v.input.input_data = ExecutionStack::new(items); + Some(v) + })?; + txn.commit()?; + Ok(()) + } + + #[derive(Debug, Serialize, Deserialize)] + pub(crate) struct TransactionInputRowDataV0 { + pub input: TransactionInputV0, + pub header_hash: HashOutput, + pub mmr_position: u32, + pub hash: HashOutput, + } + + #[derive(Debug, Serialize, Deserialize)] + pub struct TransactionInputV0 { + version: TransactionInputVersion, + spent_output: SpentOutput, + input_data: ExecutionStackV0, + script_signature: ComSignature, + } + + #[derive(Debug, Serialize, Deserialize)] + struct ExecutionStackV0 { + items: Vec<StackItemV0>, + } + + #[derive(Debug, Serialize, Deserialize)] + enum StackItemV0 { + Number(i64), + Hash(HashValue), + Scalar(ScalarValue), + Commitment(PedersenCommitment), + PublicKey(RistrettoPublicKey), + Signature(RistrettoSchnorr), + } +} diff --git a/base_layer/core/src/consensus/consensus_constants.rs b/base_layer/core/src/consensus/consensus_constants.rs index e4cbcf67083..1268fe139ef 100644 --- a/base_layer/core/src/consensus/consensus_constants.rs +++ b/base_layer/core/src/consensus/consensus_constants.rs @@ -517,30 +517,53 @@ impl ConsensusConstants { target_time: 200, }); let (input_version_range, output_version_range, kernel_version_range) = version_zero(); - vec![ConsensusConstants { - effective_from_height: 0, - // Todo fix after test - coinbase_lock_height: 6, - blockchain_version: 0, - valid_blockchain_version_range: 0..=0, - future_time_limit: 540, - difficulty_block_window: 90, - max_block_transaction_weight: 127_795, - median_timestamp_count: 11, - emission_initial: 18_462_816_327 * uT, - emission_decay: &ESMERALDA_DECAY_PARAMS, - emission_tail: 800 * T, - max_randomx_seed_height: 3000, - proof_of_work: algos, - faucet_value: (10 * 4000) * T, - transaction_weight: TransactionWeight::v1(), - max_script_byte_size: 2048, - input_version_range, - output_version_range, - kernel_version_range, - permitted_output_types: Self::current_permitted_output_types(), - validator_node_timeout: 50, - }] + vec![ + ConsensusConstants { + effective_from_height: 0, + coinbase_lock_height: 6, + blockchain_version: 0, + valid_blockchain_version_range: 0..=0, + future_time_limit: 540, + difficulty_block_window: 90, + max_block_transaction_weight: 127_795, + median_timestamp_count: 11, + emission_initial: 18_462_816_327 * uT, + emission_decay: &ESMERALDA_DECAY_PARAMS, + emission_tail: 800 * T, + max_randomx_seed_height: 3000, + proof_of_work: algos.clone(), + faucet_value: (10 * 4000) * T, + transaction_weight: TransactionWeight::v1(), + max_script_byte_size: 2048, + input_version_range: input_version_range.clone(), + output_version_range: output_version_range.clone(), + kernel_version_range: kernel_version_range.clone(), + permitted_output_types: Self::current_permitted_output_types(), + validator_node_timeout: 50, + }, + ConsensusConstants { + effective_from_height: 23000, +
coinbase_lock_height: 6, + blockchain_version: 1, + valid_blockchain_version_range: 0..=1, + future_time_limit: 540, + difficulty_block_window: 90, + max_block_transaction_weight: 127_795, + median_timestamp_count: 11, + emission_initial: 18_462_816_327 * uT, + emission_decay: &ESMERALDA_DECAY_PARAMS, + emission_tail: 800 * T, + max_randomx_seed_height: 3000, + proof_of_work: algos, + faucet_value: (10 * 4000) * T, + transaction_weight: TransactionWeight::v1(), + max_script_byte_size: 2048, + input_version_range, + output_version_range, + kernel_version_range, + permitted_output_types: Self::current_permitted_output_types(), + validator_node_timeout: 50, + }, + ] } pub fn mainnet() -> Vec<ConsensusConstants> { @@ -667,6 +690,11 @@ impl ConsensusConstantsBuilder { self } + pub fn with_blockchain_version(mut self, version: u16) -> Self { + self.consensus.blockchain_version = version; + self + } + pub fn build(self) -> ConsensusConstants { self.consensus } diff --git a/base_layer/core/src/consensus/consensus_encoding/script.rs b/base_layer/core/src/consensus/consensus_encoding/script.rs index 17e8aa7dcef..ea11a27c330 100644 --- a/base_layer/core/src/consensus/consensus_encoding/script.rs +++ b/base_layer/core/src/consensus/consensus_encoding/script.rs @@ -31,7 +31,7 @@ use crate::consensus::{ConsensusDecoding, ConsensusEncoding, ConsensusEncodingSi impl ConsensusEncoding for TariScript { fn consensus_encode<W: io::Write>(&self, writer: &mut W) -> Result<(), io::Error> { - self.as_bytes().consensus_encode(writer) + self.to_bytes().consensus_encode(writer) } } @@ -54,7 +54,7 @@ impl ConsensusDecoding for TariScript { impl ConsensusEncoding for ExecutionStack { fn consensus_encode<W: io::Write>(&self, writer: &mut W) -> Result<(), io::Error> { - self.as_bytes().consensus_encode(writer) + self.to_bytes().consensus_encode(writer) } } diff --git a/base_layer/core/src/proof_of_work/sha3_pow.rs b/base_layer/core/src/proof_of_work/sha3_pow.rs index 4b79c29fa6a..fe56685dd54 100644 --- a/base_layer/core/src/proof_of_work/sha3_pow.rs +++ b/base_layer/core/src/proof_of_work/sha3_pow.rs @@ -37,12 +37,19 @@ pub fn sha3_difficulty(header: &BlockHeader) -> Difficulty { } pub fn sha3_hash(header: &BlockHeader) -> Vec<u8> { - Sha3_256::new() - .chain(header.mining_hash()) - .chain(header.nonce.to_le_bytes()) - .chain(header.pow.to_bytes()) - .finalize() - .to_vec() + let sha = Sha3_256::new(); + match header.version { + 0 => sha + .chain(header.mining_hash()) + .chain(header.nonce.to_le_bytes()) + .chain(header.pow.to_bytes()), + _ => sha + .chain(header.nonce.to_le_bytes()) + .chain(header.mining_hash()) + .chain(header.pow.to_bytes()), + } + .finalize() + .to_vec() } fn sha3_difficulty_with_hash(header: &BlockHeader) -> (Difficulty, Vec<u8>) { diff --git a/base_layer/core/src/proto/transaction.rs b/base_layer/core/src/proto/transaction.rs index 700e7cb3d59..5a8b2beff3a 100644 --- a/base_layer/core/src/proto/transaction.rs +++ b/base_layer/core/src/proto/transaction.rs @@ -168,7 +168,7 @@ impl TryFrom<TransactionInput> for proto::types::TransactionInput { if input.is_compact() { let output_hash = input.output_hash(); Ok(Self { - input_data: input.input_data.as_bytes(), + input_data: input.input_data.to_bytes(), script_signature: Some(input.script_signature.into()), output_hash: output_hash.to_vec(), ..Default::default() }) @@ -192,8 +192,8 @@ impl TryFrom<TransactionInput> for proto::types::TransactionInput { script: input .script() .map_err(|_| "Non-compact Transaction input should contain script".to_string())?
- .as_bytes(), + .to_bytes(), - input_data: input.input_data.as_bytes(), + input_data: input.input_data.to_bytes(), script_signature: Some(input.script_signature.clone().into()), sender_offset_public_key: input .sender_offset_public_key() @@ -277,7 +277,7 @@ impl From<TransactionOutput> for proto::types::TransactionOutput { features: Some(output.features.into()), commitment: Some(output.commitment.into()), range_proof: output.proof.to_vec(), - script: output.script.as_bytes(), + script: output.script.to_bytes(), sender_offset_public_key: output.sender_offset_public_key.as_bytes().to_vec(), metadata_signature: Some(output.metadata_signature.into()), covenant: output.covenant.to_bytes(), diff --git a/base_layer/core/src/transactions/transaction_components/transaction_input.rs b/base_layer/core/src/transactions/transaction_components/transaction_input.rs index 9c3e664bdff..2a48ed400a6 100644 --- a/base_layer/core/src/transactions/transaction_components/transaction_input.rs +++ b/base_layer/core/src/transactions/transaction_components/transaction_input.rs @@ -270,9 +270,11 @@ impl TransactionInput { SpentOutput::OutputData { ref script, .. } => { match script.execute_with_context(&self.input_data, &context)? { StackItem::PublicKey(pubkey) => Ok(pubkey), - _ => Err(TransactionError::ScriptExecutionError( - "The script executed successfully but it did not leave a public key on the stack".to_string(), - )), + item => Err(TransactionError::ScriptExecutionError(format!( + "The script executed successfully but it did not leave a public key on the stack. Remaining \ + stack item was {:?}", + item + ))), } }, } diff --git a/base_layer/core/src/transactions/transaction_protocol/proto/transaction_sender.rs b/base_layer/core/src/transactions/transaction_protocol/proto/transaction_sender.rs index b3c4e91a34a..8820bc18bf4 100644 --- a/base_layer/core/src/transactions/transaction_protocol/proto/transaction_sender.rs +++ b/base_layer/core/src/transactions/transaction_protocol/proto/transaction_sender.rs @@ -138,7 +138,7 @@ impl From<SingleRoundSenderData> for proto::SingleRoundSenderData { metadata: Some(sender_data.metadata.into()), message: sender_data.message, features: Some(sender_data.features.into()), - script: sender_data.script.as_bytes(), + script: sender_data.script.to_bytes(), sender_offset_public_key: sender_data.sender_offset_public_key.to_vec(), public_commitment_nonce: sender_data.public_commitment_nonce.to_vec(), covenant: sender_data.covenant.to_consensus_bytes(), diff --git a/base_layer/core/tests/block_validation.rs b/base_layer/core/tests/block_validation.rs index 9659a99b559..01037db622a 100644 --- a/base_layer/core/tests/block_validation.rs +++ b/base_layer/core/tests/block_validation.rs @@ -102,6 +102,7 @@ fn test_monero_blocks() { max_difficulty: 1.into(), target_time: 200, }) + .with_blockchain_version(0) .build(); let cm = ConsensusManager::builder(network).add_consensus_constants(cc).build(); let header_validator = HeaderValidator::new(cm.clone()); diff --git a/base_layer/core/tests/chain_storage_tests/chain_backend.rs b/base_layer/core/tests/chain_storage_tests/chain_backend.rs index fcdc74b6d71..822c456eee3 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_backend.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_backend.rs @@ -33,7 +33,7 @@ use tari_test_utils::paths::create_temporary_data_path; use crate::helpers::database::create_orphan_block; #[test] -fn lmdb_insert_contains_delete_and_fetch_orphan() { +fn test_lmdb_insert_contains_delete_and_fetch_orphan() { let network = Network::LocalNet; let
consensus = ConsensusManagerBuilder::new(network).build(); let mut db = create_test_db(); @@ -63,7 +63,7 @@ fn lmdb_insert_contains_delete_and_fetch_orphan() { } #[test] -fn lmdb_file_lock() { +fn test_lmdb_file_lock() { // Create temporary test folder let temp_path = create_temporary_data_path(); diff --git a/base_layer/core/tests/chain_storage_tests/chain_storage.rs b/base_layer/core/tests/chain_storage_tests/chain_storage.rs index 4fd53d97580..a69c5a71f5a 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_storage.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_storage.rs @@ -75,7 +75,7 @@ use crate::helpers::{ }; #[test] -fn fetch_nonexistent_header() { +fn test_fetch_nonexistent_header() { let network = Network::LocalNet; let _consensus_manager = ConsensusManagerBuilder::new(network).build(); let store = create_test_blockchain_db(); @@ -84,7 +84,7 @@ fn fetch_nonexistent_header() { } #[test] -fn insert_and_fetch_header() { +fn test_insert_and_fetch_header() { let network = Network::LocalNet; let _consensus_manager = ConsensusManagerBuilder::new(network).build(); let store = create_test_blockchain_db(); @@ -110,7 +110,7 @@ fn insert_and_fetch_header() { } #[test] -fn insert_and_fetch_orphan() { +fn test_insert_and_fetch_orphan() { let network = Network::LocalNet; let consensus_manager = ConsensusManagerBuilder::new(network).build(); let store = create_test_blockchain_db(); @@ -127,7 +127,7 @@ fn insert_and_fetch_orphan() { } #[test] -fn store_and_retrieve_block() { +fn test_store_and_retrieve_block() { let (db, blocks, _, _) = create_new_blockchain(Network::LocalNet); let hash = blocks[0].hash(); // Check the metadata @@ -144,7 +144,7 @@ fn store_and_retrieve_block() { } #[test] -fn add_multiple_blocks() { +fn test_add_multiple_blocks() { // Create new database with genesis block let network = Network::LocalNet; let consensus_manager = ConsensusManagerBuilder::new(network).build(); @@ -201,7 +201,7 @@ fn test_checkpoints() { #[test] #[allow(clippy::identity_op)] -fn rewind_to_height() { +fn test_rewind_to_height() { let _ = env_logger::builder().is_test(true).try_init(); let network = Network::LocalNet; let (mut db, mut blocks, mut outputs, consensus_manager) = create_new_blockchain(network); @@ -277,7 +277,7 @@ fn test_coverage_chain_storage() { } #[test] -fn rewind_past_horizon_height() { +fn test_rewind_past_horizon_height() { let network = Network::LocalNet; let block0 = genesis_block::get_esmeralda_genesis_block(); let consensus_manager = ConsensusManagerBuilder::new(network).with_block(block0.clone()).build(); @@ -320,7 +320,7 @@ fn rewind_past_horizon_height() { } #[test] -fn handle_tip_reorg() { +fn test_handle_tip_reorg() { // GB --> A1 --> A2(Low PoW) [Main Chain] // \--> B2(Highest PoW) [Forked Chain] // Initially, the main chain is GB->A1->A2. B2 has a higher accumulated PoW and when B2 is added the main chain is @@ -388,7 +388,7 @@ fn handle_tip_reorg() { #[test] #[allow(clippy::identity_op)] #[allow(clippy::too_many_lines)] -fn handle_reorg() { +fn test_handle_reorg() { // GB --> A1 --> A2 --> A3 -----> A4(Low PoW) [Main Chain] // \--> B2 --> B3(?) 
--> B4(Medium PoW) [Forked Chain 1] // \-----> C4(Highest PoW) [Forked Chain 2] @@ -561,7 +561,7 @@ fn handle_reorg() { #[test] #[allow(clippy::too_many_lines)] -fn reorgs_should_update_orphan_tips() { +fn test_reorgs_should_update_orphan_tips() { // Create a main chain GB -> A1 -> A2 // Create an orphan chain GB -> B1 // Add a block B2 that forces a reorg to B2 @@ -810,7 +810,7 @@ fn reorgs_should_update_orphan_tips() { } #[test] -fn handle_reorg_with_no_removed_blocks() { +fn test_handle_reorg_with_no_removed_blocks() { // GB --> A1 // \--> B2 (?) --> B3) // Initially, the main chain is GB->A1 with orphaned blocks B3. When B2 arrives late and is @@ -883,7 +883,7 @@ fn handle_reorg_with_no_removed_blocks() { } #[test] -fn handle_reorg_failure_recovery() { +fn test_handle_reorg_failure_recovery() { // GB --> A1 --> A2 --> A3 -----> A4(Low PoW) [Main Chain] // \--> B2 --> B3(double spend - rejected by db) [Forked Chain 1] // \--> B2 --> B3'(validation failed) [Forked Chain 1] @@ -1002,7 +1002,7 @@ fn handle_reorg_failure_recovery() { } #[test] -fn store_and_retrieve_blocks() { +fn test_store_and_retrieve_blocks() { let validators = Validators::new( MockValidator::new(true), MockValidator::new(true), @@ -1064,7 +1064,7 @@ fn store_and_retrieve_blocks() { #[test] #[allow(clippy::identity_op)] -fn store_and_retrieve_blocks_from_contents() { +fn test_store_and_retrieve_blocks_from_contents() { let network = Network::LocalNet; let (mut db, mut blocks, mut outputs, consensus_manager) = create_new_blockchain(network); @@ -1102,7 +1102,7 @@ fn store_and_retrieve_blocks_from_contents() { } #[test] -fn restore_metadata_and_pruning_horizon_update() { +fn test_restore_metadata_and_pruning_horizon_update() { // Perform test let validators = Validators::new( MockValidator::new(true), @@ -1177,7 +1177,7 @@ fn restore_metadata_and_pruning_horizon_update() { } static EMISSION: [u64; 2] = [10, 10]; #[test] -fn invalid_block() { +fn test_invalid_block() { let factories = CryptoFactories::default(); let network = Network::LocalNet; let consensus_constants = ConsensusConstantsBuilder::new(network) @@ -1278,7 +1278,7 @@ fn invalid_block() { } #[test] -fn orphan_cleanup_on_block_add() { +fn test_orphan_cleanup_on_block_add() { let network = Network::LocalNet; let consensus_manager = ConsensusManagerBuilder::new(network).build(); let validators = Validators::new( @@ -1345,7 +1345,7 @@ fn orphan_cleanup_on_block_add() { } #[test] -fn horizon_height_orphan_cleanup() { +fn test_horizon_height_orphan_cleanup() { let network = Network::LocalNet; let block0 = genesis_block::get_esmeralda_genesis_block(); let consensus_manager = ConsensusManagerBuilder::new(network).with_block(block0.clone()).build(); @@ -1405,7 +1405,7 @@ fn horizon_height_orphan_cleanup() { #[test] #[allow(clippy::too_many_lines)] -fn orphan_cleanup_on_reorg() { +fn test_orphan_cleanup_on_reorg() { // Create Main Chain let network = Network::LocalNet; let factories = CryptoFactories::default(); @@ -1541,7 +1541,7 @@ fn orphan_cleanup_on_reorg() { } #[test] -fn orphan_cleanup_delete_all_orphans() { +fn test_orphan_cleanup_delete_all_orphans() { let path = create_temporary_data_path(); let network = Network::LocalNet; let validators = Validators::new( @@ -1646,7 +1646,7 @@ fn orphan_cleanup_delete_all_orphans() { } #[test] -fn fails_validation() { +fn test_fails_validation() { let network = Network::LocalNet; let factories = CryptoFactories::default(); let consensus_constants = ConsensusConstantsBuilder::new(network).build(); @@ -1757,8 +1757,7 @@ mod 
malleability { // This test highlights that the "version" field is not being included in the input hash // so a consensus change is needed for the input to include it #[test] - #[ignore] - fn version() { + fn test_version() { check_input_malleability(|block: &mut Block| { let input = &mut block.body.inputs_mut()[0]; let mod_version = match input.version { @@ -1770,7 +1769,7 @@ mod malleability { } #[test] - fn spent_output() { + fn test_spent_output() { check_input_malleability(|block: &mut Block| { // to modify the spent output, we will substitute it for a copy of a different output // we will use one of the outputs of the current transaction @@ -1791,7 +1790,7 @@ mod malleability { } #[test] - fn input_data() { + fn test_input_data() { check_input_malleability(|block: &mut Block| { block.body.inputs_mut()[0] .input_data @@ -1801,7 +1800,7 @@ mod malleability { } #[test] - fn script_signature() { + fn test_script_signature() { check_input_malleability(|block: &mut Block| { let input = &mut block.body.inputs_mut()[0]; input.script_signature = ComSignature::default(); @@ -1813,7 +1812,7 @@ mod malleability { use super::*; #[test] - fn version() { + fn test_version() { check_output_malleability(|block: &mut Block| { let output = &mut block.body.outputs_mut()[0]; let mod_version = match output.version { @@ -1825,7 +1824,7 @@ mod malleability { } #[test] - fn features() { + fn test_features() { check_output_malleability(|block: &mut Block| { let output = &mut block.body.outputs_mut()[0]; output.features.maturity += 1; @@ -1833,7 +1832,7 @@ mod malleability { } #[test] - fn commitment() { + fn test_commitment() { check_output_malleability(|block: &mut Block| { let output = &mut block.body.outputs_mut()[0]; let mod_commitment = &output.commitment + &output.commitment; @@ -1842,7 +1841,7 @@ mod malleability { } #[test] - fn proof() { + fn test_proof() { check_witness_malleability(|block: &mut Block| { let output = &mut block.body.outputs_mut()[0]; let mod_proof = RangeProof::from_hex(&(output.proof.to_hex() + "00")).unwrap(); @@ -1851,10 +1850,10 @@ mod malleability { } #[test] - fn script() { + fn test_script() { check_output_malleability(|block: &mut Block| { let output = &mut block.body.outputs_mut()[0]; - let mut script_bytes = output.script.as_bytes(); + let mut script_bytes = output.script.to_bytes(); Opcode::PushZero.to_bytes(&mut script_bytes); let mod_script = TariScript::from_bytes(&script_bytes).unwrap(); output.script = mod_script; @@ -1864,8 +1863,7 @@ mod malleability { // This test highlights that the "sender_offset_public_key" field is not being included in the output hash // so a consensus change is needed for the output to include it #[test] - #[ignore] - fn sender_offset_public_key() { + fn test_sender_offset_public_key() { check_output_malleability(|block: &mut Block| { let output = &mut block.body.outputs_mut()[0]; @@ -1876,7 +1874,7 @@ mod malleability { } #[test] - fn metadata_signature() { + fn test_metadata_signature() { check_witness_malleability(|block: &mut Block| { let output = &mut block.body.outputs_mut()[0]; output.metadata_signature = ComSignature::default(); @@ -1884,7 +1882,7 @@ mod malleability { } #[test] - fn covenant() { + fn test_covenant() { check_output_malleability(|block: &mut Block| { let output = &mut block.body.outputs_mut()[0]; let mod_covenant = covenant!(absolute_height(@uint(42))); @@ -1903,7 +1901,7 @@ mod malleability { // the "features" field has only a constant value at the moment, so no malleability test possible #[test] - fn fee() { + fn
test_fee() { check_kernel_malleability(|block: &mut Block| { let kernel = &mut block.body.kernels_mut()[0]; kernel.fee += MicroTari::from(1); @@ -1911,7 +1909,7 @@ mod malleability { } #[test] - fn lock_height() { + fn test_lock_height() { check_kernel_malleability(|block: &mut Block| { let kernel = &mut block.body.kernels_mut()[0]; kernel.lock_height += 1; @@ -1919,7 +1917,7 @@ mod malleability { } #[test] - fn excess() { + fn test_excess() { check_kernel_malleability(|block: &mut Block| { let kernel = &mut block.body.kernels_mut()[0]; let mod_excess = &kernel.excess + &kernel.excess; @@ -1928,7 +1926,7 @@ mod malleability { } #[test] - fn excess_sig() { + fn test_excess_sig() { check_kernel_malleability(|block: &mut Block| { let kernel = &mut block.body.kernels_mut()[0]; // "generate_keys" should return a group of random keys, different from the ones in the field @@ -1941,7 +1939,7 @@ #[allow(clippy::identity_op)] #[test] -fn fetch_deleted_position_block_hash() { +fn test_fetch_deleted_position_block_hash() { // Create Main Chain let network = Network::LocalNet; let (mut store, mut blocks, mut outputs, consensus_manager) = create_new_blockchain(network); diff --git a/base_layer/key_manager/Cargo.toml b/base_layer/key_manager/Cargo.toml index 75d69a695f4..843b4b3d500 100644 --- a/base_layer/key_manager/Cargo.toml +++ b/base_layer/key_manager/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "Tari cryptocurrency wallet key management" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2021" [lib] diff --git a/base_layer/mmr/Cargo.toml b/base_layer/mmr/Cargo.toml index 5774c5b1ead..7cadc4811f8 100644 --- a/base_layer/mmr/Cargo.toml +++ b/base_layer/mmr/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "A Merkle Mountain Range implementation" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [features] diff --git a/base_layer/p2p/Cargo.toml b/base_layer/p2p/Cargo.toml index 665991d18ab..d6c255d2f22 100644 --- a/base_layer/p2p/Cargo.toml +++ b/base_layer/p2p/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_p2p" -version = "0.38.5" +version = "0.38.7" authors = ["The Tari Development community"] description = "Tari base layer-specific peer-to-peer communication features" repository = "https://github.com/tari-project/tari" diff --git a/base_layer/p2p/src/services/liveness/service.rs b/base_layer/p2p/src/services/liveness/service.rs index def15f51168..5da92ad1004 100644 --- a/base_layer/p2p/src/services/liveness/service.rs +++ b/base_layer/p2p/src/services/liveness/service.rs @@ -161,7 +161,7 @@ where match ping_pong_msg.kind().ok_or(LivenessError::InvalidPingPongType)?
{ PingPong::Ping => { self.state.inc_pings_received(); - self.send_pong(ping_pong_msg.nonce, public_key).await.unwrap(); + self.send_pong(ping_pong_msg.nonce, public_key).await?; self.state.inc_pongs_sent(); debug!( diff --git a/base_layer/service_framework/Cargo.toml b/base_layer/service_framework/Cargo.toml index f70eb71d7a9..a210101ada2 100644 --- a/base_layer/service_framework/Cargo.toml +++ b/base_layer/service_framework/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_service_framework" -version = "0.38.5" +version = "0.38.7" authors = ["The Tari Development Community"] description = "The Tari communication stack service framework" repository = "https://github.com/tari-project/tari" diff --git a/base_layer/tari_mining_helper_ffi/Cargo.toml b/base_layer/tari_mining_helper_ffi/Cargo.toml index 53072f64875..847a26b6c32 100644 --- a/base_layer/tari_mining_helper_ffi/Cargo.toml +++ b/base_layer/tari_mining_helper_ffi/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_mining_helper_ffi" authors = ["The Tari Development Community"] description = "Tari cryptocurrency miningcore C FFI bindings" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/base_layer/wallet/Cargo.toml b/base_layer/wallet/Cargo.toml index cf624000042..1637d03106a 100644 --- a/base_layer/wallet/Cargo.toml +++ b/base_layer/wallet/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_wallet" authors = ["The Tari Development Community"] description = "Tari cryptocurrency wallet library" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs index 0710212f940..45968f67084 100644 --- a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs +++ b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs @@ -391,56 +391,54 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - match op { - WriteOperation::Insert(kvp) => self.insert(kvp, &conn)?, + let mut msg = "".to_string(); + let result = match op { + WriteOperation::Insert(kvp) => { + msg.push_str("Insert"); + self.insert(kvp, &conn)?; + Ok(None) + }, WriteOperation::Remove(k) => match k { DbKey::AnyOutputByCommitment(commitment) => { - // Used by coinbase when mining. - match OutputSql::find_by_commitment(&commitment.to_vec(), &conn) { - Ok(mut o) => { - o.delete(&conn)?; - self.decrypt_if_necessary(&mut o)?; - if start.elapsed().as_millis() > 0 { - trace!( - target: LOG_TARGET, - "sqlite profile - write Remove: lock {} + db_op {} = {} ms", - acquire_lock.as_millis(), - (start.elapsed() - acquire_lock).as_millis(), - start.elapsed().as_millis() - ); - } - return Ok(Some(DbValue::AnyOutput(Box::new(DbUnblindedOutput::try_from(o)?)))); - }, - Err(e) => { - match e { - OutputManagerStorageError::DieselError(DieselError::NotFound) => (), - e => return Err(e), - }; - }, - } + conn.transaction::<_, _, _>(|| { + msg.push_str("Remove"); + // Used by coinbase when mining. 
+ match OutputSql::find_by_commitment(&commitment.to_vec(), &conn) { + Ok(mut o) => { + o.delete(&conn)?; + self.decrypt_if_necessary(&mut o)?; + Ok(Some(DbValue::AnyOutput(Box::new(DbUnblindedOutput::try_from(o)?)))) + }, + Err(e) => match e { + OutputManagerStorageError::DieselError(DieselError::NotFound) => Ok(None), + e => Err(e), + }, + } + }) }, - DbKey::SpentOutput(_s) => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::UnspentOutputHash(_h) => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::UnspentOutput(_k) => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::UnspentOutputs => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::SpentOutputs => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::InvalidOutputs => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::TimeLockedUnspentOutputs(_) => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::KnownOneSidedPaymentScripts => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::OutputsByTxIdAndStatus(_, _) => return Err(OutputManagerStorageError::OperationNotSupported), + DbKey::SpentOutput(_s) => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::UnspentOutputHash(_h) => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::UnspentOutput(_k) => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::UnspentOutputs => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::SpentOutputs => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::InvalidOutputs => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::TimeLockedUnspentOutputs(_) => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::KnownOneSidedPaymentScripts => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::OutputsByTxIdAndStatus(_, _) => Err(OutputManagerStorageError::OperationNotSupported), }, - } + }; if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, - "sqlite profile - write Insert: lock {} + db_op {} = {} ms", + "sqlite profile - write {}: lock {} + db_op {} = {} ms", + msg, acquire_lock.as_millis(), (start.elapsed() - acquire_lock).as_millis(), start.elapsed().as_millis() ); } - Ok(None) + result } fn fetch_pending_incoming_outputs(&self) -> Result<Vec<DbUnblindedOutput>, OutputManagerStorageError> { @@ -852,50 +850,55 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let outputs = OutputSql::find_by_tx_id_and_encumbered(tx_id, &conn)?; + conn.transaction::<_, _, _>(|| { + let outputs = OutputSql::find_by_tx_id_and_encumbered(tx_id, &conn)?; - if outputs.is_empty() { - return Err(OutputManagerStorageError::ValueNotFound); - } + if outputs.is_empty() { + return Err(OutputManagerStorageError::ValueNotFound); + } - for output in &outputs { - if output.received_in_tx_id == Some(tx_id.as_i64_wrapped()) { - info!( - target: LOG_TARGET, - "Cancelling pending inbound output with Commitment: {} - MMR Position: {:?} from TxId: {}", - output.commitment.as_ref().unwrap_or(&vec![]).to_hex(), - output.mined_mmr_position, - tx_id - ); - output.update( - UpdateOutput { - status: Some(OutputStatus::CancelledInbound), - ..Default::default() - }, - &conn, - )?; - } else if output.spent_in_tx_id == Some(tx_id.as_i64_wrapped()) { - info!( - target: LOG_TARGET, - "Cancelling pending outbound output with Commitment: {} - MMR Position: {:?}
from TxId: {}", - output.commitment.as_ref().unwrap_or(&vec![]).to_hex(), - output.mined_mmr_position, - tx_id - ); - output.update( - UpdateOutput { - status: Some(OutputStatus::Unspent), - spent_in_tx_id: Some(None), - // We clear these so that the output will be revalidated the next time a validation is done. - mined_height: Some(None), - mined_in_block: Some(None), - ..Default::default() - }, - &conn, - )?; - } else { + for output in &outputs { + if output.received_in_tx_id == Some(tx_id.as_i64_wrapped()) { + info!( + target: LOG_TARGET, + "Cancelling pending inbound output with Commitment: {} - MMR Position: {:?} from TxId: {}", + output.commitment.as_ref().unwrap_or(&vec![]).to_hex(), + output.mined_mmr_position, + tx_id + ); + output.update( + UpdateOutput { + status: Some(OutputStatus::CancelledInbound), + ..Default::default() + }, + &conn, + )?; + } else if output.spent_in_tx_id == Some(tx_id.as_i64_wrapped()) { + info!( + target: LOG_TARGET, + "Cancelling pending outbound output with Commitment: {} - MMR Position: {:?} from TxId: {}", + output.commitment.as_ref().unwrap_or(&vec![]).to_hex(), + output.mined_mmr_position, + tx_id + ); + output.update( + UpdateOutput { + status: Some(OutputStatus::Unspent), + spent_in_tx_id: Some(None), + // We clear these so that the output will be revalidated the next time a validation is done. + mined_height: Some(None), + mined_in_block: Some(None), + ..Default::default() + }, + &conn, + )?; + } else { + } } - } + + Ok(()) + })?; + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -915,17 +918,22 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let db_output = OutputSql::find_by_commitment_and_cancelled(&output.commitment.to_vec(), false, &conn)?; - db_output.update( - // Note: Only the `nonce` and `u` portion needs to be updated at this time as the `v` portion is already - // correct - UpdateOutput { - metadata_signature_nonce: Some(output.metadata_signature.public_nonce().to_vec()), - metadata_signature_u_key: Some(output.metadata_signature.u().to_vec()), - ..Default::default() - }, - &conn, - )?; + + conn.transaction::<_, OutputManagerStorageError, _>(|| { + let db_output = OutputSql::find_by_commitment_and_cancelled(&output.commitment.to_vec(), false, &conn)?; + db_output.update( + // Note: Only the `nonce` and `u` portion needs to be updated at this time as the `v` portion is + // already correct + UpdateOutput { + metadata_signature_nonce: Some(output.metadata_signature.public_nonce().to_vec()), + metadata_signature_u_key: Some(output.metadata_signature.u().to_vec()), + ..Default::default() + }, + &conn, + )?; + + Ok(()) + })?; if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -943,18 +951,23 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let output = OutputSql::find_by_commitment_and_cancelled(&commitment.to_vec(), false, &conn)?; - if OutputStatus::try_from(output.status)? 
!= OutputStatus::Invalid { - return Err(OutputManagerStorageError::ValuesNotFound); - } - output.update( - UpdateOutput { - status: Some(OutputStatus::Unspent), - ..Default::default() - }, - &conn, - )?; + conn.transaction::<_, _, _>(|| { + let output = OutputSql::find_by_commitment_and_cancelled(&commitment.to_vec(), false, &conn)?; + + if OutputStatus::try_from(output.status)? != OutputStatus::Invalid { + return Err(OutputManagerStorageError::ValuesNotFound); + } + output.update( + UpdateOutput { + status: Some(OutputStatus::Unspent), + ..Default::default() + }, + &conn, + )?; + + Ok(()) + })?; if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -1417,8 +1430,8 @@ impl From<KnownOneSidedPaymentScript> for KnownOneSidedPaymentScriptSql { let script_lock_height = known_script.script_lock_height as i64; let script_hash = known_script.script_hash; let private_key = known_script.private_key.as_bytes().to_vec(); - let script = known_script.script.as_bytes().to_vec(); - let input = known_script.input.as_bytes().to_vec(); + let script = known_script.script.to_bytes().to_vec(); + let input = known_script.input.to_bytes().to_vec(); KnownOneSidedPaymentScriptSql { script_hash, private_key, diff --git a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/new_output_sql.rs b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/new_output_sql.rs index d3d2561ee85..2878e54a6ce 100644 --- a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/new_output_sql.rs +++ b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/new_output_sql.rs @@ -83,8 +83,8 @@ impl NewOutputSql { status: status as i32, received_in_tx_id: received_in_tx_id.map(|i| i.as_u64() as i64), hash: Some(output.hash.to_vec()), - script: output.unblinded_output.script.as_bytes(), - input_data: output.unblinded_output.input_data.as_bytes(), + script: output.unblinded_output.script.to_bytes(), + input_data: output.unblinded_output.input_data.to_bytes(), script_private_key: output.unblinded_output.script_private_key.to_vec(), metadata: Some(output.unblinded_output.features.metadata.clone()), sender_offset_public_key: output.unblinded_output.sender_offset_public_key.to_vec(), diff --git a/base_layer/wallet/src/transaction_service/storage/database.rs b/base_layer/wallet/src/transaction_service/storage/database.rs index f018ba30887..8a7eec41005 100644 --- a/base_layer/wallet/src/transaction_service/storage/database.rs +++ b/base_layer/wallet/src/transaction_service/storage/database.rs @@ -117,7 +117,7 @@ pub trait TransactionBackend: Send + Sync + Clone { /// Mark a pending transaction direct send attempt as a success fn mark_direct_send_success(&self, tx_id: TxId) -> Result<(), TransactionStorageError>; /// Cancel coinbase transactions at a specific block height - fn cancel_coinbase_transaction_at_block_height(&self, block_height: u64) -> Result<(), TransactionStorageError>; + fn cancel_coinbase_transactions_at_block_height(&self, block_height: u64) -> Result<(), TransactionStorageError>; /// Find coinbase transaction at a specific block height for a given amount fn find_coinbase_transaction_at_block_height( &self, @@ -693,7 +693,7 @@ where T: TransactionBackend + 'static &self, block_height: u64, ) -> Result<(), TransactionStorageError> { - self.db.cancel_coinbase_transaction_at_block_height(block_height) + self.db.cancel_coinbase_transactions_at_block_height(block_height) } pub fn find_coinbase_transaction_at_block_height( diff --git a/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs
b/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs index 92a101cadc4..7d244ca8176 100644 --- a/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs +++ b/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs @@ -123,44 +123,50 @@ impl TransactionServiceSqliteDatabase { fn remove(&self, key: DbKey, conn: &SqliteConnection) -> Result<Option<DbValue>, TransactionStorageError> { match key { - DbKey::PendingOutboundTransaction(k) => match OutboundTransactionSql::find_by_cancelled(k, false, conn) { - Ok(mut v) => { - v.delete(conn)?; - self.decrypt_if_necessary(&mut v)?; - Ok(Some(DbValue::PendingOutboundTransaction(Box::new( - OutboundTransaction::try_from(v)?, - )))) - }, - Err(TransactionStorageError::DieselError(DieselError::NotFound)) => Err( - TransactionStorageError::ValueNotFound(DbKey::PendingOutboundTransaction(k)), - ), - Err(e) => Err(e), + DbKey::PendingOutboundTransaction(k) => { + conn.transaction::<_, _, _>(|| match OutboundTransactionSql::find_by_cancelled(k, false, conn) { + Ok(mut v) => { + v.delete(conn)?; + self.decrypt_if_necessary(&mut v)?; + Ok(Some(DbValue::PendingOutboundTransaction(Box::new( + OutboundTransaction::try_from(v)?, + )))) + }, + Err(TransactionStorageError::DieselError(DieselError::NotFound)) => Err( + TransactionStorageError::ValueNotFound(DbKey::PendingOutboundTransaction(k)), + ), + Err(e) => Err(e), + }) }, - DbKey::PendingInboundTransaction(k) => match InboundTransactionSql::find_by_cancelled(k, false, conn) { - Ok(mut v) => { - v.delete(conn)?; - self.decrypt_if_necessary(&mut v)?; - Ok(Some(DbValue::PendingInboundTransaction(Box::new( - InboundTransaction::try_from(v)?, - )))) - }, - Err(TransactionStorageError::DieselError(DieselError::NotFound)) => Err( - TransactionStorageError::ValueNotFound(DbKey::PendingOutboundTransaction(k)), - ), - Err(e) => Err(e), + DbKey::PendingInboundTransaction(k) => { + conn.transaction::<_, _, _>(|| match InboundTransactionSql::find_by_cancelled(k, false, conn) { + Ok(mut v) => { + v.delete(conn)?; + self.decrypt_if_necessary(&mut v)?; + Ok(Some(DbValue::PendingInboundTransaction(Box::new( + InboundTransaction::try_from(v)?, + )))) + }, + Err(TransactionStorageError::DieselError(DieselError::NotFound)) => Err( + TransactionStorageError::ValueNotFound(DbKey::PendingInboundTransaction(k)), + ), + Err(e) => Err(e), + }) }, - DbKey::CompletedTransaction(k) => match CompletedTransactionSql::find_by_cancelled(k, false, conn) { - Ok(mut v) => { - v.delete(conn)?; - self.decrypt_if_necessary(&mut v)?; - Ok(Some(DbValue::CompletedTransaction(Box::new( - CompletedTransaction::try_from(v)?, - )))) - }, - Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { - Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction(k))) - }, - Err(e) => Err(e), + DbKey::CompletedTransaction(k) => { + conn.transaction::<_, _, _>(|| match CompletedTransactionSql::find_by_cancelled(k, false, conn) { + Ok(mut v) => { + v.delete(conn)?; + self.decrypt_if_necessary(&mut v)?; + Ok(Some(DbValue::CompletedTransaction(Box::new( + CompletedTransaction::try_from(v)?, + )))) + }, + Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { + Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction(k))) + }, + Err(e) => Err(e), + }) }, DbKey::PendingOutboundTransactions => Err(TransactionStorageError::OperationNotSupported), DbKey::PendingInboundTransactions => Err(TransactionStorageError::OperationNotSupported), @@ -169,7 +175,7 @@ impl TransactionServiceSqliteDatabase {
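The recurring change in these wallet-storage hunks is that a multi-step read/write sequence now runs inside `conn.transaction`, so a failure at any step rolls the whole operation back instead of leaving a half-removed row. A minimal sketch of that pattern, assuming diesel 1.x and an illustrative `txs` table rather than the wallet's real schema:

```rust
// Sketch of the transactional find-then-delete used above, assuming diesel 1.x
// and an illustrative `txs` table (not the wallet's real schema).
use diesel::{prelude::*, result::Error as DieselError, SqliteConnection};

diesel::table! {
    txs (tx_id) {
        tx_id -> BigInt,
        cancelled -> Integer,
    }
}

fn remove_atomically(conn: &SqliteConnection, id: i64) -> Result<(), DieselError> {
    // Everything in the closure runs in one SQL transaction: an Err return
    // triggers a ROLLBACK, so the row is never left half-removed.
    conn.transaction::<_, DieselError, _>(|| {
        let found: i64 = txs::table
            .filter(txs::tx_id.eq(id))
            .select(txs::tx_id)
            .first(conn)?; // DieselError::NotFound if the row is absent
        diesel::delete(txs::table.filter(txs::tx_id.eq(found))).execute(conn)?;
        Ok(())
    })
}
```

SQLite serializes writers anyway, but the explicit transaction keeps the SELECT and DELETE atomic with respect to other pooled connections, which is exactly the race these hunks close.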
DbKey::CancelledPendingInboundTransactions => Err(TransactionStorageError::OperationNotSupported), DbKey::CancelledCompletedTransactions => Err(TransactionStorageError::OperationNotSupported), DbKey::CancelledPendingOutboundTransaction(k) => { - match OutboundTransactionSql::find_by_cancelled(k, true, conn) { + conn.transaction::<_, _, _>(|| match OutboundTransactionSql::find_by_cancelled(k, true, conn) { Ok(mut v) => { v.delete(conn)?; self.decrypt_if_necessary(&mut v)?; @@ -181,10 +187,10 @@ impl TransactionServiceSqliteDatabase { TransactionStorageError::ValueNotFound(DbKey::CancelledPendingOutboundTransaction(k)), ), Err(e) => Err(e), - } + }) }, DbKey::CancelledPendingInboundTransaction(k) => { - match InboundTransactionSql::find_by_cancelled(k, true, conn) { + conn.transaction::<_, _, _>(|| match InboundTransactionSql::find_by_cancelled(k, true, conn) { Ok(mut v) => { v.delete(conn)?; self.decrypt_if_necessary(&mut v)?; @@ -196,7 +202,7 @@ impl TransactionServiceSqliteDatabase { TransactionStorageError::ValueNotFound(DbKey::CancelledPendingOutboundTransaction(k)), ), Err(e) => Err(e), - } + }) }, DbKey::AnyTransaction(_) => Err(TransactionStorageError::OperationNotSupported), } @@ -579,20 +585,22 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { return Err(TransactionStorageError::TransactionAlreadyExists); } - match OutboundTransactionSql::find_by_cancelled(tx_id, false, &conn) { - Ok(v) => { - let mut completed_tx_sql = CompletedTransactionSql::try_from(completed_transaction)?; - self.encrypt_if_necessary(&mut completed_tx_sql)?; - v.delete(&conn)?; - completed_tx_sql.commit(&conn)?; - }, - Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { - return Err(TransactionStorageError::ValueNotFound( - DbKey::PendingOutboundTransaction(tx_id), - )) - }, - Err(e) => return Err(e), - }; + let mut completed_tx_sql = CompletedTransactionSql::try_from(completed_transaction)?; + self.encrypt_if_necessary(&mut completed_tx_sql)?; + + conn.transaction::<_, _, _>(|| { + match OutboundTransactionSql::complete_outbound_transaction(tx_id, &conn) { + Ok(_) => completed_tx_sql.commit(&conn)?, + Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { + return Err(TransactionStorageError::ValueNotFound( + DbKey::PendingOutboundTransaction(tx_id), + )) + }, + Err(e) => return Err(e), + } + + Ok(()) + })?; if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -618,20 +626,22 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { return Err(TransactionStorageError::TransactionAlreadyExists); } - match InboundTransactionSql::find_by_cancelled(tx_id, false, &conn) { - Ok(v) => { - let mut completed_tx_sql = CompletedTransactionSql::try_from(completed_transaction)?; - self.encrypt_if_necessary(&mut completed_tx_sql)?; - v.delete(&conn)?; - completed_tx_sql.commit(&conn)?; - }, - Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { - return Err(TransactionStorageError::ValueNotFound( - DbKey::PendingInboundTransaction(tx_id), - )) - }, - Err(e) => return Err(e), - }; + let mut completed_tx_sql = CompletedTransactionSql::try_from(completed_transaction)?; + self.encrypt_if_necessary(&mut completed_tx_sql)?; + + conn.transaction::<_, _, _>(|| { + match InboundTransactionSql::complete_inbound_transaction(tx_id, &conn) { + Ok(_) => completed_tx_sql.commit(&conn)?, + Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { + return Err(TransactionStorageError::ValueNotFound( + 
DbKey::PendingInboundTransaction(tx_id), + )) + }, + Err(e) => return Err(e), + }; + + Ok(()) + })?; if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -649,25 +659,32 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - match CompletedTransactionSql::find_by_cancelled(tx_id, false, &conn) { - Ok(v) => { - if TransactionStatus::try_from(v.status)? == TransactionStatus::Completed { - v.update( - UpdateCompletedTransactionSql { - status: Some(TransactionStatus::Broadcast as i32), - ..Default::default() - }, - &conn, - )?; - } - }, - Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { - return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( - tx_id, - ))) - }, - Err(e) => return Err(e), - }; + conn.transaction::<_, _, _>(|| { + match CompletedTransactionSql::find_by_cancelled(tx_id, false, &conn) { + Ok(v) => { + // Note: Because this status check does not error when the statuses do not match, it is + // inefficient to combine the 'find' and 'update' queries. + if TransactionStatus::try_from(v.status)? == TransactionStatus::Completed { + v.update( + UpdateCompletedTransactionSql { + status: Some(TransactionStatus::Broadcast as i32), + ..Default::default() + }, + &conn, + )?; + } + }, + Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { + return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( + tx_id, + ))) + }, + Err(e) => return Err(e), + } + + Ok(()) + })?; + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -688,17 +705,15 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - match CompletedTransactionSql::find_by_cancelled(tx_id, false, &conn) { - Ok(v) => { - v.reject(reason, &conn)?; - }, + match CompletedTransactionSql::reject_completed_transaction(tx_id, reason, &conn) { + Ok(_) => {}, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, ))); }, Err(e) => return Err(e), - }; + } if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -719,22 +734,20 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - match InboundTransactionSql::find(tx_id, &conn) { - Ok(v) => { - v.set_cancelled(cancelled, &conn)?; - }, + + match InboundTransactionSql::find_and_set_cancelled(tx_id, cancelled, &conn) { + Ok(_) => {}, Err(_) => { - match OutboundTransactionSql::find(tx_id, &conn) { - Ok(v) => { - v.set_cancelled(cancelled, &conn)?; - }, + match OutboundTransactionSql::find_and_set_cancelled(tx_id, cancelled, &conn) { + Ok(_) => {}, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValuesNotFound); }, Err(e) => return Err(e), }; }, - }; + } + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -751,33 +764,12 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - match InboundTransactionSql::find_by_cancelled(tx_id, false, &conn) { - Ok(v) => { - v.update( -
UpdateInboundTransactionSql { - cancelled: None, - direct_send_success: Some(1i32), - receiver_protocol: None, - send_count: None, - last_send_timestamp: None, - }, - &conn, - )?; - }, + + match InboundTransactionSql::mark_direct_send_success(tx_id, &conn) { + Ok(_) => {}, Err(_) => { - match OutboundTransactionSql::find_by_cancelled(tx_id, false, &conn) { - Ok(v) => { - v.update( - UpdateOutboundTransactionSql { - cancelled: None, - direct_send_success: Some(1i32), - sender_protocol: None, - send_count: None, - last_send_timestamp: None, - }, - &conn, - )?; - }, + match OutboundTransactionSql::mark_direct_send_success(tx_id, &conn) { + Ok(_) => {}, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValuesNotFound); }, @@ -785,6 +777,7 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { }; }, }; + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -808,55 +801,68 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let mut inbound_txs = InboundTransactionSql::index(&conn)?; - // If the db is already encrypted then the very first output we try to encrypt will fail. - for tx in &mut inbound_txs { - // Test if this transaction is encrypted or not to avoid a double encryption. - let _inbound_transaction = InboundTransaction::try_from(tx.clone()).map_err(|_| { - error!( - target: LOG_TARGET, - "Could not convert Inbound Transaction from database version, it might already be encrypted" - ); - TransactionStorageError::AlreadyEncrypted - })?; - tx.encrypt(&cipher) - .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; - tx.update_encryption(&conn)?; - } + conn.transaction::<_, TransactionStorageError, _>(|| { + let mut inbound_txs = InboundTransactionSql::index(&conn)?; + // If the db is already encrypted then the very first output we try to encrypt will fail. + for tx in &mut inbound_txs { + // Test if this transaction is encrypted or not to avoid a double encryption. + let _inbound_transaction = InboundTransaction::try_from(tx.clone()).map_err(|_| { + error!( + target: LOG_TARGET, + "Could not convert Inbound Transaction from database version, it might already be encrypted" + ); + TransactionStorageError::AlreadyEncrypted + })?; + tx.encrypt(&cipher) + .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; + tx.update_encryption(&conn)?; + } - let mut outbound_txs = OutboundTransactionSql::index(&conn)?; - // If the db is already encrypted then the very first output we try to encrypt will fail. - for tx in &mut outbound_txs { - // Test if this transaction is encrypted or not to avoid a double encryption. - let _outbound_transaction = OutboundTransaction::try_from(tx.clone()).map_err(|_| { - error!( - target: LOG_TARGET, - "Could not convert Inbound Transaction from database version, it might already be encrypted" - ); - TransactionStorageError::AlreadyEncrypted - })?; - tx.encrypt(&cipher) - .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; - tx.update_encryption(&conn)?; - } + Ok(()) + })?; + + conn.transaction::<_, TransactionStorageError, _>(|| { + let mut outbound_txs = OutboundTransactionSql::index(&conn)?; + // If the db is already encrypted then the very first output we try to encrypt will fail. 
+ for tx in &mut outbound_txs { + // Test if this transaction is encrypted or not to avoid a double encryption. + let _outbound_transaction = OutboundTransaction::try_from(tx.clone()).map_err(|_| { + error!( + target: LOG_TARGET, + "Could not convert Outbound Transaction from database version, it might already be encrypted" + ); + TransactionStorageError::AlreadyEncrypted + })?; + tx.encrypt(&cipher) + .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; + tx.update_encryption(&conn)?; + } - let mut completed_txs = CompletedTransactionSql::index(&conn)?; - // If the db is already encrypted then the very first output we try to encrypt will fail. - for tx in &mut completed_txs { - // Test if this transaction is encrypted or not to avoid a double encryption. - let _completed_transaction = CompletedTransaction::try_from(tx.clone()).map_err(|_| { - error!( - target: LOG_TARGET, - "Could not convert Inbound Transaction from database version, it might already be encrypted" - ); - TransactionStorageError::AlreadyEncrypted - })?; - tx.encrypt(&cipher) - .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; - tx.update_encryption(&conn)?; - } + + Ok(()) + })?; + + conn.transaction::<_, TransactionStorageError, _>(|| { + let mut completed_txs = CompletedTransactionSql::index(&conn)?; + // If the db is already encrypted then the very first output we try to encrypt will fail. + for tx in &mut completed_txs { + // Test if this transaction is encrypted or not to avoid a double encryption. + let _completed_transaction = CompletedTransaction::try_from(tx.clone()).map_err(|_| { + error!( + target: LOG_TARGET, + "Could not convert Completed Transaction from database version, it might already be encrypted" + ); + TransactionStorageError::AlreadyEncrypted + })?; + tx.encrypt(&cipher) + .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; + tx.update_encryption(&conn)?; + } + + Ok(()) + })?; (*current_cipher) = Some(cipher); + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -882,31 +888,44 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let mut inbound_txs = InboundTransactionSql::index(&conn)?; + conn.transaction::<_, TransactionStorageError, _>(|| { + let mut inbound_txs = InboundTransactionSql::index(&conn)?; - for tx in &mut inbound_txs { - tx.decrypt(&cipher) - .map_err(|_| TransactionStorageError::AeadError("Decryption Error".to_string()))?; - tx.update_encryption(&conn)?; - } + for tx in &mut inbound_txs { + tx.decrypt(&cipher) + .map_err(|_| TransactionStorageError::AeadError("Decryption Error".to_string()))?; + tx.update_encryption(&conn)?; + } - let mut outbound_txs = OutboundTransactionSql::index(&conn)?; + Ok(()) + })?; - for tx in &mut outbound_txs { - tx.decrypt(&cipher) - .map_err(|_| TransactionStorageError::AeadError("Decryption Error".to_string()))?; - tx.update_encryption(&conn)?; - } + conn.transaction::<_, TransactionStorageError, _>(|| { + let mut outbound_txs = OutboundTransactionSql::index(&conn)?; - let mut completed_txs = CompletedTransactionSql::index(&conn)?; - for tx in &mut completed_txs { - tx.decrypt(&cipher) - .map_err(|_| TransactionStorageError::AeadError("Decryption
Error".to_string()))?; + tx.update_encryption(&conn)?; + } + + Ok(()) + })?; + + conn.transaction::<_, TransactionStorageError, _>(|| { + let mut completed_txs = CompletedTransactionSql::index(&conn)?; + for tx in &mut completed_txs { + tx.decrypt(&cipher) + .map_err(|_| TransactionStorageError::AeadError("Decryption Error".to_string()))?; + tx.update_encryption(&conn)?; + } + + Ok(()) + })?; // Now that all the decryption has been completed we can safely remove the cipher fully std::mem::drop((*current_cipher).take()); + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -920,15 +939,16 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { Ok(()) } - fn cancel_coinbase_transaction_at_block_height(&self, block_height: u64) -> Result<(), TransactionStorageError> { + fn cancel_coinbase_transactions_at_block_height(&self, block_height: u64) -> Result<(), TransactionStorageError> { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let coinbase_txs = CompletedTransactionSql::index_coinbase_at_block_height(block_height as i64, &conn)?; - for c in &coinbase_txs { - c.reject(TxCancellationReason::AbandonedCoinbase, &conn)?; - } + CompletedTransactionSql::reject_coinbases_at_block_height( + block_height as i64, + TxCancellationReason::AbandonedCoinbase, + &conn, + )?; if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -977,34 +997,13 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - if let Ok(tx) = CompletedTransactionSql::find(tx_id, &conn) { - let update = UpdateCompletedTransactionSql { - send_count: Some(tx.send_count + 1), - last_send_timestamp: Some(Some(Utc::now().naive_utc())), - ..Default::default() - }; - tx.update(update, &conn)?; - } else if let Ok(tx) = OutboundTransactionSql::find(tx_id, &conn) { - let update = UpdateOutboundTransactionSql { - cancelled: None, - direct_send_success: None, - sender_protocol: None, - send_count: Some(tx.send_count + 1), - last_send_timestamp: Some(Some(Utc::now().naive_utc())), - }; - tx.update(update, &conn)?; - } else if let Ok(tx) = InboundTransactionSql::find_by_cancelled(tx_id, false, &conn) { - let update = UpdateInboundTransactionSql { - cancelled: None, - direct_send_success: None, - receiver_protocol: None, - send_count: Some(tx.send_count + 1), - last_send_timestamp: Some(Some(Utc::now().naive_utc())), - }; - tx.update(update, &conn)?; - } else { + if CompletedTransactionSql::increment_send_count(tx_id, &conn).is_err() && + OutboundTransactionSql::increment_send_count(tx_id, &conn).is_err() && + InboundTransactionSql::increment_send_count(tx_id, &conn).is_err() + { return Err(TransactionStorageError::ValuesNotFound); } + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -1031,25 +1030,36 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - match CompletedTransactionSql::find(tx_id, &conn) { - Ok(v) => { - v.update_mined_height( - mined_height, - mined_in_block, - mined_timestamp, - num_confirmations, - is_confirmed, - &conn, - is_faux, - )?; - }, + let status = if is_confirmed { + if is_faux { + TransactionStatus::FauxConfirmed + } else { + TransactionStatus::MinedConfirmed + } + } else if is_faux { + TransactionStatus::FauxUnconfirmed + } else { + 
TransactionStatus::MinedUnconfirmed + }; + + match CompletedTransactionSql::update_mined_height( + tx_id, + num_confirmations, + status, + mined_height, + mined_in_block, + mined_timestamp, + &conn, + ) { + Ok(_) => {}, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, ))); }, Err(e) => return Err(e), - }; + } + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -1186,17 +1196,15 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - match CompletedTransactionSql::find(tx_id, &conn) { - Ok(v) => { - v.set_as_unmined(&conn)?; - }, + match CompletedTransactionSql::set_as_unmined(tx_id, &conn) { + Ok(_) => {}, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, ))); }, Err(e) => return Err(e), - }; + } if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -1285,10 +1293,8 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { fn abandon_coinbase_transaction(&self, tx_id: TxId) -> Result<(), TransactionStorageError> { let conn = self.database_connection.get_pooled_connection()?; - match CompletedTransactionSql::find_by_cancelled(tx_id, false, &conn) { - Ok(tx) => { - tx.abandon_coinbase(&conn)?; - }, + match CompletedTransactionSql::find_and_abandon_coinbase(tx_id, &conn) { + Ok(_) => {}, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, @@ -1390,6 +1396,68 @@ impl InboundTransactionSql { .first::<InboundTransactionSql>(conn)?) } + pub fn mark_direct_send_success(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + diesel::update( + inbound_transactions::table + .filter(inbound_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(inbound_transactions::cancelled.eq(i32::from(false))), + ) + .set(UpdateInboundTransactionSql { + cancelled: None, + direct_send_success: Some(1i32), + receiver_protocol: None, + send_count: None, + last_send_timestamp: None, + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + + pub fn complete_inbound_transaction(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + diesel::delete( + inbound_transactions::table + .filter(inbound_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(inbound_transactions::cancelled.eq(i32::from(false))), + ) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + + pub fn increment_send_count(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + diesel::update( + inbound_transactions::table + .filter(inbound_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(inbound_transactions::cancelled.eq(i32::from(false))), + ) + .set(UpdateInboundTransactionSql { + cancelled: None, + direct_send_success: None, + receiver_protocol: None, + send_count: Some( + if let Some(value) = inbound_transactions::table + .filter(inbound_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(inbound_transactions::cancelled.eq(i32::from(false))) + .select(inbound_transactions::send_count) + .load::<i32>(conn)?
+ .first() + { + value + 1 + } else { + return Err(TransactionStorageError::DieselError(DieselError::NotFound)); + }, + ), + last_send_timestamp: Some(Some(Utc::now().naive_utc())), + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + pub fn delete(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { let num_deleted = diesel::delete(inbound_transactions::table.filter(inbound_transactions::tx_id.eq(&self.tx_id))) @@ -1421,17 +1489,23 @@ impl InboundTransactionSql { Ok(()) } - pub fn set_cancelled(&self, cancelled: bool, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { - self.update( - UpdateInboundTransactionSql { + pub fn find_and_set_cancelled( + tx_id: TxId, + cancelled: bool, + conn: &SqliteConnection, + ) -> Result<(), TransactionStorageError> { + diesel::update(inbound_transactions::table.filter(inbound_transactions::tx_id.eq(tx_id.as_u64() as i64))) + .set(UpdateInboundTransactionSql { cancelled: Some(i32::from(cancelled)), direct_send_success: None, receiver_protocol: None, send_count: None, last_send_timestamp: None, - }, - conn, - ) + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) } pub fn update_encryption(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { @@ -1589,6 +1663,63 @@ impl OutboundTransactionSql { .first::<OutboundTransactionSql>(conn)?) } + pub fn mark_direct_send_success(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + diesel::update( + outbound_transactions::table + .filter(outbound_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(outbound_transactions::cancelled.eq(i32::from(false))), + ) + .set(UpdateOutboundTransactionSql { + cancelled: None, + direct_send_success: Some(1i32), + sender_protocol: None, + send_count: None, + last_send_timestamp: None, + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + + pub fn complete_outbound_transaction(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + diesel::delete( + outbound_transactions::table + .filter(outbound_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(outbound_transactions::cancelled.eq(i32::from(false))), + ) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + + pub fn increment_send_count(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + diesel::update(outbound_transactions::table.filter(outbound_transactions::tx_id.eq(tx_id.as_u64() as i64))) + .set(UpdateOutboundTransactionSql { + cancelled: None, + direct_send_success: None, + sender_protocol: None, + send_count: Some( + if let Some(value) = outbound_transactions::table + .filter(outbound_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .select(outbound_transactions::send_count) + .load::<i32>(conn)?
+ .first() + { + value + 1 + } else { + return Err(TransactionStorageError::DieselError(DieselError::NotFound)); + }, + ), + last_send_timestamp: Some(Some(Utc::now().naive_utc())), + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + pub fn delete(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { diesel::delete(outbound_transactions::table.filter(outbound_transactions::tx_id.eq(&self.tx_id))) .execute(conn) @@ -1609,17 +1740,23 @@ impl OutboundTransactionSql { Ok(()) } - pub fn set_cancelled(&self, cancelled: bool, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { - self.update( - UpdateOutboundTransactionSql { + pub fn find_and_set_cancelled( + tx_id: TxId, + cancelled: bool, + conn: &SqliteConnection, + ) -> Result<(), TransactionStorageError> { + diesel::update(outbound_transactions::table.filter(outbound_transactions::tx_id.eq(tx_id.as_u64() as i64))) + .set(UpdateOutboundTransactionSql { cancelled: Some(i32::from(cancelled)), direct_send_success: None, sender_protocol: None, send_count: None, last_send_timestamp: None, - }, - conn, - ) + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) } pub fn update_encryption(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { @@ -1823,6 +1960,23 @@ impl CompletedTransactionSql { .load::<CompletedTransactionSql>(conn)?) } + pub fn find_and_abandon_coinbase(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + let _ = diesel::update( + completed_transactions::table + .filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(completed_transactions::cancelled.is_null()) + .filter(completed_transactions::coinbase_block_height.is_not_null()), + ) + .set(UpdateCompletedTransactionSql { + cancelled: Some(Some(TxCancellationReason::AbandonedCoinbase as i32)), + ..Default::default() + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + pub fn find(tx_id: TxId, conn: &SqliteConnection) -> Result<CompletedTransactionSql, TransactionStorageError> { Ok(completed_transactions::table .filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64)) @@ -1847,6 +2001,70 @@ impl CompletedTransactionSql { Ok(query.first::<CompletedTransactionSql>(conn)?) } + pub fn reject_completed_transaction( + tx_id: TxId, + reason: TxCancellationReason, + conn: &SqliteConnection, + ) -> Result<(), TransactionStorageError> { + diesel::update( + completed_transactions::table + .filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(completed_transactions::cancelled.is_null()), + ) + .set(UpdateCompletedTransactionSql { + cancelled: Some(Some(reason as i32)), + status: Some(TransactionStatus::Rejected as i32), + ..Default::default() + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + + pub fn increment_send_count(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + // This query uses a sub-query to retrieve an existing value in the table + diesel::update(completed_transactions::table.filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64))) + .set(UpdateCompletedTransactionSql { + send_count: Some( + if let Some(value) = completed_transactions::table + .filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .select(completed_transactions::send_count) + .load::<i32>(conn)?
+ .first() + { + value + 1 + } else { + return Err(TransactionStorageError::DieselError(DieselError::NotFound)); + }, + ), + last_send_timestamp: Some(Some(Utc::now().naive_utc())), + ..Default::default() + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + + pub fn reject_coinbases_at_block_height( + block_height: i64, + reason: TxCancellationReason, + conn: &SqliteConnection, + ) -> Result<usize, TransactionStorageError> { + Ok(diesel::update( + completed_transactions::table + .filter(completed_transactions::status.eq(TransactionStatus::Coinbase as i32)) + .filter(completed_transactions::coinbase_block_height.eq(block_height)), + ) + .set(UpdateCompletedTransactionSql { + cancelled: Some(Some(reason as i32)), + status: Some(TransactionStatus::Rejected as i32), + ..Default::default() + }) + .execute(conn)?) + } + pub fn delete(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { let num_deleted = diesel::delete(completed_transactions::table.filter(completed_transactions::tx_id.eq(&self.tx_id))) @@ -1871,58 +2089,70 @@ impl CompletedTransactionSql { Ok(()) } - pub fn reject(&self, reason: TxCancellationReason, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { - self.update( - UpdateCompletedTransactionSql { - cancelled: Some(Some(reason as i32)), - status: Some(TransactionStatus::Rejected as i32), - ..Default::default() - }, - conn, - )?; - - Ok(()) - } - - pub fn abandon_coinbase(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { - if self.coinbase_block_height.is_none() { - return Err(TransactionStorageError::NotCoinbase); - } - - self.update( - UpdateCompletedTransactionSql { - cancelled: Some(Some(TxCancellationReason::AbandonedCoinbase as i32)), + pub fn update_mined_height( + tx_id: TxId, + num_confirmations: u64, + status: TransactionStatus, + mined_height: u64, + mined_in_block: BlockHash, + mined_timestamp: u64, + conn: &SqliteConnection, + ) -> Result<(), TransactionStorageError> { + diesel::update(completed_transactions::table.filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64))) + .set(UpdateCompletedTransactionSql { + confirmations: Some(Some(num_confirmations as i64)), + status: Some(status as i32), + mined_height: Some(Some(mined_height as i64)), + mined_in_block: Some(Some(mined_in_block.to_vec())), + mined_timestamp: Some(NaiveDateTime::from_timestamp(mined_timestamp as i64, 0)), + // If the tx is mined, then it can't be cancelled + cancelled: None, ..Default::default() - }, - conn, - )?; + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; Ok(()) } - pub fn set_as_unmined(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { - let status = if self.coinbase_block_height.is_some() { - Some(TransactionStatus::Coinbase as i32) - } else if self.status == TransactionStatus::FauxConfirmed as i32 { - Some(TransactionStatus::FauxUnconfirmed as i32) - } else if self.status == TransactionStatus::Broadcast as i32 { - Some(TransactionStatus::Broadcast as i32) - } else { - Some(TransactionStatus::Completed as i32) - }; - - self.update( - UpdateCompletedTransactionSql { - status, + pub fn set_as_unmined(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + // This query uses two sub-queries to retrieve existing values in the table + diesel::update(completed_transactions::table.filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64))) + .set(UpdateCompletedTransactionSql { + status: { + if let Some(Some(_coinbase_block_height)) = completed_transactions::table +
.filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .select(completed_transactions::coinbase_block_height) + .load::<Option<i64>>(conn)? + .first() + { + Some(TransactionStatus::Coinbase as i32) + } else if let Some(status) = completed_transactions::table + .filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .select(completed_transactions::status) + .load::<i32>(conn)? + .first() + { + if *status == TransactionStatus::FauxConfirmed as i32 { + Some(TransactionStatus::FauxUnconfirmed as i32) + } else if *status == TransactionStatus::Broadcast as i32 { + Some(TransactionStatus::Broadcast as i32) + } else { + Some(TransactionStatus::Completed as i32) + } + } else { + return Err(TransactionStorageError::DieselError(DieselError::NotFound)); + } + }, mined_in_block: Some(None), mined_height: Some(None), confirmations: Some(None), // Turns out it should not be cancelled cancelled: Some(None), ..Default::default() - }, - conn, - )?; + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; // Ideally the outputs should be marked unmined here as well, but because of the separation of classes, // that will be done in the outputs service. @@ -1941,45 +2171,6 @@ impl CompletedTransactionSql { Ok(()) } - - pub fn update_mined_height( - &self, - mined_height: u64, - mined_in_block: BlockHash, - mined_timestamp: u64, - num_confirmations: u64, - is_confirmed: bool, - conn: &SqliteConnection, - is_faux: bool, - ) -> Result<(), TransactionStorageError> { - let status = if is_confirmed { - if is_faux { - TransactionStatus::FauxConfirmed as i32 - } else { - TransactionStatus::MinedConfirmed as i32 - } - } else if is_faux { - TransactionStatus::FauxUnconfirmed as i32 - } else { - TransactionStatus::MinedUnconfirmed as i32 - }; - - self.update( - UpdateCompletedTransactionSql { - confirmations: Some(Some(num_confirmations as i64)), - status: Some(status), - mined_height: Some(Some(mined_height as i64)), - mined_in_block: Some(Some(mined_in_block.to_vec())), - mined_timestamp: Some(NaiveDateTime::from_timestamp(mined_timestamp as i64, 0)), - // If the tx is mined, then it can't be cancelled - cancelled: None, - ..Default::default() - }, - conn, - )?; - - Ok(()) - } } impl Encryptable<Aes256Gcm> for CompletedTransactionSql { @@ -2240,6 +2431,7 @@ mod test { InboundTransactionSql, OutboundTransactionSql, TransactionServiceSqliteDatabase, + UpdateCompletedTransactionSql, }, }, util::encryption::Encryptable, @@ -2517,16 +2709,10 @@ mod test { .unwrap(); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, true, &conn).is_err()); - InboundTransactionSql::try_from(inbound_tx1.clone()) - .unwrap() - .set_cancelled(true, &conn) - .unwrap(); + InboundTransactionSql::find_and_set_cancelled(inbound_tx1.tx_id, true, &conn).unwrap(); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, false, &conn).is_err()); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, true, &conn).is_ok()); - InboundTransactionSql::try_from(inbound_tx1.clone()) - .unwrap() - .set_cancelled(false, &conn) - .unwrap(); + InboundTransactionSql::find_and_set_cancelled(inbound_tx1.tx_id, false, &conn).unwrap(); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, true, &conn).is_err()); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, false, &conn).is_ok()); OutboundTransactionSql::try_from(outbound_tx1.clone()) @@ -2535,16 +2721,10 @@ mod test { .unwrap(); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, true, &conn).is_err()); -
OutboundTransactionSql::try_from(outbound_tx1.clone()) - .unwrap() - .set_cancelled(true, &conn) - .unwrap(); + OutboundTransactionSql::find_and_set_cancelled(outbound_tx1.tx_id, true, &conn).unwrap(); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, false, &conn).is_err()); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, true, &conn).is_ok()); - OutboundTransactionSql::try_from(outbound_tx1.clone()) - .unwrap() - .set_cancelled(false, &conn) - .unwrap(); + OutboundTransactionSql::find_and_set_cancelled(outbound_tx1.tx_id, false, &conn).unwrap(); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, true, &conn).is_err()); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, false, &conn).is_ok()); @@ -2556,7 +2736,14 @@ mod test { assert!(CompletedTransactionSql::find_by_cancelled(completed_tx1.tx_id, true, &conn).is_err()); CompletedTransactionSql::try_from(completed_tx1.clone()) .unwrap() - .reject(TxCancellationReason::Unknown, &conn) + .update( + UpdateCompletedTransactionSql { + cancelled: Some(Some(TxCancellationReason::Unknown as i32)), + status: Some(TransactionStatus::Rejected as i32), + ..Default::default() + }, + &conn, + ) .unwrap(); assert!(CompletedTransactionSql::find_by_cancelled(completed_tx1.tx_id, false, &conn).is_err()); assert!(CompletedTransactionSql::find_by_cancelled(completed_tx1.tx_id, true, &conn).is_ok()); diff --git a/base_layer/wallet_ffi/Cargo.toml b/base_layer/wallet_ffi/Cargo.toml index 66bc653af30..1ce077c8bcf 100644 --- a/base_layer/wallet_ffi/Cargo.toml +++ b/base_layer/wallet_ffi/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_wallet_ffi" authors = ["The Tari Development Community"] description = "Tari cryptocurrency wallet C FFI bindings" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/changelog.md b/changelog.md index 1c8d231881b..16b56371eff 100644 --- a/changelog.md +++ b/changelog.md @@ -2,6 +2,54 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. 
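The tests above now exercise `find_and_set_cancelled` directly, which captures the broader refactor in this diff: read-modify-write pairs become a single UPDATE whose affected-row count doubles as the existence check. A sketch of that style, reusing the illustrative `txs` table from the earlier sketch; Tari's `num_rows_affected_or_not_found` helper is emulated here with a plain count check:

```rust
// Single-statement cancel toggle; one SQL round trip instead of SELECT+UPDATE.
use diesel::{prelude::*, result::Error as DieselError, SqliteConnection};

fn find_and_set_cancelled(conn: &SqliteConnection, id: i64, cancelled: bool) -> Result<(), DieselError> {
    // The WHERE clause performs the "find"; the affected row count tells us
    // whether the transaction existed at all.
    let n = diesel::update(txs::table.filter(txs::tx_id.eq(id)))
        .set(txs::cancelled.eq(i32::from(cancelled)))
        .execute(conn)?;
    if n == 0 {
        return Err(DieselError::NotFound);
    }
    Ok(())
}
```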
+### [0.38.7](https://github.com/tari-project/tari/compare/v0.38.6...v0.38.7) (2022-10-11) + + +### Bug Fixes + +* **core:** only resize db if migration is required ([#4792](https://github.com/tari-project/tari/issues/4792)) ([4811a57](https://github.com/tari-project/tari/commit/4811a5772665af4e3b9007ccadedfc651e1d232e)) +* **miner:** clippy error ([#4793](https://github.com/tari-project/tari/issues/4793)) ([734db22](https://github.com/tari-project/tari/commit/734db22bbdd36b5371aa9c70f4342bb0d3c2f3a4)) + +### [0.38.6](https://github.com/tari-project/tari/compare/v0.38.5...v0.38.6) (2022-10-11) + + +### Features + +* **base-node:** add client connection count to status line ([#4774](https://github.com/tari-project/tari/issues/4774)) ([8339b1d](https://github.com/tari-project/tari/commit/8339b1de1bace96671d8eba0cf309adb9f78014a)) +* move nonce to first in sha hash ([#4778](https://github.com/tari-project/tari/issues/4778)) ([054a314](https://github.com/tari-project/tari/commit/054a314f015ab7a3f1e571f3ee0c7a58ad0ebb5a)) +* remove dalek ng ([#4769](https://github.com/tari-project/tari/issues/4769)) ([953b0b7](https://github.com/tari-project/tari/commit/953b0b7cfc371467e7d15e933e79c8d07712f666)) + + +### Bug Fixes + +* batch rewind operations ([#4752](https://github.com/tari-project/tari/issues/4752)) ([79d3c47](https://github.com/tari-project/tari/commit/79d3c47a86bc37be0117b33c869f9e04df068384)) +* **ci:** fix client path for nodejs ([#4765](https://github.com/tari-project/tari/issues/4765)) ([c7b5e68](https://github.com/tari-project/tari/commit/c7b5e68b400c79040f2dd92ee1cc779224e463ee)) +* **dht:** remove some invalid saf failure cases ([#4787](https://github.com/tari-project/tari/issues/4787)) ([86b4d94](https://github.com/tari-project/tari/commit/86b4d9437f87cb31ed922ff7a7dc73e7fe29eb69)) +* fix config.toml bug ([#4780](https://github.com/tari-project/tari/issues/4780)) ([f6043c1](https://github.com/tari-project/tari/commit/f6043c1f03f33a34e2612516ffca8a589e319001)) +* **p2p/liveness:** remove fallible unwrap ([#4784](https://github.com/tari-project/tari/issues/4784)) ([e59be99](https://github.com/tari-project/tari/commit/e59be99401fc4b50f1b4f5a6a16948959e5c56a1)) +* **tari-script:** use tari script encoding for execution stack serde de/serialization ([#4791](https://github.com/tari-project/tari/issues/4791)) ([c62f7eb](https://github.com/tari-project/tari/commit/c62f7eb6c5b6b4336c7351bd89cb3a700fde1bb2)) + ### [0.38.5](https://github.com/tari-project/tari/compare/v0.38.4...v0.38.5) (2022-10-03) diff --git a/common/Cargo.toml b/common/Cargo.toml index 61350b5cb9e..9bdb4ee1d83 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [features] diff --git a/common/config/presets/c_base_node.toml b/common/config/presets/c_base_node.toml index 26a5ae09c10..bb2dad874eb 100644 --- a/common/config/presets/c_base_node.toml +++ b/common/config/presets/c_base_node.toml @@ -118,7 +118,7 @@ track_reorgs = true # The maximum number of transactions to sync in a single sync session. Default: 10_000 #service.initial_sync_max_transactions = 10_000 # The maximum number of blocks added via sync or re-org before triggering a sync -#block_sync_trigger = 5 +#service.block_sync_trigger = 5 [base_node.state_machine] # The initial max sync latency.
If a peer fails to stream a header/block within this deadline another sync peer will be diff --git a/common_sqlite/Cargo.toml b/common_sqlite/Cargo.toml index 0101cba1bf0..55213455e90 100644 --- a/common_sqlite/Cargo.toml +++ b/common_sqlite/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_common_sqlite" authors = ["The Tari Development Community"] description = "Tari cryptocurrency wallet library" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/comms/core/Cargo.toml b/comms/core/Cargo.toml index bfab6a6a11b..c1684e6b47b 100644 --- a/comms/core/Cargo.toml +++ b/comms/core/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/comms/core/src/protocol/rpc/server/error.rs b/comms/core/src/protocol/rpc/server/error.rs index ea3458b4e5e..a829ff60359 100644 --- a/comms/core/src/protocol/rpc/server/error.rs +++ b/comms/core/src/protocol/rpc/server/error.rs @@ -60,8 +60,17 @@ pub enum RpcServerError { ServiceCallExceededDeadline, #[error("Stream read exceeded deadline")] ReadStreamExceededDeadline, - #[error("Early close error: {0}")] - EarlyCloseError(#[from] EarlyCloseError), + #[error("Early close: {0}")] + EarlyClose(#[from] EarlyCloseError), +} + +impl RpcServerError { + pub fn early_close_io(&self) -> Option<&io::Error> { + match self { + Self::EarlyClose(e) => e.io(), + _ => None, + } + } } impl From for RpcServerError { diff --git a/comms/core/src/protocol/rpc/server/mod.rs b/comms/core/src/protocol/rpc/server/mod.rs index 6690e314184..a05a40de4fc 100644 --- a/comms/core/src/protocol/rpc/server/mod.rs +++ b/comms/core/src/protocol/rpc/server/mod.rs @@ -44,6 +44,7 @@ use std::{ convert::TryFrom, future::Future, io, + io::ErrorKind, pin::Pin, sync::Arc, task::Poll, @@ -353,7 +354,7 @@ where { Ok(_) => {}, Err(err @ RpcServerError::HandshakeError(_)) => { - debug!(target: LOG_TARGET, "{}", err); + debug!(target: LOG_TARGET, "Handshake error: {}", err); metrics::handshake_error_counter(&node_id, ¬ification.protocol).inc(); }, Err(err) => { @@ -530,7 +531,7 @@ where metrics::error_counter(&self.node_id, &self.protocol, &err).inc(); let level = match &err { RpcServerError::Io(e) => err_to_log_level(e), - RpcServerError::EarlyCloseError(e) => e.io().map(err_to_log_level).unwrap_or(log::Level::Error), + RpcServerError::EarlyClose(e) => e.io().map(err_to_log_level).unwrap_or(log::Level::Error), _ => log::Level::Error, }; log!( @@ -562,8 +563,10 @@ where err, ); } - error!( + let level = err.early_close_io().map(err_to_log_level).unwrap_or(log::Level::Error); + log!( target: LOG_TARGET, + level, "(peer: {}, protocol: {}) Failed to handle request: {}", self.node_id, self.protocol_name(), @@ -880,8 +883,13 @@ fn into_response(request_id: u32, result: Result) -> RpcRe } fn err_to_log_level(err: &io::Error) -> log::Level { + error!(target: LOG_TARGET, "KIND: {}", err.kind()); match err.kind() { - io::ErrorKind::BrokenPipe | io::ErrorKind::WriteZero => log::Level::Debug, + ErrorKind::ConnectionReset | + ErrorKind::ConnectionAborted | + ErrorKind::BrokenPipe | + ErrorKind::WriteZero | + ErrorKind::UnexpectedEof => log::Level::Debug, _ => log::Level::Error, } } diff --git a/comms/dht/Cargo.toml b/comms/dht/Cargo.toml index b644c51565c..08bd5c0d883 100644 --- a/comms/dht/Cargo.toml +++ 
b/comms/dht/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_comms_dht" -version = "0.38.5" +version = "0.38.7" authors = ["The Tari Development Community"] description = "Tari comms DHT module" repository = "https://github.com/tari-project/tari" diff --git a/comms/dht/src/dht.rs b/comms/dht/src/dht.rs index e3acdae98a8..3fef7d43a89 100644 --- a/comms/dht/src/dht.rs +++ b/comms/dht/src/dht.rs @@ -31,6 +31,7 @@ use tari_comms::{ pipeline::PipelineError, }; use tari_shutdown::ShutdownSignal; +use tari_utilities::epoch_time::EpochTime; use thiserror::Error; use tokio::sync::{broadcast, mpsc}; use tower::{layer::Layer, Service, ServiceBuilder}; @@ -298,6 +299,7 @@ impl Dht { .layer(MetricsLayer::new(self.metrics_collector.clone())) .layer(inbound::DeserializeLayer::new(self.peer_manager.clone())) .layer(filter::FilterLayer::new(self.unsupported_saf_messages_filter())) + .layer(filter::FilterLayer::new(discard_expired_messages)) .layer(inbound::DecryptionLayer::new( self.config.clone(), self.node_identity.clone(), @@ -432,6 +434,20 @@ fn filter_messages_to_rebroadcast(msg: &DecryptedDhtMessage) -> bool { } } +/// Check message expiry and immediately discard if expired +fn discard_expired_messages(msg: &DhtInboundMessage) -> bool { + if let Some(expires) = msg.dht_header.expires { + if expires < EpochTime::now() { + debug!( + target: LOG_TARGET, + "[discard_expired_messages] Discarding expired message {}", msg + ); + return false; + } + } + true +} + #[cfg(test)] mod test { use std::{sync::Arc, time::Duration}; diff --git a/comms/dht/src/envelope.rs b/comms/dht/src/envelope.rs index 3f4f2ef06ee..6ac881cb80d 100644 --- a/comms/dht/src/envelope.rs +++ b/comms/dht/src/envelope.rs @@ -43,7 +43,7 @@ use crate::version::DhtProtocolVersion; pub(crate) fn datetime_to_timestamp(datetime: DateTime<Utc>) -> Timestamp { Timestamp { seconds: datetime.timestamp(), - nanos: datetime.timestamp_subsec_nanos().try_into().unwrap_or(std::i32::MAX), + nanos: datetime.timestamp_subsec_nanos().try_into().unwrap_or(i32::MAX), } } diff --git a/comms/dht/src/store_forward/database/stored_message.rs b/comms/dht/src/store_forward/database/stored_message.rs index b8d095d9019..1913b5be02b 100644 --- a/comms/dht/src/store_forward/database/stored_message.rs +++ b/comms/dht/src/store_forward/database/stored_message.rs @@ -20,8 +20,6 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
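The new `discard_expired_messages` predicate above rejects a message at the filter layer, before any decryption or storage work is spent on it. A stand-alone analogue, substituting std time for `tari_utilities::epoch_time::EpochTime` (the `Header` type here is a stand-in, not the real DHT header):

```rust
use std::time::{SystemTime, UNIX_EPOCH};

// Stand-in for the DHT header; `expires` is seconds since the Unix epoch.
struct Header {
    expires: Option<u64>,
}

// Returning false drops the message at the filter layer, mirroring the
// semantics of the FilterLayer predicate added in the hunk above.
fn discard_expired(header: &Header) -> bool {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock set before the Unix epoch")
        .as_secs();
    header.expires.map_or(true, |expires| expires >= now)
}
```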
-use std::convert::TryInto; - use chrono::NaiveDateTime; use tari_comms::message::MessageExt; use tari_utilities::{hex, hex::Hex}; @@ -50,7 +48,7 @@ pub struct NewStoredMessage { } impl NewStoredMessage { - pub fn try_construct(message: DecryptedDhtMessage, priority: StoredMessagePriority) -> Option<Self> { + pub fn new(message: DecryptedDhtMessage, priority: StoredMessagePriority) -> Self { let DecryptedDhtMessage { authenticated_origin, decryption_result, @@ -64,8 +62,8 @@ impl NewStoredMessage { }; let body_hash = hex::to_hex(&dedup::create_message_hash(&dht_header.message_signature, &body)); - Some(Self { - version: dht_header.version.as_major().try_into().ok()?, + Self { + version: dht_header.version.as_major() as i32, origin_pubkey: authenticated_origin.as_ref().map(|pk| pk.to_hex()), message_type: dht_header.message_type as i32, destination_pubkey: dht_header.destination.public_key().map(|pk| pk.to_hex()), @@ -81,7 +79,7 @@ impl NewStoredMessage { }, body_hash, body, - }) + } } } diff --git a/comms/dht/src/store_forward/error.rs b/comms/dht/src/store_forward/error.rs index 4a71b410eb1..85fd5678c21 100644 --- a/comms/dht/src/store_forward/error.rs +++ b/comms/dht/src/store_forward/error.rs @@ -27,7 +27,7 @@ use tari_comms::{ message::MessageError, peer_manager::{NodeId, PeerManagerError}, }; -use tari_utilities::byte_array::ByteArrayError; +use tari_utilities::{byte_array::ByteArrayError, epoch_time::EpochTime}; use thiserror::Error; use crate::{ @@ -81,10 +81,10 @@ pub enum StoreAndForwardError { RequesterChannelClosed, #[error("The request was cancelled by the store and forward service")] RequestCancelled, - #[error("The message was not valid for store and forward")] - InvalidStoreMessage, - #[error("The envelope version is invalid")] - InvalidEnvelopeVersion, + #[error("The {field} field was not valid, discarding SAF response: {details}")] + InvalidSafResponseMessage { field: &'static str, details: String }, + #[error("The message has expired, not storing message in SAF db (expiry: {expired}, now: {now})")] + NotStoringExpiredMessage { expired: EpochTime, now: EpochTime }, #[error("MalformedNodeId: {0}")] MalformedNodeId(#[from] ByteArrayError), #[error("DHT message type should not have been forwarded")] diff --git a/comms/dht/src/store_forward/message.rs b/comms/dht/src/store_forward/message.rs index f753b9941ba..f74af32c613 100644 --- a/comms/dht/src/store_forward/message.rs +++ b/comms/dht/src/store_forward/message.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
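The reworked `StoreAndForwardError` variants above carry structured fields (the offending field name, the expiry times) instead of a bare message, so the rendered error is diagnostic on its own. A minimal sketch of that style with `thiserror`; the enum and field types below are illustrative, not the full Tari error type:

```rust
use thiserror::Error;

#[derive(Debug, Error)]
enum SafError {
    #[error("The {field} field was not valid, discarding SAF response: {details}")]
    InvalidResponse { field: &'static str, details: String },
    #[error("The message has expired (expiry: {expired}, now: {now})")]
    Expired { expired: u64, now: u64 },
}

fn main() {
    let err = SafError::InvalidResponse {
        field: "stored_at",
        details: "timestamp out of range".to_string(),
    };
    // thiserror derives Display from the #[error] format strings, so the
    // diagnostic names the offending field without any manual formatting.
    println!("{}", err);
}
```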
-use std::convert::{TryFrom, TryInto}; +use std::convert::TryFrom; use chrono::{DateTime, Utc}; use prost::Message; @@ -76,10 +76,7 @@ impl TryFrom for StoredMessage { let dht_header = DhtHeader::decode(message.header.as_slice())?; Ok(Self { stored_at: Some(datetime_to_timestamp(DateTime::from_utc(message.stored_at, Utc))), - version: message - .version - .try_into() - .map_err(|_| StoreAndForwardError::InvalidEnvelopeVersion)?, + version: message.version as u32, body: message.body, dht_header: Some(dht_header), }) diff --git a/comms/dht/src/store_forward/saf_handler/task.rs b/comms/dht/src/store_forward/saf_handler/task.rs index 7f5390d3827..4bce651e68f 100644 --- a/comms/dht/src/store_forward/saf_handler/task.rs +++ b/comms/dht/src/store_forward/saf_handler/task.rs @@ -36,7 +36,7 @@ use tari_comms::{ types::CommsPublicKey, BytesMut, }; -use tari_utilities::{convert::try_convert_all, ByteArray}; +use tari_utilities::ByteArray; use tokio::sync::mpsc; use tower::{Service, ServiceExt}; @@ -216,7 +216,7 @@ where S: Service let messages = self.saf_requester.fetch_messages(query.clone()).await?; let stored_messages = StoredMessagesResponse { - messages: try_convert_all(messages)?, + messages: messages.into_iter().map(TryInto::try_into).collect::<Result<_, _>>()?, request_id: retrieve_msgs.request_id, response_type: resp_type as i32, }; @@ -430,8 +430,13 @@ where S: Service .stored_at .map(|t| { Result::<_, StoreAndForwardError>::Ok(DateTime::from_utc( - NaiveDateTime::from_timestamp_opt(t.seconds, t.nanos.try_into().unwrap_or(u32::MAX)) - .ok_or(StoreAndForwardError::InvalidStoreMessage)?, + NaiveDateTime::from_timestamp_opt(t.seconds, 0).ok_or_else(|| { + StoreAndForwardError::InvalidSafResponseMessage { + field: "stored_at", + details: "number of seconds provided represents more days than can fit in a u32" + .to_string(), + } + })?, Utc, )) }) @@ -618,7 +623,7 @@ where S: Service mod test { use std::time::Duration; - use chrono::Utc; + use chrono::{Timelike, Utc}; use tari_comms::{message::MessageExt, runtime, wrap_in_envelope_body}; use tari_test_utils::collect_recv; use tari_utilities::{hex, hex::Hex}; @@ -932,7 +937,7 @@ mod test { .unwrap() .unwrap(); - assert_eq!(last_saf_received, msg2_time); + assert_eq!(last_saf_received.second(), msg2_time.second()); } #[runtime::test] diff --git a/comms/dht/src/store_forward/store.rs b/comms/dht/src/store_forward/store.rs index c0d2b8d224b..70690bde948 100644 --- a/comms/dht/src/store_forward/store.rs +++ b/comms/dht/src/store_forward/store.rs @@ -437,13 +437,13 @@ where S: Service + Se ); if let Some(expires) = message.dht_header.expires { - if expires < EpochTime::now() { - return SafResult::Err(StoreAndForwardError::InvalidStoreMessage); + let now = EpochTime::now(); + if expires < now { + return Err(StoreAndForwardError::NotStoringExpiredMessage { expired: expires, now }); } } - let stored_message = - NewStoredMessage::try_construct(message, priority).ok_or(StoreAndForwardError::InvalidStoreMessage)?; + let stored_message = NewStoredMessage::new(message, priority); self.saf_requester.insert_message(stored_message).await } } diff --git a/comms/rpc_macros/Cargo.toml b/comms/rpc_macros/Cargo.toml index 81e33db8eaf..f9aac328f1a 100644 --- a/comms/rpc_macros/Cargo.toml +++ b/comms/rpc_macros/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [lib] diff --git a/infrastructure/derive/Cargo.toml
diff --git a/comms/rpc_macros/Cargo.toml b/comms/rpc_macros/Cargo.toml
index 81e33db8eaf..f9aac328f1a 100644
--- a/comms/rpc_macros/Cargo.toml
+++ b/comms/rpc_macros/Cargo.toml
@@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari"
 homepage = "https://tari.com"
 readme = "README.md"
 license = "BSD-3-Clause"
-version = "0.38.5"
+version = "0.38.7"
 edition = "2018"
 
 [lib]
diff --git a/infrastructure/derive/Cargo.toml b/infrastructure/derive/Cargo.toml
index dbb5fe630c6..f7c686eed49 100644
--- a/infrastructure/derive/Cargo.toml
+++ b/infrastructure/derive/Cargo.toml
@@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari"
 homepage = "https://tari.com"
 readme = "README.md"
 license = "BSD-3-Clause"
-version = "0.38.5"
+version = "0.38.7"
 edition = "2018"
 
 [lib]
diff --git a/infrastructure/shutdown/Cargo.toml b/infrastructure/shutdown/Cargo.toml
index 9070aab5e48..cd2c41d80eb 100644
--- a/infrastructure/shutdown/Cargo.toml
+++ b/infrastructure/shutdown/Cargo.toml
@@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari"
 homepage = "https://tari.com"
 readme = "README.md"
 license = "BSD-3-Clause"
-version = "0.38.5"
+version = "0.38.7"
 edition = "2018"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
diff --git a/infrastructure/storage/Cargo.toml b/infrastructure/storage/Cargo.toml
index 181f62b20ea..9a966cf4973 100644
--- a/infrastructure/storage/Cargo.toml
+++ b/infrastructure/storage/Cargo.toml
@@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari"
 homepage = "https://tari.com"
 readme = "README.md"
 license = "BSD-3-Clause"
-version = "0.38.5"
+version = "0.38.7"
 edition = "2018"
 
 [dependencies]
diff --git a/infrastructure/storage/tests/lmdb.rs b/infrastructure/storage/tests/lmdb.rs
index 38441e39e03..45740521c71 100644
--- a/infrastructure/storage/tests/lmdb.rs
+++ b/infrastructure/storage/tests/lmdb.rs
@@ -118,7 +118,7 @@ fn insert_all_users(name: &str) -> (Vec<User>, LMDBDatabase) {
 }
 
 #[test]
-fn single_thread() {
+fn test_single_thread() {
     {
         let users = load_users();
         let env = init("single_thread").unwrap();
@@ -136,7 +136,7 @@ fn single_thread() {
 }
 
 #[test]
-fn multi_thread() {
+fn test_multi_thread() {
     {
         let users_arc = Arc::new(load_users());
         let env = init("multi_thread").unwrap();
@@ -167,7 +167,7 @@ fn multi_thread() {
 }
 
 #[test]
-fn transactions() {
+fn test_transactions() {
     {
         let (users, db) = insert_all_users("transactions");
 
         // Test the `exists` and value retrieval functions
@@ -186,7 +186,7 @@ fn transactions() {
 
 /// Simultaneous writes in different threads
 #[test]
 #[allow(clippy::same_item_push)]
-fn multi_thread_writes() {
+fn test_multi_thread_writes() {
     {
         let env = init("multi-thread-writes").unwrap();
         let mut threads = Vec::new();
@@ -220,7 +220,7 @@
 
 /// Multiple write transactions in a single thread
 #[test]
-fn multi_writes() {
+fn test_multi_writes() {
     {
         let env = init("multi-writes").unwrap();
         for i in 0..2 {
@@ -241,7 +241,7 @@
 }
 
 #[test]
-fn pair_iterator() {
+fn test_pair_iterator() {
     {
         let (users, db) = insert_all_users("pair_iterator");
         let res = db.for_each::(|pair| {
@@ -256,7 +256,7 @@
 }
 
 #[test]
-fn exists_and_delete() {
+fn test_exists_and_delete() {
     {
         let (_, db) = insert_all_users("delete");
         assert!(db.contains_key(&525u64).unwrap());
@@ -267,7 +267,7 @@
 }
 
 #[test]
-fn lmdb_resize_on_create() {
+fn test_lmdb_resize_on_create() {
     let db_env_name = "resize";
     {
         let path = get_path(db_env_name);
diff --git a/infrastructure/tari_script/src/lib.rs b/infrastructure/tari_script/src/lib.rs
index e796c55a4dd..81ef3d5e7ff 100644
--- a/infrastructure/tari_script/src/lib.rs
+++ b/infrastructure/tari_script/src/lib.rs
@@ -24,7 +24,7 @@ mod serde;
 mod stack;
 
 pub use error::ScriptError;
-pub use op_codes::{slice_to_boxed_hash, slice_to_hash, HashValue, Opcode};
+pub use op_codes::{slice_to_boxed_hash, slice_to_hash, HashValue, Message, Opcode, ScalarValue};
 pub use script::TariScript;
 pub use script_commitment::{ScriptCommitment, ScriptCommitmentError, ScriptCommitmentFactory};
 pub use script_context::ScriptContext;
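The `script.rs` rename that follows (`as_bytes` to `to_bytes`) tracks the Rust API naming guidelines: `as_` conversions should be cheap, borrowing views, while `to_` signals a conversion that does work and allocates, which is what serialising a script into a fresh `Vec<u8>` is. A small sketch of the convention (the `Script` type here is illustrative only, not the TariScript implementation):

```rust
/// Illustrative type; the opcodes are modelled as raw bytes.
struct Script {
    ops: Vec<u8>,
}

impl Script {
    /// `as_` names suit cheap, borrowing views like this one.
    fn as_slice(&self) -> &[u8] {
        &self.ops
    }

    /// `to_` signals an allocating conversion, like serialising
    /// opcodes into a fresh buffer on every call.
    fn to_bytes(&self) -> Vec<u8> {
        self.ops.clone()
    }
}

fn main() {
    let s = Script { ops: vec![0x93, 0x94, 0x93] };
    assert_eq!(s.as_slice(), &[0x93, 0x94, 0x93]);
    assert_eq!(s.to_bytes(), vec![0x93, 0x94, 0x93]);
}
```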
diff --git a/infrastructure/tari_script/src/script.rs b/infrastructure/tari_script/src/script.rs
index b91fce8480d..38ab6cb8b14 100644
--- a/infrastructure/tari_script/src/script.rs
+++ b/infrastructure/tari_script/src/script.rs
@@ -119,7 +119,7 @@ impl TariScript {
         }
     }
 
-    pub fn as_bytes(&self) -> Vec<u8> {
+    pub fn to_bytes(&self) -> Vec<u8> {
         self.script.iter().fold(Vec::new(), |mut bytes, op| {
             op.to_bytes(&mut bytes);
             bytes
@@ -137,7 +137,7 @@ impl TariScript {
         if D::output_size() < 32 {
             return Err(ScriptError::InvalidDigest);
         }
-        let h = D::digest(&self.as_bytes());
+        let h = D::digest(&self.to_bytes());
         Ok(slice_to_hash(&h.as_slice()[..32]))
     }
 
@@ -178,7 +178,7 @@ impl TariScript {
     pub fn script_message(&self, pub_key: &RistrettoPublicKey) -> Result<RistrettoSecretKey, ScriptError> {
         let b = Blake256::new()
             .chain(pub_key.as_bytes())
-            .chain(&self.as_bytes())
+            .chain(&self.to_bytes())
             .finalize();
         RistrettoSecretKey::from_bytes(b.as_slice()).map_err(|_| ScriptError::InvalidSignature)
     }
 
@@ -562,7 +562,7 @@ impl Hex for TariScript {
     }
 
     fn to_hex(&self) -> String {
-        to_hex(&self.as_bytes())
+        to_hex(&self.to_bytes())
     }
 }
 
@@ -948,7 +948,7 @@ mod test {
     #[test]
     fn serialisation() {
         let script = script!(Add Sub Add);
-        assert_eq!(&script.as_bytes(), &[0x93, 0x94, 0x93]);
+        assert_eq!(&script.to_bytes(), &[0x93, 0x94, 0x93]);
         assert_eq!(TariScript::from_bytes(&[0x93, 0x94, 0x93]).unwrap(), script);
 
         assert_eq!(script.to_hex(), "939493");
         assert_eq!(TariScript::from_hex("939493").unwrap(), script);
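The `serde.rs` changes that follow replace the derived impls with hand-rolled ones that branch on `is_human_readable`, so JSON-like formats carry a hex string while binary formats carry raw bytes. A compact sketch of that dual-format pattern, assuming `serde` and `serde_json` as dependencies (`Blob` is an illustrative stand-in for a byte-backed type such as a script or execution stack, not the crate's type):

```rust
use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer};

struct Blob(Vec<u8>);

impl Serialize for Blob {
    fn serialize<S>(&self, ser: S) -> Result<S::Ok, S::Error>
    where S: Serializer {
        if ser.is_human_readable() {
            // Human-readable formats (e.g. JSON) get a hex string...
            let hex: String = self.0.iter().map(|b| format!("{:02x}", b)).collect();
            ser.serialize_str(&hex)
        } else {
            // ...while binary formats get the raw bytes.
            ser.serialize_bytes(&self.0)
        }
    }
}

impl<'de> Deserialize<'de> for Blob {
    fn deserialize<D>(de: D) -> Result<Self, D::Error>
    where D: Deserializer<'de> {
        if de.is_human_readable() {
            let s = String::deserialize(de)?;
            if s.len() % 2 != 0 {
                return Err(DeError::custom("odd-length hex string"));
            }
            let bytes = (0..s.len())
                .step_by(2)
                .map(|i| u8::from_str_radix(&s[i..i + 2], 16).map_err(DeError::custom))
                .collect::<Result<_, _>>()?;
            Ok(Blob(bytes))
        } else {
            Ok(Blob(Vec::<u8>::deserialize(de)?))
        }
    }
}

fn main() {
    // serde_json is human-readable, so the blob round-trips as a hex string.
    let json = serde_json::to_string(&Blob(vec![0x93, 0x94])).unwrap();
    assert_eq!(json, "\"9394\"");
    let back: Blob = serde_json::from_str(&json).unwrap();
    assert_eq!(back.0, vec![0x93, 0x94]);
}
```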
diff --git a/infrastructure/tari_script/src/serde.rs b/infrastructure/tari_script/src/serde.rs
index 658eef02a9d..b9379dae644 100644
--- a/infrastructure/tari_script/src/serde.rs
+++ b/infrastructure/tari_script/src/serde.rs
@@ -26,12 +26,12 @@ use serde::{
 };
 use tari_utilities::hex::{from_hex, Hex};
 
-use crate::TariScript;
+use crate::{ExecutionStack, TariScript};
 
 impl Serialize for TariScript {
     fn serialize<S>(&self, ser: S) -> Result<S::Ok, S::Error>
     where S: Serializer {
-        let script_bin = self.as_bytes();
+        let script_bin = self.to_bytes();
         if ser.is_human_readable() {
             ser.serialize_str(&script_bin.to_hex())
         } else {
@@ -40,44 +40,99 @@ impl Serialize for TariScript {
     }
 }
 
-struct ScriptVisitor;
+impl<'de> Deserialize<'de> for TariScript {
+    fn deserialize<D>(de: D) -> Result<Self, D::Error>
+    where D: Deserializer<'de> {
+        struct ScriptVisitor;
 
-impl<'de> Visitor<'de> for ScriptVisitor {
-    type Value = TariScript;
+        impl<'de> Visitor<'de> for ScriptVisitor {
+            type Value = TariScript;
 
-    fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        fmt.write_str("Expecting a binary array or hex string")
-    }
+            fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+                fmt.write_str("Expecting a binary array or hex string")
+            }
 
-    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
-    where E: Error {
-        let bytes = from_hex(v).map_err(|e| E::custom(e.to_string()))?;
-        self.visit_bytes(&bytes)
-    }
+            fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
+            where E: Error {
+                let bytes = from_hex(v).map_err(|e| E::custom(e.to_string()))?;
+                self.visit_bytes(&bytes)
+            }
 
-    fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
-    where E: Error {
-        self.visit_str(&v)
-    }
+            fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
+            where E: Error {
+                self.visit_str(&v)
+            }
+
+            fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
+            where E: Error {
+                TariScript::from_bytes(v).map_err(|e| E::custom(e.to_string()))
+            }
+
+            fn visit_borrowed_bytes<E>(self, v: &'de [u8]) -> Result<Self::Value, E>
+            where E: Error {
+                self.visit_bytes(v)
+            }
+        }
 
-    fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
-    where E: Error {
-        TariScript::from_bytes(v).map_err(|e| E::custom(e.to_string()))
+        if de.is_human_readable() {
+            de.deserialize_string(ScriptVisitor)
+        } else {
+            de.deserialize_bytes(ScriptVisitor)
+        }
     }
+}
 
-    fn visit_borrowed_bytes<E>(self, v: &'de [u8]) -> Result<Self::Value, E>
-    where E: Error {
-        self.visit_bytes(v)
+// -------------------------------- ExecutionStack -------------------------------- //
+impl Serialize for ExecutionStack {
+    fn serialize<S>(&self, ser: S) -> Result<S::Ok, S::Error>
+    where S: Serializer {
+        let stack_bin = self.to_bytes();
+        if ser.is_human_readable() {
+            ser.serialize_str(&stack_bin.to_hex())
+        } else {
+            ser.serialize_bytes(&stack_bin)
+        }
     }
 }
 
-impl<'de> Deserialize<'de> for TariScript {
+impl<'de> Deserialize<'de> for ExecutionStack {
     fn deserialize<D>(de: D) -> Result<Self, D::Error>
     where D: Deserializer<'de> {
+        struct ExecutionStackVisitor;
+
+        impl<'de> Visitor<'de> for ExecutionStackVisitor {
+            type Value = ExecutionStack;
+
+            fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+                fmt.write_str("Expecting a binary array or hex string")
+            }
+
+            fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
+            where E: Error {
+                let bytes = from_hex(v).map_err(|e| E::custom(e.to_string()))?;
+                self.visit_bytes(&bytes)
+            }
+
+            fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
+            where E: Error {
+                self.visit_str(&v)
+            }
+
+            fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
+            where E: Error {
+                ExecutionStack::from_bytes(v).map_err(|e| E::custom(e.to_string()))
+            }
+
+            fn visit_borrowed_bytes<E>(self, v: &'de [u8]) -> Result<Self::Value, E>
+            where E: Error {
+                self.visit_bytes(v)
+            }
+        }
+
         if de.is_human_readable() {
-            de.deserialize_string(ScriptVisitor)
+            de.deserialize_string(ExecutionStackVisitor)
         } else {
-            de.deserialize_bytes(ScriptVisitor)
+            de.deserialize_bytes(ExecutionStackVisitor)
         }
     }
 }
diff --git a/infrastructure/tari_script/src/stack.rs b/infrastructure/tari_script/src/stack.rs
index 757988f9c3d..f3b714b95cf 100644
--- a/infrastructure/tari_script/src/stack.rs
+++ b/infrastructure/tari_script/src/stack.rs
@@ -17,7 +17,6 @@
 use std::convert::TryFrom;
 
-use serde::{Deserialize, Serialize};
 use tari_crypto::ristretto::{pedersen::PedersenCommitment, RistrettoPublicKey, RistrettoSchnorr, RistrettoSecretKey};
 use tari_utilities::{
     hex::{from_hex, to_hex, Hex, HexError},
@@ -58,7 +57,7 @@ pub const TYPE_PUBKEY: u8 = 4;
 pub const TYPE_SIG: u8 = 5;
 pub const TYPE_SCALAR: u8 = 6;
 
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub enum StackItem {
     Number(i64),
     Hash(HashValue),
@@ -178,7 +177,7 @@ stack_item_from!(RistrettoPublicKey => PublicKey);
 stack_item_from!(RistrettoSchnorr => Signature);
 stack_item_from!(ScalarValue => Scalar);
 
-#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Debug, Default, Clone, PartialEq, Eq)]
 pub struct ExecutionStack {
     items: Vec<StackItem>,
 }
@@ -262,7 +261,7 @@ impl ExecutionStack {
     }
 
     /// Return a binary array representation of the input stack
-    pub fn as_bytes(&self) -> Vec<u8> {
+    pub fn to_bytes(&self) -> Vec<u8> {
         self.items.iter().fold(Vec::new(), |mut bytes, item| {
             item.to_bytes(&mut bytes);
             bytes
@@ -317,7 +316,7 @@ impl Hex for ExecutionStack {
     }
 
     fn to_hex(&self) -> String {
-        to_hex(&self.as_bytes())
+        to_hex(&self.to_bytes())
     }
 }
 
@@ -361,11 +360,21 @@ mod test {
     use tari_crypto::{
         hash::blake2::Blake256,
         keys::{PublicKey, SecretKey},
-        ristretto::{utils, utils::SignatureSet, RistrettoPublicKey, RistrettoSchnorr, RistrettoSecretKey},
+        ristretto::{
+            pedersen::PedersenCommitment,
+            utils,
+            utils::SignatureSet,
+            RistrettoPublicKey,
+            RistrettoSchnorr,
+            RistrettoSecretKey,
+        },
+    };
+    use tari_utilities::{
+        hex::{from_hex, Hex},
+        message_format::MessageFormat,
     };
-    use tari_utilities::hex::{from_hex, Hex};
 
-    use crate::{op_codes::ScalarValue, ExecutionStack, StackItem};
+    use crate::{op_codes::ScalarValue, ExecutionStack, HashValue, StackItem};
 
     #[test]
     fn as_bytes_roundtrip() {
@@ -378,7 +387,7 @@
         } = utils::sign::<Blake256>(&k, b"hi").unwrap();
 
         let items = vec![Number(5432), Number(21), Signature(s), PublicKey(p)];
         let stack = ExecutionStack::new(items);
-        let bytes = stack.as_bytes();
+        let bytes = stack.to_bytes();
         let stack2 = ExecutionStack::from_bytes(&bytes).unwrap();
         assert_eq!(stack, stack2);
     }
@@ -445,4 +454,37 @@
             panic!("Expected scalar")
         }
     }
+
+    #[test]
+    fn serde_serialization_non_breaking() {
+        const SERDE_ENCODED_BYTES: &str = "ce0000000000000006fdf9fc345d2cdd8aff624a55f824c7c9ce3cc9\
+            72e011b4e750e417a90ecc5da50456c0fa32558d6edc0916baa26b48e745de834571534ca253ea82435f08ebbc\
+            7c0556c0fa32558d6edc0916baa26b48e745de834571534ca253ea82435f08ebbc7c6db1023d5c46d78a97da8eb\
+            6c5a37e00d5f2fee182dcb38c1b6c65e90a43c10906fdf9fc345d2cdd8aff624a55f824c7c9ce3cc972e011b4e7\
+            50e417a90ecc5da501d2040000000000000356c0fa32558d6edc0916baa26b48e745de834571534ca253ea82435\
+            f08ebbc7c";
+        let p =
+            RistrettoPublicKey::from_hex("56c0fa32558d6edc0916baa26b48e745de834571534ca253ea82435f08ebbc7c").unwrap();
+        let s =
+            RistrettoSecretKey::from_hex("6db1023d5c46d78a97da8eb6c5a37e00d5f2fee182dcb38c1b6c65e90a43c109").unwrap();
+        let sig = RistrettoSchnorr::new(p.clone(), s);
+        let m: HashValue = Blake256::digest(b"Hello Tari Script").into();
+        let s: ScalarValue = m;
+        let commitment = PedersenCommitment::from_public_key(&p);
+
+        // Includes all variants for StackItem
+        let mut expected_inputs = inputs!(s, p, sig, m, 1234, commitment);
+        let stack = ExecutionStack::from_binary(&from_hex(SERDE_ENCODED_BYTES).unwrap()).unwrap();
+
+        for (i, item) in stack.items.into_iter().enumerate().rev() {
+            assert_eq!(
+                item,
+                expected_inputs.pop().unwrap(),
+                "Stack items did not match at index {}",
+                i
+            );
+        }
+
+        assert!(expected_inputs.is_empty());
+    }
 }
diff --git a/infrastructure/test_utils/Cargo.toml b/infrastructure/test_utils/Cargo.toml
index 809e2b0f67d..9a6262255af 100644
--- a/infrastructure/test_utils/Cargo.toml
+++ b/infrastructure/test_utils/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "tari_test_utils"
 description = "Utility functions used in Tari test functions"
-version = "0.38.5"
+version = "0.38.7"
 authors = ["The Tari Development Community"]
 edition = "2018"
 license = "BSD-3-Clause"
diff --git a/integration_tests/config/config.toml b/integration_tests/config/config.toml
deleted file mode 100644
index 569d3b05c85..00000000000
--- a/integration_tests/config/config.toml
+++ /dev/null
@@ -1,380 +0,0 @@
-########################################################################################################################
-#                                                                                                                      #
-#                                          Common Configuration Options                                                #
-#                                                                                                                      #
-########################################################################################################################
-
-[common]
-#override_from="dibbler"
-#base_path="/.tari"
-#data_dir="data"
-
-[auto_update]
-# This interval in seconds to check for software updates. Setting this to 0 disables checking.
-check_interval = 300
-
-[dibbler.auto_update]
-# Customize the hosts that are used to check for updates. These hosts must contain update information in DNS TXT records.
-update_uris = ["updates.dibbler.taripulse.com"]
-# Customize the location of the update SHA hashes and maintainer-signed signature.
-# "auto_update.hashes_url" = "https:///hashes.txt"
-# "auto_update.hashes_sig_url" = "https://
/hashes.txt.sig" - -[metrics] -# server_bind_address = "127.0.0.1:5577" -# push_endpoint = http://localhost:9091/metrics/job/base-node -# Configuration options for dibbler testnet - -[dibbler.p2p.seeds] -dns_seeds = ["seeds.dibbler.tari.com"] -peer_seeds = [ - # 333388d1cbe3e2bd17453d052f - "c2eca9cf32261a1343e21ed718e79f25bfc74386e9305350b06f62047f519347::/onion3/6yxqk2ybo43u73ukfhyc42qn25echn4zegjpod2ccxzr2jd5atipwzqd:18141", - # 555575715a49fc242d756e52ca - "42fcde82b44af1de95a505d858cb31a422c56c4ac4747fbf3da47d648d4fc346::/onion3/2l3e7ysmihc23zybapdrsbcfg6omtjtfkvwj65dstnfxkwtai2fawtyd:18141", - # 77771f53be07fab4be5f1e1ff7 - "50e6aa8f6c50f1b9d9b3d438dfd2a29cfe1f3e3a650bd9e6b1e10f96b6c38f4d::/onion3/7s6y3cz5bnewlj5ypm7sekhgvqjyrq4bpaj5dyvvo7vxydj7hsmyf5ad:18141", - # 9999016f1f3a6162dddf5a45aa - "36a9df45e1423b5315ffa7a91521924210c8e1d1537ad0968450f20f21e5200d::/onion3/v24qfheti2rztlwzgk6v4kdbes3ra7mo3i2fobacqkbfrk656e3uvnid:18141", - # bbbb8358387d81c388fadb4649 - "be128d570e8ec7b15c101ee1a56d6c56dd7d109199f0bd02f182b71142b8675f::/onion3/ha422qsy743ayblgolui5pg226u42wfcklhc5p7nbhiytlsp4ir2syqd:18141", - # eeeeb0a943ed143e613a135392 - "3e0321c0928ca559ab3c0a396272dfaea705efce88440611a38ff3898b097217::/onion3/sl5ledjoaisst6d4fh7kde746dwweuge4m4mf5nkzdhmy57uwgtb7qqd:18141", - # 66664a0f95ce468941bb9de228 - "b0f797e7413b39b6646fa370e8394d3993ead124b8ba24325c3c07a05e980e7e::/ip4/35.177.93.69/tcp/18189", - # 22221bf814d5e524fce9ba5787 - "0eefb45a4de9484eca74846a4f47d2c8d38e76be1fec63b0112bd00d297c0928::/ip4/13.40.98.39/tcp/18189", - # 4444a0efd8388739d563bdd979 - "544ed2baed414307e119d12894e27f9ddbdfa2fd5b6528dc843f27903e951c30::/ip4/13.40.189.176/tcp/18189" -] - -######################################################################################################################## -# # -# Base Node Configuration Options # -# # -######################################################################################################################## - -# If you are not running a Tari Base node, you can simply leave everything in this section commented out. Base nodes -# help maintain the security of the Tari token and are the surest way to preserve your privacy and be 100% sure that -# no-one is cheating you out of your money. - -[base_node] -# Selected network -network = "dibbler" -# The socket to expose for the gRPC base node server -grpc_address = "/ip4/127.0.0.1/tcp/18142" - -# Spin up and use a built-in Tor instance. This only works on macos/linux and you must comment out tor_control_address below. -# This requires that the base node was built with the optional "libtor" feature flag. -#use_libtor = true - -[dibbler.base_node] -# A path to the file that stores your node identity and secret key -identity_file = "config/base_node_id_dibbler.json" - -[base_node.p2p] -# The node's publicly-accessible hostname. This is the host name that is advertised on the network so that -# peers can find you. -# _NOTE_: If using the `tor` transport type, public_address will be ignored and an onion address will be -# automatically configured -public_address = "/ip4/172.2.3.4/tcp/18189" - -# Optionally bind an additional TCP socket for inbound Tari P2P protocol commms. 
-# Use cases include: -# - allowing wallets to locally connect to their base node, rather than through tor, when used in conjunction with `tor_proxy_bypass_addresses` -# - multiple P2P addresses, one public over DNS and one private over TOR -# - a "bridge" between TOR and TCP-only nodes -# auxiliary_tcp_listener_address = "/ip4/127.0.0.1/tcp/9998" - -[base_node.p2p.transport] -# -------------- Transport configuration -------------- -# Use TCP to connect to the Tari network. This transport can only communicate with TCP/IP addresses, so peers with -# e.g. tor onion addresses will not be contactable. -#transport = "tcp" -# The address and port to listen for peer connections over TCP. -tcp.listener_address = "/ip4/0.0.0.0/tcp/18189" -# Configures a tor proxy used to connect to onion addresses. All other traffic uses direct TCP connections. -# This setting is optional however, if it is not specified, this node will not be able to connect to nodes that -# only advertise an onion address. -tcp.tor_socks_address = "/ip4/127.0.0.1/tcp/36050" -tcp.tor_socks_auth = "none" - -# # Configures the node to run over a tor hidden service using the Tor proxy. This transport recognises ip/tcp, -# # onion v2, onion v3 and dns addresses. -#type = "tor" -# Address of the tor control server -tor.control_address = "/ip4/127.0.0.1/tcp/9051" -# Authentication to use for the tor control server -tor.control_auth = "none" # or "password=xxxxxx" -# The onion port to use. -tor.onion_port = 18141 -# When these peer addresses are encountered when dialing another peer, the tor proxy is bypassed and the connection is made -# directly over TCP. /ip4, /ip6, /dns, /dns4 and /dns6 are supported. -tor.proxy_bypass_addresses = [] -#tor.proxy_bypass_addresses = ["/dns4/my-foo-base-node/tcp/9998"] -# When using the tor transport and set to true, outbound TCP connections bypass the tor proxy. Defaults to false for better privacy -tor.proxy_bypass_for_outbound_tcp = false - -# Use a SOCKS5 proxy transport. This transport recognises any addresses supported by the proxy. -#type = "socks5" -# The address of the SOCKS5 proxy -# Traffic will be forwarded to tcp.listener_address -socks.proxy_address = "/ip4/127.0.0.1/tcp/9050" -socks.auth = "none" # or "username_password=username:xxxxxxx" - -[base_node.p2p.dht] -auto_join = true -database_url = "base_node_dht.db" -# do we allow test addresses to be accepted like 127.0.0.1 -allow_test_addresses = false - -[base_node.p2p.dht.saf] - -[base_node.lmdb] -#init_size_bytes = 1000000 -#grow_size_bytes = 1600000 -#resize_threshold_bytes = 1600000 - -[base_node.storage] -# Sets the pruning horizon. -#pruning_horizon = 0 -# Set to true to record all reorgs. Recorded reorgs can be viewed using the list-reorgs command. -track_reorgs = true - -######################################################################################################################## -# # -# Wallet Configuration Options # -# # -######################################################################################################################## - -[wallet] -# Override common.network for wallet -override_from = "dibbler" - -# The relative folder to store your local key data and transaction history. DO NOT EVER DELETE THIS FILE unless you -# a) have backed up your seed phrase and -# b) know what you are doing! -db_file = "wallet/wallet.dat" - -# The socket to expose for the gRPC wallet server. This value is ignored if grpc_enabled is false. 
-grpc_address = "/ip4/127.0.0.1/tcp/18143" - -# Console wallet password -# Should you wish to start your console wallet without typing in your password, the following options are available: -# 1. Start the console wallet with the --password=secret argument, or -# 2. Set the environment variable TARI_WALLET_PASSWORD=secret before starting the console wallet, or -# 3. Set the "password" key in this [wallet] section of the config -# password = "secret" - -# WalletNotify -# Allows you to execute a script or program when these transaction events are received by the console wallet: -# - transaction received -# - transaction sent -# - transaction cancelled -# - transaction mined but unconfirmed -# - transaction mined and confirmed -# An example script is available here: applications/tari_console_wallet/src/notifier/notify_example.sh -# notify = "/path/to/script" - -# This is the timeout period that will be used to monitor TXO queries to the base node (default = 60). Larger values -# are needed for wallets with many (>1000) TXOs to be validated. -#base_node_query_timeout = 180 -# The amount of seconds added to the current time (Utc) which will then be used to check if the message has -# expired or not when processing the message (default = 10800). -#saf_expiry_duration = 10800 -# This is the number of block confirmations required for a transaction to be considered completely mined and -# confirmed. (default = 3) -#transaction_num_confirmations_required = 3 -# This is the timeout period that will be used for base node broadcast monitoring tasks (default = 60) -#transaction_broadcast_monitoring_timeout = 180 -# This is the timeout period that will be used for chain monitoring tasks (default = 60) -#transaction_chain_monitoring_timeout = 60 -# This is the timeout period that will be used for sending transactions directly (default = 20) -#transaction_direct_send_timeout = 180 -# This is the timeout period that will be used for sending transactions via broadcast mode (default = 60) -#transaction_broadcast_send_timeout = 180 -# This is the size of the event channel used to communicate transaction status events to the wallet's UI. A busy console -# wallet doing thousands of bulk payments or used for stress testing needs a fairly big size (>10000) (default = 1000). -#transaction_event_channel_size = 25000 -# This is the size of the event channel used to communicate base node events to the wallet. A busy console -# wallet doing thousands of bulk payments or used for stress testing needs a fairly big size (>3000) (default = 250). -#base_node_event_channel_size = 3500 -# This is the size of the event channel used to communicate output manager events to the wallet. A busy console -# wallet doing thousands of bulk payments or used for stress testing needs a fairly big size (>3000) (default = 250). -#output_manager_event_channel_size = 3500 -# This is the size of the event channel used to communicate base node update events to the wallet. A busy console -# wallet doing thousands of bulk payments or used for stress testing needs a fairly big size (>300) (default = 50). -#base_node_update_publisher_channel_size = 500 -# If a large amount of tiny valued uT UTXOs are used as inputs to a transaction, the fee may be larger than -# the transaction amount. Set this value to `false` to allow spending of "dust" UTXOs for small valued -# transactions (default = true). 
-#prevent_fee_gt_amount = false -# This option specifies the transaction routing mechanism as being directly between wallets, making -# use of store and forward or using any combination of these. -# (options: "DirectOnly", "StoreAndForwardOnly", DirectAndStoreAndForward". default: "DirectAndStoreAndForward"). -#transaction_routing_mechanism = "DirectAndStoreAndForward" - -# When running the console wallet in command mode, use these values to determine what "stage" and timeout to wait -# for sent transactions. -# The stages are: -# - "DirectSendOrSaf" - The transaction was initiated and was accepted via Direct Send or Store And Forward. -# - "Negotiated" - The recipient replied and the transaction was negotiated. -# - "Broadcast" - The transaction was broadcast to the base node mempool. -# - "MinedUnconfirmed" - The transaction was successfully detected as mined but unconfirmed on the blockchain. -# - "Mined" - The transaction was successfully detected as mined and confirmed on the blockchain. - -# The default values are: "Broadcast", 300 -#command_send_wait_stage = "Broadcast" -#command_send_wait_timeout = 300 - -# The base nodes that the wallet should use for service requests and tracking chain state. -# base_node_service_peers = ["public_key::net_address", ...] -# base_node_service_peers = ["e856839057aac496b9e25f10821116d02b58f20129e9b9ba681b830568e47c4d::/onion3/exe2zgehnw3tvrbef3ep6taiacr6sdyeb54be2s25fpru357r4skhtad:18141"] - -# Configuration for the wallet's base node service -# The refresh interval, defaults to 10 seconds -#base_node_service_refresh_interval = 30 -# The maximum age of service requests in seconds, requests older than this are discarded -#base_node_service_request_max_age = 180 - -#[base_node.transport.tor] -#control_address = "/ip4/127.0.0.1/tcp/9051" -#control_auth_type = "none" # or "password" -# Required for control_auth_type = "password" -#control_auth_password = "super-secure-password" - -[wallet.p2p] - -[wallet.p2p.transport] -# # Configures the node to run over a tor hidden service using the Tor proxy. This transport recognises ip/tcp, -# # onion v2, onion v3 and dns addresses. -type = "tor" -# Address of the tor control server -tor.control_address = "/ip4/127.0.0.1/tcp/9051" -# Authentication to use for the tor control server -tor.control_auth = "none" # or "password=xxxxxx" -# The onion port to use. -tor.onion_port = 18141 -# When these peer addresses are encountered when dialing another peer, the tor proxy is bypassed and the connection is made -# directly over TCP. /ip4, /ip6, /dns, /dns4 and /dns6 are supported. -tor.proxy_bypass_addresses = [] -# When using the tor transport and set to true, outbound TCP connections bypass the tor proxy. 
Defaults to false for better privacy -tor.proxy_bypass_for_outbound_tcp = false - -[dibbler.wallet] -network = "dibbler" - - - -######################################################################################################################## -# # -# Miner Configuration Options # -# # -######################################################################################################################## - -[miner] -# Number of mining threads -# Default: number of logical CPU cores -#num_mining_threads=8 - -# GRPC address of base node -#base_node_grpc_address = "127.0.0.1:18142" - -# GRPC address of console wallet -#wallet_grpc_address = "127.0.0.1:18143" - -# Start mining only when base node is bootstrapped -# and current block height is on the tip of network -# Default: true -#mine_on_tip_only=true - -# Will check tip with node every N seconds and restart mining -# if height already taken and option `mine_on_tip_only` is set -# to true -# Default: 30 seconds -#validate_tip_timeout_sec=30 - -# Stratum Mode configuration -# mining_pool_address = "miningcore.tari.com:3052" -# mining_wallet_address = "YOUR_WALLET_PUBLIC_KEY" -# mining_worker_name = "worker1" - -######################################################################################################################## -# # -# Merge Mining Configuration Options # -# # -######################################################################################################################## - -[merge_mining_proxy] -#override_from = "dibbler" -monerod_url = [# stagenet - "http://stagenet.xmr-tw.org:38081", - "http://stagenet.community.xmr.to:38081", - "http://monero-stagenet.exan.tech:38081", - "http://xmr-lux.boldsuck.org:38081", - "http://singapore.node.xmr.pm:38081", -] -base_node_grpc_address = "/ip4/127.0.0.1/tcp/18142" -console_wallet_grpc_address = "/ip4/127.0.0.1/tcp/18143" - -# Address of the tari_merge_mining_proxy application -listener_address = "/ip4/127.0.0.1/tcp/18081" - -# In sole merged mining, the block solution is usually submitted to the Monero blockchain -# (monerod) as well as to the Tari blockchain, then this setting should be "true". With pool -# merged mining, there is no sense in submitting the solution to the Monero blockchain as the -# pool does that, then this setting should be "false". (default = true). -submit_to_origin = true - -# The merge mining proxy can either wait for the base node to achieve initial sync at startup before it enables mining, -# or not. If merge mining starts before the base node has achieved initial sync, those Tari mined blocks will not be -# accepted. (Default value = true; will wait for base node initial sync). -#wait_for_initial_sync_at_startup = true - -# Monero auth params -monerod_username = "" -monerod_password = "" -monerod_use_auth = false - -#[dibbler.merge_mining_proxy] -# Put any network specific settings here - - - -######################################################################################################################## -# # -# Validator Node Configuration Options # -# # -######################################################################################################################## - -[validator_node] - -phase_timeout = 30 - -# If set to false, there will be no scanning at all. -scan_for_assets = true -# How often do we want to scan the base layer for changes. -new_asset_scanning_interval = 10 -# If set then only the specific assets will be checked. 
-# assets_allow_list = [""] - - -constitution_auto_accept = false -constitution_management_polling_interval_in_seconds = 10 -constitution_management_polling_interval = 5 -constitution_management_confirmation_time = 50 -######################################################################################################################## -# # -# Collectibles Configuration Options # -# # -######################################################################################################################## - -[collectibles] -# GRPC address of validator node -#validator_node_grpc_address = "/ip4/127.0.0.1/tcp/18144" - -# GRPC address of base node -#base_node_grpc_address = "/ip4/127.0.0.1/tcp/18142" - -# GRPC address of wallet -#wallet_grpc_address = "/ip4/127.0.0.1/tcp/18143" diff --git a/integration_tests/cucumber.js b/integration_tests/cucumber.js index 544030439cc..5b5dd3baf75 100644 --- a/integration_tests/cucumber.js +++ b/integration_tests/cucumber.js @@ -1,8 +1,7 @@ module.exports = { - default: - "--tags 'not @long-running and not @wallet-ffi and not @broken' --fail-fast", + default: "--tags 'not @long-running and not @wallet-ffi and not @broken' ", none: " ", - ci: "--tags '@critical and not @long-running and not @broken ' --fail-fast", + ci: "--tags '@critical and not @long-running and not @broken '", critical: "--format @cucumber/pretty-formatter --tags @critical", "non-critical": "--tags 'not @critical and not @long-running and not @broken'", diff --git a/integration_tests/features/BaseNodeAutoUpdate.feature b/integration_tests/features/BaseNodeAutoUpdate.feature deleted file mode 100644 index bc05149f8f7..00000000000 --- a/integration_tests/features/BaseNodeAutoUpdate.feature +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2022 The Tari Project -# SPDX-License-Identifier: BSD-3-Clause - -@auto_update -Feature: AutoUpdate - - @broken - Scenario: Auto update finds a new update on base node - Given I have a node NODE_A with auto update enabled - Then NODE_A has a new software update - - @broken - Scenario: Auto update ignores update with invalid signature on base node - Given I have a node NODE_A with auto update configured with a bad signature - Then NODE_A does not have a new software update diff --git a/integration_tests/features/BaseNodeConnectivity.feature b/integration_tests/features/BaseNodeConnectivity.feature index 37300e227af..4dbd112c14a 100644 --- a/integration_tests/features/BaseNodeConnectivity.feature +++ b/integration_tests/features/BaseNodeConnectivity.feature @@ -21,13 +21,11 @@ Feature: Base Node Connectivity Then SEED_A is connected to WALLET_A Scenario: Base node lists heights - Given I have 1 seed nodes - And I have a base node N1 connected to all seed nodes + Given I have a seed node N1 When I mine 5 blocks on N1 Then node N1 lists heights 1 to 5 Scenario: Base node lists headers - Given I have 1 seed nodes - And I have a base node BN1 connected to all seed nodes + Given I have a seed node BN1 When I mine 5 blocks on BN1 Then node BN1 lists headers 1 to 5 with correct heights diff --git a/integration_tests/features/WalletAutoUpdate.feature b/integration_tests/features/WalletAutoUpdate.feature deleted file mode 100644 index 2a9d89c000d..00000000000 --- a/integration_tests/features/WalletAutoUpdate.feature +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2022 The Tari Project -# SPDX-License-Identifier: BSD-3-Clause - -@auto_update -Feature: AutoUpdate - - @broken - Scenario: Auto update finds a new update on wallet - Given I have a wallet WALLET with auto update 
enabled - Then WALLET has a new software update - - @broken - Scenario: Auto update ignores update with invalid signature on wallet - Given I have a wallet WALLET with auto update configured with a bad signature - Then WALLET does not have a new software update diff --git a/integration_tests/helpers/config.js b/integration_tests/helpers/config.js index a3396eb31da..51ae68f4a7e 100644 --- a/integration_tests/helpers/config.js +++ b/integration_tests/helpers/config.js @@ -84,6 +84,9 @@ function baseEnvs(peerSeeds = [], forceSyncPeers = [], _committee = []) { ["localnet.base_node.p2p.dht.flood_ban_max_msg_count"]: "100000", ["localnet.base_node.p2p.dht.database_url"]: "localnet/dht.db", ["localnet.p2p.seeds.dns_seeds_use_dnssec"]: "false", + ["localnet.base_node.lmdb.init_size_bytes"]: 16000000, + ["localnet.base_node.lmdb.grow_size_bytes"]: 16000000, + ["localnet.base_node.lmdb.resize_threshold_bytes"]: 1024, ["localnet.wallet.identity_file"]: "walletid.json", ["localnet.wallet.contacts_auto_ping_interval"]: "5", @@ -101,9 +104,7 @@ function baseEnvs(peerSeeds = [], forceSyncPeers = [], _committee = []) { ["merge_mining_proxy.monerod_use_auth"]: false, ["merge_mining_proxy.monerod_username"]: "", ["merge_mining_proxy.monerod_password"]: "", - // ["localnet.base_node.storage_db_init_size"]: 100000000, - // ["localnet.base_node.storage.db_resize_threshold"]: 10000000, - // ["localnet.base_node.storage.db_grow_size"]: 20000000, + ["merge_mining_proxy.wait_for_initial_sync_at_startup"]: false, ["miner.num_mining_threads"]: "1", ["miner.mine_on_tip_only"]: true, diff --git a/integration_tests/package-lock.json b/integration_tests/package-lock.json index 2dd066682e5..403f326a619 100644 --- a/integration_tests/package-lock.json +++ b/integration_tests/package-lock.json @@ -9,13 +9,18 @@ "version": "1.0.0", "license": "ISC", "dependencies": { + "@grpc/grpc-js": "^1.2.3", + "@grpc/proto-loader": "^0.5.5", "archiver": "^5.3.1", "axios": "^0.21.4", "clone-deep": "^4.0.1", "csv-parser": "^3.0.0", "dateformat": "^3.0.3", + "fs": "^0.0.1-security", "glob": "^7.2.3", + "grpc-promise": "^1.4.0", "json5": "^2.2.1", + "path": "^0.12.7", "sha3": "^2.1.3", "tari_crypto": "v0.14.0", "utf8": "^3.0.0", @@ -2332,6 +2337,11 @@ } } }, + "node_modules/fs": { + "version": "0.0.1-security", + "resolved": "https://registry.npmjs.org/fs/-/fs-0.0.1-security.tgz", + "integrity": "sha512-3XY9e1pP0CVEUCdj5BmfIZxRBTSDycnbqhIOGec9QYtmVH2fbLpj86CFWkrNOkt/Fvty4KZG5lTglL9j/gJ87w==" + }, "node_modules/fs-constants": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", @@ -3119,6 +3129,15 @@ "node": ">=6" } }, + "node_modules/path": { + "version": "0.12.7", + "resolved": "https://registry.npmjs.org/path/-/path-0.12.7.tgz", + "integrity": "sha512-aXXC6s+1w7otVF9UletFkFcDsJeO7lSZBPUQhtb5O0xJe8LtYhj/GxldoL09bBj9+ZmE2hNoHqQSFMN5fikh4Q==", + "dependencies": { + "process": "^0.11.1", + "util": "^0.10.3" + } + }, "node_modules/path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", @@ -3187,6 +3206,14 @@ "node": ">=6.0.0" } }, + "node_modules/process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", + "engines": { + "node": ">= 0.6.0" + } + }, "node_modules/process-nextick-args": { "version": "2.0.1", "resolved": 
"https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", @@ -3832,6 +3859,14 @@ "resolved": "https://registry.npmjs.org/utf8/-/utf8-3.0.0.tgz", "integrity": "sha512-E8VjFIQ/TyQgp+TZfS6l8yp/xWppSAHzidGiRrqe4bK4XP9pTRyKFgGJpO3SN7zdX4DeomTrwaseCHovfpFcqQ==" }, + "node_modules/util": { + "version": "0.10.4", + "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz", + "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==", + "dependencies": { + "inherits": "2.0.3" + } + }, "node_modules/util-arity": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/util-arity/-/util-arity-1.1.0.tgz", @@ -3843,6 +3878,11 @@ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" }, + "node_modules/util/node_modules/inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" + }, "node_modules/uuid": { "version": "3.4.0", "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", @@ -5778,6 +5818,11 @@ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.8.tgz", "integrity": "sha512-1x0S9UVJHsQprFcEC/qnNzBLcIxsjAV905f/UkQxbclCsoTWlacCNOpQa/anodLl2uaEKFhfWOvM2Qg77+15zA==" }, + "fs": { + "version": "0.0.1-security", + "resolved": "https://registry.npmjs.org/fs/-/fs-0.0.1-security.tgz", + "integrity": "sha512-3XY9e1pP0CVEUCdj5BmfIZxRBTSDycnbqhIOGec9QYtmVH2fbLpj86CFWkrNOkt/Fvty4KZG5lTglL9j/gJ87w==" + }, "fs-constants": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", @@ -6405,6 +6450,15 @@ "callsites": "^3.0.0" } }, + "path": { + "version": "0.12.7", + "resolved": "https://registry.npmjs.org/path/-/path-0.12.7.tgz", + "integrity": "sha512-aXXC6s+1w7otVF9UletFkFcDsJeO7lSZBPUQhtb5O0xJe8LtYhj/GxldoL09bBj9+ZmE2hNoHqQSFMN5fikh4Q==", + "requires": { + "process": "^0.11.1", + "util": "^0.10.3" + } + }, "path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", @@ -6449,6 +6503,11 @@ "fast-diff": "^1.1.2" } }, + "process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==" + }, "process-nextick-args": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", @@ -6956,6 +7015,21 @@ "resolved": "https://registry.npmjs.org/utf8/-/utf8-3.0.0.tgz", "integrity": "sha512-E8VjFIQ/TyQgp+TZfS6l8yp/xWppSAHzidGiRrqe4bK4XP9pTRyKFgGJpO3SN7zdX4DeomTrwaseCHovfpFcqQ==" }, + "util": { + "version": "0.10.4", + "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz", + "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==", + "requires": { + "inherits": "2.0.3" + }, + "dependencies": { + "inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" + } + } + }, "util-arity": { "version": "1.1.0", "resolved": 
"https://registry.npmjs.org/util-arity/-/util-arity-1.1.0.tgz", diff --git a/package-lock.json b/package-lock.json index e2497b00af2..30a0a96353a 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,6 +1,6 @@ { "name": "tari", - "version": "0.38.5", + "version": "0.38.7", "lockfileVersion": 2, "requires": true, "packages": {}