diff --git a/Cargo.lock b/Cargo.lock index 1c2d99b686..1a05d92b35 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -463,7 +463,7 @@ dependencies = [ "serde_repr", "tokio", "url", - "zbus 5.7.1", + "zbus 5.8.0", ] [[package]] @@ -3003,7 +3003,7 @@ dependencies = [ "libc", "log", "rustversion", - "windows 0.61.1", + "windows 0.61.3", ] [[package]] @@ -3046,7 +3046,7 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", "wasm-bindgen", ] @@ -4293,7 +4293,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" dependencies = [ "cfg-if", - "windows-targets 0.53.0", + "windows-targets 0.53.2", ] [[package]] @@ -4738,7 +4738,7 @@ checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", "log", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", "windows-sys 0.59.0", ] @@ -7918,8 +7918,11 @@ dependencies = [ "hex", "logging", "rusqlite", + "static_assertions", + "storage", "storage-backend-test-suite", "storage-core", + "tempfile", "test-utils", "utils", ] @@ -9083,6 +9086,7 @@ dependencies = [ name = "wallet" version = "1.1.0" dependencies = [ + "async-trait", "bip39", "chainstate", "chainstate-test-framework", @@ -9111,6 +9115,7 @@ dependencies = [ "tempfile", "test-utils", "thiserror 1.0.69", + "tokio", "trezor-client", "tx-verifier", "utils", @@ -9472,9 +9477,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" @@ -9924,9 +9929,9 @@ dependencies = [ 
[[package]] name = "windows" -version = "0.61.1" +version = "0.61.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5ee8f3d025738cb02bad7868bbb5f8a6327501e870bf51f1b455b0a2454a419" +checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" dependencies = [ "windows-collections", "windows-core 0.61.2", @@ -10001,9 +10006,9 @@ dependencies = [ [[package]] name = "windows-link" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" [[package]] name = "windows-numerics" @@ -10117,9 +10122,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.0" +version = "0.53.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" dependencies = [ "windows_aarch64_gnullvm 0.53.0", "windows_aarch64_msvc 0.53.0", @@ -10374,9 +10379,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.7.10" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec" +checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" dependencies = [ "memchr", ] @@ -10479,9 +10484,9 @@ dependencies = [ [[package]] name = "xcursor" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef33da6b1660b4ddbfb3aef0ade110c8b8a781a3b6382fa5f2b5b040fd55f61" +checksum = "bec9e4a500ca8864c5b47b8b482a73d62e4237670e5b5f1d6b9e3cae50f28f2b" [[package]] name = "xdg-home" @@ -10514,9 +10519,9 @@ checksum = "b9cc00251562a284751c9973bace760d86c0276c471b4be569fe6b068ee97a56" [[package]] name = 
"xml-rs" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62ce76d9b56901b19a74f19431b0d8b3bc7ca4ad685a746dfd78ca8f4fc6bda" +checksum = "6fd8403733700263c6eb89f192880191f1b83e332f7a20371ddcf421c4a337c7" [[package]] name = "yansi" @@ -10594,9 +10599,9 @@ dependencies = [ [[package]] name = "zbus" -version = "5.7.1" +version = "5.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3a7c7cee313d044fca3f48fa782cb750c79e4ca76ba7bc7718cd4024cdf6f68" +checksum = "597f45e98bc7e6f0988276012797855613cd8269e23b5be62cc4e5d28b7e515d" dependencies = [ "async-broadcast", "async-recursion", @@ -10615,9 +10620,9 @@ dependencies = [ "uds_windows", "windows-sys 0.59.0", "winnow", - "zbus_macros 5.7.1", + "zbus_macros 5.8.0", "zbus_names 4.2.0", - "zvariant 5.5.3", + "zvariant 5.6.0", ] [[package]] @@ -10635,16 +10640,16 @@ dependencies = [ [[package]] name = "zbus_macros" -version = "5.7.1" +version = "5.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17e7e5eec1550f747e71a058df81a9a83813ba0f6a95f39c4e218bdc7ba366a" +checksum = "e5c8e4e14dcdd9d97a98b189cd1220f30e8394ad271e8c987da84f73693862c2" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", "syn 2.0.101", "zbus_names 4.2.0", - "zvariant 5.5.3", + "zvariant 5.6.0", "zvariant_utils 3.2.0", ] @@ -10668,7 +10673,7 @@ dependencies = [ "serde", "static_assertions", "winnow", - "zvariant 5.5.3", + "zvariant 5.6.0", ] [[package]] @@ -10679,18 +10684,18 @@ checksum = "dd15f8e0dbb966fd9245e7498c7e9e5055d9e5c8b676b95bd67091cd11a1e697" [[package]] name = "zerocopy" -version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" 
-version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" dependencies = [ "proc-macro2", "quote", @@ -10786,16 +10791,16 @@ dependencies = [ [[package]] name = "zvariant" -version = "5.5.3" +version = "5.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d30786f75e393ee63a21de4f9074d4c038d52c5b1bb4471f955db249f9dffb1" +checksum = "d91b3680bb339216abd84714172b5138a4edac677e641ef17e1d8cb1b3ca6e6f" dependencies = [ "endi", "enumflags2", "serde", "url", "winnow", - "zvariant_derive 5.5.3", + "zvariant_derive 5.6.0", "zvariant_utils 3.2.0", ] @@ -10814,9 +10819,9 @@ dependencies = [ [[package]] name = "zvariant_derive" -version = "5.5.3" +version = "5.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75fda702cd42d735ccd48117b1630432219c0e9616bf6cb0f8350844ee4d9580" +checksum = "3a8c68501be459a8dbfffbe5d792acdd23b4959940fc87785fb013b32edbc208" dependencies = [ "proc-macro-crate", "proc-macro2", diff --git a/chainstate/launcher/src/lib.rs b/chainstate/launcher/src/lib.rs index 929c2fcce2..8efc8adaf4 100644 --- a/chainstate/launcher/src/lib.rs +++ b/chainstate/launcher/src/lib.rs @@ -37,7 +37,7 @@ pub const SUBDIRECTORY_LMDB: &str = "chainstate-lmdb"; pub use storage_compatibility::check_storage_compatibility; -fn make_chainstate_and_storage_impl( +fn make_chainstate_and_storage_impl( storage_backend: B, chain_config: Arc, chainstate_config: ChainstateConfig, diff --git a/chainstate/storage/src/internal/expensive.rs b/chainstate/storage/src/internal/expensive.rs index ab1ada3aa6..31b0616b8f 100644 --- a/chainstate/storage/src/internal/expensive.rs +++ b/chainstate/storage/src/internal/expensive.rs @@ -21,7 +21,7 @@ use storage::MakeMapRef; use utils::log_error; use utxo::Utxo; -impl StoreTxRo<'_, B> { 
+impl StoreTxRo<'_, B> { /// Dump raw database contents #[log_error] pub fn dump_raw(&self) -> crate::Result> { diff --git a/chainstate/storage/src/internal/mod.rs b/chainstate/storage/src/internal/mod.rs index 6e0e3460f4..579758e6d9 100644 --- a/chainstate/storage/src/internal/mod.rs +++ b/chainstate/storage/src/internal/mod.rs @@ -41,9 +41,9 @@ mod version; pub use version::ChainstateStorageVersion; /// Store for blockchain data, parametrized over the backend B -pub struct Store(storage::Storage); +pub struct Store(storage::Storage); -impl Store { +impl Store { /// Create a new chainstate storage #[log_error] pub fn new(backend: B, chain_config: &ChainConfig) -> crate::Result { @@ -76,7 +76,7 @@ impl Store { } } -impl Store { +impl Store { /// Create a default storage (mostly for testing, may want to remove this later) #[log_error] pub fn new_empty() -> crate::Result { @@ -84,16 +84,16 @@ impl Store { } } -impl Clone for Store +impl Clone for Store where - B::Impl: Clone, + storage::Storage: Clone, { fn clone(&self) -> Self { Self(self.0.clone()) } } -impl<'tx, B: storage::Backend + 'tx> Transactional<'tx> for Store { +impl<'tx, B: storage::SharedBackend + 'tx> Transactional<'tx> for Store { type TransactionRo = StoreTxRo<'tx, B>; type TransactionRw = StoreTxRw<'tx, B>; @@ -107,13 +107,15 @@ impl<'tx, B: storage::Backend + 'tx> Transactional<'tx> for Store { &'st self, size: Option, ) -> crate::Result { - self.0.transaction_rw(size).map_err(crate::Error::from).map(StoreTxRw::new) + as storage::StorageSharedWrite<_, _>>::transaction_rw(&self.0, size) + .map_err(crate::Error::from) + .map(StoreTxRw::new) } } -impl BlockchainStorage for Store {} +impl BlockchainStorage for Store {} -impl PoSAccountingStorageRead for Store { +impl PoSAccountingStorageRead for Store { type Error = crate::Error; #[log_error] @@ -162,7 +164,7 @@ impl PoSAccountingStorageRead for Store { } } -impl PoSAccountingStorageRead for Store { +impl PoSAccountingStorageRead for Store { type Error 
= crate::Error; #[log_error] @@ -211,7 +213,7 @@ impl PoSAccountingStorageRead for Store PoSAccountingStorageWrite for Store { +impl PoSAccountingStorageWrite for Store { #[log_error] fn set_pool_balance(&mut self, pool_id: PoolId, amount: Amount) -> crate::Result<()> { let mut tx = self.transaction_rw(None)?; @@ -322,7 +324,7 @@ impl PoSAccountingStorageWrite for Store } } -impl PoSAccountingStorageWrite for Store { +impl PoSAccountingStorageWrite for Store { #[log_error] fn set_pool_balance(&mut self, pool_id: PoolId, amount: Amount) -> crate::Result<()> { let mut tx = self.transaction_rw(None)?; diff --git a/chainstate/storage/src/internal/store_tx/mod.rs b/chainstate/storage/src/internal/store_tx/mod.rs index 0b31156a52..265c927e39 100644 --- a/chainstate/storage/src/internal/store_tx/mod.rs +++ b/chainstate/storage/src/internal/store_tx/mod.rs @@ -57,17 +57,19 @@ mod well_known { } /// Read-only chainstate storage transaction -pub struct StoreTxRo<'st, B: storage::Backend>(pub(super) storage::TransactionRo<'st, B, Schema>); +pub struct StoreTxRo<'st, B: storage::SharedBackend>( + pub(super) storage::TransactionRo<'st, B, Schema>, +); /// Read-write chainstate storage transaction /// /// It tracks if an error was encountered during the execution of the transaction. If so, it will /// be recorded here and returned by all subsequent operations. 
-pub struct StoreTxRw<'st, B: storage::Backend> { +pub struct StoreTxRw<'st, B: storage::SharedBackend> { db_tx: crate::Result>, } -impl StoreTxRo<'_, B> { +impl StoreTxRo<'_, B> { // Read a value from the database and decode it fn read(&self, key: K) -> crate::Result> where @@ -101,7 +103,7 @@ impl StoreTxRo<'_, B> { } } -impl<'st, B: storage::Backend> StoreTxRw<'st, B> { +impl<'st, B: storage::SharedBackend> StoreTxRw<'st, B> { pub(super) fn new(db_tx: storage::TransactionRw<'st, B, Schema>) -> Self { let db_tx = Ok(db_tx); Self { db_tx } @@ -208,13 +210,13 @@ impl<'st, B: storage::Backend> StoreTxRw<'st, B> { } } -impl crate::TransactionRo for StoreTxRo<'_, B> { +impl crate::TransactionRo for StoreTxRo<'_, B> { fn close(self) { self.0.close() } } -impl crate::TransactionRw for StoreTxRw<'_, B> { +impl crate::TransactionRw for StoreTxRw<'_, B> { fn commit(self) -> crate::Result<()> { Ok(self.db_tx?.commit()?) } diff --git a/chainstate/storage/src/internal/store_tx/read_impls.rs b/chainstate/storage/src/internal/store_tx/read_impls.rs index 68ac551f19..32ce5ba7ab 100644 --- a/chainstate/storage/src/internal/store_tx/read_impls.rs +++ b/chainstate/storage/src/internal/store_tx/read_impls.rs @@ -86,7 +86,7 @@ mod private { } /// Blockchain data storage transaction -impl BlockchainStorageRead for super::StoreTxRo<'_, B> { +impl BlockchainStorageRead for super::StoreTxRo<'_, B> { #[log_error] fn get_storage_version(&self) -> crate::Result> { self.read_value::() @@ -246,14 +246,14 @@ impl BlockchainStorageRead for super::StoreTxRo<'_, B> { } } -impl EpochStorageRead for super::StoreTxRo<'_, B> { +impl EpochStorageRead for super::StoreTxRo<'_, B> { #[log_error] fn get_epoch_data(&self, epoch_index: u64) -> crate::Result> { self.read::(epoch_index) } } -impl UtxosStorageRead for super::StoreTxRo<'_, B> { +impl UtxosStorageRead for super::StoreTxRo<'_, B> { type Error = crate::Error; #[log_error] @@ -268,7 +268,9 @@ impl UtxosStorageRead for super::StoreTxRo<'_, B> { } } 
-impl PoSAccountingStorageRead for super::StoreTxRo<'_, B> { +impl PoSAccountingStorageRead + for super::StoreTxRo<'_, B> +{ type Error = crate::Error; #[log_error] @@ -314,7 +316,9 @@ impl PoSAccountingStorageRead for super::Sto } } -impl PoSAccountingStorageRead for super::StoreTxRo<'_, B> { +impl PoSAccountingStorageRead + for super::StoreTxRo<'_, B> +{ type Error = crate::Error; #[log_error] @@ -360,7 +364,7 @@ impl PoSAccountingStorageRead for super:: } } -impl TokensAccountingStorageRead for super::StoreTxRo<'_, B> { +impl TokensAccountingStorageRead for super::StoreTxRo<'_, B> { type Error = crate::Error; #[log_error] @@ -374,7 +378,7 @@ impl TokensAccountingStorageRead for super::StoreTxRo<'_, B } } -impl OrdersAccountingStorageRead for super::StoreTxRo<'_, B> { +impl OrdersAccountingStorageRead for super::StoreTxRo<'_, B> { type Error = crate::Error; #[log_error] @@ -394,7 +398,7 @@ impl OrdersAccountingStorageRead for super::StoreTxRo<'_, B } /// Blockchain data storage transaction -impl BlockchainStorageRead for super::StoreTxRw<'_, B> { +impl BlockchainStorageRead for super::StoreTxRw<'_, B> { #[log_error] fn get_storage_version(&self) -> crate::Result> { self.read_value::() @@ -559,14 +563,14 @@ impl BlockchainStorageRead for super::StoreTxRw<'_, B> { } } -impl EpochStorageRead for super::StoreTxRw<'_, B> { +impl EpochStorageRead for super::StoreTxRw<'_, B> { #[log_error] fn get_epoch_data(&self, epoch_index: u64) -> crate::Result> { self.read::(epoch_index) } } -impl UtxosStorageRead for super::StoreTxRw<'_, B> { +impl UtxosStorageRead for super::StoreTxRw<'_, B> { type Error = crate::Error; #[log_error] @@ -581,7 +585,9 @@ impl UtxosStorageRead for super::StoreTxRw<'_, B> { } } -impl PoSAccountingStorageRead for super::StoreTxRw<'_, B> { +impl PoSAccountingStorageRead + for super::StoreTxRw<'_, B> +{ type Error = crate::Error; #[log_error] @@ -627,7 +633,9 @@ impl PoSAccountingStorageRead for super::Sto } } -impl PoSAccountingStorageRead for 
super::StoreTxRw<'_, B> { +impl PoSAccountingStorageRead + for super::StoreTxRw<'_, B> +{ type Error = crate::Error; #[log_error] @@ -673,7 +681,7 @@ impl PoSAccountingStorageRead for super:: } } -impl TokensAccountingStorageRead for super::StoreTxRw<'_, B> { +impl TokensAccountingStorageRead for super::StoreTxRw<'_, B> { type Error = crate::Error; #[log_error] @@ -687,7 +695,7 @@ impl TokensAccountingStorageRead for super::StoreTxRw<'_, B } } -impl OrdersAccountingStorageRead for super::StoreTxRw<'_, B> { +impl OrdersAccountingStorageRead for super::StoreTxRw<'_, B> { type Error = crate::Error; #[log_error] diff --git a/chainstate/storage/src/internal/store_tx/write_impls.rs b/chainstate/storage/src/internal/store_tx/write_impls.rs index e84e49d5f2..7ebb3e18be 100644 --- a/chainstate/storage/src/internal/store_tx/write_impls.rs +++ b/chainstate/storage/src/internal/store_tx/write_impls.rs @@ -36,7 +36,7 @@ use utxo::{Utxo, UtxosBlockUndo, UtxosStorageWrite}; use super::db; -impl BlockchainStorageWrite for StoreTxRw<'_, B> { +impl BlockchainStorageWrite for StoreTxRw<'_, B> { #[log_error] fn set_storage_version(&mut self, version: ChainstateStorageVersion) -> crate::Result<()> { self.write_value::(&version) @@ -219,7 +219,7 @@ impl BlockchainStorageWrite for StoreTxRw<'_, B> { } } -impl EpochStorageWrite for StoreTxRw<'_, B> { +impl EpochStorageWrite for StoreTxRw<'_, B> { #[log_error] fn set_epoch_data(&mut self, epoch_index: u64, epoch_data: &EpochData) -> crate::Result<()> { self.write::(epoch_index, epoch_data) @@ -231,7 +231,7 @@ impl EpochStorageWrite for StoreTxRw<'_, B> { } } -impl UtxosStorageWrite for StoreTxRw<'_, B> { +impl UtxosStorageWrite for StoreTxRw<'_, B> { #[log_error] fn set_utxo(&mut self, outpoint: &UtxoOutPoint, entry: Utxo) -> crate::Result<()> { self.write::(outpoint, entry) @@ -248,7 +248,7 @@ impl UtxosStorageWrite for StoreTxRw<'_, B> { } } -impl PoSAccountingStorageWrite for StoreTxRw<'_, B> { +impl PoSAccountingStorageWrite for 
StoreTxRw<'_, B> { #[log_error] fn set_pool_balance(&mut self, pool_id: PoolId, amount: Amount) -> crate::Result<()> { self.write::(pool_id, amount) @@ -320,7 +320,7 @@ impl PoSAccountingStorageWrite for StoreTxRw } } -impl PoSAccountingStorageWrite for StoreTxRw<'_, B> { +impl PoSAccountingStorageWrite for StoreTxRw<'_, B> { #[log_error] fn set_pool_balance(&mut self, pool_id: PoolId, amount: Amount) -> crate::Result<()> { self.write::(pool_id, amount) @@ -392,7 +392,7 @@ impl PoSAccountingStorageWrite for StoreT } } -impl TokensAccountingStorageWrite for StoreTxRw<'_, B> { +impl TokensAccountingStorageWrite for StoreTxRw<'_, B> { #[log_error] fn set_token_data( &mut self, @@ -418,7 +418,7 @@ impl TokensAccountingStorageWrite for StoreTxRw<'_, B> { } } -impl OrdersAccountingStorageWrite for StoreTxRw<'_, B> { +impl OrdersAccountingStorageWrite for StoreTxRw<'_, B> { #[log_error] fn set_order_data(&mut self, id: &OrderId, data: &OrderData) -> crate::Result<()> { self.write::(id, data) diff --git a/chainstate/storage/src/is_transaction_seal.rs b/chainstate/storage/src/is_transaction_seal.rs index d6b256afbe..178072f6f3 100644 --- a/chainstate/storage/src/is_transaction_seal.rs +++ b/chainstate/storage/src/is_transaction_seal.rs @@ -16,8 +16,8 @@ /// Prevent more types from implementing [crate::TransactionRo] and [crate::TransactionRw] pub trait Seal {} -impl Seal for crate::internal::StoreTxRo<'_, B> {} -impl Seal for crate::internal::StoreTxRw<'_, B> {} +impl Seal for crate::internal::StoreTxRo<'_, B> {} +impl Seal for crate::internal::StoreTxRw<'_, B> {} #[cfg(any(test, feature = "mock"))] impl Seal for crate::mock::MockStoreTxRo {} diff --git a/dns-server/src/crawler_p2p/crawler_manager/storage.rs b/dns-server/src/crawler_p2p/crawler_manager/storage.rs index bee295b5ad..696917c3ab 100644 --- a/dns-server/src/crawler_p2p/crawler_manager/storage.rs +++ b/dns-server/src/crawler_p2p/crawler_manager/storage.rs @@ -81,7 +81,7 @@ where pub fn open_storage(backend: 
Backend) -> crate::Result> where - Backend: storage::Backend, + Backend: storage::SharedBackend, { let storage = DnsServerStorageImpl::new(backend)?; let version = storage.transaction_ro()?.get_version()?; diff --git a/dns-server/src/crawler_p2p/crawler_manager/storage_impl.rs b/dns-server/src/crawler_p2p/crawler_manager/storage_impl.rs index c22bbe530a..b7ecfc0bce 100644 --- a/dns-server/src/crawler_p2p/crawler_manager/storage_impl.rs +++ b/dns-server/src/crawler_p2p/crawler_manager/storage_impl.rs @@ -52,9 +52,9 @@ type DnsServerStoreTxRw<'st, B> = StorageTxRw<'st, B, Schema>; pub type DnsServerStorageImpl = StorageImpl; -impl DnsServerStorage for DnsServerStorageImpl {} +impl DnsServerStorage for DnsServerStorageImpl {} -impl DnsServerStorageWrite for DnsServerStoreTxRw<'_, B> { +impl DnsServerStorageWrite for DnsServerStoreTxRw<'_, B> { fn set_version(&mut self, version: StorageVersion) -> crate::Result<()> { Ok(self.storage().get_mut::().put(VALUE_ID_VERSION, version.encode())?) } @@ -79,7 +79,7 @@ impl DnsServerStorageWrite for DnsServerStoreTxRw<'_, B> { } } -impl DnsServerStorageRead for DnsServerStoreTxRo<'_, B> { +impl DnsServerStorageRead for DnsServerStoreTxRo<'_, B> { fn get_version(&self) -> crate::Result> { let map = self.storage().get::(); let vec_opt = map.get(VALUE_ID_VERSION)?.as_ref().map(Encoded::decode); diff --git a/do_checks.sh b/do_checks.sh index d268400e75..1888338193 100755 --- a/do_checks.sh +++ b/do_checks.sh @@ -13,13 +13,32 @@ cargo fmt --check -- --config newline_style=Unix # Note: "--allow duplicate" silences the warning "found x duplicate entries for crate y". cargo deny check --allow duplicate --hide-inclusion-graph +CLIPPY_VERSION_RESPONSE=$(cargo clippy --version) +# Note: clippy version starts from 0, e.g. 
'0.1.90' +if [[ "$CLIPPY_VERSION_RESPONSE" =~ clippy[[:space:]]+0\.([0-9]+)\.([0-9]+) ]]; then + CLIPPY_VERSION_MAJOR="${BASH_REMATCH[1]}" + CLIPPY_VERSION_MINOR="${BASH_REMATCH[2]}" + # Note: for 1.90 CLIPPY_VERSION will be 1090 + CLIPPY_VERSION=$(($CLIPPY_VERSION_MAJOR * 1000 + $CLIPPY_VERSION_MINOR)) +else + echo "Unable to determine the version of Clippy" + exit 1 +fi + # Checks enabled everywhere, including tests, benchmarks. -# Note about "uninlined_format_args": this is about changing `format!("{}", x)` to `format!("{x}")`. +# Note: +# 1) "uninlined_format_args" is about changing `format!("{}", x)` to `format!("{x}")`. # Most of the time this makes the code look better, but: # * there are way too many places like this; # * in some cases it may lead to uglier code; in particular, when the format string is already # quite long. # So we disable it for now. +# 2) "manual_is_multiple_of" - starting from v1.90 clippy insists that `x % 2 == 0` should be +# replaced with `x.is_multiple_of(2)`, which is a questionable improvement. 
+EXTRA_ARGS=() +if [[ $CLIPPY_VERSION -ge 1090 ]]; then + EXTRA_ARGS+=(-A clippy::manual_is_multiple_of) +fi cargo clippy --all-features --workspace --all-targets -- \ -D warnings \ -A clippy::unnecessary_literal_unwrap \ @@ -32,7 +51,8 @@ cargo clippy --all-features --workspace --all-targets -- \ -D clippy::manual_assert \ -D clippy::unused_async \ -D clippy::mut_mut \ - -D clippy::todo + -D clippy::todo \ + "${EXTRA_ARGS[@]}" # Checks that only apply to production code cargo clippy --all-features --workspace --lib --bins --examples -- \ diff --git a/p2p/src/peer_manager/peerdb/storage_impl.rs b/p2p/src/peer_manager/peerdb/storage_impl.rs index 40b867acfd..4764e70231 100644 --- a/p2p/src/peer_manager/peerdb/storage_impl.rs +++ b/p2p/src/peer_manager/peerdb/storage_impl.rs @@ -65,9 +65,9 @@ type PeerDbStoreTxRw<'st, B> = StorageTxRw<'st, B, Schema>; pub type PeerDbStorageImpl = StorageImpl; -impl PeerDbStorage for PeerDbStorageImpl {} +impl PeerDbStorage for PeerDbStorageImpl {} -impl PeerDbStorageWrite for PeerDbStoreTxRw<'_, B> { +impl PeerDbStorageWrite for PeerDbStoreTxRw<'_, B> { fn set_version(&mut self, version: StorageVersion) -> crate::Result<()> { Ok(self.storage().get_mut::().put(VALUE_ID_VERSION, version.encode())?) 
} @@ -126,7 +126,7 @@ impl PeerDbStorageWrite for PeerDbStoreTxRw<'_, B> { } } -impl PeerDbStorageRead for PeerDbStoreTxRo<'_, B> { +impl PeerDbStorageRead for PeerDbStoreTxRo<'_, B> { fn get_version(&self) -> crate::Result> { let map = self.storage().get::(); let vec_opt = map.get(VALUE_ID_VERSION)?.as_ref().map(Encoded::decode); diff --git a/p2p/src/peer_manager/peerdb/storage_load.rs b/p2p/src/peer_manager/peerdb/storage_load.rs index ec1eb16ba5..fdf888ed52 100644 --- a/p2p/src/peer_manager/peerdb/storage_load.rs +++ b/p2p/src/peer_manager/peerdb/storage_load.rs @@ -107,7 +107,7 @@ impl LoadedStorage { pub fn open_storage(backend: Backend) -> crate::Result> where - Backend: storage::Backend, + Backend: storage::SharedBackend, { let storage = PeerDbStorageImpl::new(backend)?; let version = storage.transaction_ro()?.get_version()?; diff --git a/p2p/src/peer_manager/peerdb_common/storage_impl.rs b/p2p/src/peer_manager/peerdb_common/storage_impl.rs index 3eac271e42..1f4b18c461 100644 --- a/p2p/src/peer_manager/peerdb_common/storage_impl.rs +++ b/p2p/src/peer_manager/peerdb_common/storage_impl.rs @@ -17,16 +17,16 @@ use storage::schema::Schema; use super::{TransactionRo, TransactionRw, Transactional}; -pub struct StorageImpl(storage::Storage); +pub struct StorageImpl(storage::Storage); -impl StorageImpl { +impl StorageImpl { pub fn new(storage: B) -> crate::Result { let store = storage::Storage::<_, Sch>::new(storage)?; Ok(Self(store)) } } -impl<'tx, B: storage::Backend + 'tx, Sch: Schema> Transactional<'tx> for StorageImpl { +impl<'tx, B: storage::SharedBackend + 'tx, Sch: Schema> Transactional<'tx> for StorageImpl { type TransactionRo = StorageTxRo<'tx, B, Sch>; type TransactionRw = StorageTxRw<'tx, B, Sch>; @@ -35,33 +35,38 @@ impl<'tx, B: storage::Backend + 'tx, Sch: Schema> Transactional<'tx> for Storage } fn transaction_rw<'st: 'tx>(&'st self) -> Result { - self.0.transaction_rw(None).map(StorageTxRw) + as storage::StorageSharedWrite<_, 
_>>::transaction_rw(&self.0, None) + .map(StorageTxRw) } } -pub struct StorageTxRo<'st, B: storage::Backend, Sch: Schema>(storage::TransactionRo<'st, B, Sch>); +pub struct StorageTxRo<'st, B: storage::SharedBackend, Sch: Schema>( + storage::TransactionRo<'st, B, Sch>, +); -impl<'st, B: storage::Backend, Sch: Schema> StorageTxRo<'st, B, Sch> { +impl<'st, B: storage::SharedBackend, Sch: Schema> StorageTxRo<'st, B, Sch> { pub fn storage(&self) -> &storage::TransactionRo<'st, B, Sch> { &self.0 } } -impl TransactionRo for StorageTxRo<'_, B, Sch> { +impl TransactionRo for StorageTxRo<'_, B, Sch> { fn close(self) { self.0.close() } } -pub struct StorageTxRw<'st, B: storage::Backend, Sch: Schema>(storage::TransactionRw<'st, B, Sch>); +pub struct StorageTxRw<'st, B: storage::SharedBackend, Sch: Schema>( + storage::TransactionRw<'st, B, Sch>, +); -impl<'st, B: storage::Backend, Sch: Schema> StorageTxRw<'st, B, Sch> { +impl<'st, B: storage::SharedBackend, Sch: Schema> StorageTxRw<'st, B, Sch> { pub fn storage(&mut self) -> &mut storage::TransactionRw<'st, B, Sch> { &mut self.0 } } -impl TransactionRw for StorageTxRw<'_, B, Sch> { +impl TransactionRw for StorageTxRw<'_, B, Sch> { fn abort(self) { self.0.abort() } diff --git a/storage/backend-test-suite/src/basic.rs b/storage/backend-test-suite/src/basic.rs index 622781d27d..b9ba33678e 100644 --- a/storage/backend-test-suite/src/basic.rs +++ b/storage/backend-test-suite/src/basic.rs @@ -17,15 +17,15 @@ use crate::prelude::*; -fn put_and_commit>(backend_fn: Arc) { - let store = backend_fn().open(desc(1)).expect("db open to succeed"); +fn put_and_commit>(backend_factory: Arc) { + let mut store = backend_factory.create().open(desc(1)).expect("db open to succeed"); - // Create a transaction, modify storage and abort transaction + // Create a transaction, modify the storage and commit let mut dbtx = store.transaction_rw(None).unwrap(); dbtx.put(MAPID.0, b"hello".to_vec(), b"world".to_vec()).unwrap(); dbtx.commit().expect("commit to 
succeed"); - // Check the modification did not happen + // Check the modification did happen let dbtx = store.transaction_ro().unwrap(); assert_eq!( dbtx.get(MAPID.0, b"hello").unwrap().as_ref().map(|v| v.as_ref()).unwrap(), @@ -34,8 +34,8 @@ fn put_and_commit>(backend_fn: Arc) { drop(dbtx); } -fn put_and_abort>(backend_fn: Arc) { - let store = backend_fn().open(desc(1)).expect("db open to succeed"); +fn put_and_abort>(backend_factory: Arc) { + let mut store = backend_factory.create().open(desc(1)).expect("db open to succeed"); // Create a transaction, modify storage and abort transaction let mut dbtx = store.transaction_rw(None).unwrap(); @@ -48,8 +48,8 @@ fn put_and_abort>(backend_fn: Arc) { drop(dbtx); } -fn put_two_under_different_keys>(backend_fn: Arc) { - let store = backend_fn().open(desc(1)).expect("db open to succeed"); +fn put_two_under_different_keys>(backend_factory: Arc) { + let mut store = backend_factory.create().open(desc(1)).expect("db open to succeed"); // Create a transaction, modify storage and commit let mut dbtx = store.transaction_rw(None).unwrap(); @@ -88,8 +88,8 @@ fn put_two_under_different_keys>(backend_fn: Arc) drop(dbtx); } -fn put_twice_then_commit_read_last>(backend_fn: Arc) { - let store = backend_fn().open(desc(1)).expect("db open to succeed"); +fn put_twice_then_commit_read_last>(backend_factory: Arc) { + let mut store = backend_factory.create().open(desc(1)).expect("db open to succeed"); let mut dbtx = store.transaction_rw(None).unwrap(); dbtx.put(MAPID.0, b"hello".to_vec(), b"a".to_vec()).unwrap(); @@ -111,8 +111,8 @@ fn put_twice_then_commit_read_last>(backend_fn: Arc< ); } -fn put_iterator_count_matches>(backend_fn: Arc) { - let store = backend_fn().open(desc(1)).expect("db open to succeed"); +fn put_iterator_count_matches>(backend_factory: Arc) { + let mut store = backend_factory.create().open(desc(1)).expect("db open to succeed"); let mut dbtx = store.transaction_rw(None).unwrap(); dbtx.put(MAPID.0, vec![0x00], 
vec![]).unwrap(); @@ -126,8 +126,8 @@ fn put_iterator_count_matches>(backend_fn: Arc) { assert_eq!(dbtx.greater_equal_iter(MAPID.0, vec![]).unwrap().count(), 4); } -fn put_and_iterate>(backend_fn: Arc) { - let store = backend_fn().open(desc(1)).expect("db open to succeed"); +fn put_and_iterate>(backend_factory: Arc) { + let mut store = backend_factory.create().open(desc(1)).expect("db open to succeed"); // Populate the database with some values let mut dbtx = store.transaction_rw(None).unwrap(); @@ -202,8 +202,8 @@ fn check_greater_equal_iter(dbtx: &Tx, key: Data, expected: &[(&str assert!(entries.eq(expected)); } -fn put_and_iterate_delete_some>(backend_fn: Arc) { - let store = backend_fn().open(desc(1)).expect("db open to succeed"); +fn put_and_iterate_delete_some>(backend_factory: Arc) { + let mut store = backend_factory.create().open(desc(1)).expect("db open to succeed"); let expected_full_0 = [("aa", "0"), ("ab", "1"), ("ac", "2"), ("aca", "3"), ("acb", "4"), ("b", "5")]; @@ -292,7 +292,7 @@ fn put_and_iterate_delete_some>(backend_fn: Arc) drop(dbtx); } -tests![ +common_tests![ put_and_abort, put_and_commit, put_and_iterate_delete_some, diff --git a/storage/backend-test-suite/src/concurrent.rs b/storage/backend-test-suite/src/concurrent.rs index efc8196c98..47a0580d5e 100644 --- a/storage/backend-test-suite/src/concurrent.rs +++ b/storage/backend-test-suite/src/concurrent.rs @@ -18,7 +18,7 @@ use crate::prelude::*; const TEST_KEY: &[u8] = b"foo"; fn setup(backend: B, init: Vec) -> B::Impl { - let store = backend.open(desc(1)).expect("db open to succeed"); + let mut store = backend.open(desc(1)).expect("db open to succeed"); let mut dbtx = store.transaction_rw(None).unwrap(); dbtx.put(MAPID.0, TEST_KEY.to_vec(), init).unwrap(); @@ -27,8 +27,8 @@ fn setup(backend: B, init: Vec) -> B::Impl { store } -fn read_initialize_race>(backend_fn: Arc) { - let store = backend_fn().open(desc(1)).expect("db open to succeed"); +fn read_initialize_race>(backend_factory: Arc) { 
+ let store = backend_factory.create().open(desc(1)).expect("db open to succeed"); let thr0 = thread::spawn({ let store = store.clone(); @@ -47,8 +47,8 @@ fn read_initialize_race>(backend_fn: Arc) { thr0.join().unwrap(); } -fn read_write_race>(backend_fn: Arc) { - let store = setup(backend_fn(), vec![0]); +fn read_write_race>(backend_factory: Arc) { + let store = setup(backend_factory.create(), vec![0]); let thr0 = thread::spawn({ let store = store.clone(); @@ -68,8 +68,8 @@ fn read_write_race>(backend_fn: Arc) { thr0.join().unwrap(); } -fn commutative_read_modify_write>(backend_fn: Arc) { - let store = setup(backend_fn(), vec![0]); +fn commutative_read_modify_write>(backend_factory: Arc) { + let store = setup(backend_factory.create(), vec![0]); let thr0 = thread::spawn({ let store = store.clone(); @@ -102,42 +102,89 @@ fn commutative_read_modify_write>(backend_fn: Arc ); } -fn threaded_reads_consistent>(backend_fn: Arc) { +// Test parallel reading through a normal Backend. A reference to BackendImpl is shared between threads. +// Note that it's disabled for loom, where thread::scope is not available. 
+#[cfg(not(loom))] +fn threaded_reads_consistent_for_ordinary_backend>( + backend_factory: Arc, +) { let val = [0x77, 0x88, 0x99].as_ref(); - let store = setup(backend_fn(), val.to_vec()); + let store = setup(backend_factory.create(), val.to_vec()); + + thread::scope(|s| { + let thr0 = s.spawn({ + || { + for _ in 0..100 { + let tx = store.transaction_ro().unwrap(); + let obtained_val = tx.get(MAPID.0, TEST_KEY).unwrap().unwrap(); + assert_eq!(obtained_val, val); + } + } + }); + let thr1 = s.spawn({ + || { + for _ in 0..100 { + let tx = store.transaction_ro().unwrap(); + let obtained_val = tx.get(MAPID.0, TEST_KEY).unwrap().unwrap(); + assert_eq!(obtained_val, val); + } + } + }); + + thr0.join().unwrap(); + thr1.join().unwrap(); + }); +} + +// A stub for loom +#[cfg(loom)] +fn threaded_reads_consistent_for_ordinary_backend>( + _backend_factory: Arc, +) { +} + +// Test parallel reading through a SharedBackend. A copy of SharedBackendImpl is shared between threads. +fn threaded_reads_consistent_for_shared_backend>( + backend_factory: Arc, +) { + let val = [0x77, 0x88, 0x99].as_ref(); + let store = setup(backend_factory.create(), val.to_vec()); + + #[cfg(not(loom))] + let iter_count = 100; + // Note: under loom, with only 10 iterations the test takes more than 3 minutes to complete. + // With 5 iterations, the time is under 2 seconds. 
+ #[cfg(loom)] + let iter_count = 5; let thr0 = thread::spawn({ let store = store.clone(); move || { - store - .transaction_ro() - .unwrap() - .get(MAPID.0, TEST_KEY) - .unwrap() - .unwrap() - .as_ref() - .to_owned() + for _ in 0..iter_count { + let tx = store.transaction_ro().unwrap(); + let obtained_val = tx.get(MAPID.0, TEST_KEY).unwrap().unwrap(); + assert_eq!(obtained_val, val); + } } }); let thr1 = thread::spawn({ move || { - store - .transaction_ro() - .unwrap() - .get(MAPID.0, TEST_KEY) - .unwrap() - .unwrap() - .as_ref() - .to_owned() + for _ in 0..iter_count { + let tx = store.transaction_ro().unwrap(); + let obtained_val = tx.get(MAPID.0, TEST_KEY).unwrap().unwrap(); + assert_eq!(obtained_val, val); + } } }); - assert_eq!(thr0.join().unwrap(), val); - assert_eq!(thr1.join().unwrap(), val); + thr0.join().unwrap(); + thr1.join().unwrap(); } -fn write_different_keys_and_iterate>(backend_fn: Arc) { - let store = backend_fn().open(desc(1)).expect("db open to succeed"); +fn write_different_keys_and_iterate>( + backend_factory: Arc, +) { + let store = backend_factory.create().open(desc(1)).expect("db open to succeed"); let thr0 = thread::spawn({ let store = store.clone(); @@ -165,10 +212,12 @@ fn write_different_keys_and_iterate>(backend_fn: Arc assert!(contents.eq(expected)); } -tests![ +shared_backend_tests![ commutative_read_modify_write, read_initialize_race, read_write_race, - threaded_reads_consistent, + threaded_reads_consistent_for_shared_backend, write_different_keys_and_iterate, ]; + +common_tests![threaded_reads_consistent_for_ordinary_backend]; diff --git a/storage/backend-test-suite/src/frontend.rs b/storage/backend-test-suite/src/frontend.rs index 7ea14f796f..d14761edb2 100644 --- a/storage/backend-test-suite/src/frontend.rs +++ b/storage/backend-test-suite/src/frontend.rs @@ -45,10 +45,10 @@ mod iter_sort_preserving_numbers { } } - pub fn test1>(backend_fn: Arc) { + pub fn test1>(backend_factory: Arc) { use test_schema1::{Schema, TestMap}; - let 
storage = Storage::<_, Schema>::new(backend_fn()).unwrap(); + let mut storage = Storage::<_, Schema>::new(backend_factory.create()).unwrap(); with_rng_seed(move |seed| { let mut rng = make_seedable_rng(seed); @@ -111,10 +111,10 @@ mod iter_sort_preserving_numbers { } } - pub fn test2>(backend_fn: Arc) { + pub fn test2>(backend_factory: Arc) { use test_schema2::{Schema, TestMap}; - let storage = Storage::<_, Schema>::new(backend_fn()).unwrap(); + let mut storage = Storage::<_, Schema>::new(backend_factory.create()).unwrap(); with_rng_seed(move |seed| { let mut rng = make_seedable_rng(seed); @@ -164,4 +164,4 @@ mod iter_sort_preserving_numbers { } } -tests![iter_sort_preserving_numbers::test1, iter_sort_preserving_numbers::test2]; +common_tests![iter_sort_preserving_numbers::test1, iter_sort_preserving_numbers::test2]; diff --git a/storage/backend-test-suite/src/lib.rs b/storage/backend-test-suite/src/lib.rs index 6200b9a679..4bcb87a6fe 100644 --- a/storage/backend-test-suite/src/lib.rs +++ b/storage/backend-test-suite/src/lib.rs @@ -32,7 +32,13 @@ mod property; #[cfg(loom)] mod property { // No property tests with loom for now - pub fn tests(_backend_fn: F) -> impl Iterator { + pub fn common_tests(_backend_factory: F) -> impl Iterator { + std::iter::empty() + } + + pub fn common_tests_for_shared_backend( + _backend_factory: F, + ) -> impl Iterator { std::iter::empty() } } @@ -40,23 +46,76 @@ mod property { use prelude::*; use test_utils::random::Seed; -/// Get all tests -fn tests>(backend_fn: F) -> Vec { - let backend_fn = Arc::new(backend_fn); +/// Get all general tests for a Backend +fn common_tests>( + backend_factory: Arc, +) -> Vec { std::iter::empty() - .chain(basic::tests(Arc::clone(&backend_fn))) - .chain(concurrent::tests(Arc::clone(&backend_fn))) - .chain(frontend::tests(Arc::clone(&backend_fn))) - .chain(property::tests(backend_fn)) + .chain(basic::common_tests(Arc::clone(&backend_factory))) + .chain(concurrent::common_tests(Arc::clone(&backend_factory))) 
+ .chain(frontend::common_tests(Arc::clone(&backend_factory))) + .chain(property::common_tests(backend_factory)) .collect() } -/// Main test suite entry point +/// Get all general tests for a SharedBackend +fn common_tests_for_shared_backend>( + backend_factory: Arc, +) -> Vec { + std::iter::empty() + .chain(basic::common_tests_for_shared_backend(Arc::clone( + &backend_factory, + ))) + .chain(concurrent::common_tests_for_shared_backend(Arc::clone( + &backend_factory, + ))) + .chain(frontend::common_tests_for_shared_backend(Arc::clone( + &backend_factory, + ))) + .chain(property::common_tests_for_shared_backend(backend_factory)) + .collect() +} + +/// Get all tests specific for shared backends +fn shared_backend_tests>( + backend_factory: F, +) -> Vec { + let backend_factory = Arc::new(backend_factory); + std::iter::empty() + .chain(concurrent::tests(Arc::clone(&backend_factory))) + .collect() +} + +/// Main test suite entry point. +/// +/// Both `backend_factory` and `shared_backend_factory` are supposed to create the same type +/// of backend, but the latter will only be used for backends that implement SharedBackend. 
#[must_use = "Test outcome ignored, add a call to .exit()"] -pub fn main>(backend_fn: F) -> libtest_mimic::Conclusion { +pub fn main( + backend_factory: F, + shared_backend_factory: Option, +) -> libtest_mimic::Conclusion +where + B: Backend + 'static, + F: BackendFactory, + SB: SharedBackend + 'static, + SF: BackendFactory, +{ logging::init_logging(); let args = libtest_mimic::Arguments::from_args(); - libtest_mimic::run(&args, tests(backend_fn)) + let backend_factory = Arc::new(backend_factory); + let mut tests = common_tests(backend_factory); + + if let Some(shared_backend_factory) = shared_backend_factory { + let shared_backend_factory = Arc::new(shared_backend_factory); + + tests.extend(common_tests_for_shared_backend(Arc::clone( + &shared_backend_factory, + ))); + tests.extend(shared_backend_tests(shared_backend_factory)); + } + + libtest_mimic::run(&args, tests) } /// Generate a seed and pass it to the specified function. If the function panics, print diff --git a/storage/backend-test-suite/src/prelude.rs b/storage/backend-test-suite/src/prelude.rs index 3c208427db..61b8a974ea 100644 --- a/storage/backend-test-suite/src/prelude.rs +++ b/storage/backend-test-suite/src/prelude.rs @@ -13,19 +13,37 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use std::{marker::PhantomData, ops::Deref as _}; + +use utils::shallow_clone::ShallowClone; + // Re-export a bunch of often used items pub use crate::model::{ApplyActions, Model, WriteAction}; pub use storage_core::{ - backend::{Backend, BackendImpl, Data, ReadOps, TxRo, TxRw, WriteOps}, + backend::{ + Backend, BackendImpl, Data, ReadOps, SharedBackend, SharedBackendImpl, TxRo, TxRw, WriteOps, + }, DbDesc, DbMapCount, DbMapDesc, DbMapId, DbMapsData, }; pub use utils::{sync, thread}; pub use std::{mem::drop, sync::Arc}; -/// A function to construct a backend -pub trait BackendFn: Fn() -> B + Send + Sync + 'static {} -impl B + Send + Sync + 'static> BackendFn for F {} +pub trait BackendFactory: Send + Sync + 'static { + fn create(&self) -> B; +} + +impl B + Send + Sync + 'static> BackendFactory for F { + fn create(&self) -> B { + self() + } +} + +impl> BackendFactory for Arc { + fn create(&self) -> B { + self.deref().create() + } +} /// A couple of DB map ID constants pub const MAPID: (DbMapId, DbMapId) = (DbMapId::new(0), DbMapId::new(1)); @@ -36,9 +54,9 @@ pub fn desc(n: usize) -> DbDesc { } /// Run tests with backend using proptest -pub fn using_proptest, S: proptest::prelude::Strategy>( +pub fn using_proptest, S: proptest::prelude::Strategy>( source_file: &'static str, - backend_fn: impl std::ops::Deref, + backend_factory: impl std::ops::Deref, strategy: S, test: impl Fn(B, S::Value), ) { @@ -50,39 +68,179 @@ pub fn using_proptest, S: proptest::prelude::Strateg }; let mut runner = proptest::test_runner::TestRunner::new(config); let result = runner.run(&strategy, |val| { - test(backend_fn(), val); + test(backend_factory.create(), val); Ok(()) }); result.unwrap_or_else(|e| panic!("{}{}", &e, &runner)) } +/// This is only needed so that we can pass None as the second parameter for +/// `storage_backend_test_suite::main` when the backend is not a shared one. 
+pub struct BogusSharedBackend(PhantomData B>); + +impl Backend for BogusSharedBackend { + type Impl = BogusSharedBackendImpl<::Impl>; + + fn open(self, _desc: DbDesc) -> storage_core::Result { + unimplemented!(); + } +} + +impl SharedBackend for BogusSharedBackend { + type ImplHelper = BogusSharedBackendImpl<::Impl>; +} + +pub struct BogusSharedBackendImpl(PhantomData B>); + +impl BackendImpl for BogusSharedBackendImpl { + type TxRo<'a> = ::TxRo<'a>; + type TxRw<'a> = ::TxRw<'a>; + + fn transaction_ro(&self) -> storage_core::Result> { + unimplemented!(); + } + + fn transaction_rw(&mut self, _size: Option) -> storage_core::Result> { + unimplemented!(); + } +} + +impl SharedBackendImpl for BogusSharedBackendImpl { + fn transaction_rw(&self, _size: Option) -> storage_core::Result> { + unimplemented!(); + } +} + +impl Clone for BogusSharedBackendImpl { + fn clone(&self) -> Self { + Self(PhantomData) + } +} + +impl ShallowClone for BogusSharedBackendImpl { + fn shallow_clone(&self) -> Self { + Self(PhantomData) + } +} + /// Test helper function not exported with the prelude pub mod support { - use super::*; + use std::marker::PhantomData; + use libtest_mimic::Trial; + use super::*; + /// Create the test list - pub fn create_tests>( - backend_fn: Arc, + pub fn create_tests>( + backend_factory: Arc, tests: impl IntoIterator))>, ) -> impl Iterator { tests.into_iter().map(move |(name, test)| { - let backend_fn = Arc::clone(&backend_fn); + let backend_factory = Arc::clone(&backend_factory); + let test_fn = move || { + utils::concurrency::model(move || test(backend_factory.clone())); + Ok(()) + }; + Trial::test(name, test_fn) + }) + } + + pub fn create_common_tests_for_shared_backend< + B: SharedBackend + 'static, + F: BackendFactory, + >( + backend_factory: Arc, + tests: impl IntoIterator< + Item = ( + &'static str, + fn(Arc>>), + ), + >, + ) -> impl Iterator { + let backend_factory = Arc::new(SharedBackendWrapperFactory(backend_factory, PhantomData)); + + 
tests.into_iter().map(move |(name, test)| { + let backend_factory = Arc::clone(&backend_factory); let test_fn = move || { - utils::concurrency::model(move || test(backend_fn.clone())); + utils::concurrency::model(move || test(backend_factory.clone())); Ok(()) }; Trial::test(name, test_fn) }) } + + // A wrapper for a SharedBackend that implements Backend whose impl's transaction_rw invokes + // SharedBackendImpl::transaction_rw (i.e. via a stared reference to self). + // This is used to check that, if invoked for a SharedBackend, generic tests work both when + // the tx is created via BackendImpl::transaction_rw and via SharedBackendImpl::transaction_rw. + pub struct SharedBackendWrapper(B); + + impl Backend for SharedBackendWrapper { + type Impl = SharedBackendImplWrapper<::Impl>; + + fn open(self, desc: DbDesc) -> storage_core::Result { + self.0.open(desc).map(SharedBackendImplWrapper) + } + } + + pub struct SharedBackendImplWrapper(B); + + impl BackendImpl for SharedBackendImplWrapper { + type TxRo<'a> = ::TxRo<'a>; + type TxRw<'a> = ::TxRw<'a>; + + fn transaction_ro(&self) -> storage_core::Result> { + self.0.transaction_ro() + } + + fn transaction_rw(&mut self, size: Option) -> storage_core::Result> { + ::transaction_rw(&self.0, size) + } + } + + // This wraps `BackendFactory `and implements `BackendFactory>`. + pub struct SharedBackendWrapperFactory>( + F, + PhantomData B>, + ); + + impl> BackendFactory> + for SharedBackendWrapperFactory + { + fn create(&self) -> SharedBackendWrapper { + SharedBackendWrapper(self.0.create()) + } + } +} + +macro_rules! common_tests { + ($($name:path),* $(,)?) 
=> { + pub fn common_tests>( + backend_factory: Arc, + ) -> impl std::iter::Iterator { + $crate::prelude::support::create_tests(backend_factory, [ + $((concat!(module_path!(), "::", stringify!($name)), $name as fn(Arc)),)* + ]) + } + + pub fn common_tests_for_shared_backend>( + backend_factory: Arc, + ) -> impl std::iter::Iterator { + $crate::prelude::support::create_common_tests_for_shared_backend(backend_factory, [ + $((concat!(module_path!(), "::", stringify!($name), "_as_shared_backend"), + $name as fn(Arc<$crate::prelude::support::SharedBackendWrapperFactory>>)),)* + ]) + } + } } -macro_rules! tests { +macro_rules! shared_backend_tests { ($($name:path),* $(,)?) => { - pub fn tests>( - backend_fn: Arc, + pub fn tests>( + backend_factory: Arc, ) -> impl std::iter::Iterator { - $crate::prelude::support::create_tests(backend_fn, [ + $crate::prelude::support::create_tests(backend_factory, [ $((concat!(module_path!(), "::", stringify!($name)), $name as fn(Arc)),)* ]) } diff --git a/storage/backend-test-suite/src/property.rs b/storage/backend-test-suite/src/property.rs index 148fb1ef41..5ece69629a 100644 --- a/storage/backend-test-suite/src/property.rs +++ b/storage/backend-test-suite/src/property.rs @@ -61,14 +61,14 @@ mod gen { } } -fn overwrite_and_abort>(backend_fn: Arc) { +fn overwrite_and_abort>(backend_factory: Arc) { using_proptest( file!(), - backend_fn, + backend_factory, (gen::key(100), gen::any::(), gen::any::()) .prop_filter("not equal", |(_, a, b)| a != b), |backend, (key, val0, val1)| { - let store = backend.open(desc(1)).expect("db open to succeed"); + let mut store = backend.open(desc(1)).expect("db open to succeed"); // Check the store returns None for given key initially let dbtx = store.transaction_ro().unwrap(); @@ -117,14 +117,14 @@ fn overwrite_and_abort>(backend_fn: Arc) { ) } -fn add_and_delete>(backend_fn: Arc) { +fn add_and_delete>(backend_factory: Arc) { const NUM_DBS: usize = 5; using_proptest( file!(), - backend_fn, + backend_factory, 
gen::entries(NUM_DBS, 0usize..20), |backend, entries| { - let store = backend.open(desc(NUM_DBS)).expect("db open to succeed"); + let mut store = backend.open(desc(NUM_DBS)).expect("db open to succeed"); // Add all entries to the database let mut dbtx = store.transaction_rw(None).unwrap(); @@ -160,16 +160,16 @@ fn add_and_delete>(backend_fn: Arc) { ) } -fn last_write_wins>(backend_fn: Arc) { +fn last_write_wins>(backend_factory: Arc) { using_proptest( file!(), - backend_fn, + backend_factory, ( gen::key(1000), gen::prop::collection::vec(gen::any::(), 0..100), ), |backend, (key, vals)| { - let store = backend.open(desc(1)).expect("db open to succeed"); + let mut store = backend.open(desc(1)).expect("db open to succeed"); let last = vals.last().cloned(); // Add all entries to the database @@ -188,18 +188,18 @@ fn last_write_wins>(backend_fn: Arc) { ) } -fn add_and_delete_some>(backend_fn: Arc) { +fn add_and_delete_some>(backend_factory: Arc) { const NUM_DBS: usize = 5; using_proptest( file!(), - backend_fn, + backend_factory, ( gen::entries(NUM_DBS, 0usize..20), gen::entries(NUM_DBS, 0usize..20), proptest::collection::vec((gen::map_id(NUM_DBS), gen::big_key()), 0usize..10), ), |backend, (entries1, entries2, extra_keys)| { - let store = backend.open(desc(NUM_DBS)).expect("db open to succeed"); + let mut store = backend.open(desc(NUM_DBS)).expect("db open to succeed"); // Add all entries to the database let mut dbtx = store.transaction_rw(None).unwrap(); @@ -244,10 +244,10 @@ fn add_and_delete_some>(backend_fn: Arc) { ) } -fn add_modify_abort_modify_commit>(backend_fn: Arc) { +fn add_modify_abort_modify_commit>(backend_factory: Arc) { using_proptest( file!(), - backend_fn, + backend_factory, ( gen::actions(100, 0..20), gen::actions(100, 0..20), @@ -255,7 +255,7 @@ fn add_modify_abort_modify_commit>(backend_fn: Arc>(backend_fn: Arc>(backend_fn: Arc) { +fn add_modify_abort_replay_commit>(backend_factory: Arc) { using_proptest( file!(), - backend_fn, + backend_factory, 
(gen::actions(100, 0..20), gen::actions(100, 0..20)), |backend, (initial, actions)| { - let store = backend.open(desc(1)).expect("db open to succeed"); + let mut store = backend.open(desc(1)).expect("db open to succeed"); // Pre-populate the db with initial data, check the contents against the model let mut dbtx = store.transaction_rw(None).unwrap(); @@ -320,13 +320,13 @@ fn add_modify_abort_replay_commit>(backend_fn: Arc>(backend_fn: Arc) { +fn db_writes_do_not_interfere>(backend_factory: Arc) { using_proptest( file!(), - backend_fn, + backend_factory, (gen::actions(100, 0..20), gen::actions(100, 0..20)), |backend, (actions0, actions1)| { - let store = backend.open(desc(2)).expect("db open to succeed"); + let mut store = backend.open(desc(2)).expect("db open to succeed"); // Apply one set of operations to key-value map 0 let mut dbtx = store.transaction_rw(None).unwrap(); @@ -345,16 +345,16 @@ fn db_writes_do_not_interfere>(backend_fn: Arc) { ) } -fn empty_after_abort>(backend_fn: Arc) { +fn empty_after_abort>(backend_factory: Arc) { using_proptest( file!(), - backend_fn, + backend_factory, ( gen::actions(100, 0..20), gen::prop::collection::vec(gen::key(100), 0..20), ), |backend, (actions, keys)| { - let store = backend.open(desc(5)).expect("db open to succeed"); + let mut store = backend.open(desc(5)).expect("db open to succeed"); // Apply one set of operations to key-value map 0 let model = Model::from_actions(actions.clone()); @@ -376,10 +376,10 @@ fn empty_after_abort>(backend_fn: Arc) { ) } -fn prefix_iteration>(backend_fn: Arc) { +fn prefix_iteration>(backend_factory: Arc) { using_proptest( file!(), - backend_fn, + backend_factory, (gen::actions(100, 0..20), gen::actions(100, 0..20)), |backend, (actions_a, actions_b)| { // Add prefixes to action keys @@ -393,7 +393,7 @@ fn prefix_iteration>(backend_fn: Arc) { actions_b.into_iter().map(|act| act.map_key(|k| add_prefix(b'b', k))).collect(); // Open storage - let store = backend.open(desc(5)).expect("db open to 
succeed"); + let mut store = backend.open(desc(5)).expect("db open to succeed"); // Populate the database let mut dbtx = store.transaction_rw(None).unwrap(); @@ -436,14 +436,14 @@ fn prefix_iteration>(backend_fn: Arc) { ) } -fn post_commit_consistency>(backend_fn: Arc) { +fn post_commit_consistency>(backend_factory: Arc) { using_proptest( file!(), - backend_fn, + backend_factory, gen::actions(100, 0..50), |backend, actions| { // Open storage - let store = backend.open(desc(1)).expect("db open to succeed"); + let mut store = backend.open(desc(1)).expect("db open to succeed"); let mut dbtx = store.transaction_rw(None).unwrap(); dbtx.apply_actions(MAPID.0, actions.into_iter()); @@ -457,7 +457,7 @@ fn post_commit_consistency>(backend_fn: Arc) { ) } -tests![ +common_tests![ add_and_delete, add_and_delete_some, add_modify_abort_modify_commit, diff --git a/storage/core/src/adaptor/locking.rs b/storage/core/src/adaptor/locking.rs index f81d06e374..7c77f35b04 100644 --- a/storage/core/src/adaptor/locking.rs +++ b/storage/core/src/adaptor/locking.rs @@ -176,6 +176,12 @@ impl backend::BackendImpl for TransactionLoc Ok(TxRo(self.db.read().expect("lock to be alive"))) } + fn transaction_rw(&mut self, size: Option) -> crate::Result> { + ::transaction_rw(self, size) + } +} + +impl backend::SharedBackendImpl for TransactionLockImpl { fn transaction_rw(&self, _size: Option) -> crate::Result> { Ok(TxRw { db: self.db.write().expect("lock to be alive"), diff --git a/storage/core/src/backend.rs b/storage/core/src/backend.rs index 32f837842e..ec9ed111df 100644 --- a/storage/core/src/backend.rs +++ b/storage/core/src/backend.rs @@ -64,26 +64,87 @@ pub trait TxRw: ReadOps + WriteOps { fn commit(self) -> crate::Result<()>; } -/// Storage backend internal implementation type -pub trait BackendImpl: Send + Sync + ShallowClone + 'static { +/// The "normal" storage backend type. 
+/// +/// Note that the corresponding `BackendImpl` is non-clonable and that creating an rw transaction +/// borrows it as mutable. +pub trait Backend { + /// Implementation type corresponding to this backend + type Impl: BackendImpl; + + /// Open the database, giving an implementation-specific handle + fn open(self, desc: DbDesc) -> crate::Result; +} + +/// The implementation type for the "normal" storage backend. +// TODO: ideally, we should get rid of the 'static bound here. This may be useful e.g. in wallet +// tests, where it'd be nice to have a 'decorator' for a reference to an existing storage +// (e.g. to test wallet reloading and to ensure that only ro methods are used). +pub trait BackendImpl: Send + Sync + 'static { /// Read-only transaction internal type type TxRo<'a>: TxRo + 'a; - /// Start a read-write transaction + /// Read-write transaction internal type type TxRw<'a>: TxRw + 'a; /// Start a read-only transaction fn transaction_ro(&self) -> crate::Result>; - /// Start a read-write transaction + /// Start a read-write transaction, borrowing `self` as mutable. + fn transaction_rw(&mut self, size: Option) -> crate::Result>; +} + +/// The "shared" storage backend type. +/// +/// Note that the corresponding `SharedBackend` is shallow-clonable and that an rw transaction +/// can be created from a shared reference. +/// +/// Also note that we use this fancy syntax `Backend::ImplHelper>` +/// in order to force trait bounds propagation, so that requiring `B: SharedBackend` would imply +/// `B: Backend`. +pub trait SharedBackend: Backend::ImplHelper> { + type ImplHelper: SharedBackendImpl; +} + +/// The implementation type for the "shared" storage backend. +pub trait SharedBackendImpl: BackendImpl + ShallowClone { + /// Start a read-write transaction from a shared reference to self. fn transaction_rw(&self, size: Option) -> crate::Result>; } -/// Storage backend type. Used to set up storage. 
-pub trait Backend { - /// Implementation type corresponding to this backend - type Impl: BackendImpl; +/// Using `BackendWithSendableTransactions` as a bound will ensure that `TxRo` and `TxRw` +/// are `Send`, to avoid using the verbose bounds on `TxRo`/`TxRw` themselves. +/// +/// Note that here we also use the `Backend` syntax in order to +/// force trait bound propagation. Due to this, we can't have an umbrella implementation +/// for this trait for any type T that implements `Backend`, because setting `ImplHelper` to +/// `T::Impl` will lead to infinite recursion during compilation. +pub trait BackendWithSendableTransactions: + Backend::ImplHelper> +where + for<'a> ::TxRo<'a>: Send, + for<'a> ::TxRw<'a>: Send, +{ + type ImplHelper: BackendImpl; +} - /// Open the database, giving an implementation-specific handle - fn open(self, desc: DbDesc) -> crate::Result; +// Note: since these tests are compile time only, there is no need to hide the module +// under `cfg(test)`. +mod tests { + use super::*; + + // Check that if `BackendWithSendableTransactions` is used as a trait bound, then + // the transactions are Send. 
+ #[allow(unused)] + #[allow(clippy::unwrap_used)] + fn test_sendable_tx_trait_bound(t: ::Impl) { + let tx = t.transaction_ro().unwrap(); + test_send(tx); + + let mut t = t; + let tx = t.transaction_rw(None).unwrap(); + test_send(tx); + } + + fn test_send(_: T) {} } diff --git a/storage/core/src/lib.rs b/storage/core/src/lib.rs index 048f4e4a4d..a45aa33ad0 100644 --- a/storage/core/src/lib.rs +++ b/storage/core/src/lib.rs @@ -91,7 +91,7 @@ pub mod types; pub mod util; // Re-export some commonly used items -pub use backend::Backend; +pub use backend::{Backend, BackendWithSendableTransactions, SharedBackend}; pub use error::Error; pub use types::{DbDesc, DbMapCount, DbMapDesc, DbMapId, DbMapsData}; diff --git a/storage/failing/src/backend.rs b/storage/failing/src/backend.rs index 608899308d..f8578bdbcf 100644 --- a/storage/failing/src/backend.rs +++ b/storage/failing/src/backend.rs @@ -66,6 +66,10 @@ impl backend::Backend for Failing { } } +impl backend::SharedBackend for Failing { + type ImplHelper = FailingImpl; +} + pub struct FailingImpl { inner: T, config: Arc, @@ -84,14 +88,22 @@ impl FailingImpl { } fn make_rng(&self) -> TestRng { - TestRng::new(Seed(self.rng.lock().expect("lock poisoned").gen())) + Self::make_rng_impl(&self.rng) + } + + fn make_rng_impl(rng: &Mutex) -> TestRng { + TestRng::new(Seed(rng.lock().expect("lock poisoned").gen())) } - fn make_rw_tx_state(&self) -> RwTxState<'_> { + fn make_rw_tx_state<'a>( + config: &'a FailureConfig, + rng: &Mutex, + total_failures: &'a AcqRelAtomicU32, + ) -> RwTxState<'a> { RwTxState { - config: &self.config, - rng: self.make_rng(), - total_failures: &self.total_failures, + config, + rng: Self::make_rng_impl(rng), + total_failures, transaction_failures: 0, } } @@ -123,8 +135,17 @@ impl backend::BackendImpl for FailingImpl { self.inner.transaction_ro() } + fn transaction_rw(&mut self, size: Option) -> storage_core::Result> { + let mut state = Self::make_rw_tx_state(&self.config, &self.rng, &self.total_failures); + 
state.emit_error(self.config.error_generation_for_start_rw_tx())?; + let inner = self.inner.transaction_rw(size)?; + Ok(TxRw { inner, state }) + } +} + +impl backend::SharedBackendImpl for FailingImpl { fn transaction_rw(&self, size: Option) -> storage_core::Result> { - let mut state = self.make_rw_tx_state(); + let mut state = Self::make_rw_tx_state(&self.config, &self.rng, &self.total_failures); state.emit_error(self.config.error_generation_for_start_rw_tx())?; let inner = self.inner.transaction_rw(size)?; Ok(TxRw { inner, state }) diff --git a/storage/inmemory/src/lib.rs b/storage/inmemory/src/lib.rs index 36189c9c52..4e83214733 100644 --- a/storage/inmemory/src/lib.rs +++ b/storage/inmemory/src/lib.rs @@ -74,6 +74,10 @@ impl backend::Backend for InMemory { } } +impl backend::SharedBackend for InMemory { + type ImplHelper = ::Impl; +} + impl InMemory { /// Create a new in-memory storage backend pub fn new() -> Self { diff --git a/storage/inmemory/tests/backend.rs b/storage/inmemory/tests/backend.rs index ff037bcfb0..30e57c4f38 100644 --- a/storage/inmemory/tests/backend.rs +++ b/storage/inmemory/tests/backend.rs @@ -13,6 +13,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use storage_inmemory::InMemory; + fn main() { - storage_backend_test_suite::main(storage_inmemory::InMemory::new).exit(); + storage_backend_test_suite::main(InMemory::new, Some(InMemory::new)).exit(); } diff --git a/storage/lmdb/src/lib.rs b/storage/lmdb/src/lib.rs index 27ab57493d..a9a564e606 100644 --- a/storage/lmdb/src/lib.rs +++ b/storage/lmdb/src/lib.rs @@ -220,6 +220,7 @@ impl utils::shallow_clone::ShallowClone for LmdbImpl { } } } + impl backend::BackendImpl for LmdbImpl { type TxRo<'a> = DbTxRo<'a>; @@ -229,6 +230,12 @@ impl backend::BackendImpl for LmdbImpl { self.start_transaction(lmdb::Environment::begin_ro_txn) } + fn transaction_rw(&mut self, size: Option) -> storage_core::Result> { + ::transaction_rw(self, size) + } +} + +impl backend::SharedBackendImpl for LmdbImpl { fn transaction_rw(&self, size: Option) -> storage_core::Result> { self.resize_if_resize_scheduled(); self.start_transaction(|env| lmdb::Environment::begin_rw_txn(env, size)) @@ -340,5 +347,9 @@ impl backend::Backend for Lmdb { } } +impl backend::SharedBackend for Lmdb { + type ImplHelper = LmdbImpl; +} + #[cfg(test)] mod resize_tests; diff --git a/storage/lmdb/src/resize_tests.rs b/storage/lmdb/src/resize_tests.rs index 2a7c2bda69..a8ee330317 100644 --- a/storage/lmdb/src/resize_tests.rs +++ b/storage/lmdb/src/resize_tests.rs @@ -18,7 +18,7 @@ use std::{collections::BTreeMap, sync::Mutex}; use rstest::rstest; use memsize::MemSize; -use storage_core::backend::{Backend, BackendImpl, ReadOps, TxRw, WriteOps}; +use storage_core::backend::{Backend, BackendImpl, ReadOps, SharedBackendImpl, TxRw, WriteOps}; use test_utils::random::{make_seedable_rng, CryptoRng, Rng, Seed}; use super::*; diff --git a/storage/lmdb/tests/backend.rs b/storage/lmdb/tests/backend.rs index 70d623b71a..4375b87add 100644 --- a/storage/lmdb/tests/backend.rs +++ b/storage/lmdb/tests/backend.rs @@ -34,7 +34,7 @@ fn main() { }; // Now run the tests - let result = storage_backend_test_suite::main(create_backend); + let 
result = storage_backend_test_suite::main(create_backend.clone(), Some(create_backend)); // Remove the test directory unless there was a failure. // In case of failure, it is kept to give us the opportunity to inspect database contents. diff --git a/storage/sqlite/Cargo.toml b/storage/sqlite/Cargo.toml index c1b4d14413..9356251941 100644 --- a/storage/sqlite/Cargo.toml +++ b/storage/sqlite/Cargo.toml @@ -12,12 +12,16 @@ utils = { path = '../../utils' } hex.workspace = true rusqlite = { workspace = true, features = ["bundled"] } +static_assertions.workspace = true [dev-dependencies] +storage = { path = ".." } storage-backend-test-suite = { path = "../backend-test-suite" } test-utils = { path = "../../test-utils" } utils = { path = '../../utils' } +tempfile.workspace = true + [[test]] name = "backend" harness = false diff --git a/storage/sqlite/src/lib.rs b/storage/sqlite/src/lib.rs index d8a36a2595..cc9c4b8e2d 100644 --- a/storage/sqlite/src/lib.rs +++ b/storage/sqlite/src/lib.rs @@ -13,68 +13,139 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! A `Backend` implementation for Sqlite whose transactions are `Send`, so it's usable +//! in an async context. + extern crate core; mod error; mod queries; +use std::{ + borrow::Cow, + cmp::max, + path::{Path, PathBuf}, + sync::{Arc, Mutex, MutexGuard}, +}; + use rusqlite::{Connection, OpenFlags, OptionalExtension}; -use std::borrow::Cow; -use std::cmp::max; -use std::path::{Path, PathBuf}; -use std::sync::{Mutex, MutexGuard}; -use crate::queries::SqliteQueries; use error::process_sqlite_error; use storage_core::{backend, Data, DbDesc, DbMapId}; -use utils::shallow_clone::ShallowClone; -use utils::sync::Arc; -pub struct DbTx<'m> { - connection: MutexGuard<'m, Connection>, +use crate::queries::SqliteQueries; + +// Note: DbTx holds the mutex itself and locks it on every operation instead of just holding a lock +// all the time. 
This is because we want it to be Send, and locks are not. +pub struct DbTx<'m, const IS_READONLY: bool> { + connection: Arc>, queries: &'m SqliteQueries, } -impl<'m> DbTx<'m> { +impl<'m, const IS_READONLY: bool> DbTx<'m, IS_READONLY> { fn start_transaction(sqlite: &'m SqliteImpl) -> storage_core::Result { - let connection = sqlite - .0 - .connection - .lock() - .map_err(|e| storage_core::error::Fatal::InternalError(e.to_string()))?; + let conn_lock = sqlite.connection.lock().expect("poisoned mutex"); + + // Note: there shouldn't be any potentially panicking code between the DbTx creation + // and the increment of read_only_tx_count, otherwise DbTx::drop will make an erroneous + // decrement and potentially panic itself due to an assertion failure (which is probably + // not critical if we're already panicking, but still). + // This is why we create `conn_lock` before creating the `tx`. let tx = DbTx { - connection, - queries: &sqlite.0.queries, + connection: Arc::clone(&sqlite.connection), + queries: &sqlite.queries, }; - tx.connection.execute("BEGIN TRANSACTION", ()).map_err(process_sqlite_error)?; + tx.init(conn_lock)?; + Ok(tx) } - fn commit_transaction(&self) -> storage_core::Result<()> { - let _res = self + // Note: the purpose of this function is to ensure that whatever happens the lock + // will be dropped before the tx itself, because otherwise `drop` may deadlock when + // it also tries to lock the same mutex. + fn init(&self, mut conn_lock: MutexGuard<'_, SqliteConnection>) -> storage_core::Result<()> { + let need_start_actual_tx = if IS_READONLY { + // Only start the actual transaction once, so that multiple ro transaction objects + // can co-exist. See the comment near `read_only_tx_count` for details. 
+ conn_lock.read_only_tx_count += 1; + conn_lock.read_only_tx_count == 1 + } else { + // Sanity check + assert!(conn_lock.read_only_tx_count == 0); + + true + }; + + if need_start_actual_tx { + let _res = conn_lock + .connection + .execute("BEGIN TRANSACTION", ()) + .map_err(process_sqlite_error)?; + }; + + Ok(()) + } + + fn lock_connection(&self) -> MutexGuard<'_, SqliteConnection> { + self.connection.lock().expect("poisoned mutex") + } +} + +impl<'m> DbTx<'m, false> { + fn commit_transaction(self) -> storage_core::Result<()> { + let conn_lock = self.lock_connection(); + + // Sanity check + assert!(conn_lock.read_only_tx_count == 0); + + let _res = conn_lock .connection .execute("COMMIT TRANSACTION", ()) .map_err(process_sqlite_error)?; + Ok(()) } } -impl Drop for DbTx<'_> { +impl Drop for DbTx<'_, IS_READONLY> { fn drop(&mut self) { - if self.connection.is_autocommit() { + let mut conn_lock = self.lock_connection(); + + // Note: is_autocommit basically checks whether there are no existing transaction + // (sqlite is in the autocommit mode by default; it switches to the manual mode on + // BEGIN and returns to autocommit mode after COMMIT or ROLLBACK). + // We can only get into this 'if' if `drop` is being called as a result of `commit_transaction`. 
+ if conn_lock.connection.is_autocommit() { + assert!(!IS_READONLY); return; } - let res = self.connection.execute("ROLLBACK TRANSACTION", ()); - if let Err(err) = res { - logging::log::error!("Error: transaction rollback failed: {}", err); + let need_actual_rollback = if IS_READONLY { + assert!(conn_lock.read_only_tx_count > 0); + + conn_lock.read_only_tx_count -= 1; + conn_lock.read_only_tx_count == 0 + } else { + // Sanity check + assert!(conn_lock.read_only_tx_count == 0); + + true + }; + + if need_actual_rollback { + let res = conn_lock.connection.execute("ROLLBACK TRANSACTION", ()); + if let Err(err) = res { + logging::log::error!("Error: transaction rollback failed: {}", err); + } } } } -impl backend::ReadOps for DbTx<'_> { +impl backend::ReadOps for DbTx<'_, IS_READONLY> { fn get(&self, map_id: DbMapId, key: &[u8]) -> storage_core::Result>> { - let mut stmt = self + let conn_lock = self.lock_connection(); + + let mut stmt = conn_lock .connection .prepare_cached(self.queries[map_id].get_query()) .map_err(process_sqlite_error)?; @@ -95,7 +166,8 @@ impl backend::ReadOps for DbTx<'_> { ) -> storage_core::Result + '_> { // TODO check if prefix.is_empty() // TODO Perform the filtering in the SQL query itself - let mut stmt = self + let conn_lock = self.lock_connection(); + let mut stmt = conn_lock .connection .prepare_cached(self.queries[map_id].prefix_iter_query()) .map_err(process_sqlite_error)?; @@ -119,7 +191,8 @@ impl backend::ReadOps for DbTx<'_> { map_id: DbMapId, key: Data, ) -> storage_core::Result + '_> { - let mut stmt = self + let conn_lock = self.lock_connection(); + let mut stmt = conn_lock .connection .prepare_cached(&self.queries[map_id].greater_equal_iter_query(&key)) .map_err(process_sqlite_error)?; @@ -139,9 +212,10 @@ impl backend::ReadOps for DbTx<'_> { } } -impl backend::WriteOps for DbTx<'_> { +impl backend::WriteOps for DbTx<'_, false> { fn put(&mut self, map_id: DbMapId, key: Data, val: Data) -> storage_core::Result<()> { - let mut stmt = 
self + let conn_lock = self.lock_connection(); + let mut stmt = conn_lock .connection .prepare_cached(self.queries[map_id].put_query()) .map_err(process_sqlite_error)?; @@ -153,7 +227,8 @@ impl backend::WriteOps for DbTx<'_> { } fn del(&mut self, map_id: DbMapId, key: &[u8]) -> storage_core::Result<()> { - let mut stmt = self + let conn_lock = self.lock_connection(); + let mut stmt = conn_lock .connection .prepare_cached(self.queries[map_id].delete_query()) .map_err(process_sqlite_error)?; @@ -165,50 +240,68 @@ impl backend::WriteOps for DbTx<'_> { } } -impl backend::TxRo for DbTx<'_> {} +impl backend::TxRo for DbTx<'_, IS_READONLY> {} -impl backend::TxRw for DbTx<'_> { +impl backend::TxRw for DbTx<'_, false> { fn commit(self) -> storage_core::Result<()> { self.commit_transaction() } } /// Struct that holds the details for an Sqlite connection -pub struct SqliteConnection { - /// Handle to an Sqlite database connection - connection: Mutex, - - /// List of sql queries - queries: SqliteQueries, +struct SqliteConnection { + /// The underlying `rusqlite::Connection` object. + /// + /// Note that this object is not `Sync`, therefore if we want our `DbTx` to be `Send`, we can't + /// just put a reference to `Connection` inside it. This is why it's also under the mutex. + connection: Connection, + + /// The number of readonly `DbTx` instances that currently exist for this connection. + /// + /// Note: this is needed because sqlite doesn't allow nested transactions, but we still want + /// for multiple ro transaction objects to co-exist (because nothing prevents the user code + /// from calling SqliteImpl::transaction_ro multiple times). So every time an ro DbTx is + /// created, we'll increment this counter and we'll create a real transaction only if it's + /// the first increment. 
+ read_only_tx_count: usize, } -#[derive(Clone)] -pub struct SqliteImpl(Arc); - -impl SqliteImpl { - /// Start a transaction using the low-level method provided - fn start_transaction(&self) -> storage_core::Result> { - DbTx::start_transaction(self) +impl SqliteConnection { + fn new(connection: Connection) -> Self { + Self { + connection, + read_only_tx_count: 0, + } } } -impl ShallowClone for SqliteImpl { - fn shallow_clone(&self) -> Self { - Self(self.0.shallow_clone()) - } +// Note: this struct is deliberately non-clonable even though it's technically trivial to clone +// (especially if `SqliteQueries` was put inside `SqliteConnection`); as a result, `Sqlite` +// doesn't implement `SharedBackend`. +// The reason is that if it were clonable, we would have to protect against having multiple rw +// transactions at the same time; the only way to do this is to keep the `Mutex` +// locked for the entire lifetime of the transaction (which was done in the original implementation +// of this backend). But this would make `DbTx` non-`Send` (and also dangerous to use, because +// creating two rw transactions on the same thread would lead to a deadlock). +pub struct SqliteImpl { + /// The current connection. + connection: Arc>, + + /// List of sql queries. 
+ queries: SqliteQueries, } impl backend::BackendImpl for SqliteImpl { - type TxRo<'a> = DbTx<'a>; + type TxRo<'a> = DbTx<'a, true>; - type TxRw<'a> = DbTx<'a>; + type TxRw<'a> = DbTx<'a, false>; fn transaction_ro(&self) -> storage_core::Result> { - self.start_transaction() + DbTx::start_transaction(self) } - fn transaction_rw(&self, _size: Option) -> storage_core::Result> { - self.start_transaction() + fn transaction_rw(&mut self, _size: Option) -> storage_core::Result> { + DbTx::start_transaction(self) } } @@ -230,7 +323,7 @@ impl Default for Options { #[derive(Eq, PartialEq, Clone, Debug)] enum SqliteStorageMode { - InMemory, + InMemory(Option), File(PathBuf), } @@ -241,9 +334,26 @@ pub struct Sqlite { } impl Sqlite { + /// Create a distinct "unnamed" in-memory database. + /// + /// Only one connection to the database may exist. + /// Different calls to `Sqlite::new_in_memory().open(...)` will always create different databases. pub fn new_in_memory() -> Self { Self { - backend: SqliteStorageMode::InMemory, + backend: SqliteStorageMode::InMemory(None), + options: Default::default(), + } + } + + /// Create/open a "named" in-memory database (the one using "shared cache" in the Sqlite's + /// terminology). + /// + /// Different calls to `Sqlite::new_named_in_memory("foo").open(...)` will open the same + /// database, provided that at least one connection to it still exists. + /// After all connections to the database are dropped, it is deleted. 
+ pub fn new_named_in_memory(name: &str) -> Self { + Self { + backend: SqliteStorageMode::InMemory(Some(name.to_owned())), options: Default::default(), } } @@ -270,11 +380,16 @@ impl Sqlite { OpenFlags::SQLITE_OPEN_CREATE, ]); - let connection = match self.backend { - SqliteStorageMode::InMemory => Connection::open_in_memory_with_flags(flags)?, - SqliteStorageMode::File(path) => Connection::open_with_flags(path, flags)?, + let path = match self.backend { + SqliteStorageMode::File(path) => path, + SqliteStorageMode::InMemory(None) => ":memory:".into(), + SqliteStorageMode::InMemory(Some(name)) => { + format!("file:{name}?mode=memory&cache=shared").into() + } }; + let connection = Connection::open_with_flags(path, flags)?; + let Options { disable_fsync } = self.options; // Set the locking mode to exclusive @@ -340,9 +455,27 @@ impl backend::Backend for Sqlite { let connection = self.open_db(desc).map_err(process_sqlite_error)?; - Ok(SqliteImpl(Arc::new(SqliteConnection { - connection: Mutex::new(connection), + Ok(SqliteImpl { + connection: Arc::new(Mutex::new(SqliteConnection::new(connection))), queries, - }))) + }) } } + +impl backend::BackendWithSendableTransactions for Sqlite { + type ImplHelper = SqliteImpl; +} + +// Note: since these tests are compile time only, there is no need to hide the module +// under `cfg(test)`. 
+mod compile_time_tests { + use super::*; + + static_assertions::assert_not_impl_any!(SqliteImpl: Clone, Copy); + + static_assertions::assert_impl_all!(DbTx<'static, true>: Send); + static_assertions::assert_impl_all!(DbTx<'static, false>: Send); +} + +#[cfg(test)] +mod tests; diff --git a/storage/sqlite/src/tests.rs b/storage/sqlite/src/tests.rs new file mode 100644 index 0000000000..3a93e4a210 --- /dev/null +++ b/storage/sqlite/src/tests.rs @@ -0,0 +1,224 @@ +// Copyright (c) 2021-2025 RBB S.r.l +// opensource@mintlayer.org +// SPDX-License-Identifier: MIT +// Licensed under the MIT License; +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://github.com/mintlayer/mintlayer-core/blob/master/LICENSE +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use storage::error::Fatal; +use storage_backend_test_suite::prelude::{desc, MAPID}; +use storage_core::{ + backend::{BackendImpl as _, ReadOps as _, TxRw as _, WriteOps as _}, + Backend as _, +}; +use test_utils::assert_matches_return_val; + +use crate::Sqlite; + +// Force tx creation to produce an error and check the error. +// This checks for a regression where an error during tx creation would cause a deadlock: +// 1) Inside `start_transaction` the drop order was first db tx, then the mutex lock. +// 2) So, when `DbTx::drop` tried to lock the same mutex, it would deadlock. +// Due to an explicit `drop` call, this was only reproducible on an erroneous execution path. 
+#[test] +fn test_error_on_tx_opening() { + let db = Sqlite::new_in_memory(); + let opened_db = db.open(desc(1)).unwrap(); + + { + let conn_lock = opened_db.connection.lock().unwrap(); + conn_lock.connection.execute("BEGIN TRANSACTION", ()).unwrap(); + } + + let res = opened_db.transaction_ro(); + // Note: can't use unwrap_err or assert_matches_return_val on res directly because the tx + // doesn't implement Debug. + let err_str = match res { + Ok(_) => panic!("Got Ok while expecting an error"), + Err(err) => { + assert_matches_return_val!(err, storage::Error::Fatal(Fatal::InternalError(err)), err) + } + }; + assert!(err_str.contains("cannot start a transaction within a transaction")); +} + +// Check that multiple ro txs can co-exist in the same thread. +// Note that this test can't be moved to backend-test-suite, because e.g. LMDB doesn't support +// this (though it does support having multiple ro txs in different threads, for which we have +// separate tests). +#[test] +fn multiple_ro_txs() { + let db = Sqlite::new_in_memory(); + let mut opened_db = db.open(desc(1)).unwrap(); + + // Create an rw transaction, modify storage and commit + let mut dbtx = opened_db.transaction_rw(None).unwrap(); + dbtx.put(MAPID.0, b"hello".to_vec(), b"world".to_vec()).unwrap(); + dbtx.commit().unwrap(); + + // Create 2 ro transactions, check the modification. + let dbtx1 = opened_db.transaction_ro().unwrap(); + let dbtx2 = opened_db.transaction_ro().unwrap(); + assert_eq!( + dbtx1.get(MAPID.0, b"hello").unwrap().as_ref().map(|v| v.as_ref()).unwrap(), + b"world" + ); + assert_eq!( + dbtx2.get(MAPID.0, b"hello").unwrap().as_ref().map(|v| v.as_ref()).unwrap(), + b"world" + ); + drop(dbtx1); + drop(dbtx2); +} + +// Open a db, commit something, close the db and open again. 
+// This checks for a regression where `DbTx::commit_transaction` would call `forget` on self +// to avoid a seemingly unnecessary `drop`, which would lead to the underlying connection +// object being leaked, so the db wouldn't be properly closed. +#[test] +fn db_reopen_after_commit() { + let temp_file = tempfile::NamedTempFile::new().unwrap(); + + // Open a db, create an rw transaction, modify the storage and commit. + { + let db = Sqlite::new(&temp_file); + let mut opened_db = db.open(desc(1)).unwrap(); + + let mut dbtx = opened_db.transaction_rw(None).unwrap(); + + dbtx.put(MAPID.0, b"hello".to_vec(), b"world".to_vec()).unwrap(); + dbtx.commit().unwrap(); + } + + // Open the same db, check the previously written data. + { + let db = Sqlite::new(&temp_file); + let opened_db = db.open(desc(1)).unwrap(); + + let dbtx = opened_db.transaction_ro().unwrap(); + assert_eq!( + dbtx.get(MAPID.0, b"hello").unwrap().as_ref().map(|v| v.as_ref()).unwrap(), + b"world" + ); + } +} + +#[test] +fn db_open_in_memory_unnamed() { + // Create an unnamed in-memory db + let mut db1 = Sqlite::new_in_memory().open(desc(1)).unwrap(); + + // Modify db1. + { + let mut dbtx = db1.transaction_rw(None).unwrap(); + + dbtx.put(MAPID.0, b"hello".to_vec(), b"world".to_vec()).unwrap(); + dbtx.commit().unwrap(); + + // Sanity check - the data is there + let dbtx = db1.transaction_ro().unwrap(); + assert_eq!( + dbtx.get(MAPID.0, b"hello").unwrap().as_ref().map(|v| v.as_ref()).unwrap(), + b"world" + ); + } + + // Create an unnamed in-memory db again + let db2 = Sqlite::new_in_memory().open(desc(1)).unwrap(); + + // Check that the modification from above is not there. + { + let dbtx = db2.transaction_ro().unwrap(); + assert!(dbtx.get(MAPID.0, b"hello").unwrap().is_none()); + } + + // Both objects co-existed all this time + drop(db1); + drop(db2); +} + +#[test] +fn db_open_in_memory_named() { + // Create an in-memory db named "foo". 
+ let mut db_foo1 = Sqlite::new_named_in_memory("foo").open(desc(1)).unwrap(); + + // Modify db_foo1. + { + let mut dbtx = db_foo1.transaction_rw(None).unwrap(); + + dbtx.put(MAPID.0, b"hello".to_vec(), b"foo".to_vec()).unwrap(); + dbtx.commit().unwrap(); + + // Sanity check - the data is there + let dbtx = db_foo1.transaction_ro().unwrap(); + assert_eq!( + dbtx.get(MAPID.0, b"hello").unwrap().as_ref().map(|v| v.as_ref()).unwrap(), + b"foo" + ); + } + + // Create an in-memory db named "bar". + let mut db_bar1 = Sqlite::new_named_in_memory("bar").open(desc(1)).unwrap(); + + // Modify db_bar1 (using different data than in db_foo1). + { + let mut dbtx = db_bar1.transaction_rw(None).unwrap(); + + dbtx.put(MAPID.0, b"hello".to_vec(), b"bar".to_vec()).unwrap(); + dbtx.commit().unwrap(); + + // Sanity check - the data is there + let dbtx = db_bar1.transaction_ro().unwrap(); + assert_eq!( + dbtx.get(MAPID.0, b"hello").unwrap().as_ref().map(|v| v.as_ref()).unwrap(), + b"bar" + ); + } + + // Create/open an in-memory db named "foo" again. + let db_foo2 = Sqlite::new_named_in_memory("foo").open(desc(1)).unwrap(); + + // Check db_foo2 - the data is there + { + let dbtx = db_foo2.transaction_ro().unwrap(); + assert_eq!( + dbtx.get(MAPID.0, b"hello").unwrap().as_ref().map(|v| v.as_ref()).unwrap(), + b"foo" + ); + } + + // Create/open an in-memory db named "bar" again. + let db_bar2 = Sqlite::new_named_in_memory("bar").open(desc(1)).unwrap(); + + // Check db_bar2 - the data is there + { + let dbtx = db_bar2.transaction_ro().unwrap(); + assert_eq!( + dbtx.get(MAPID.0, b"hello").unwrap().as_ref().map(|v| v.as_ref()).unwrap(), + b"bar" + ); + } + + // All objects co-existed all this time + drop(db_foo1); + drop(db_foo2); + drop(db_bar1); + drop(db_bar2); + + // Create an in-memory db named "foo" yet again, after all previous connections have been dropped. 
+ let db_foo3 = Sqlite::new_named_in_memory("foo").open(desc(1)).unwrap(); + + // Check db_foo3 - the data is not there + { + let dbtx = db_foo3.transaction_ro().unwrap(); + assert!(dbtx.get(MAPID.0, b"hello").unwrap().is_none()); + } +} diff --git a/storage/sqlite/tests/backend.rs b/storage/sqlite/tests/backend.rs index 48ab3b94e1..e1b8857007 100644 --- a/storage/sqlite/tests/backend.rs +++ b/storage/sqlite/tests/backend.rs @@ -13,6 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use storage_backend_test_suite::prelude::BogusSharedBackend; use storage_sqlite::Sqlite; fn main() { @@ -35,7 +36,10 @@ fn main() { }; // Now run the tests - let result = storage_backend_test_suite::main(create_backend); + let result = storage_backend_test_suite::main( + create_backend, + None as Option BogusSharedBackend>, + ); // Remove the test directory unless there was a failure. // In case of failure, it is kept to give us the opportunity to inspect database contents. 
diff --git a/storage/src/database/mod.rs b/storage/src/database/mod.rs index 101e731998..8c79322b41 100644 --- a/storage/src/database/mod.rs +++ b/storage/src/database/mod.rs @@ -27,7 +27,7 @@ use crate::schema::{self, Schema}; use serialization::{encoded::Encoded, Encode, EncodeLike}; use storage_core::{ backend::{self, TxRw, WriteOps}, - Backend, DbMapId, + Backend, DbMapId, SharedBackend, }; /// The main storage type @@ -67,12 +67,15 @@ impl Storage { let _schema = std::marker::PhantomData; Ok(Self { backend, _schema }) } +} +impl Storage { /// Create new storage with given backend and raw dump pub fn new_from_dump(backend: B, dump: raw::StorageContents) -> crate::Result { - let backend = backend.open(storage_core::types::construct::db_desc(Sch::desc_iter()))?; + let mut backend = + backend.open(storage_core::types::construct::db_desc(Sch::desc_iter()))?; let _schema = std::marker::PhantomData; - let mut dbtx = backend::BackendImpl::transaction_rw(&backend, None)?; + let mut dbtx = backend::BackendImpl::transaction_rw(&mut backend, None)?; for (map_id, map_values) in dump { for (key, val) in map_values { @@ -92,8 +95,24 @@ impl Storage { } /// Start a read-write transaction - pub fn transaction_rw(&self, size: Option) -> crate::Result> { - let dbtx = backend::BackendImpl::transaction_rw(&self.backend, size)?; + pub fn transaction_rw( + &mut self, + size: Option, + ) -> crate::Result> { + let dbtx = backend::BackendImpl::transaction_rw(&mut self.backend, size)?; + let _schema = std::marker::PhantomData; + Ok(TransactionRw { dbtx, _schema }) + } +} + +pub trait StorageSharedWrite { + fn transaction_rw(&self, size: Option) -> crate::Result>; +} + +impl StorageSharedWrite for Storage { + /// Start a read-write transaction from a shared reference to self. 
+ fn transaction_rw(&self, size: Option) -> crate::Result> { + let dbtx = backend::SharedBackendImpl::transaction_rw(&self.backend, size)?; let _schema = std::marker::PhantomData; Ok(TransactionRw { dbtx, _schema }) } diff --git a/storage/src/database/raw.rs b/storage/src/database/raw.rs index 79b5f80f0a..4f5e6ab936 100644 --- a/storage/src/database/raw.rs +++ b/storage/src/database/raw.rs @@ -17,10 +17,10 @@ use crate::{ schema::{self, HasDbMap, Schema}, - Backend, TransactionRo, + TransactionRo, }; use std::collections::BTreeMap; -use storage_core::backend::ReadOps; +use storage_core::{backend::ReadOps, Backend}; pub use storage_core::Data; @@ -176,7 +176,7 @@ mod test { #[test] fn basic_dump() { utils::concurrency::model(|| { - let storage = Storage::<_, TestSchema>::new(InMemory::new()).unwrap(); + let mut storage = Storage::<_, TestSchema>::new(InMemory::new()).unwrap(); let db1 = DbMapId::new::(); let db2 = DbMapId::new::(); diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 216ef4a305..84b05604f9 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -99,7 +99,9 @@ pub mod schema; // Re-export user-facing items from core pub use order_preserving_value::OrderPreservingValue; -pub use storage_core::{error, Backend, Error, Result}; +pub use storage_core::{ + error, Backend, BackendWithSendableTransactions, Error, Result, SharedBackend, +}; // Re-export the interface types pub use database::*; diff --git a/wallet/Cargo.toml b/wallet/Cargo.toml index 81e634698c..d14cb2ea00 100644 --- a/wallet/Cargo.toml +++ b/wallet/Cargo.toml @@ -27,6 +27,7 @@ utxo = { path = "../utxo" } wallet-storage = { path = "./storage" } wallet-types = { path = "./types" } +async-trait.workspace = true bip39 = { workspace = true, default-features = false, features = [ "std", "zeroize", @@ -43,6 +44,13 @@ zeroize.workspace = true [dev-dependencies] chainstate-test-framework = { path = "../chainstate/test-framework" } test-utils = { path = "../test-utils" } +tokio = { workspace 
= true, default-features = false, features = [ + "io-util", + "macros", + "net", + "rt", + "sync", +] } ctor.workspace = true lazy_static.workspace = true diff --git a/wallet/src/account/mod.rs b/wallet/src/account/mod.rs index 36d6ff743c..6675f2d312 100644 --- a/wallet/src/account/mod.rs +++ b/wallet/src/account/mod.rs @@ -691,7 +691,7 @@ impl Account { pub fn process_send_request_and_sign( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, request: SendRequest, inputs: SelectedInputs, change_addresses: BTreeMap>, @@ -713,7 +713,7 @@ impl Account { fn decommission_stake_pool_impl( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, pool_id: PoolId, pool_balance: Amount, output_address: Option, @@ -777,7 +777,7 @@ impl Account { pub fn decommission_stake_pool( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, pool_id: PoolId, pool_balance: Amount, output_address: Option, @@ -794,7 +794,7 @@ impl Account { pub fn decommission_stake_pool_request( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, pool_id: PoolId, pool_balance: Amount, output_address: Option, @@ -945,7 +945,7 @@ impl Account { pub fn create_htlc_tx( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, output_value: OutputValue, htlc: HashedTimelockContract, median_time: BlockTimestamp, @@ -968,7 +968,7 @@ impl Account { pub fn create_order_tx( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, ask_value: OutputValue, give_value: OutputValue, conclude_address: Address, @@ -994,7 +994,7 @@ impl Account { pub fn create_conclude_order_tx( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, order_id: OrderId, order_info: RpcOrderInfo, output_address: 
Option, @@ -1064,7 +1064,7 @@ impl Account { #[allow(clippy::too_many_arguments)] pub fn create_fill_order_tx( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, order_id: OrderId, order_info: RpcOrderInfo, fill_amount_in_ask_currency: Amount, @@ -1158,7 +1158,7 @@ impl Account { pub fn create_freeze_order_tx( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, order_id: OrderId, order_info: RpcOrderInfo, median_time: BlockTimestamp, @@ -1183,7 +1183,7 @@ impl Account { pub fn create_issue_nft_tx( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, nft_issue_arguments: IssueNftArguments, median_time: BlockTimestamp, fee_rate: CurrentFeeRate, @@ -1248,7 +1248,7 @@ impl Account { pub fn mint_tokens( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, token_info: &UnconfirmedTokenInfo, address: Address, amount: Amount, @@ -1276,7 +1276,7 @@ impl Account { pub fn unmint_tokens( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, token_info: &UnconfirmedTokenInfo, amount: Amount, median_time: BlockTimestamp, @@ -1303,7 +1303,7 @@ impl Account { pub fn lock_token_supply( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, token_info: &UnconfirmedTokenInfo, median_time: BlockTimestamp, fee_rate: CurrentFeeRate, @@ -1327,7 +1327,7 @@ impl Account { pub fn freeze_token( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, token_info: &UnconfirmedTokenInfo, is_token_unfreezable: IsTokenUnfreezable, median_time: BlockTimestamp, @@ -1354,7 +1354,7 @@ impl Account { pub fn unfreeze_token( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, token_info: &UnconfirmedTokenInfo, 
median_time: BlockTimestamp, fee_rate: CurrentFeeRate, @@ -1378,7 +1378,7 @@ impl Account { pub fn change_token_authority( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, token_info: &UnconfirmedTokenInfo, address: Address, median_time: BlockTimestamp, @@ -1405,7 +1405,7 @@ impl Account { pub fn change_token_metadata_uri( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, token_info: &UnconfirmedTokenInfo, metadata_uri: Vec, median_time: BlockTimestamp, @@ -1433,7 +1433,7 @@ impl Account { authority: Destination, tx_input: TxInput, outputs: Vec, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, median_time: BlockTimestamp, fee_rate: CurrentFeeRate, ) -> Result { @@ -1455,7 +1455,7 @@ impl Account { pub fn create_stake_pool_with_vrf_key( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, mut stake_pool_arguments: StakePoolCreationArguments, median_time: BlockTimestamp, fee_rate: CurrentFeeRate, @@ -1476,7 +1476,7 @@ impl Account { fn create_stake_pool_impl( &mut self, stake_pool_arguments: StakePoolCreationArguments, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, vrf_public_key: VRFPublicKey, median_time: BlockTimestamp, fee_rate: CurrentFeeRate, @@ -2500,7 +2500,7 @@ impl Account { pub fn create_stake_pool( &mut self, - db_tx: &mut impl WalletStorageWriteUnlocked, + db_tx: &mut impl WalletStorageWriteLocked, mut stake_pool_arguments: StakePoolCreationArguments, median_time: BlockTimestamp, fee_rate: CurrentFeeRate, diff --git a/wallet/src/account/tests.rs b/wallet/src/account/tests.rs index 19a0642df2..2af3908892 100644 --- a/wallet/src/account/tests.rs +++ b/wallet/src/account/tests.rs @@ -28,7 +28,7 @@ const MNEMONIC: &str = #[test] fn account_addresses() { let config = Arc::new(create_regtest()); - let db = 
Arc::new(Store::new(DefaultBackend::new_in_memory()).unwrap()); + let mut db = Store::new(DefaultBackend::new_in_memory()).unwrap(); let mut db_tx = db.transaction_rw_unlocked(None).unwrap(); let master_key_chain = MasterKeyChain::new_from_mnemonic( @@ -63,7 +63,7 @@ fn account_addresses() { #[test] fn account_addresses_lookahead() { let config = Arc::new(create_regtest()); - let db = Arc::new(Store::new(DefaultBackend::new_in_memory()).unwrap()); + let mut db = Store::new(DefaultBackend::new_in_memory()).unwrap(); let mut db_tx = db.transaction_rw_unlocked(None).unwrap(); let master_key_chain = MasterKeyChain::new_from_mnemonic( diff --git a/wallet/src/key_chain/account_key_chain/tests.rs b/wallet/src/key_chain/account_key_chain/tests.rs index 975921d308..f5ed126ce1 100644 --- a/wallet/src/key_chain/account_key_chain/tests.rs +++ b/wallet/src/key_chain/account_key_chain/tests.rs @@ -32,7 +32,7 @@ const MNEMONIC: &str = #[case("030d1d07a8e45110d14f4e2c8623e8db556c11a90c0aac6be9a88f2464e446ee95")] fn check_mine_methods(#[case] public: &str) { let chain_config = Arc::new(create_mainnet()); - let db = Arc::new(Store::new(DefaultBackend::new_in_memory()).unwrap()); + let mut db = Store::new(DefaultBackend::new_in_memory()).unwrap(); let mut db_tx = db.transaction_rw_unlocked(None).unwrap(); let master_key_chain = MasterKeyChain::new_from_mnemonic( diff --git a/wallet/src/key_chain/tests.rs b/wallet/src/key_chain/tests.rs index 821cc5b60f..5a627b6ece 100644 --- a/wallet/src/key_chain/tests.rs +++ b/wallet/src/key_chain/tests.rs @@ -68,7 +68,7 @@ fn key_chain_creation( #[case] chaincode: &str, ) { let chain_config = Arc::new(create_mainnet()); - let db = Arc::new(Store::new(DefaultBackend::new_in_memory()).unwrap()); + let mut db = Store::new(DefaultBackend::new_in_memory()).unwrap(); let mut db_tx = db.transaction_rw_unlocked(None).unwrap(); let master_key_chain = MasterKeyChain::new_from_mnemonic( chain_config, @@ -131,7 +131,7 @@ fn key_chain_creation( 
#[case(KeyPurpose::Change)] fn key_lookahead(#[case] purpose: KeyPurpose) { let chain_config = Arc::new(create_unit_test_config()); - let db = Arc::new(Store::new(DefaultBackend::new_in_memory()).unwrap()); + let mut db = Store::new(DefaultBackend::new_in_memory()).unwrap(); let mut db_tx = db.transaction_rw_unlocked(None).unwrap(); let master_key_chain = MasterKeyChain::new_from_mnemonic( chain_config.clone(), @@ -214,7 +214,7 @@ fn key_lookahead(#[case] purpose: KeyPurpose) { #[case(KeyPurpose::Change)] fn top_up_and_lookahead(#[case] purpose: KeyPurpose) { let chain_config = Arc::new(create_unit_test_config()); - let db = Arc::new(Store::new(DefaultBackend::new_in_memory()).unwrap()); + let mut db = Store::new(DefaultBackend::new_in_memory()).unwrap(); let mut db_tx = db.transaction_rw_unlocked(None).unwrap(); let master_key_chain = MasterKeyChain::new_from_mnemonic( chain_config.clone(), diff --git a/wallet/src/signer/mod.rs b/wallet/src/signer/mod.rs index 2acbde939a..c3375c3862 100644 --- a/wallet/src/signer/mod.rs +++ b/wallet/src/signer/mod.rs @@ -18,6 +18,8 @@ mod tests; use std::sync::Arc; +use async_trait::async_trait; + use common::{ address::AddressError, chain::{ @@ -110,14 +112,15 @@ type SignerResult = Result; /// Signer trait responsible for signing transactions or challenges using a software or hardware /// wallet +#[async_trait] pub trait Signer { /// Sign a partially signed transaction and return the before and after signature statuses. - fn sign_tx( + async fn sign_tx( &mut self, tx: PartiallySignedTransaction, tokens_additional_info: &TokensAdditionalInfo, - key_chain: &impl AccountKeyChains, - db_tx: &impl WalletStorageReadUnlocked, + key_chain: &(impl AccountKeyChains + Sync), + db_tx: impl WalletStorageReadUnlocked + Send, block_height: BlockHeight, ) -> SignerResult<( PartiallySignedTransaction, @@ -126,30 +129,30 @@ pub trait Signer { )>; /// Sign an arbitrary message for a destination known to this key chain. 
- fn sign_challenge( + async fn sign_challenge( &mut self, message: &[u8], destination: &Destination, - key_chain: &impl AccountKeyChains, - db_tx: &impl WalletStorageReadUnlocked, + key_chain: &(impl AccountKeyChains + Sync), + db_tx: impl WalletStorageReadUnlocked + Send, ) -> SignerResult; /// Sign a transaction intent. The number of `input_destinations` must be the same as /// the number of inputs in the transaction; all of the destinations must be known /// to this key chain. - fn sign_transaction_intent( + async fn sign_transaction_intent( &mut self, transaction: &Transaction, input_destinations: &[Destination], intent: &str, - key_chain: &impl AccountKeyChains, - db_tx: &impl WalletStorageReadUnlocked, + key_chain: &(impl AccountKeyChains + Sync), + db_tx: impl WalletStorageReadUnlocked + Send, ) -> SignerResult; } pub trait SignerProvider { - type S: Signer; - type K: AccountKeyChains; + type S: Signer + Send; + type K: AccountKeyChains + Sync + Send; fn provide(&mut self, chain_config: Arc, account_index: U31) -> Self::S; diff --git a/wallet/src/signer/software_signer/mod.rs b/wallet/src/signer/software_signer/mod.rs index 29a03e9653..e8f58f05f4 100644 --- a/wallet/src/signer/software_signer/mod.rs +++ b/wallet/src/signer/software_signer/mod.rs @@ -15,6 +15,7 @@ use std::sync::{Arc, Mutex}; +use async_trait::async_trait; use itertools::Itertools; use common::{ @@ -74,7 +75,7 @@ use super::{utils::is_htlc_utxo, Signer, SignerError, SignerProvider, SignerResu pub struct SoftwareSigner { chain_config: Arc, account_index: U31, - sig_aux_data_provider: Mutex>, + sig_aux_data_provider: Mutex>, } impl SoftwareSigner { @@ -100,7 +101,7 @@ impl SoftwareSigner { pub fn new_with_sig_aux_data_provider( chain_config: Arc, account_index: U31, - sig_aux_data_provider: Box, + sig_aux_data_provider: Box, ) -> Self { Self { chain_config, @@ -275,13 +276,14 @@ impl SoftwareSigner { } } +#[async_trait] impl Signer for SoftwareSigner { - fn sign_tx( + async fn sign_tx( &mut 
self, ptx: PartiallySignedTransaction, _tokens_additional_info: &TokensAdditionalInfo, - key_chain: &impl AccountKeyChains, - db_tx: &impl WalletStorageReadUnlocked, + key_chain: &(impl AccountKeyChains + Sync), + db_tx: impl WalletStorageReadUnlocked + Send, block_height: BlockHeight, ) -> SignerResult<( PartiallySignedTransaction, @@ -338,7 +340,7 @@ impl Signer for SoftwareSigner { &input_commitments, sig_components, key_chain, - db_tx, + &db_tx, )?; let signature = @@ -374,7 +376,7 @@ impl Signer for SoftwareSigner { &input_commitments, key_chain, htlc_secret, - db_tx, + &db_tx, )?; Ok((sig, SignatureStatus::NotSigned, status)) } @@ -389,15 +391,15 @@ impl Signer for SoftwareSigner { Ok((ptx.with_witnesses(witnesses)?, prev_statuses, new_statuses)) } - fn sign_challenge( + async fn sign_challenge( &mut self, message: &[u8], destination: &Destination, - key_chain: &impl AccountKeyChains, - db_tx: &impl WalletStorageReadUnlocked, + key_chain: &(impl AccountKeyChains + Sync), + db_tx: impl WalletStorageReadUnlocked + Send, ) -> SignerResult { let private_key = self - .get_private_key_for_destination(destination, key_chain, db_tx)? + .get_private_key_for_destination(destination, key_chain, &db_tx)? .ok_or(SignerError::DestinationNotFromThisWallet)?; let sig = ArbitraryMessageSignature::produce_uniparty_signature( @@ -410,20 +412,20 @@ impl Signer for SoftwareSigner { Ok(sig) } - fn sign_transaction_intent( + async fn sign_transaction_intent( &mut self, transaction: &Transaction, input_destinations: &[Destination], intent: &str, - key_chain: &impl AccountKeyChains, - db_tx: &impl WalletStorageReadUnlocked, + key_chain: &(impl AccountKeyChains + Sync), + db_tx: impl WalletStorageReadUnlocked + Send, ) -> SignerResult { SignedTransactionIntent::produce_from_transaction( transaction, input_destinations, intent, |dest| { - self.get_private_key_for_destination(dest, key_chain, db_tx)? + self.get_private_key_for_destination(dest, key_chain, &db_tx)? 
.ok_or(SignerError::DestinationNotFromThisWallet) }, self.sig_aux_data_provider.lock().expect("poisoned mutex").as_mut(), diff --git a/wallet/src/signer/software_signer/tests.rs b/wallet/src/signer/software_signer/tests.rs index 3171d5ee5e..c3b25a938c 100644 --- a/wallet/src/signer/software_signer/tests.rs +++ b/wallet/src/signer/software_signer/tests.rs @@ -33,7 +33,8 @@ use crate::signer::tests::{ #[rstest] #[trace] #[case(Seed::from_entropy())] -fn test_sign_message(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_sign_message(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); test_sign_message_generic( @@ -41,16 +42,18 @@ fn test_sign_message(#[case] seed: Seed) { MessageToSign::Random, make_software_signer, no_another_signer(), - ); + ) + .await; } #[rstest] #[trace] #[case(Seed::from_entropy())] -fn test_sign_transaction_intent(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_sign_transaction_intent(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); - test_sign_transaction_intent_generic(&mut rng, make_software_signer, no_another_signer()); + test_sign_transaction_intent_generic(&mut rng, make_software_signer, no_another_signer()).await; } #[rstest] @@ -58,7 +61,8 @@ fn test_sign_transaction_intent(#[case] seed: Seed) { #[case(Seed::from_entropy(), SighashInputCommitmentVersion::V0)] #[trace] #[case(Seed::from_entropy(), SighashInputCommitmentVersion::V1)] -fn test_sign_transaction( +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_sign_transaction( #[case] seed: Seed, #[case] input_commitments_version: SighashInputCommitmentVersion, ) { @@ -69,16 +73,18 @@ fn test_sign_transaction( input_commitments_version, make_software_signer, no_another_signer(), - ); + ) + .await; } #[rstest] #[trace] #[case(Seed::from_entropy())] -fn test_fixed_signatures(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", 
worker_threads = 1)] +async fn test_fixed_signatures(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); - test_fixed_signatures_generic(&mut rng, make_deterministic_software_signer); + test_fixed_signatures_generic(&mut rng, make_deterministic_software_signer).await; } #[rstest] @@ -86,7 +92,8 @@ fn test_fixed_signatures(#[case] seed: Seed) { #[case(Seed::from_entropy(), SighashInputCommitmentVersion::V0)] #[trace] #[case(Seed::from_entropy(), SighashInputCommitmentVersion::V1)] -fn test_fixed_signatures2( +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_fixed_signatures2( #[case] seed: Seed, #[case] input_commitments_version: SighashInputCommitmentVersion, ) { @@ -96,7 +103,8 @@ fn test_fixed_signatures2( &mut rng, input_commitments_version, make_deterministic_software_signer, - ); + ) + .await; } #[rstest] @@ -104,7 +112,8 @@ fn test_fixed_signatures2( #[case(Seed::from_entropy(), SighashInputCommitmentVersion::V0)] #[trace] #[case(Seed::from_entropy(), SighashInputCommitmentVersion::V1)] -fn test_fixed_signatures_htlc_refunding( +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_fixed_signatures_htlc_refunding( #[case] seed: Seed, #[case] input_commitments_version: SighashInputCommitmentVersion, ) { @@ -114,5 +123,6 @@ fn test_fixed_signatures_htlc_refunding( &mut rng, input_commitments_version, make_deterministic_software_signer, - ); + ) + .await; } diff --git a/wallet/src/signer/tests/generic_fixed_signature_tests.rs b/wallet/src/signer/tests/generic_fixed_signature_tests.rs index a23fcca780..05ce069339 100644 --- a/wallet/src/signer/tests/generic_fixed_signature_tests.rs +++ b/wallet/src/signer/tests/generic_fixed_signature_tests.rs @@ -125,8 +125,10 @@ lazy_static::lazy_static! 
{ }; } -pub fn test_fixed_signatures_generic(rng: &mut (impl Rng + CryptoRng), make_signer: MkS) -where +pub async fn test_fixed_signatures_generic( + rng: &mut (impl Rng + CryptoRng), + make_signer: MkS, +) where MkS: Fn(Arc, U31) -> S, S: Signer, { @@ -157,7 +159,7 @@ where .build(), ); - let db = Arc::new(Store::new(DefaultBackend::new_in_memory()).unwrap()); + let mut db = Store::new(DefaultBackend::new_in_memory()).unwrap(); let mut db_tx = db.transaction_rw_unlocked(None).unwrap(); let master_key_chain = MasterKeyChain::new_from_mnemonic( @@ -378,9 +380,10 @@ where orig_ptx, &tokens_additional_info, account.key_chain(), - &db_tx, + &mut db_tx, tx_block_height, ) + .await .unwrap(); assert!(ptx.all_signatures_available()); @@ -432,7 +435,7 @@ where /// 2) v1 order inputs; /// 3) htlc inputs; /// 4) v1 input commitments. -pub fn test_fixed_signatures_generic2( +pub async fn test_fixed_signatures_generic2( rng: &mut (impl Rng + CryptoRng), input_commitments_version: SighashInputCommitmentVersion, make_signer: MkS, @@ -475,7 +478,7 @@ pub fn test_fixed_signatures_generic2( .build(), ); - let db = Arc::new(Store::new(DefaultBackend::new_in_memory()).unwrap()); + let mut db = Store::new(DefaultBackend::new_in_memory()).unwrap(); let mut db_tx = db.transaction_rw_unlocked(None).unwrap(); let mut account1 = account_from_mnemonic(&chain_config, &mut db_tx, DEFAULT_ACCOUNT_INDEX); @@ -921,9 +924,10 @@ pub fn test_fixed_signatures_generic2( ptx, &tokens_additional_info, account1.key_chain(), - &db_tx, + &mut db_tx, tx_block_height, ) + .await .unwrap(); assert!(ptx.all_signatures_available()); @@ -934,9 +938,10 @@ pub fn test_fixed_signatures_generic2( ptx, &tokens_additional_info, account2.key_chain(), - &db_tx, + &mut db_tx, tx_block_height, ) + .await .unwrap(); assert!(ptx.all_signatures_available()); @@ -1143,7 +1148,7 @@ pub fn test_fixed_signatures_generic2( // for completeness). 
// We also add one non-htlc input (pool decommissioning), so that signatures differ for different // input commitment versions. -pub fn test_fixed_signatures_generic_htlc_refunding( +pub async fn test_fixed_signatures_generic_htlc_refunding( rng: &mut (impl Rng + CryptoRng), input_commitments_version: SighashInputCommitmentVersion, make_signer: MkS, @@ -1186,7 +1191,7 @@ pub fn test_fixed_signatures_generic_htlc_refunding( .build(), ); - let db = Arc::new(Store::new(DefaultBackend::new_in_memory()).unwrap()); + let mut db = Store::new(DefaultBackend::new_in_memory()).unwrap(); let mut db_tx = db.transaction_rw_unlocked(None).unwrap(); let mut account1 = account_from_mnemonic(&chain_config, &mut db_tx, DEFAULT_ACCOUNT_INDEX); @@ -1380,9 +1385,10 @@ pub fn test_fixed_signatures_generic_htlc_refunding( ptx, &TokensAdditionalInfo::new(), account1.key_chain(), - &db_tx, + &mut db_tx, tx_block_height, ) + .await .unwrap(); assert!(ptx.all_signatures_available()); @@ -1395,9 +1401,10 @@ pub fn test_fixed_signatures_generic_htlc_refunding( ptx, &TokensAdditionalInfo::new(), account2.key_chain(), - &db_tx, + &mut db_tx, tx_block_height, ) + .await .unwrap(); assert!(ptx.all_signatures_available()); @@ -1687,7 +1694,7 @@ fn make_htlc_uniparty_pub_key_hash_refund_sig( StandardInputSignature::new(SigHashType::ALL.try_into().unwrap(), spend.encode()) } -fn new_dest_from_account( +fn new_dest_from_account( account: &mut Account, db_tx: &mut impl TransactionRwUnlocked, purpose: KeyPurpose, @@ -1695,7 +1702,7 @@ fn new_dest_from_account( account.get_new_address(db_tx, purpose).unwrap().1.into_object() } -fn new_pub_key_from_account( +fn new_pub_key_from_account( account: &mut Account, db_tx: &mut impl TransactionRwUnlocked, purpose: KeyPurpose, @@ -1704,7 +1711,7 @@ fn new_pub_key_from_account( find_pub_key_for_pkh_dest(&dest, &*account) } -fn find_pub_key_for_pkh_dest( +fn find_pub_key_for_pkh_dest( dest: &Destination, account: &Account, ) -> PublicKey { diff --git 
a/wallet/src/signer/tests/generic_tests.rs b/wallet/src/signer/tests/generic_tests.rs index a12b90dd56..12515d07c2 100644 --- a/wallet/src/signer/tests/generic_tests.rs +++ b/wallet/src/signer/tests/generic_tests.rs @@ -85,7 +85,7 @@ pub enum MessageToSign { Predefined(Vec), } -pub fn test_sign_message_generic( +pub async fn test_sign_message_generic( rng: &mut (impl Rng + CryptoRng), message_to_sign: MessageToSign, make_signer: MkS1, @@ -97,7 +97,7 @@ pub fn test_sign_message_generic( S2: Signer, { let chain_config = Arc::new(create_regtest()); - let db = Arc::new(Store::new(DefaultBackend::new_in_memory()).unwrap()); + let mut db = Store::new(DefaultBackend::new_in_memory()).unwrap(); let mut db_tx = db.transaction_rw_unlocked(None).unwrap(); let mut account = account_from_mnemonic(&chain_config, &mut db_tx, DEFAULT_ACCOUNT_INDEX); @@ -143,7 +143,8 @@ pub fn test_sign_message_generic( let mut signer = make_signer(chain_config.clone(), account.account_index()); let res = signer - .sign_challenge(&message, &destination, account.key_chain(), &db_tx) + .sign_challenge(&message, &destination, account.key_chain(), &mut db_tx) + .await .unwrap(); res.verify_signature(&chain_config, &destination, &message_challenge).unwrap(); @@ -152,7 +153,8 @@ pub fn test_sign_message_generic( make_another_signer(chain_config.clone(), account.account_index()); let another_res = another_signer - .sign_challenge(&message, &destination, account.key_chain(), &db_tx) + .sign_challenge(&message, &destination, account.key_chain(), &mut db_tx) + .await .unwrap(); another_res .verify_signature(&chain_config, &destination, &message_challenge) @@ -174,14 +176,15 @@ pub fn test_sign_message_generic( &message, &random_pk_destination, account.key_chain(), - &db_tx, + &mut db_tx, ) + .await .unwrap_err(); assert_eq!(err, SignerError::DestinationNotFromThisWallet); } -pub fn test_sign_transaction_intent_generic( +pub async fn test_sign_transaction_intent_generic( rng: &mut (impl Rng + CryptoRng), 
make_signer: MkS1, make_another_signer: Option, @@ -192,7 +195,7 @@ pub fn test_sign_transaction_intent_generic( S2: Signer, { let chain_config = Arc::new(create_regtest()); - let db = Arc::new(Store::new(DefaultBackend::new_in_memory()).unwrap()); + let mut db = Store::new(DefaultBackend::new_in_memory()).unwrap(); let mut db_tx = db.transaction_rw_unlocked(None).unwrap(); let mut account = account_from_mnemonic(&chain_config, &mut db_tx, DEFAULT_ACCOUNT_INDEX); @@ -259,8 +262,9 @@ pub fn test_sign_transaction_intent_generic( &input_destinations, &intent, account.key_chain(), - &db_tx, + &mut db_tx, ) + .await .unwrap(); res.verify(&chain_config, &input_destinations, &expected_signed_message) .unwrap(); @@ -273,8 +277,9 @@ pub fn test_sign_transaction_intent_generic( &input_destinations, &intent, account.key_chain(), - &db_tx, + &mut db_tx, ) + .await .unwrap(); another_res .verify(&chain_config, &input_destinations, &expected_signed_message) @@ -294,14 +299,15 @@ pub fn test_sign_transaction_intent_generic( &input_destinations, &intent, account.key_chain(), - &db_tx, + &mut db_tx, ) + .await .unwrap_err(); assert_eq!(err, SignerError::DestinationNotFromThisWallet); } -pub fn test_sign_transaction_generic( +pub async fn test_sign_transaction_generic( rng: &mut (impl Rng + CryptoRng), input_commitments_version: SighashInputCommitmentVersion, make_signer: MkS1, @@ -346,7 +352,7 @@ pub fn test_sign_transaction_generic( .build(), ); - let db = Arc::new(Store::new(DefaultBackend::new_in_memory()).unwrap()); + let mut db = Store::new(DefaultBackend::new_in_memory()).unwrap(); let mut db_tx = db.transaction_rw_unlocked(None).unwrap(); let mut account = account_from_mnemonic(&chain_config, &mut db_tx, DEFAULT_ACCOUNT_INDEX); @@ -794,9 +800,10 @@ pub fn test_sign_transaction_generic( orig_ptx.clone(), &tokens_additional_info, account.key_chain(), - &db_tx, + &mut db_tx, tx_block_height, ) + .await .unwrap(); if first_account_can_sign_htlc { 
assert!(ptx.all_signatures_available()); @@ -809,9 +816,10 @@ pub fn test_sign_transaction_generic( orig_ptx, &tokens_additional_info, account.key_chain(), - &db_tx, + &mut db_tx, tx_block_height, ) + .await .unwrap(); if first_account_can_sign_htlc { assert!(another_ptx.all_signatures_available()); @@ -885,9 +893,10 @@ pub fn test_sign_transaction_generic( orig_ptx.clone(), &tokens_additional_info, account2.key_chain(), - &db_tx, + &mut db_tx, tx_block_height, ) + .await .unwrap(); assert!(ptx.all_signatures_available()); @@ -899,9 +908,10 @@ pub fn test_sign_transaction_generic( orig_ptx, &tokens_additional_info, account2.key_chain(), - &db_tx, + &mut db_tx, tx_block_height, ) + .await .unwrap(); assert!(another_ptx.all_signatures_available()); diff --git a/wallet/src/signer/trezor_signer/mod.rs b/wallet/src/signer/trezor_signer/mod.rs index 27edc9f132..4d2e241a4d 100644 --- a/wallet/src/signer/trezor_signer/mod.rs +++ b/wallet/src/signer/trezor_signer/mod.rs @@ -18,6 +18,7 @@ use std::{ sync::{Arc, Mutex}, }; +use async_trait::async_trait; use itertools::{izip, Itertools}; use common::{ @@ -185,7 +186,7 @@ pub struct TrezorSigner { chain_config: Arc, client: Arc>, session_id: Vec, - sig_aux_data_provider: Mutex>, + sig_aux_data_provider: Mutex>, } impl TrezorSigner { @@ -206,7 +207,7 @@ impl TrezorSigner { chain_config: Arc, client: Arc>, session_id: Vec, - sig_aux_data_provider: Box, + sig_aux_data_provider: Box, ) -> Self { Self { chain_config, @@ -222,7 +223,7 @@ impl TrezorSigner { /// the function will attempt to reconnect to the Trezor device once before returning an error. 
fn check_session( &mut self, - db_tx: &impl WalletStorageReadLocked, + db_tx: &mut impl WalletStorageReadLocked, key_chain: &impl AccountKeyChains, ) -> SignerResult<()> { let mut client = self.client.lock().expect("poisoned lock"); @@ -260,7 +261,7 @@ impl TrezorSigner { fn perform_trezor_operation( &mut self, operation: F, - db_tx: &impl WalletStorageReadLocked, + db_tx: &mut impl WalletStorageReadLocked, key_chain: &impl AccountKeyChains, ) -> SignerResult where @@ -508,13 +509,14 @@ fn find_trezor_device_from_db( } } +#[async_trait] impl Signer for TrezorSigner { - fn sign_tx( + async fn sign_tx( &mut self, ptx: PartiallySignedTransaction, tokens_additional_info: &TokensAdditionalInfo, - key_chain: &impl AccountKeyChains, - db_tx: &impl WalletStorageReadUnlocked, + key_chain: &(impl AccountKeyChains + Sync), + mut db_tx: impl WalletStorageReadUnlocked + Send, block_height: BlockHeight, ) -> SignerResult<( PartiallySignedTransaction, @@ -526,7 +528,7 @@ impl Signer for TrezorSigner { tokens_additional_info, key_chain, &self.chain_config, - db_tx, + &db_tx, )?; let outputs = self.to_trezor_output_msgs(&ptx, tokens_additional_info)?; let utxos = to_trezor_utxo_msgs(&ptx, tokens_additional_info, &self.chain_config)?; @@ -557,7 +559,7 @@ impl Signer for TrezorSigner { input_commitment_version, ) }, - db_tx, + &mut db_tx, key_chain, )?; @@ -749,12 +751,12 @@ impl Signer for TrezorSigner { Ok((ptx.with_witnesses(witnesses)?, prev_statuses, new_statuses)) } - fn sign_challenge( + async fn sign_challenge( &mut self, message: &[u8], destination: &Destination, - key_chain: &impl AccountKeyChains, - db_tx: &impl WalletStorageReadUnlocked, + key_chain: &(impl AccountKeyChains + Sync), + mut db_tx: impl WalletStorageReadUnlocked + Send, ) -> SignerResult { let data = match key_chain.find_public_key(destination) { Some(FoundPubKey::Hierarchy(xpub)) => { @@ -796,7 +798,7 @@ impl Signer for TrezorSigner { message.to_vec(), ) }, - db_tx, + &mut db_tx, key_chain, )?; @@ -846,13 
+848,13 @@ impl Signer for TrezorSigner { Ok(sig) } - fn sign_transaction_intent( + async fn sign_transaction_intent( &mut self, transaction: &Transaction, input_destinations: &[Destination], intent: &str, - key_chain: &impl AccountKeyChains, - db_tx: &impl WalletStorageReadUnlocked, + key_chain: &(impl AccountKeyChains + Sync), + mut db_tx: impl WalletStorageReadUnlocked + Send, ) -> SignerResult { let tx_id = transaction.get_id(); let message_to_sign = SignedTransactionIntent::get_message_to_sign(intent, &tx_id); @@ -860,7 +862,9 @@ impl Signer for TrezorSigner { let mut signatures = Vec::with_capacity(input_destinations.len()); for dest in input_destinations { let dest = SignedTransactionIntent::normalize_destination(dest); - let sig = self.sign_challenge(message_to_sign.as_bytes(), &dest, key_chain, db_tx)?; + let sig = self + .sign_challenge(message_to_sign.as_bytes(), &dest, key_chain, &mut db_tx) + .await?; signatures.push(sig.into_raw()); } diff --git a/wallet/src/signer/trezor_signer/tests.rs b/wallet/src/signer/trezor_signer/tests.rs index 4cdcee8437..402f72b753 100644 --- a/wallet/src/signer/trezor_signer/tests.rs +++ b/wallet/src/signer/trezor_signer/tests.rs @@ -76,7 +76,8 @@ pub fn make_deterministic_trezor_signer( #[trace] #[serial] #[case(Seed::from_entropy())] -fn test_sign_message( +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_sign_message( #[case] seed: Seed, #[values( MessageToSign::Random, @@ -97,21 +98,23 @@ fn test_sign_message( message_to_sign, make_trezor_signer, no_another_signer(), - ); + ) + .await; } #[rstest] #[trace] #[serial] #[case(Seed::from_entropy())] -fn test_sign_transaction_intent(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_sign_transaction_intent(#[case] seed: Seed) { log::debug!("test_sign_transaction_intent, seed = {seed:?}"); let _join_guard = maybe_spawn_auto_confirmer(); let mut rng = make_seedable_rng(seed); - 
test_sign_transaction_intent_generic(&mut rng, make_trezor_signer, no_another_signer()); + test_sign_transaction_intent_generic(&mut rng, make_trezor_signer, no_another_signer()).await; } #[rstest] @@ -121,7 +124,8 @@ fn test_sign_transaction_intent(#[case] seed: Seed) { #[trace] #[serial] #[case(Seed::from_entropy(), SighashInputCommitmentVersion::V1)] -fn test_sign_transaction( +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_sign_transaction( #[case] seed: Seed, #[case] input_commitments_version: SighashInputCommitmentVersion, ) { @@ -136,21 +140,23 @@ fn test_sign_transaction( input_commitments_version, make_trezor_signer, no_another_signer(), - ); + ) + .await; } #[rstest] #[trace] #[serial] #[case(Seed::from_entropy())] -fn test_fixed_signatures(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_fixed_signatures(#[case] seed: Seed) { log::debug!("test_fixed_signatures, seed = {seed:?}"); let _join_guard = maybe_spawn_auto_confirmer(); let mut rng = make_seedable_rng(seed); - test_fixed_signatures_generic(&mut rng, make_deterministic_trezor_signer); + test_fixed_signatures_generic(&mut rng, make_deterministic_trezor_signer).await; } #[rstest] @@ -160,7 +166,8 @@ fn test_fixed_signatures(#[case] seed: Seed) { #[trace] #[serial] #[case(Seed::from_entropy(), SighashInputCommitmentVersion::V1)] -fn test_fixed_signatures2( +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_fixed_signatures2( #[case] seed: Seed, #[case] input_commitments_version: SighashInputCommitmentVersion, ) { @@ -174,7 +181,8 @@ fn test_fixed_signatures2( &mut rng, input_commitments_version, make_deterministic_trezor_signer, - ); + ) + .await; } #[rstest] @@ -184,7 +192,8 @@ fn test_fixed_signatures2( #[trace] #[serial] #[case(Seed::from_entropy(), SighashInputCommitmentVersion::V1)] -fn test_fixed_signatures_htlc_refunding( +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn 
test_fixed_signatures_htlc_refunding( #[case] seed: Seed, #[case] input_commitments_version: SighashInputCommitmentVersion, ) { @@ -198,14 +207,16 @@ fn test_fixed_signatures_htlc_refunding( &mut rng, input_commitments_version, make_deterministic_trezor_signer, - ); + ) + .await; } #[rstest] #[trace] #[serial] #[case(Seed::from_entropy())] -fn test_sign_message_sig_consistency(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_sign_message_sig_consistency(#[case] seed: Seed) { log::debug!("test_sign_message_sig_consistency, seed = {seed:?}"); let _join_guard = maybe_spawn_auto_confirmer(); @@ -217,14 +228,16 @@ fn test_sign_message_sig_consistency(#[case] seed: Seed) { MessageToSign::Random, make_deterministic_trezor_signer, Some(make_deterministic_software_signer), - ); + ) + .await; } #[rstest] #[trace] #[serial] #[case(Seed::from_entropy())] -fn test_sign_transaction_intent_sig_consistency(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_sign_transaction_intent_sig_consistency(#[case] seed: Seed) { log::debug!("test_sign_transaction_intent_sig_consistency, seed = {seed:?}"); let _join_guard = maybe_spawn_auto_confirmer(); @@ -235,7 +248,8 @@ fn test_sign_transaction_intent_sig_consistency(#[case] seed: Seed) { &mut rng, make_deterministic_trezor_signer, Some(make_deterministic_software_signer), - ); + ) + .await; } #[rstest] @@ -245,7 +259,8 @@ fn test_sign_transaction_intent_sig_consistency(#[case] seed: Seed) { #[trace] #[serial] #[case(Seed::from_entropy(), SighashInputCommitmentVersion::V1)] -fn test_sign_transaction_sig_consistency( +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_sign_transaction_sig_consistency( #[case] seed: Seed, #[case] input_commitments_version: SighashInputCommitmentVersion, ) { @@ -260,5 +275,6 @@ fn test_sign_transaction_sig_consistency( input_commitments_version, make_deterministic_trezor_signer, 
Some(make_deterministic_software_signer), - ); + ) + .await; } diff --git a/wallet/src/wallet/mod.rs b/wallet/src/wallet/mod.rs index a875cb9bd3..cf95222bec 100644 --- a/wallet/src/wallet/mod.rs +++ b/wallet/src/wallet/mod.rs @@ -354,7 +354,7 @@ impl WalletCreation { impl Wallet where - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, P: SignerProvider, { pub fn create_new_wallet) -> WalletResult

>( @@ -384,7 +384,7 @@ where fn new_wallet) -> WalletResult

>( chain_config: Arc, - db: Store, + mut db: Store, wallet_type: WalletType, signer_provider: F, ) -> WalletResult> { @@ -442,7 +442,7 @@ where /// * save the chain info in the DB based on the chain type specified by the user /// * reset transactions fn migration_v2( - db: &Store, + db: &mut Store, chain_config: Arc, signer_provider: &mut P, ) -> WalletResult<()> { @@ -469,7 +469,7 @@ where /// Migrate the wallet DB from version 2 to version 3 /// * reset transactions as now we store SignedTransaction instead of Transaction in WalletTx - fn migration_v3(db: &Store, chain_config: Arc) -> WalletResult<()> { + fn migration_v3(db: &mut Store, chain_config: Arc) -> WalletResult<()> { let mut db_tx = db.transaction_rw_unlocked(None)?; // reset wallet transaction as now we will need to rescan the blockchain to store the // correct order of the transactions to avoid bugs in loading them in the wrong order @@ -487,7 +487,7 @@ where /// Migrate the wallet DB from version 3 to version 4 /// * set lookahead_size in the DB - fn migration_v4(db: &Store) -> WalletResult<()> { + fn migration_v4(db: &mut Store) -> WalletResult<()> { let mut db_tx = db.transaction_rw_unlocked(None)?; db_tx.set_lookahead_size(LOOKAHEAD_SIZE)?; @@ -504,7 +504,7 @@ where /// Migrate the wallet DB from version 4 to version 5 /// * set vrf key_chain usage fn migration_v5( - db: &Store, + db: &mut Store, chain_config: Arc, signer_provider: &P, ) -> WalletResult<()> { @@ -552,7 +552,7 @@ where Ok(()) } - fn migration_v6(db: &Store, _chain_config: Arc) -> WalletResult<()> { + fn migration_v6(db: &mut Store, _chain_config: Arc) -> WalletResult<()> { let mut db_tx = db.transaction_rw(None)?; // nothing to do the seed phrase na passphrase are backwards compatible db_tx.set_storage_version(WALLET_VERSION_V6)?; @@ -566,7 +566,7 @@ where } fn migration_v7( - db: &Store, + db: &mut Store, chain_config: Arc, controller_mode: WalletControllerMode, ) -> WalletResult<()> { @@ -598,7 +598,7 @@ where F: Fn(u32) -> 
Result<(), WalletError>, F2: FnOnce(&StoreTxRo) -> WalletResult

, >( - db: &Store, + db: &mut Store, chain_config: Arc, pre_migration: F, controller_mode: WalletControllerMode, @@ -671,7 +671,7 @@ where Ok(()) } - fn migrate_cold_to_hot_wallet(db: &Store) -> WalletResult<()> { + fn migrate_cold_to_hot_wallet(db: &mut Store) -> WalletResult<()> { let mut db_tx = db.transaction_rw(None)?; db_tx.set_wallet_type(WalletType::Hot)?; db_tx.commit()?; @@ -679,7 +679,7 @@ where } fn migrate_hot_to_cold_wallet( - db: &Store, + db: &mut Store, chain_config: Arc, signer_provider: &P, ) -> WalletResult<()> { @@ -692,7 +692,7 @@ where fn force_migrate_wallet_type( wallet_type: WalletType, - db: &Store, + db: &mut Store, chain_config: Arc, signer_provider: &P, ) -> Result<(), WalletError> { @@ -833,7 +833,7 @@ where } let signer_provider = match Self::check_and_migrate_db( - &db, + &mut db, chain_config.clone(), pre_migration, controller_mode, @@ -850,7 +850,7 @@ where if force_change_wallet_type { Self::force_migrate_wallet_type( controller_mode.into(), - &db, + &mut db, chain_config.clone(), &signer_provider, )?; @@ -908,7 +908,7 @@ where self.db.transaction_ro_unlocked()?.get_seed_phrase().map_err(WalletError::from) } - pub fn delete_seed_phrase(&self) -> WalletResult> { + pub fn delete_seed_phrase(&mut self) -> WalletResult> { let mut tx = self.db.transaction_rw_unlocked(None)?; let seed_phrase = tx.del_seed_phrase().map_err(WalletError::from)?; tx.commit()?; @@ -1117,8 +1117,9 @@ where } Err(err) => { db_tx.abort(); - // In case of an error reload the keys in case the operation issued new ones and - // are saved in the cache but not in the DB + // In case of an error we should reload the keys, in the case that the operation has issued new ones; + // we do this to prevent exhausting the keys from many failed operations, and to + // keep the cache in sync with the DB, as the DB transaction will roll back. 
let db_tx = self.db.transaction_ro()?; account.reload_keys(&db_tx)?; Err(err) @@ -1126,7 +1127,66 @@ where } } - fn for_account_rw_unlocked_and_check_tx_generic( + async fn async_for_account_rw_unlocked( + &mut self, + account_index: U31, + create_request: impl FnOnce(&mut Account, &mut StoreTxRwUnlocked) -> R, + sign_request: impl AsyncFnOnce( + R, + &P::K, + &mut StoreTxRwUnlocked, + Arc, +

::S, + ) -> WalletResult + + Send, + ) -> WalletResult { + let account = Self::get_account_mut(&mut self.accounts, account_index)?; + let mut db_tx = self.db.transaction_rw_unlocked(None)?; + let result = create_request(account, &mut db_tx); + let signer = self.signer_provider.provide(self.chain_config.clone(), account_index); + let config = self.chain_config.clone(); + let result = sign_request(result, account.key_chain(), &mut db_tx, config, signer).await; + + match result { + Ok(value) => { + // Abort the process if the DB transaction fails. See `for_account_rw` for more information. + db_tx.commit().expect("RW transaction commit failed unexpectedly"); + Ok(value) + } + Err(err) => { + db_tx.abort(); + // In case of an error we should reload the keys, in the case that the operation has issued new ones; + // we do this to prevent exhausting the keys from many failed operations, and to + // keep the cache in sync with the DB, as the DB transaction will roll back. + let db_tx = self.db.transaction_ro()?; + account.reload_keys(&db_tx)?; + Err(err) + } + } + } + + async fn async_for_account_key_chain_rw_unlocked( + &mut self, + account_index: U31, + f: impl AsyncFnOnce( + &P::K, + &mut StoreTxRwUnlocked, + Arc, +

::S, + ) -> WalletResult + + Send, + ) -> WalletResult { + self.async_for_account_rw_unlocked( + account_index, + |_, _| (), + async move |_, key_chain, db_tx, chain_config, signer| { + f(key_chain, db_tx, chain_config, signer).await + }, + ) + .await + } + + async fn async_for_account_rw_unlocked_and_check_tx_custom_error( &mut self, account_index: U31, additional_info: TxAdditionalInfo, @@ -1134,32 +1194,33 @@ where &mut Account, &mut StoreTxRwUnlocked, ) -> WalletResult<(SendRequest, AddlData)>, - error_mapper: impl FnOnce(WalletError) -> WalletError, + error_mapper: impl FnOnce(WalletError) -> WalletError + Send, ) -> WalletResult<(SignedTxWithFees, AddlData)> { let (_, best_block_height) = self.get_best_block_for_account(account_index)?; let next_block_height = best_block_height.next_height(); - self.for_account_rw_unlocked( + self.async_for_account_rw_unlocked( account_index, - |account, db_tx, chain_config, signer_provider| { - let (mut request, additional_data) = f(account, db_tx)?; + f, + async move |request, key_chain, store, chain_config, mut signer| { + let (mut request, additional_data) = request?; + let fees = request.get_fees(); let ptx = request.into_partially_signed_tx(additional_info.ptx_additional_info)?; - let mut signer = - signer_provider.provide(Arc::new(chain_config.clone()), account_index); let ptx = signer .sign_tx( ptx, &additional_info.tokens_additional_info, - account.key_chain(), - db_tx, + key_chain, + store, next_block_height, ) + .await .map(|(ptx, _, _)| ptx)?; let input_commitments = - ptx.make_sighash_input_commitments_at_height(chain_config, next_block_height)?; + ptx.make_sighash_input_commitments_at_height(&chain_config, next_block_height)?; let is_fully_signed = ptx.destinations().iter().enumerate().zip(ptx.witnesses()).all( @@ -1169,7 +1230,7 @@ where let input_utxo = ptx.input_utxos()[i].clone(); tx_verifier::input_check::signature_only_check::verify_tx_signature( - chain_config, + &chain_config, destination, &ptx, 
&input_commitments, @@ -1189,27 +1250,28 @@ where let tx = ptx.into_signed_tx().map_err(|e| error_mapper(e.into()))?; - check_transaction(chain_config, next_block_height, &tx)?; + check_transaction(&chain_config, next_block_height, &tx)?; let tx = SignedTxWithFees { tx, fees }; Ok((tx, additional_data)) }, ) + .await } - fn for_account_rw_unlocked_and_check_tx_with_fees( + async fn async_for_account_rw_unlocked_and_check_tx( &mut self, account_index: U31, additional_info: TxAdditionalInfo, f: impl FnOnce(&mut Account, &mut StoreTxRwUnlocked) -> WalletResult, ) -> WalletResult { - Ok(self - .for_account_rw_unlocked_and_check_tx_generic( - account_index, - additional_info, - |account, db_tx| Ok((f(account, db_tx)?, ())), - |err| err, - )? - .0) + self.async_for_account_rw_unlocked_and_check_tx_custom_error( + account_index, + additional_info, + |account, db_tx| Ok((f(account, db_tx)?, ())), + |err| err, + ) + .await + .map(|(tx, _)| tx) } fn get_account(&self, account_index: U31) -> WalletResult<&Account> { @@ -1555,7 +1617,7 @@ where /// /// A `WalletResult` containing the signed transaction if successful, or an error indicating the reason for failure. #[allow(clippy::too_many_arguments)] - pub fn create_transaction_to_addresses( + pub async fn create_transaction_to_addresses( &mut self, account_index: U31, outputs: impl IntoIterator, @@ -1575,14 +1637,15 @@ where consolidate_fee_rate, |_s| (), additional_info, - )? + ) + .await? .0) } /// Same as `create_transaction_to_addresses`, but it also allows to specify the "intent" for the transaction, /// which will be concatenated with the transaction id and signed with all the keys used to sign the transaction's inputs. 
#[allow(clippy::too_many_arguments)] - pub fn create_transaction_to_addresses_with_intent( + pub async fn create_transaction_to_addresses_with_intent( &mut self, account_index: U31, outputs: impl IntoIterator, @@ -1593,38 +1656,43 @@ where consolidate_fee_rate: FeeRate, additional_info: TxAdditionalInfo, ) -> WalletResult<(SignedTxWithFees, SignedTransactionIntent)> { - let (signed_tx, input_destinations) = self.create_transaction_to_addresses_impl( - account_index, - outputs, - inputs, - change_addresses, - current_fee_rate, - consolidate_fee_rate, - |send_request| send_request.destinations().to_owned(), - additional_info, - )?; + let (signed_tx, input_destinations) = self + .create_transaction_to_addresses_impl( + account_index, + outputs, + inputs, + change_addresses, + current_fee_rate, + consolidate_fee_rate, + |send_request| send_request.destinations().to_owned(), + additional_info, + ) + .await?; - let signed_intent = self.for_account_rw_unlocked( - account_index, - |account, db_tx, chain_config, signer_provider| { - let mut signer = - signer_provider.provide(Arc::new(chain_config.clone()), account_index); - - Ok(signer.sign_transaction_intent( - signed_tx.tx.transaction(), - &input_destinations, - &intent, - account.key_chain(), - db_tx, - )?) 
- }, - )?; + let transaction = signed_tx.transaction(); + let signed_intent = self + .async_for_account_key_chain_rw_unlocked( + account_index, + async move |key_chain, store, _chain_config, mut signer| { + signer + .sign_transaction_intent( + transaction, + &input_destinations, + &intent, + key_chain, + store, + ) + .await + .map_err(Into::into) + }, + ) + .await?; Ok((signed_tx, signed_intent)) } #[allow(clippy::too_many_arguments)] - fn create_transaction_to_addresses_impl( + async fn create_transaction_to_addresses_impl( &mut self, account_index: U31, outputs: impl IntoIterator, @@ -1637,7 +1705,7 @@ where ) -> WalletResult<(SignedTxWithFees, AddlData)> { let request = SendRequest::new().with_outputs(outputs); let latest_median_time = self.latest_median_time; - self.for_account_rw_unlocked_and_check_tx_generic( + self.async_for_account_rw_unlocked_and_check_tx_custom_error( account_index, additional_info, |account, db_tx| { @@ -1658,6 +1726,7 @@ where }, |err| err, ) + .await } #[allow(clippy::too_many_arguments)] @@ -1691,7 +1760,7 @@ where }) } - pub fn create_sweep_transaction( + pub async fn create_sweep_transaction( &mut self, account_index: U31, destination: Destination, @@ -1706,14 +1775,15 @@ where &|_| None, )?; - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, additional_info, |account, _| account.sweep_addresses(destination, request, current_fee_rate), ) + .await } - pub fn create_sweep_from_delegation_transaction( + pub async fn create_sweep_from_delegation_transaction( &mut self, account_index: U31, address: Address, @@ -1721,16 +1791,17 @@ where delegation_share: Amount, current_fee_rate: FeeRate, ) -> WalletResult { - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, TxAdditionalInfo::new(), |account, _| { account.sweep_delegation(address, delegation_id, delegation_share, current_fee_rate) }, ) + .await } - 
pub fn create_transaction_to_addresses_from_delegation( + pub async fn create_transaction_to_addresses_from_delegation( &mut self, account_index: U31, address: Address, @@ -1739,7 +1810,7 @@ where delegation_share: Amount, current_fee_rate: FeeRate, ) -> WalletResult { - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, TxAdditionalInfo::new(), |account, _| { @@ -1752,9 +1823,10 @@ where ) }, ) + .await } - pub fn mint_tokens( + pub async fn mint_tokens( &mut self, account_index: U31, token_info: &UnconfirmedTokenInfo, @@ -1765,7 +1837,7 @@ where ) -> WalletResult { let latest_median_time = self.latest_median_time; let additional_info = to_token_additional_info(token_info); - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, additional_info, |account, db_tx| { @@ -1782,9 +1854,10 @@ where ) }, ) + .await } - pub fn unmint_tokens( + pub async fn unmint_tokens( &mut self, account_index: U31, token_info: &UnconfirmedTokenInfo, @@ -1794,7 +1867,7 @@ where ) -> WalletResult { let latest_median_time = self.latest_median_time; let additional_info = to_token_additional_info(token_info); - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, additional_info, |account, db_tx| { @@ -1810,9 +1883,10 @@ where ) }, ) + .await } - pub fn lock_token_supply( + pub async fn lock_token_supply( &mut self, account_index: U31, token_info: &UnconfirmedTokenInfo, @@ -1821,7 +1895,7 @@ where ) -> WalletResult { let latest_median_time = self.latest_median_time; let additional_info = to_token_additional_info(token_info); - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, additional_info, |account, db_tx| { @@ -1836,9 +1910,10 @@ where ) }, ) + .await } - pub fn freeze_token( + pub async fn freeze_token( &mut self, 
account_index: U31, token_info: &UnconfirmedTokenInfo, @@ -1848,7 +1923,7 @@ where ) -> WalletResult { let latest_median_time = self.latest_median_time; let additional_info = to_token_additional_info(token_info); - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, additional_info, |account, db_tx| { @@ -1864,9 +1939,10 @@ where ) }, ) + .await } - pub fn unfreeze_token( + pub async fn unfreeze_token( &mut self, account_index: U31, token_info: &UnconfirmedTokenInfo, @@ -1875,7 +1951,7 @@ where ) -> WalletResult { let latest_median_time = self.latest_median_time; let additional_info = to_token_additional_info(token_info); - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, additional_info, |account, db_tx| { @@ -1890,9 +1966,10 @@ where ) }, ) + .await } - pub fn change_token_authority( + pub async fn change_token_authority( &mut self, account_index: U31, token_info: &UnconfirmedTokenInfo, @@ -1902,7 +1979,7 @@ where ) -> WalletResult { let latest_median_time = self.latest_median_time; let additional_info = to_token_additional_info(token_info); - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, additional_info, |account, db_tx| { @@ -1918,9 +1995,10 @@ where ) }, ) + .await } - pub fn change_token_metadata_uri( + pub async fn change_token_metadata_uri( &mut self, account_index: U31, token_info: &UnconfirmedTokenInfo, @@ -1930,7 +2008,7 @@ where ) -> WalletResult { let latest_median_time = self.latest_median_time; let additional_info = to_token_additional_info(token_info); - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, additional_info, |account, db_tx| { @@ -1946,6 +2024,7 @@ where ) }, ) + .await } pub fn find_used_tokens( @@ -1965,27 +2044,29 @@ where 
self.get_account(account_index)?.get_token_unconfirmed_info(token_info) } - pub fn create_delegation( + pub async fn create_delegation( &mut self, account_index: U31, outputs: Vec, current_fee_rate: FeeRate, consolidate_fee_rate: FeeRate, ) -> WalletResult<(DelegationId, SignedTxWithFees)> { - let tx = self.create_transaction_to_addresses( - account_index, - outputs, - SelectedInputs::Utxos(vec![]), - BTreeMap::new(), - current_fee_rate, - consolidate_fee_rate, - TxAdditionalInfo::new(), - )?; + let tx = self + .create_transaction_to_addresses( + account_index, + outputs, + SelectedInputs::Utxos(vec![]), + BTreeMap::new(), + current_fee_rate, + consolidate_fee_rate, + TxAdditionalInfo::new(), + ) + .await?; let delegation_id = make_delegation_id(tx.transaction().inputs())?; Ok((delegation_id, tx)) } - pub fn issue_new_token( + pub async fn issue_new_token( &mut self, account_index: U31, token_issuance: TokenIssuance, @@ -1994,15 +2075,17 @@ where ) -> WalletResult<(TokenId, SignedTxWithFees)> { let outputs = make_issue_token_outputs(token_issuance, self.chain_config.as_ref())?; - let tx = self.create_transaction_to_addresses( - account_index, - outputs, - SelectedInputs::Utxos(vec![]), - BTreeMap::new(), - current_fee_rate, - consolidate_fee_rate, - TxAdditionalInfo::new(), - )?; + let tx = self + .create_transaction_to_addresses( + account_index, + outputs, + SelectedInputs::Utxos(vec![]), + BTreeMap::new(), + current_fee_rate, + consolidate_fee_rate, + TxAdditionalInfo::new(), + ) + .await?; let token_id = make_token_id( self.chain_config.as_ref(), self.get_best_block_for_account(account_index)?.1.next_height(), @@ -2011,7 +2094,7 @@ where Ok((token_id, tx)) } - pub fn issue_new_nft( + pub async fn issue_new_nft( &mut self, account_index: U31, address: Address, @@ -2022,24 +2105,26 @@ where let destination = address.into_object(); let latest_median_time = self.latest_median_time; - let signed_transaction = self.for_account_rw_unlocked_and_check_tx_with_fees( - 
account_index, - TxAdditionalInfo::new(), - |account, db_tx| { - account.create_issue_nft_tx( - db_tx, - IssueNftArguments { - metadata, - destination, - }, - latest_median_time, - CurrentFeeRate { - current_fee_rate, - consolidate_fee_rate, - }, - ) - }, - )?; + let signed_transaction = self + .async_for_account_rw_unlocked_and_check_tx( + account_index, + TxAdditionalInfo::new(), + |account, db_tx| { + account.create_issue_nft_tx( + db_tx, + IssueNftArguments { + metadata, + destination, + }, + latest_median_time, + CurrentFeeRate { + current_fee_rate, + consolidate_fee_rate, + }, + ) + }, + ) + .await?; let token_id = make_token_id( self.chain_config.as_ref(), @@ -2049,7 +2134,7 @@ where Ok((token_id, signed_transaction)) } - pub fn create_stake_pool_with_vrf_key( + pub async fn create_stake_pool_with_vrf_key( &mut self, account_index: U31, current_fee_rate: FeeRate, @@ -2057,7 +2142,7 @@ where stake_pool_arguments: StakePoolCreationArguments, ) -> WalletResult { let latest_median_time = self.latest_median_time; - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, TxAdditionalInfo::new(), |account, db_tx| { @@ -2072,9 +2157,10 @@ where ) }, ) + .await } - pub fn decommission_stake_pool( + pub async fn decommission_stake_pool( &mut self, account_index: U31, pool_id: PoolId, @@ -2084,28 +2170,27 @@ where ) -> WalletResult { let additional_info = TxAdditionalInfo::new().with_pool_info(pool_id, PoolAdditionalInfo { staker_balance }); - Ok(self - .for_account_rw_unlocked_and_check_tx_generic( - account_index, - additional_info, - |account, db_tx| { - Ok(( - account.decommission_stake_pool( - db_tx, - pool_id, - staker_balance, - output_address, - current_fee_rate, - )?, - (), - )) - }, - |_err| WalletError::PartiallySignedTransactionInDecommissionCommand, - )? - .0) + self.async_for_account_rw_unlocked_and_check_tx_custom_error( + account_index, + additional_info, + |account: &mut Account<

::K>, db_tx| { + account + .decommission_stake_pool( + db_tx, + pool_id, + staker_balance, + output_address, + current_fee_rate, + ) + .map(|r| (r, ())) + }, + |_err| WalletError::PartiallySignedTransactionInDecommissionCommand, + ) + .await + .map(|(tx, _)| tx) } - pub fn decommission_stake_pool_request( + pub async fn decommission_stake_pool_request( &mut self, account_index: U31, pool_id: PoolId, @@ -2118,40 +2203,42 @@ where let ptx_additional_info = PtxAdditionalInfo::new().with_pool_info(pool_id, PoolAdditionalInfo { staker_balance }); - self.for_account_rw_unlocked( + self.async_for_account_rw_unlocked( account_index, - |account, db_tx, chain_config, signer_provider| { - let request = account.decommission_stake_pool_request( + |account, db_tx| { + account.decommission_stake_pool_request( db_tx, pool_id, staker_balance, output_address, current_fee_rate, - )?; - - let ptx = request.into_partially_signed_tx(ptx_additional_info)?; + ) + }, + async move |request, key_chain, store, _chain_config, mut signer| { + let ptx = request?.into_partially_signed_tx(ptx_additional_info)?; - let mut signer = - signer_provider.provide(Arc::new(chain_config.clone()), account_index); let ptx = signer .sign_tx( ptx, &TokensAdditionalInfo::new(), - account.key_chain(), - db_tx, + key_chain, + store, next_block_height, ) + .await .map(|(ptx, _, _)| ptx)?; if ptx.all_signatures_available() { return Err(WalletError::FullySignedTransactionInDecommissionReq); } + Ok(ptx) }, ) + .await } - pub fn create_htlc_tx( + pub async fn create_htlc_tx( &mut self, account_index: U31, output_value: OutputValue, @@ -2161,7 +2248,7 @@ where additional_info: TxAdditionalInfo, ) -> WalletResult { let latest_median_time = self.latest_median_time; - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, additional_info, |account, db_tx| { @@ -2177,6 +2264,7 @@ where ) }, ) + .await } pub fn get_orders( @@ -2188,7 +2276,7 @@ where } 
#[allow(clippy::too_many_arguments)] - pub fn create_order_tx( + pub async fn create_order_tx( &mut self, account_index: U31, ask_value: OutputValue, @@ -2199,29 +2287,31 @@ where additional_info: TxAdditionalInfo, ) -> WalletResult<(OrderId, SignedTxWithFees)> { let latest_median_time = self.latest_median_time; - let tx = self.for_account_rw_unlocked_and_check_tx_with_fees( - account_index, - additional_info, - |account, db_tx| { - account.create_order_tx( - db_tx, - ask_value, - give_value, - conclude_key, - latest_median_time, - CurrentFeeRate { - current_fee_rate, - consolidate_fee_rate, - }, - ) - }, - )?; + let tx = self + .async_for_account_rw_unlocked_and_check_tx( + account_index, + additional_info, + |account, db_tx| { + account.create_order_tx( + db_tx, + ask_value, + give_value, + conclude_key, + latest_median_time, + CurrentFeeRate { + current_fee_rate, + consolidate_fee_rate, + }, + ) + }, + ) + .await?; let order_id = make_order_id(tx.tx.inputs())?; Ok((order_id, tx)) } #[allow(clippy::too_many_arguments)] - pub fn create_conclude_order_tx( + pub async fn create_conclude_order_tx( &mut self, account_index: U31, order_id: OrderId, @@ -2232,7 +2322,7 @@ where additional_info: TxAdditionalInfo, ) -> WalletResult { let latest_median_time = self.latest_median_time; - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, additional_info, |account, db_tx| { @@ -2249,10 +2339,11 @@ where ) }, ) + .await } #[allow(clippy::too_many_arguments)] - pub fn create_fill_order_tx( + pub async fn create_fill_order_tx( &mut self, account_index: U31, order_id: OrderId, @@ -2264,7 +2355,7 @@ where additional_info: TxAdditionalInfo, ) -> WalletResult { let latest_median_time = self.latest_median_time; - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, additional_info, |account, db_tx| { @@ -2282,9 +2373,10 @@ where ) }, ) + .await } - pub 
fn create_freeze_order_tx( + pub async fn create_freeze_order_tx( &mut self, account_index: U31, order_id: OrderId, @@ -2294,7 +2386,7 @@ where additional_info: TxAdditionalInfo, ) -> WalletResult { let latest_median_time = self.latest_median_time; - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, additional_info, |account, db_tx| { @@ -2310,9 +2402,10 @@ where ) }, ) + .await } - pub fn sign_raw_transaction( + pub async fn sign_raw_transaction( &mut self, account_index: U31, ptx: PartiallySignedTransaction, @@ -2325,40 +2418,40 @@ where let (_, best_block_height) = self.get_best_block_for_account(account_index)?; let next_block_height = best_block_height.next_height(); - self.for_account_rw_unlocked( + self.async_for_account_key_chain_rw_unlocked( account_index, - |account, db_tx, chain_config, signer_provider| { - let mut signer = - signer_provider.provide(Arc::new(chain_config.clone()), account_index); - - let res = signer.sign_tx( - ptx, - tokens_additional_info, - account.key_chain(), - db_tx, - next_block_height, - )?; - Ok(res) + async move |key_chain, store, _chain_config, mut signer| { + signer + .sign_tx( + ptx, + tokens_additional_info, + key_chain, + store, + next_block_height, + ) + .await + .map_err(Into::into) }, ) + .await } - pub fn sign_challenge( + pub async fn sign_challenge( &mut self, account_index: U31, challenge: &[u8], destination: &Destination, ) -> WalletResult { - self.for_account_rw_unlocked( + self.async_for_account_key_chain_rw_unlocked( account_index, - |account, db_tx, chain_config, signer_provider| { - let mut signer = - signer_provider.provide(Arc::new(chain_config.clone()), account_index); - let msg = - signer.sign_challenge(challenge, destination, account.key_chain(), db_tx)?; - Ok(msg) + async move |key_chain, store, _chain_config, mut signer| { + signer + .sign_challenge(challenge, destination, key_chain, store) + .await + .map_err(Into::into) }, ) + .await 
} /// Returns the last scanned block hash and height for all accounts. @@ -2540,7 +2633,7 @@ fn to_token_additional_info(token_info: &UnconfirmedTokenInfo) -> TxAdditionalIn impl Wallet where - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, P: SignerProvider, { pub fn get_vrf_key( @@ -2568,7 +2661,7 @@ where Ok(account.get_legacy_vrf_public_key()) } - pub fn create_stake_pool( + pub async fn create_stake_pool( &mut self, account_index: U31, current_fee_rate: FeeRate, @@ -2576,7 +2669,7 @@ where stake_pool_arguments: StakePoolCreationArguments, ) -> WalletResult { let latest_median_time = self.latest_median_time; - self.for_account_rw_unlocked_and_check_tx_with_fees( + self.async_for_account_rw_unlocked_and_check_tx( account_index, TxAdditionalInfo::new(), |account, db_tx| { @@ -2591,6 +2684,7 @@ where ) }, ) + .await } pub fn get_pos_gen_block_data( diff --git a/wallet/src/wallet/test_helpers.rs b/wallet/src/wallet/test_helpers.rs index 728d4ee3e9..4a3f8de5c5 100644 --- a/wallet/src/wallet/test_helpers.rs +++ b/wallet/src/wallet/test_helpers.rs @@ -24,6 +24,7 @@ use common::{ chain::{Block, ChainConfig}, primitives::BlockHeight, }; +use wallet_storage::{DefaultBackend, Store}; use wallet_types::{seed_phrase::StoreSeedPhrase, wallet_type::WalletType}; use crate::{ @@ -59,9 +60,44 @@ pub fn create_wallet_with_mnemonic( .unwrap() } +pub fn create_named_in_memory_backend(db_name: &str) -> DefaultBackend { + DefaultBackend::new_named_in_memory(db_name) +} + +pub fn create_named_in_memory_store(db_name: &str) -> Store { + Store::new(create_named_in_memory_backend(db_name)).unwrap() +} + +pub fn create_wallet_with_mnemonic_and_named_db( + chain_config: Arc, + mnemonic: &str, + db_name: &str, +) -> DefaultWallet { + let db = create_named_in_memory_store(db_name); + let genesis_block_id = chain_config.genesis_block_id(); + Wallet::create_new_wallet( + chain_config.clone(), + db, + (BlockHeight::new(0), genesis_block_id), + 
WalletType::Hot, + |db_tx| { + Ok(SoftwareSignerProvider::new_from_mnemonic( + chain_config, + db_tx, + mnemonic, + None, + StoreSeedPhrase::DoNotStore, + )?) + }, + ) + .unwrap() + .wallet() + .unwrap() +} + pub fn scan_wallet(wallet: &mut Wallet, height: BlockHeight, blocks: Vec) where - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, P: SignerProvider, { for account in wallet.get_best_block().keys() { diff --git a/wallet/src/wallet/tests.rs b/wallet/src/wallet/tests.rs index a946a98f27..74a44202cc 100644 --- a/wallet/src/wallet/tests.rs +++ b/wallet/src/wallet/tests.rs @@ -48,6 +48,7 @@ use storage::raw::DbMapId; use test_utils::{ assert_matches, assert_matches_return_val, random::{make_seedable_rng, Seed}, + random_ascii_alphanumeric_string, token_utils::random_token_issuance_v1_with_min_supply, }; use wallet_storage::{schema, WalletStorageEncryptionRead}; @@ -66,7 +67,10 @@ use crate::{ key_chain::{make_account_path, LOOKAHEAD_SIZE}, send_request::{make_address_output, make_create_delegation_output}, signer::software_signer::SoftwareSignerProvider, - wallet::test_helpers::{create_wallet_with_mnemonic, scan_wallet}, + wallet::test_helpers::{ + create_named_in_memory_backend, create_named_in_memory_store, create_wallet_with_mnemonic, + create_wallet_with_mnemonic_and_named_db, scan_wallet, + }, wallet_events::WalletEventsNoOp, DefaultWallet, }; @@ -85,7 +89,7 @@ const NETWORK_FEE: u128 = 10000; fn get_best_block(wallet: &Wallet) -> (Id, BlockHeight) where - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, P: SignerProvider, { *wallet.get_best_block().first_key_value().unwrap().1 @@ -180,7 +184,7 @@ fn get_address( fn get_coin_balance_for_acc(wallet: &Wallet, account: U31) -> Amount where - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, P: SignerProvider, { let coin_balance = wallet @@ -194,7 +198,7 @@ where fn 
get_coin_balance_with_inactive(wallet: &Wallet) -> Amount where - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, P: SignerProvider, { let coin_balance = wallet @@ -227,7 +231,7 @@ fn get_balance_with( fn get_coin_balance(wallet: &Wallet) -> Amount where - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, P: SignerProvider, { get_coin_balance_for_acc(wallet, DEFAULT_ACCOUNT_INDEX) @@ -235,7 +239,7 @@ where fn get_currency_balances(wallet: &Wallet) -> (Amount, Vec<(TokenId, Amount)>) where - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, P: SignerProvider, { let mut currency_balances = wallet @@ -259,19 +263,21 @@ where } #[track_caller] -fn verify_wallet_balance( +fn verify_wallet_balance( chain_config: &Arc, wallet: &Wallet, expected_balance: Amount, + reopen_db_func: RDF, ) where - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, P: SignerProvider, + RDF: FnOnce() -> Store, { let coin_balance = get_coin_balance(wallet); assert_eq!(coin_balance, expected_balance); // Loading a copy of the wallet from the same DB should be safe because loading is an R/O operation - let db_copy = wallet.db.clone(); + let db_copy = reopen_db_func(); let wallet = Wallet::load_wallet( Arc::clone(chain_config), db_copy, @@ -285,8 +291,6 @@ fn verify_wallet_balance( .wallet() .unwrap(); - wallet.get_best_block(); - let coin_balance = get_coin_balance(&wallet); // Check that the loaded wallet has the same balance assert_eq!(coin_balance, expected_balance); @@ -307,7 +311,7 @@ fn create_block_with_reward_address( address: Destination, ) -> Block where - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, P: SignerProvider, { let block1 = Block::new( @@ -332,7 +336,7 @@ fn create_block( block_height: u64, ) -> (Address, Block) where - B: storage::Backend + 'static, + B: 
storage::BackendWithSendableTransactions + 'static, P: SignerProvider, { let address = wallet.get_new_address(DEFAULT_ACCOUNT_INDEX).unwrap().1; @@ -352,6 +356,7 @@ fn test_balance_from_genesis( chain_type: ChainType, utxos: Vec, expected_balance: Amount, + rng: &mut impl Rng, ) { let genesis = Genesis::new( String::new(), @@ -366,9 +371,12 @@ fn test_balance_from_genesis( .build(), ); - let wallet = create_wallet(chain_config.clone()); + let db_name = random_ascii_alphanumeric_string(rng, 10..20); + let wallet = create_wallet_with_mnemonic_and_named_db(chain_config.clone(), MNEMONIC, &db_name); - verify_wallet_balance(&chain_config, &wallet, expected_balance); + verify_wallet_balance(&chain_config, &wallet, expected_balance, || { + create_named_in_memory_store(&db_name) + }); } #[test] @@ -428,7 +436,8 @@ fn wallet_migration_to_v2(#[case] seed: Seed) { ); let chain_type = ChainType::Regtest; let chain_config = Arc::new(Builder::new(chain_type).genesis_custom(genesis).build()); - let db = create_wallet_in_memory().unwrap(); + let db_name = random_ascii_alphanumeric_string(&mut rng, 10..20); + let db = create_named_in_memory_store(&db_name); let genesis_block_id = chain_config.genesis_block_id(); let mut wallet = Wallet::create_new_wallet( Arc::clone(&chain_config), @@ -449,13 +458,15 @@ fn wallet_migration_to_v2(#[case] seed: Seed) { .wallet() .unwrap(); - verify_wallet_balance(&chain_config, &wallet, genesis_amount); + verify_wallet_balance(&chain_config, &wallet, genesis_amount, || { + create_named_in_memory_store(&db_name) + }); let password = Some("password".into()); wallet.encrypt_wallet(&password).unwrap(); wallet.lock_wallet().unwrap(); let default_acc_id = wallet.accounts.get(&DEFAULT_ACCOUNT_INDEX).unwrap().get_account_id(); - let db = wallet.db; + let mut db = wallet.db; // set version back to v1 let mut db_tx = db.transaction_rw(None).unwrap(); @@ -488,7 +499,9 @@ fn wallet_migration_to_v2(#[case] seed: Seed) { ); } - let new_db = 
Store::new_from_dump(DefaultBackend::new_in_memory(), raw_db).unwrap(); + let new_db_name = random_ascii_alphanumeric_string(&mut rng, 10..20); + let new_db = + Store::new_from_dump(create_named_in_memory_backend(&new_db_name), raw_db).unwrap(); let wallet = Wallet::load_wallet( Arc::clone(&chain_config), @@ -514,7 +527,9 @@ fn wallet_migration_to_v2(#[case] seed: Seed) { wallet.get_best_block_for_account(DEFAULT_ACCOUNT_INDEX).unwrap(), (chain_config.genesis_block_id(), BlockHeight::new(0)) ); - verify_wallet_balance(&chain_config, &wallet, genesis_amount); + verify_wallet_balance(&chain_config, &wallet, genesis_amount, || { + create_named_in_memory_store(&new_db_name) + }); } #[rstest] @@ -717,8 +732,11 @@ fn wallet_seed_phrase_check_address() { assert_eq!(expected_pk2, pk.hex_encode().strip_prefix("00").unwrap()); } -#[test] -fn wallet_balance_genesis() { +#[rstest] +#[trace] +#[case(Seed::from_entropy())] +fn wallet_balance_genesis(#[case] seed: Seed) { + let mut rng = make_seedable_rng(seed); let chain_type = ChainType::Mainnet; let genesis_amount = Amount::from_atoms(12345); @@ -735,7 +753,12 @@ fn wallet_balance_genesis() { address.as_object().clone(), ); - test_balance_from_genesis(chain_type, vec![genesis_output.clone()], genesis_amount); + test_balance_from_genesis( + chain_type, + vec![genesis_output.clone()], + genesis_amount, + &mut rng, + ); let genesis_amount_2 = Amount::from_atoms(54321); let genesis_output_2 = TxOutput::LockThenTransfer( @@ -748,6 +771,7 @@ fn wallet_balance_genesis() { chain_type, vec![genesis_output, genesis_output_2], (genesis_amount + genesis_amount_2).unwrap(), + &mut rng, ); let address_indexes = [0, LOOKAHEAD_SIZE - 1, LOOKAHEAD_SIZE]; @@ -766,9 +790,14 @@ fn wallet_balance_genesis() { let genesis_output = make_address_output(address.into_object(), genesis_amount); if address_index.into_u32() == LOOKAHEAD_SIZE { - test_balance_from_genesis(chain_type, vec![genesis_output], Amount::ZERO); + 
test_balance_from_genesis(chain_type, vec![genesis_output], Amount::ZERO, &mut rng); } else { - test_balance_from_genesis(chain_type, vec![genesis_output], genesis_amount); + test_balance_from_genesis( + chain_type, + vec![genesis_output], + genesis_amount, + &mut rng, + ); } } } @@ -817,11 +846,16 @@ fn locked_wallet_balance_works(#[case] seed: Seed) { assert_eq!(coin_balance, genesis_amount); } -#[test] -fn wallet_balance_block_reward() { +#[rstest] +#[trace] +#[case(Seed::from_entropy())] +fn wallet_balance_block_reward(#[case] seed: Seed) { + let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); - let mut wallet = create_wallet(chain_config.clone()); + let db_name = random_ascii_alphanumeric_string(&mut rng, 10..20); + let mut wallet = + create_wallet_with_mnemonic_and_named_db(chain_config.clone(), MNEMONIC, &db_name); let coin_balance = get_coin_balance(&wallet); assert_eq!(coin_balance, Amount::ZERO); @@ -837,7 +871,9 @@ fn wallet_balance_block_reward() { let (best_block_id, best_block_height) = get_best_block(&wallet); assert_eq!(best_block_id, block1.get_id()); assert_eq!(best_block_height, BlockHeight::new(1)); - verify_wallet_balance(&chain_config, &wallet, block1_amount); + verify_wallet_balance(&chain_config, &wallet, block1_amount, || { + create_named_in_memory_store(&db_name) + }); // Create the second block that sends the reward to the wallet let block2_amount = Amount::from_atoms(20000); @@ -870,6 +906,7 @@ fn wallet_balance_block_reward() { &chain_config, &wallet, (block1_amount + block2_amount).unwrap(), + || create_named_in_memory_store(&db_name), ); // Create a new block to replace the second block @@ -903,14 +940,20 @@ fn wallet_balance_block_reward() { &chain_config, &wallet, (block1_amount + block2_amount_new).unwrap(), + || create_named_in_memory_store(&db_name), ); } -#[test] -fn wallet_balance_block_transactions() { +#[rstest] +#[trace] +#[case(Seed::from_entropy())] +fn 
wallet_balance_block_transactions(#[case] seed: Seed) { + let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); - let mut wallet = create_wallet(chain_config.clone()); + let db_name = random_ascii_alphanumeric_string(&mut rng, 10..20); + let mut wallet = + create_wallet_with_mnemonic_and_named_db(chain_config.clone(), MNEMONIC, &db_name); let tx_amount1 = Amount::from_atoms(10000); let address = get_address( @@ -936,15 +979,22 @@ fn wallet_balance_block_transactions() { 0, ); - verify_wallet_balance(&chain_config, &wallet, tx_amount1); + verify_wallet_balance(&chain_config, &wallet, tx_amount1, || { + create_named_in_memory_store(&db_name) + }); } -#[test] // Verify that outputs can be created and consumed in the same block -fn wallet_balance_parent_child_transactions() { +#[rstest] +#[trace] +#[case(Seed::from_entropy())] +fn wallet_balance_parent_child_transactions(#[case] seed: Seed) { + let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); - let mut wallet = create_wallet(chain_config.clone()); + let db_name = random_ascii_alphanumeric_string(&mut rng, 10..20); + let mut wallet = + create_wallet_with_mnemonic_and_named_db(chain_config.clone(), MNEMONIC, &db_name); let tx_amount1 = Amount::from_atoms(20000); let tx_amount2 = Amount::from_atoms(10000); @@ -990,22 +1040,26 @@ fn wallet_balance_parent_child_transactions() { 0, ); - verify_wallet_balance(&chain_config, &wallet, tx_amount2); + verify_wallet_balance(&chain_config, &wallet, tx_amount2, || { + create_named_in_memory_store(&db_name) + }); } #[track_caller] -fn test_wallet_accounts( +fn test_wallet_accounts( chain_config: &Arc, wallet: &Wallet, expected_accounts: Vec, + reopen_db_func: RDF, ) where - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, P: SignerProvider, + RDF: FnOnce() -> Store, { let accounts = wallet.account_indexes().cloned().collect::>(); assert_eq!(accounts, expected_accounts); - let 
db_copy = wallet.db.clone(); + let db_copy = reopen_db_func(); let wallet = Wallet::load_wallet( Arc::clone(chain_config), db_copy, @@ -1022,13 +1076,21 @@ fn test_wallet_accounts( assert_eq!(accounts, expected_accounts); } -#[test] -fn wallet_accounts_creation() { +#[rstest] +#[trace] +#[case(Seed::from_entropy())] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn wallet_accounts_creation(#[case] seed: Seed) { + let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); - let mut wallet = create_wallet(chain_config.clone()); + let db_name = random_ascii_alphanumeric_string(&mut rng, 10..20); + let mut wallet = + create_wallet_with_mnemonic_and_named_db(chain_config.clone(), MNEMONIC, &db_name); - test_wallet_accounts(&chain_config, &wallet, vec![DEFAULT_ACCOUNT_INDEX]); + test_wallet_accounts(&chain_config, &wallet, vec![DEFAULT_ACCOUNT_INDEX], || { + create_named_in_memory_store(&db_name) + }); // DEFAULT_ACCOUNT_INDEX now has 1 transaction so next account can be created let _ = create_block( &chain_config, @@ -1059,6 +1121,7 @@ fn wallet_accounts_creation() { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; @@ -1183,7 +1246,8 @@ fn wallet_recover_new_account(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn locked_wallet_cant_sign_transaction(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn locked_wallet_cant_sign_transaction(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ -1211,15 +1275,17 @@ fn locked_wallet_cant_sign_transaction(#[case] seed: Seed) { ); assert_eq!( - wallet.create_transaction_to_addresses( - DEFAULT_ACCOUNT_INDEX, - [new_output.clone()], - SelectedInputs::Utxos(vec![]), - BTreeMap::new(), - FeeRate::from_amount_per_kb(Amount::ZERO), - FeeRate::from_amount_per_kb(Amount::ZERO), - TxAdditionalInfo::new(), - ), + wallet + 
.create_transaction_to_addresses( + DEFAULT_ACCOUNT_INDEX, + [new_output.clone()], + SelectedInputs::Utxos(vec![]), + BTreeMap::new(), + FeeRate::from_amount_per_kb(Amount::ZERO), + FeeRate::from_amount_per_kb(Amount::ZERO), + TxAdditionalInfo::new(), + ) + .await, Err(WalletError::DatabaseError( wallet_storage::Error::WalletLocked )) @@ -1238,6 +1304,7 @@ fn locked_wallet_cant_sign_transaction(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap(); } else { // check if we remove the password it should fail to lock @@ -1268,6 +1335,7 @@ fn locked_wallet_cant_sign_transaction(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap(); } } @@ -1275,7 +1343,8 @@ fn locked_wallet_cant_sign_transaction(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn locked_wallet_standalone_keys( +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn locked_wallet_standalone_keys( #[case] seed: Seed, #[values(true, false)] insert_before_encrypt: bool, #[values(true, false)] change_password: bool, @@ -1382,15 +1451,17 @@ fn locked_wallet_standalone_keys( ); assert_eq!( - wallet.create_transaction_to_addresses( - DEFAULT_ACCOUNT_INDEX, - [new_output.clone()], - SelectedInputs::Utxos(vec![]), - BTreeMap::new(), - FeeRate::from_amount_per_kb(Amount::ZERO), - FeeRate::from_amount_per_kb(Amount::ZERO), - TxAdditionalInfo::new(), - ), + wallet + .create_transaction_to_addresses( + DEFAULT_ACCOUNT_INDEX, + [new_output.clone()], + SelectedInputs::Utxos(vec![]), + BTreeMap::new(), + FeeRate::from_amount_per_kb(Amount::ZERO), + FeeRate::from_amount_per_kb(Amount::ZERO), + TxAdditionalInfo::new(), + ) + .await, Err(WalletError::DatabaseError( wallet_storage::Error::WalletLocked )) @@ -1408,13 +1479,15 @@ fn locked_wallet_standalone_keys( FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap(); } #[rstest] #[trace] 
#[case(Seed::from_entropy())] -fn wallet_get_transaction(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn wallet_get_transaction(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ -1436,6 +1509,7 @@ fn wallet_get_transaction(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; @@ -1473,7 +1547,8 @@ fn wallet_get_transaction(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn wallet_list_mainchain_transactions(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn wallet_list_mainchain_transactions(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_regtest()); @@ -1497,6 +1572,7 @@ fn wallet_list_mainchain_transactions(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; @@ -1521,6 +1597,7 @@ fn wallet_list_mainchain_transactions(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; let spend_from_tx_id = tx.transaction().get_id(); @@ -1553,7 +1630,8 @@ fn wallet_list_mainchain_transactions(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn wallet_transactions_with_fees(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn wallet_transactions_with_fees(#[case] seed: Seed) { use crate::destination_getters::{get_tx_output_destination, HtlcSpendingCondition}; let mut rng = make_seedable_rng(seed); @@ -1589,6 +1667,7 @@ fn wallet_transactions_with_fees(#[case] seed: Seed) { very_big_feerate, TxAdditionalInfo::new(), ) + .await .unwrap_err(); match err { @@ -1630,6 +1709,7 @@ fn wallet_transactions_with_fees(#[case] seed: Seed) { feerate, TxAdditionalInfo::new(), ) + .await .unwrap(); let tx_size = 
serialization::Encode::encoded_size(&tx); @@ -1686,6 +1766,7 @@ fn wallet_transactions_with_fees(#[case] seed: Seed) { feerate, TxAdditionalInfo::new(), ) + .await .unwrap(); let tx_size = serialization::Encode::encoded_size(&tx); @@ -1738,7 +1819,8 @@ fn lock_wallet_fail_empty_password() { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn spend_from_user_specified_utxos(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn spend_from_user_specified_utxos(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ -1791,6 +1873,7 @@ fn spend_from_user_specified_utxos(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap_err(); assert_eq!(err, WalletError::CannotFindUtxo(missing_utxo.clone())); @@ -1818,6 +1901,7 @@ fn spend_from_user_specified_utxos(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; @@ -1857,6 +1941,7 @@ fn spend_from_user_specified_utxos(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap_err(); assert_eq!( @@ -1869,7 +1954,8 @@ fn spend_from_user_specified_utxos(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn create_stake_pool_and_list_pool_ids(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn create_stake_pool_and_list_pool_ids(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_regtest()); @@ -1911,6 +1997,7 @@ fn create_stake_pool_and_list_pool_ids(#[case] seed: Seed) { vrf_public_key: None, }, ) + .await .unwrap_err(); assert_eq!(err, WalletError::VrfKeyMustBeProvided); @@ -1928,8 +2015,10 @@ fn create_stake_pool_and_list_pool_ids(#[case] seed: Seed) { vrf_public_key: None, }, ) + .await .unwrap() .tx; + let stake_pool_transaction_id = 
stake_pool_transaction.transaction().get_id(); let (addr, block2) = create_block( &chain_config, @@ -2027,6 +2116,7 @@ fn create_stake_pool_and_list_pool_ids(#[case] seed: Seed) { None, FeeRate::from_amount_per_kb(Amount::from_atoms(0)), ) + .await .unwrap() .tx; @@ -2058,7 +2148,8 @@ fn create_stake_pool_and_list_pool_ids(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn create_stake_pool_for_different_wallet_and_list_pool_ids(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn create_stake_pool_for_different_wallet_and_list_pool_ids(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_regtest()); @@ -2116,6 +2207,7 @@ fn create_stake_pool_for_different_wallet_and_list_pool_ids(#[case] seed: Seed) vrf_public_key: Some(staker_vrf_public_key.clone()), }, ) + .await .unwrap_err(); assert_eq!(err, WalletError::StakerDestinationMustBePublicKey); @@ -2139,6 +2231,7 @@ fn create_stake_pool_for_different_wallet_and_list_pool_ids(#[case] seed: Seed) vrf_public_key: Some(staker_vrf_public_key.clone()), }, ) + .await .unwrap() .tx; let stake_pool_transaction_id = stake_pool_transaction.transaction().get_id(); @@ -2157,6 +2250,7 @@ fn create_stake_pool_for_different_wallet_and_list_pool_ids(#[case] seed: Seed) vrf_public_key: Some(staker_vrf_public_key.clone()), }, ) + .await .unwrap() .tx; let stake_pool_transaction_id2 = stake_pool_transaction2.transaction().get_id(); @@ -2298,6 +2392,7 @@ fn create_stake_pool_for_different_wallet_and_list_pool_ids(#[case] seed: Seed) None, FeeRate::from_amount_per_kb(Amount::from_atoms(0)), ) + .await .unwrap() .tx; @@ -2324,7 +2419,8 @@ fn create_stake_pool_for_different_wallet_and_list_pool_ids(#[case] seed: Seed) #[rstest] #[trace] #[case(Seed::from_entropy())] -fn reset_keys_after_failed_transaction(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn 
reset_keys_after_failed_transaction(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ -2347,19 +2443,21 @@ fn reset_keys_after_failed_transaction(#[case] seed: Seed) { .unwrap() .last_issued(); - let result = wallet.create_stake_pool( - DEFAULT_ACCOUNT_INDEX, - FeeRate::from_amount_per_kb(Amount::ZERO), - FeeRate::from_amount_per_kb(Amount::ZERO), - StakePoolCreationArguments { - amount: not_enough, - margin_ratio_per_thousand: PerThousand::new_from_rng(&mut rng), - cost_per_block: Amount::ZERO, - decommission_key: Destination::AnyoneCanSpend, - staker_key: None, - vrf_public_key: None, - }, - ); + let result = wallet + .create_stake_pool( + DEFAULT_ACCOUNT_INDEX, + FeeRate::from_amount_per_kb(Amount::ZERO), + FeeRate::from_amount_per_kb(Amount::ZERO), + StakePoolCreationArguments { + amount: not_enough, + margin_ratio_per_thousand: PerThousand::new_from_rng(&mut rng), + cost_per_block: Amount::ZERO, + decommission_key: Destination::AnyoneCanSpend, + staker_key: None, + vrf_public_key: None, + }, + ) + .await; // check that result is an error and we last issued address is still the same assert!(result.is_err()); assert_eq!( @@ -2374,7 +2472,8 @@ fn reset_keys_after_failed_transaction(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn send_to_unknown_delegation(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn send_to_unknown_delegation(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ -2439,6 +2538,7 @@ fn send_to_unknown_delegation(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -2473,6 +2573,7 @@ fn send_to_unknown_delegation(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; @@ -2514,6 +2615,7 @@ fn 
send_to_unknown_delegation(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -2539,7 +2641,8 @@ fn send_to_unknown_delegation(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn create_spend_from_delegations(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn create_spend_from_delegations(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ -2575,6 +2678,7 @@ fn create_spend_from_delegations(#[case] seed: Seed) { vrf_public_key: None, }, ) + .await .unwrap() .tx; @@ -2603,6 +2707,7 @@ fn create_spend_from_delegations(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -2633,6 +2738,7 @@ fn create_spend_from_delegations(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; @@ -2653,6 +2759,7 @@ fn create_spend_from_delegations(#[case] seed: Seed) { Amount::from_atoms(2), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -2686,6 +2793,7 @@ fn create_spend_from_delegations(#[case] seed: Seed) { Amount::from_atoms(1), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; wallet @@ -2758,6 +2866,7 @@ fn create_spend_from_delegations(#[case] seed: Seed) { Amount::from_atoms(1), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; wallet @@ -2785,7 +2894,8 @@ fn create_spend_from_delegations(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn issue_and_transfer_tokens(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn issue_and_transfer_tokens(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ 
-2859,6 +2969,7 @@ fn issue_and_transfer_tokens(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -2905,6 +3016,7 @@ fn issue_and_transfer_tokens(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; wallet @@ -2927,6 +3039,7 @@ fn issue_and_transfer_tokens(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -2954,6 +3067,7 @@ fn issue_and_transfer_tokens(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); random_issuing_wallet @@ -2973,6 +3087,7 @@ fn issue_and_transfer_tokens(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; (issued_token_id, vec![nft_issuance_transaction, transfer_tx]) @@ -3027,6 +3142,7 @@ fn issue_and_transfer_tokens(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), additional_info.clone(), ) + .await .unwrap() .tx; wallet @@ -3082,6 +3198,7 @@ fn issue_and_transfer_tokens(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), additional_info, ) + .await .unwrap_err(); let remaining_tokens = (token_amount_to_issue - tokens_to_transfer).unwrap(); @@ -3104,7 +3221,8 @@ fn issue_and_transfer_tokens(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn check_tokens_v0_are_ignored(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn check_tokens_v0_are_ignored(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_regtest()); @@ -3126,25 +3244,27 @@ fn check_tokens_v0_are_ignored(#[case] seed: Seed) { let address2 = wallet.get_new_address(DEFAULT_ACCOUNT_INDEX).unwrap().1; let token_ticker = 
"XXXX".as_bytes().to_vec(); let number_of_decimals = rng.gen_range(1..18); - let result = wallet.create_transaction_to_addresses( - DEFAULT_ACCOUNT_INDEX, - [TxOutput::Transfer( - OutputValue::TokenV0(Box::new(TokenData::TokenIssuance(Box::new( - TokenIssuanceV0 { - token_ticker, - number_of_decimals, - metadata_uri: "http://uri".as_bytes().to_vec(), - amount_to_issue: Amount::from_atoms(rng.gen_range(1..10000)), - }, - )))), - address2.into_object(), - )], - SelectedInputs::Utxos(vec![]), - BTreeMap::new(), - FeeRate::from_amount_per_kb(Amount::ZERO), - FeeRate::from_amount_per_kb(Amount::ZERO), - TxAdditionalInfo::new(), - ); + let result = wallet + .create_transaction_to_addresses( + DEFAULT_ACCOUNT_INDEX, + [TxOutput::Transfer( + OutputValue::TokenV0(Box::new(TokenData::TokenIssuance(Box::new( + TokenIssuanceV0 { + token_ticker, + number_of_decimals, + metadata_uri: "http://uri".as_bytes().to_vec(), + amount_to_issue: Amount::from_atoms(rng.gen_range(1..10000)), + }, + )))), + address2.into_object(), + )], + SelectedInputs::Utxos(vec![]), + BTreeMap::new(), + FeeRate::from_amount_per_kb(Amount::ZERO), + FeeRate::from_amount_per_kb(Amount::ZERO), + TxAdditionalInfo::new(), + ) + .await; matches!( result.unwrap_err(), @@ -3162,7 +3282,8 @@ fn check_tokens_v0_are_ignored(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn freeze_and_unfreeze_tokens(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn freeze_and_unfreeze_tokens(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_regtest()); @@ -3199,6 +3320,7 @@ fn freeze_and_unfreeze_tokens(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -3238,6 +3360,7 @@ fn freeze_and_unfreeze_tokens(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + 
.await .unwrap() .tx; @@ -3255,6 +3378,7 @@ fn freeze_and_unfreeze_tokens(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -3284,6 +3408,7 @@ fn freeze_and_unfreeze_tokens(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -3356,6 +3481,7 @@ fn freeze_and_unfreeze_tokens(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -3383,6 +3509,7 @@ fn freeze_and_unfreeze_tokens(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), additional_info, ) + .await .unwrap() .tx; @@ -3418,6 +3545,7 @@ fn freeze_and_unfreeze_tokens(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap_err(); assert_eq!(err, WalletError::CannotFreezeAlreadyFrozenToken); @@ -3429,6 +3557,7 @@ fn freeze_and_unfreeze_tokens(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap_err(); assert_eq!(err, WalletError::CannotUnfreezeToken); @@ -3467,7 +3596,8 @@ fn freeze_and_unfreeze_tokens(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn change_token_supply_fixed(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn change_token_supply_fixed(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ -3503,6 +3633,7 @@ fn change_token_supply_fixed(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -3557,6 +3688,7 @@ fn change_token_supply_fixed(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; 
@@ -3579,6 +3711,7 @@ fn change_token_supply_fixed(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap_err(); assert_eq!( @@ -3657,6 +3790,7 @@ fn change_token_supply_fixed(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap_err(); assert_eq!( err, @@ -3673,6 +3807,7 @@ fn change_token_supply_fixed(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -3717,6 +3852,7 @@ fn change_token_supply_fixed(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap_err(); assert_eq!(err, WalletError::CannotLockTokenSupply("Fixed")); } @@ -3724,7 +3860,8 @@ fn change_token_supply_fixed(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn change_token_supply_unlimited(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn change_token_supply_unlimited(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ -3759,6 +3896,7 @@ fn change_token_supply_unlimited(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -3814,6 +3952,7 @@ fn change_token_supply_unlimited(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -3855,6 +3994,7 @@ fn change_token_supply_unlimited(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap_err(); assert_eq!( err, @@ -3871,6 +4011,7 @@ fn change_token_supply_unlimited(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + 
.await .unwrap() .tx; wallet @@ -3914,6 +4055,7 @@ fn change_token_supply_unlimited(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap_err(); assert_eq!(err, WalletError::CannotLockTokenSupply("Unlimited")); } @@ -3921,7 +4063,8 @@ fn change_token_supply_unlimited(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn change_and_lock_token_supply_lockable(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn change_and_lock_token_supply_lockable(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ -3956,6 +4099,7 @@ fn change_and_lock_token_supply_lockable(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -4011,6 +4155,7 @@ fn change_and_lock_token_supply_lockable(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; wallet.add_unconfirmed_tx(mint_transaction.clone(), &WalletEventsNoOp).unwrap(); @@ -4051,6 +4196,7 @@ fn change_and_lock_token_supply_lockable(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap_err(); assert_eq!( err, @@ -4067,6 +4213,7 @@ fn change_and_lock_token_supply_lockable(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -4108,6 +4255,7 @@ fn change_and_lock_token_supply_lockable(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -4148,6 +4296,7 @@ fn change_and_lock_token_supply_lockable(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap_err(); 
assert_eq!(err, WalletError::CannotChangeLockedTokenSupply); let err = wallet @@ -4158,6 +4307,7 @@ fn change_and_lock_token_supply_lockable(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap_err(); assert_eq!(err, WalletError::CannotChangeLockedTokenSupply); @@ -4168,6 +4318,7 @@ fn change_and_lock_token_supply_lockable(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap_err(); assert_eq!(err, WalletError::CannotLockTokenSupply("Locked")); } @@ -4175,7 +4326,8 @@ fn change_and_lock_token_supply_lockable(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn lock_then_transfer(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn lock_then_transfer(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ -4245,6 +4397,7 @@ fn lock_then_transfer(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; wallet @@ -4319,7 +4472,8 @@ fn lock_then_transfer(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn wallet_multiple_transactions_in_single_block(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn wallet_multiple_transactions_in_single_block(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ -4367,6 +4521,7 @@ fn wallet_multiple_transactions_in_single_block(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; wallet.add_unconfirmed_tx(transaction.clone(), &WalletEventsNoOp).unwrap(); @@ -4395,7 +4550,8 @@ fn wallet_multiple_transactions_in_single_block(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn 
wallet_scan_multiple_transactions_from_mempool(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn wallet_scan_multiple_transactions_from_mempool(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ -4456,6 +4612,7 @@ fn wallet_scan_multiple_transactions_from_mempool(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; @@ -4492,6 +4649,7 @@ fn wallet_scan_multiple_transactions_from_mempool(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; wallet.add_unconfirmed_tx(transaction.clone(), &WalletEventsNoOp).unwrap(); @@ -4532,6 +4690,7 @@ fn wallet_scan_multiple_transactions_from_mempool(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap_err(); assert_eq!( err, @@ -4558,6 +4717,7 @@ fn wallet_scan_multiple_transactions_from_mempool(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; wallet.add_unconfirmed_tx(transaction.clone(), &WalletEventsNoOp).unwrap(); @@ -4581,7 +4741,8 @@ fn wallet_scan_multiple_transactions_from_mempool(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn wallet_abandon_transactions(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn wallet_abandon_transactions(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ -4640,6 +4801,7 @@ fn wallet_abandon_transactions(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; wallet @@ -4833,7 +4995,8 @@ fn wallet_set_lookahead_size(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn decommission_pool_wrong_account(#[case] seed: Seed) { +#[tokio::test(flavor = 
"multi_thread", worker_threads = 1)] +async fn decommission_pool_wrong_account(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_regtest()); @@ -4876,6 +5039,7 @@ fn decommission_pool_wrong_account(#[case] seed: Seed) { vrf_public_key: None, }, ) + .await .unwrap() .tx; let _ = create_block( @@ -4891,13 +5055,15 @@ fn decommission_pool_wrong_account(#[case] seed: Seed) { // Try to decommission the pool with default account let pool_id = pool_ids.first().unwrap().0; - let decommission_cmd_res = wallet.decommission_stake_pool( - acc_0_index, - pool_id, - pool_amount, - None, - FeeRate::from_amount_per_kb(Amount::from_atoms(0)), - ); + let decommission_cmd_res = wallet + .decommission_stake_pool( + acc_0_index, + pool_id, + pool_amount, + None, + FeeRate::from_amount_per_kb(Amount::from_atoms(0)), + ) + .await; assert_eq!( decommission_cmd_res.unwrap_err(), WalletError::PartiallySignedTransactionInDecommissionCommand @@ -4912,6 +5078,7 @@ fn decommission_pool_wrong_account(#[case] seed: Seed) { None, FeeRate::from_amount_per_kb(Amount::from_atoms(0)), ) + .await .unwrap() .tx; @@ -4930,7 +5097,8 @@ fn decommission_pool_wrong_account(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn decommission_pool_request_wrong_account(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn decommission_pool_request_wrong_account(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ -4973,6 +5141,7 @@ fn decommission_pool_request_wrong_account(#[case] seed: Seed) { vrf_public_key: None, }, ) + .await .unwrap() .tx; let _ = create_block( @@ -4988,13 +5157,15 @@ fn decommission_pool_request_wrong_account(#[case] seed: Seed) { // Try to create decommission request from account that holds the key let pool_id = pool_ids.first().unwrap().0; - let decommission_req_res = wallet.decommission_stake_pool_request( - acc_1_index, - 
pool_id, - pool_amount, - None, - FeeRate::from_amount_per_kb(Amount::from_atoms(0)), - ); + let decommission_req_res = wallet + .decommission_stake_pool_request( + acc_1_index, + pool_id, + pool_amount, + None, + FeeRate::from_amount_per_kb(Amount::from_atoms(0)), + ) + .await; assert_eq!( decommission_req_res.unwrap_err(), WalletError::FullySignedTransactionInDecommissionReq @@ -5008,6 +5179,7 @@ fn decommission_pool_request_wrong_account(#[case] seed: Seed) { None, FeeRate::from_amount_per_kb(Amount::from_atoms(0)), ) + .await .unwrap(); assert!(!decommission_partial_tx.all_signatures_available()); matches!( @@ -5019,7 +5191,8 @@ fn decommission_pool_request_wrong_account(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn sign_decommission_pool_request_between_accounts(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn sign_decommission_pool_request_between_accounts(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_regtest()); @@ -5063,6 +5236,7 @@ fn sign_decommission_pool_request_between_accounts(#[case] seed: Seed) { vrf_public_key: None, }, ) + .await .unwrap() .tx; @@ -5080,6 +5254,7 @@ fn sign_decommission_pool_request_between_accounts(#[case] seed: Seed) { .unwrap(); let stake_pool_transaction = wallet .sign_raw_transaction(acc_0_index, ptx, &TokensAdditionalInfo::new()) + .await .unwrap() .0 .into_signed_tx() @@ -5107,6 +5282,7 @@ fn sign_decommission_pool_request_between_accounts(#[case] seed: Seed) { None, FeeRate::from_amount_per_kb(Amount::from_atoms(0)), ) + .await .unwrap(); let tokens_additional_info = TokensAdditionalInfo::new(); @@ -5117,6 +5293,7 @@ fn sign_decommission_pool_request_between_accounts(#[case] seed: Seed) { decommission_partial_tx.clone(), &tokens_additional_info, ) + .await .unwrap() .0; // the tx is still not fully signed @@ -5128,6 +5305,7 @@ fn sign_decommission_pool_request_between_accounts(#[case] seed: Seed) { 
decommission_partial_tx, &tokens_additional_info, ) + .await .unwrap() .0 .into_signed_tx() @@ -5143,7 +5321,8 @@ fn sign_decommission_pool_request_between_accounts(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn sign_decommission_pool_request_cold_wallet(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn sign_decommission_pool_request_cold_wallet(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_regtest()); @@ -5187,6 +5366,7 @@ fn sign_decommission_pool_request_cold_wallet(#[case] seed: Seed) { vrf_public_key: None, }, ) + .await .unwrap() .tx; let _ = create_block( @@ -5209,6 +5389,7 @@ fn sign_decommission_pool_request_cold_wallet(#[case] seed: Seed) { None, FeeRate::from_amount_per_kb(Amount::from_atoms(0)), ) + .await .unwrap(); let tokens_additional_info = TokensAdditionalInfo::new(); @@ -5219,6 +5400,7 @@ fn sign_decommission_pool_request_cold_wallet(#[case] seed: Seed) { decommission_partial_tx, &tokens_additional_info, ) + .await .unwrap() .0; assert!(partially_signed_transaction.all_signatures_available()); @@ -5231,6 +5413,7 @@ fn sign_decommission_pool_request_cold_wallet(#[case] seed: Seed) { partially_signed_transaction, &tokens_additional_info, ) + .await .unwrap() .0; assert!(partially_signed_transaction.all_signatures_available()); @@ -5252,7 +5435,8 @@ fn sign_decommission_pool_request_cold_wallet(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn filter_pools(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn filter_pools(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_regtest()); @@ -5294,6 +5478,7 @@ fn filter_pools(#[case] seed: Seed) { vrf_public_key: None, }, ) + .await .unwrap() .tx; // sync for wallet1 @@ -5341,7 +5526,8 @@ fn filter_pools(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn 
sign_send_request_cold_wallet(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn sign_send_request_cold_wallet(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_regtest()); @@ -5401,6 +5587,7 @@ fn sign_send_request_cold_wallet(#[case] seed: Seed) { send_req.clone(), &tokens_additional_info, ) + .await .unwrap() .0; // the tx is not fully signed @@ -5409,6 +5596,7 @@ fn sign_send_request_cold_wallet(#[case] seed: Seed) { // sign the tx with cold wallet let signed_tx = cold_wallet .sign_raw_transaction(DEFAULT_ACCOUNT_INDEX, send_req, &tokens_additional_info) + .await .unwrap() .0 .into_signed_tx() @@ -5457,7 +5645,8 @@ fn sign_send_request_cold_wallet(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn test_not_exhaustion_of_keys(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_not_exhaustion_of_keys(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_regtest()); @@ -5499,6 +5688,7 @@ fn test_not_exhaustion_of_keys(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap(); } } @@ -5506,7 +5696,8 @@ fn test_not_exhaustion_of_keys(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn test_add_standalone_multisig(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_add_standalone_multisig(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_regtest()); @@ -5596,6 +5787,7 @@ fn test_add_standalone_multisig(#[case] seed: Seed) { spend_multisig_tx, &tokens_additional_info, ) + .await .unwrap(); // check it is still not fully signed @@ -5605,6 +5797,7 @@ fn test_add_standalone_multisig(#[case] seed: Seed) { // try to sign it with wallet1 again let (ptx, _, statuses) = wallet1 
.sign_raw_transaction(DEFAULT_ACCOUNT_INDEX, ptx, &tokens_additional_info) + .await .unwrap(); // check it is still not fully signed @@ -5614,6 +5807,7 @@ fn test_add_standalone_multisig(#[case] seed: Seed) { // try to sign it with wallet2 but wallet2 does not have the multisig added as standalone let ptx = wallet2 .sign_raw_transaction(DEFAULT_ACCOUNT_INDEX, ptx, &tokens_additional_info) + .await .unwrap() .0; @@ -5623,6 +5817,7 @@ fn test_add_standalone_multisig(#[case] seed: Seed) { // now we can sign it let (ptx, _, statuses) = wallet2 .sign_raw_transaction(DEFAULT_ACCOUNT_INDEX, ptx, &tokens_additional_info) + .await .unwrap(); // now it is fully signed @@ -5633,7 +5828,8 @@ fn test_add_standalone_multisig(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn create_htlc_and_spend(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn create_htlc_and_spend(#[case] seed: Seed) { use common::chain::htlc::HtlcSecret; let mut rng = make_seedable_rng(seed); @@ -5693,6 +5889,7 @@ fn create_htlc_and_spend(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; let create_htlc_tx_id = create_htlc_tx.transaction().get_id(); @@ -5758,6 +5955,7 @@ fn create_htlc_and_spend(#[case] seed: Seed) { let (spend_ptx, _, new_statuses) = wallet2 .sign_raw_transaction(DEFAULT_ACCOUNT_INDEX, spend_ptx, &tokens_additional_info) + .await .unwrap(); assert_eq!(vec![SignatureStatus::FullySigned], new_statuses); @@ -5774,7 +5972,8 @@ fn create_htlc_and_spend(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn create_htlc_and_refund(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn create_htlc_and_refund(#[case] seed: Seed) { use common::chain::htlc::HtlcSecret; let mut rng = make_seedable_rng(seed); @@ -5836,6 +6035,7 @@ fn create_htlc_and_refund(#[case] seed: Seed) { 
FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; let create_htlc_tx_id = create_htlc_tx.transaction().get_id(); @@ -5893,6 +6093,7 @@ fn create_htlc_and_refund(#[case] seed: Seed) { let (refund_ptx, prev_statuses, new_statuses) = wallet2 .sign_raw_transaction(DEFAULT_ACCOUNT_INDEX, refund_ptx, &tokens_additional_info) + .await .unwrap(); assert_eq!(vec![SignatureStatus::NotSigned], prev_statuses); @@ -5906,6 +6107,7 @@ fn create_htlc_and_refund(#[case] seed: Seed) { let (refund_ptx, prev_statuses, new_statuses) = wallet1 .sign_raw_transaction(DEFAULT_ACCOUNT_INDEX, refund_ptx, &tokens_additional_info) + .await .unwrap(); assert_eq!( vec![SignatureStatus::PartialMultisig { @@ -5935,7 +6137,8 @@ fn create_htlc_and_refund(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn create_order(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn create_order(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_unit_test_config()); @@ -5970,6 +6173,7 @@ fn create_order(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -6010,6 +6214,7 @@ fn create_order(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -6049,6 +6254,7 @@ fn create_order(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), additional_info, ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -6068,7 +6274,8 @@ fn create_order(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn create_order_and_conclude(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn create_order_and_conclude(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_unit_test_config()); 
@@ -6103,6 +6310,7 @@ fn create_order_and_conclude(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -6143,6 +6351,7 @@ fn create_order_and_conclude(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -6182,6 +6391,7 @@ fn create_order_and_conclude(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), additional_info, ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); let order_info = RpcOrderInfo { @@ -6237,6 +6447,7 @@ fn create_order_and_conclude(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), additional_info, ) + .await .unwrap() .tx; @@ -6259,7 +6470,8 @@ fn create_order_and_conclude(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn create_order_fill_completely_conclude(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn create_order_fill_completely_conclude(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_unit_test_config()); @@ -6296,6 +6508,7 @@ fn create_order_fill_completely_conclude(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -6339,6 +6552,7 @@ fn create_order_fill_completely_conclude(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -6389,6 +6603,7 @@ fn create_order_fill_completely_conclude(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), additional_info, ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); let order_info = RpcOrderInfo { @@ -6461,6 +6676,7 @@ fn create_order_fill_completely_conclude(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), additional_info, ) + .await .unwrap() .tx; 
@@ -6536,6 +6752,7 @@ fn create_order_fill_completely_conclude(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), additional_info, ) + .await .unwrap() .tx; @@ -6603,6 +6820,7 @@ fn create_order_fill_completely_conclude(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), additional_info, ) + .await .unwrap() .tx; @@ -6636,7 +6854,8 @@ fn create_order_fill_completely_conclude(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn create_order_fill_partially_conclude(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn create_order_fill_partially_conclude(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_unit_test_config()); @@ -6673,6 +6892,7 @@ fn create_order_fill_partially_conclude(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -6716,6 +6936,7 @@ fn create_order_fill_partially_conclude(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -6766,6 +6987,7 @@ fn create_order_fill_partially_conclude(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), additional_info, ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); let order_info = RpcOrderInfo { @@ -6838,6 +7060,7 @@ fn create_order_fill_partially_conclude(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), additional_info, ) + .await .unwrap() .tx; @@ -6912,6 +7135,7 @@ fn create_order_fill_partially_conclude(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), additional_info, ) + .await .unwrap() .tx; @@ -6958,7 +7182,8 @@ fn create_order_fill_partially_conclude(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn conflicting_delegation_account_nonce(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads 
= 1)] +async fn conflicting_delegation_account_nonce(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_unit_test_config()); @@ -6997,6 +7222,7 @@ fn conflicting_delegation_account_nonce(#[case] seed: Seed) { vrf_public_key: None, }, ) + .await .unwrap() .tx; @@ -7027,6 +7253,7 @@ fn conflicting_delegation_account_nonce(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -7059,6 +7286,7 @@ fn conflicting_delegation_account_nonce(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; @@ -7088,6 +7316,7 @@ fn conflicting_delegation_account_nonce(#[case] seed: Seed) { delegation_amount, FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; let spend_from_delegation_tx_1_id = spend_from_delegation_tx_1.transaction().get_id(); @@ -7110,6 +7339,7 @@ fn conflicting_delegation_account_nonce(#[case] seed: Seed) { delegation_amount, FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; let spend_from_delegation_tx_2_id = spend_from_delegation_tx_2.transaction().get_id(); @@ -7140,6 +7370,7 @@ fn conflicting_delegation_account_nonce(#[case] seed: Seed) { delegation_amount, FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; let spend_from_delegation_tx_3_id = spend_from_delegation_tx_3.transaction().get_id(); @@ -7253,7 +7484,8 @@ fn conflicting_delegation_account_nonce(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn conflicting_delegation_account_nonce_same_wallet(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn conflicting_delegation_account_nonce_same_wallet(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_unit_test_config()); @@ -7290,6 +7522,7 @@ fn 
conflicting_delegation_account_nonce_same_wallet(#[case] seed: Seed) { vrf_public_key: None, }, ) + .await .unwrap() .tx; @@ -7319,6 +7552,7 @@ fn conflicting_delegation_account_nonce_same_wallet(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -7350,6 +7584,7 @@ fn conflicting_delegation_account_nonce_same_wallet(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; @@ -7378,6 +7613,7 @@ fn conflicting_delegation_account_nonce_same_wallet(#[case] seed: Seed) { delegation_amount, FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -7400,6 +7636,7 @@ fn conflicting_delegation_account_nonce_same_wallet(#[case] seed: Seed) { delegation_amount, FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -7488,7 +7725,8 @@ fn conflicting_delegation_account_nonce_same_wallet(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn conflicting_order_account_nonce(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn conflicting_order_account_nonce(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = common::chain::config::create_unit_test_config_builder() .chainstate_upgrades( @@ -7528,6 +7766,7 @@ fn conflicting_order_account_nonce(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -7572,6 +7811,7 @@ fn conflicting_order_account_nonce(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; @@ -7599,6 +7839,7 @@ fn conflicting_order_account_nonce(#[case] seed: Seed) { TxAdditionalInfo::new() .with_token_info(issued_token_id, token_additional_info_for_ptx.clone()), ) + .await .map(|(id, tx)| (id, 
tx.tx)) .unwrap(); @@ -7658,6 +7899,7 @@ fn conflicting_order_account_nonce(#[case] seed: Seed) { .with_order_info(order_id, order_additional_info_for_ptx.clone()) .with_token_info(issued_token_id, token_additional_info_for_ptx.clone()), ) + .await .unwrap() .tx; @@ -7690,6 +7932,7 @@ fn conflicting_order_account_nonce(#[case] seed: Seed) { .with_order_info(order_id, order_additional_info_for_ptx) .with_token_info(issued_token_id, token_additional_info_for_ptx), ) + .await .unwrap() .tx; @@ -7776,7 +8019,8 @@ fn conflicting_order_account_nonce(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn conflicting_delegation_account_nonce_multiple_inputs(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn conflicting_delegation_account_nonce_multiple_inputs(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_unit_test_config()); @@ -7815,6 +8059,7 @@ fn conflicting_delegation_account_nonce_multiple_inputs(#[case] seed: Seed) { vrf_public_key: None, }, ) + .await .unwrap() .tx; @@ -7845,6 +8090,7 @@ fn conflicting_delegation_account_nonce_multiple_inputs(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -7878,6 +8124,7 @@ fn conflicting_delegation_account_nonce_multiple_inputs(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; @@ -7934,6 +8181,7 @@ fn conflicting_delegation_account_nonce_multiple_inputs(#[case] seed: Seed) { spend_from_delegation_ptx, &tokens_additional_info, ) + .await .unwrap() .0 .into_signed_tx() @@ -7966,6 +8214,7 @@ fn conflicting_delegation_account_nonce_multiple_inputs(#[case] seed: Seed) { delegation_amount, FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; let spend_from_delegation_confirmed_tx_id = @@ -8056,7 +8305,8 @@ fn 
conflicting_delegation_account_nonce_multiple_inputs(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn conflicting_delegation_account_with_reorg(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn conflicting_delegation_account_with_reorg(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_unit_test_config()); @@ -8095,6 +8345,7 @@ fn conflicting_delegation_account_with_reorg(#[case] seed: Seed) { vrf_public_key: None, }, ) + .await .unwrap() .tx; @@ -8125,6 +8376,7 @@ fn conflicting_delegation_account_with_reorg(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -8157,6 +8409,7 @@ fn conflicting_delegation_account_with_reorg(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; @@ -8188,6 +8441,7 @@ fn conflicting_delegation_account_with_reorg(#[case] seed: Seed) { delegation_amount, FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; let spend_from_delegation_tx_id_1 = spend_from_delegation_tx_1.transaction().get_id(); @@ -8221,6 +8475,7 @@ fn conflicting_delegation_account_with_reorg(#[case] seed: Seed) { delegation_amount, FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .unwrap() .tx; let spend_from_delegation_tx_id_2 = spend_from_delegation_tx_2.transaction().get_id(); @@ -8288,7 +8543,8 @@ fn conflicting_delegation_account_with_reorg(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn rollback_utxos_after_abandon(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn rollback_utxos_after_abandon(#[case] seed: Seed) { let mut rng = make_seedable_rng(seed); let chain_config = Arc::new(create_mainnet()); @@ -8346,6 +8602,7 @@ fn rollback_utxos_after_abandon(#[case] seed: Seed) { 
FeeRate::from_amount_per_kb(Amount::ZERO), TxAdditionalInfo::new(), ) + .await .unwrap() .tx; let tx_id = tx.transaction().get_id(); @@ -8400,7 +8657,8 @@ fn rollback_utxos_after_abandon(#[case] seed: Seed) { #[rstest] #[trace] #[case(Seed::from_entropy())] -fn token_id_generation_v1_uses_first_tx_input(#[case] seed: Seed) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn token_id_generation_v1_uses_first_tx_input(#[case] seed: Seed) { use common::chain::{self, TokenIdGenerationVersion}; let mut rng = make_seedable_rng(seed); @@ -8488,6 +8746,7 @@ fn token_id_generation_v1_uses_first_tx_input(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); @@ -8514,6 +8773,7 @@ fn token_id_generation_v1_uses_first_tx_input(#[case] seed: Seed) { FeeRate::from_amount_per_kb(Amount::ZERO), FeeRate::from_amount_per_kb(Amount::ZERO), ) + .await .map(|(id, tx)| (id, tx.tx)) .unwrap(); diff --git a/wallet/storage/src/internal/mod.rs b/wallet/storage/src/internal/mod.rs index 727999d168..ab1f7894fc 100644 --- a/wallet/storage/src/internal/mod.rs +++ b/wallet/storage/src/internal/mod.rs @@ -154,18 +154,6 @@ impl Store { } } -impl Clone for Store -where - B::Impl: Clone, -{ - fn clone(&self) -> Self { - Self { - storage: self.storage.clone(), - encryption_state: self.encryption_state.clone(), - } - } -} - impl<'tx, B: storage::Backend + 'tx> Transactional<'tx> for Store { type TransactionRoLocked = StoreTxRo<'tx, B>; type TransactionRwLocked = StoreTxRw<'tx, B>; @@ -191,7 +179,7 @@ impl<'tx, B: storage::Backend + 'tx> Transactional<'tx> for Store { } fn transaction_rw<'st: 'tx>( - &'st self, + &'st mut self, size: Option, ) -> crate::Result { self.storage @@ -201,7 +189,7 @@ impl<'tx, B: storage::Backend + 'tx> Transactional<'tx> for Store { } fn transaction_rw_unlocked<'st: 'tx>( - &'st self, + &'st mut self, size: Option, ) -> crate::Result { match 
self.encryption_state { diff --git a/wallet/storage/src/internal/store_tx.rs b/wallet/storage/src/internal/store_tx.rs index 04708bede8..8665f3428e 100644 --- a/wallet/storage/src/internal/store_tx.rs +++ b/wallet/storage/src/internal/store_tx.rs @@ -379,6 +379,147 @@ impl_read_ops!(StoreTxRw); impl_read_ops!(StoreTxRoUnlocked); impl_read_ops!(StoreTxRwUnlocked); +// Note: the reasons for having this impl are: +// 1) Unlike `&T`, `&mut T` can be `Send` without requiring `T` to be `Sync`, so in async code `&mut db_tx` +// can be passed across an await point but `&db_tx` can't. +// 2) Accepting `&mut impl WalletStorageReadXXX` in a generic async function would look weird, because +// the Read traits themselves don't require mutability. So this impl allows a generic function to accept +// `impl WalletStorageReadXXX` by value, while the caller may pass a `&mut db_tx` to it if needed. +impl WalletStorageReadLocked for &mut T +where + T: WalletStorageReadLocked, +{ + fn get_storage_version(&self) -> crate::Result { + (**self).get_storage_version() + } + + fn get_wallet_type(&self) -> crate::Result { + (**self).get_wallet_type() + } + + fn get_chain_info(&self) -> crate::Result { + (**self).get_chain_info() + } + + fn get_transaction(&self, id: &AccountWalletTxId) -> crate::Result> { + (**self).get_transaction(id) + } + + fn get_accounts_info(&self) -> crate::Result> { + (**self).get_accounts_info() + } + + fn get_address(&self, id: &AccountDerivationPathId) -> crate::Result> { + (**self).get_address(id) + } + + fn get_addresses( + &self, + account_id: &AccountId, + ) -> crate::Result> { + (**self).get_addresses(account_id) + } + + fn check_root_keys_sanity(&self) -> crate::Result<()> { + (**self).check_root_keys_sanity() + } + + /// Collect and return all transactions from the storage + fn get_transactions( + &self, + account_id: &AccountId, + ) -> crate::Result> { + (**self).get_transactions(account_id) + } + + /// Collect and return all signed transactions from the 
storage + fn get_user_transactions(&self) -> crate::Result> { + (**self).get_user_transactions() + } + + fn get_account_unconfirmed_tx_counter( + &self, + account_id: &AccountId, + ) -> crate::Result> { + (**self).get_account_unconfirmed_tx_counter(account_id) + } + + fn get_account_vrf_public_keys( + &self, + account_id: &AccountId, + ) -> crate::Result> { + (**self).get_account_vrf_public_keys(account_id) + } + + fn get_account_standalone_watch_only_keys( + &self, + account_id: &AccountId, + ) -> crate::Result> { + (**self).get_account_standalone_watch_only_keys(account_id) + } + + fn get_account_standalone_multisig_keys( + &self, + account_id: &AccountId, + ) -> crate::Result> { + (**self).get_account_standalone_multisig_keys(account_id) + } + + fn get_account_standalone_private_keys( + &self, + account_id: &AccountId, + ) -> crate::Result)>> { + (**self).get_account_standalone_private_keys(account_id) + } + + fn get_keychain_usage_state( + &self, + id: &AccountKeyPurposeId, + ) -> crate::Result> { + (**self).get_keychain_usage_state(id) + } + + fn get_vrf_keychain_usage_state( + &self, + id: &AccountId, + ) -> crate::Result> { + (**self).get_vrf_keychain_usage_state(id) + } + + fn get_keychain_usage_states( + &self, + account_id: &AccountId, + ) -> crate::Result> { + (**self).get_keychain_usage_states(account_id) + } + + fn get_public_key( + &self, + id: &AccountDerivationPathId, + ) -> crate::Result> { + (**self).get_public_key(id) + } + + fn get_public_keys( + &self, + account_id: &AccountId, + ) -> crate::Result> { + (**self).get_public_keys(account_id) + } + + fn get_median_time(&self) -> crate::Result> { + (**self).get_median_time() + } + + fn get_lookahead_size(&self) -> crate::Result { + (**self).get_lookahead_size() + } + + fn get_hardware_wallet_data(&self) -> crate::Result> { + (**self).get_hardware_wallet_data() + } +} + impl WalletStorageEncryptionRead for StoreTxRo<'_, B> { fn get_encryption_key_kdf_challenge(&self) -> crate::Result> { 
self.read_value::() @@ -407,6 +548,7 @@ impl WalletStorageEncryptionRead for StoreTxRo<'_, B> { })? } } + macro_rules! impl_read_unlocked_ops { ($TxType:ident) => { /// Wallet data storage transaction @@ -445,6 +587,27 @@ macro_rules! impl_read_unlocked_ops { impl_read_unlocked_ops!(StoreTxRoUnlocked); impl_read_unlocked_ops!(StoreTxRwUnlocked); +// Same note as for `impl WalletStorageReadLocked for &mut T`. +impl WalletStorageReadUnlocked for &mut T +where + T: WalletStorageReadUnlocked, +{ + fn get_root_key(&self) -> crate::Result> { + (**self).get_root_key() + } + + fn get_seed_phrase(&self) -> crate::Result> { + (**self).get_seed_phrase() + } + + fn get_account_standalone_private_key( + &self, + account_pubkey: &AccountPublicKey, + ) -> crate::Result> { + (**self).get_account_standalone_private_key(account_pubkey) + } +} + macro_rules! impl_write_ops { ($TxType:ident) => { /// Wallet data storage transaction @@ -599,7 +762,7 @@ macro_rules! impl_write_ops { self.write::(id, pub_key) } - fn det_public_key(&mut self, id: &AccountDerivationPathId) -> crate::Result<()> { + fn del_public_key(&mut self, id: &AccountDerivationPathId) -> crate::Result<()> { self.storage.get_mut::().del(id).map_err(Into::into) } diff --git a/wallet/storage/src/internal/test.rs b/wallet/storage/src/internal/test.rs index 738f19d0e5..404cc6f7d4 100644 --- a/wallet/storage/src/internal/test.rs +++ b/wallet/storage/src/internal/test.rs @@ -33,7 +33,7 @@ fn gen_random_password(rng: &mut (impl Rng + CryptoRng)) -> String { #[test] fn storage_get_default_version_in_tx() { utils::concurrency::model(|| { - let store = Store::new(DefaultBackend::new_in_memory()).unwrap(); + let mut store = Store::new(DefaultBackend::new_in_memory()).unwrap(); let mut db_tx = store.transaction_rw(None).unwrap(); db_tx.set_storage_version(1).unwrap(); diff --git a/wallet/storage/src/lib.rs b/wallet/storage/src/lib.rs index d01d17c16a..d09745f7d1 100644 --- a/wallet/storage/src/lib.rs +++ b/wallet/storage/src/lib.rs 
@@ -194,7 +194,7 @@ pub trait WalletStorageWriteLocked: WalletStorageReadLocked { id: &AccountDerivationPathId, content: &ExtendedPublicKey, ) -> Result<()>; - fn det_public_key(&mut self, id: &AccountDerivationPathId) -> Result<()>; + fn del_public_key(&mut self, id: &AccountDerivationPathId) -> Result<()>; fn set_median_time(&mut self, median_time: BlockTimestamp) -> Result<()>; fn set_lookahead_size(&mut self, lookahead_size: u32) -> Result<()>; fn clear_public_keys(&mut self) -> Result<()>; @@ -282,11 +282,14 @@ pub trait Transactional<'t> { fn transaction_ro_unlocked<'s: 't>(&'s self) -> Result; /// Start a read-write transaction. - fn transaction_rw<'s: 't>(&'s self, size: Option) -> Result; + fn transaction_rw<'s: 't>( + &'s mut self, + size: Option, + ) -> Result; /// Start a read-write transaction. fn transaction_rw_unlocked<'s: 't>( - &'s self, + &'s mut self, size: Option, ) -> Result; } diff --git a/wallet/wallet-controller/src/helpers/mod.rs b/wallet/wallet-controller/src/helpers/mod.rs index bea155dc64..f062e1c1a2 100644 --- a/wallet/wallet-controller/src/helpers/mod.rs +++ b/wallet/wallet-controller/src/helpers/mod.rs @@ -103,7 +103,7 @@ pub async fn fetch_order_info( ))) } -pub async fn fetch_utxo( +pub async fn fetch_utxo( rpc_client: &T, wallet: &RuntimeWallet, input: &UtxoOutPoint, @@ -129,7 +129,10 @@ pub async fn fetch_utxo( ))) } -async fn fetch_utxo_and_destination( +async fn fetch_utxo_and_destination< + T: NodeInterface, + B: storage::BackendWithSendableTransactions, +>( rpc_client: &T, wallet: &RuntimeWallet, input: &UtxoOutPoint, @@ -246,7 +249,10 @@ pub async fn into_balances( } // TODO: optimize RPC calls to the Node -pub async fn tx_to_partially_signed_tx( +pub async fn tx_to_partially_signed_tx< + T: NodeInterface, + B: storage::BackendWithSendableTransactions, +>( rpc_client: &T, wallet: &RuntimeWallet, tx: Transaction, @@ -284,7 +290,7 @@ pub async fn tx_to_partially_signed_tx( Ok(ptx) } -pub async fn fetch_input_infos( +pub async 
fn fetch_input_infos( rpc_client: &T, wallet: &RuntimeWallet, inputs: impl IntoIterator, @@ -316,7 +322,10 @@ pub async fn fetch_input_infos( Ok((input_utxos, ptx_additional_info, destinations)) } -async fn into_utxo_and_destination( +async fn into_utxo_and_destination< + T: NodeInterface, + B: storage::BackendWithSendableTransactions, +>( rpc_client: &T, wallet: &RuntimeWallet, tx_inp: &TxInput, diff --git a/wallet/wallet-controller/src/lib.rs b/wallet/wallet-controller/src/lib.rs index 01c0ae7a5f..e1fb90375e 100644 --- a/wallet/wallet-controller/src/lib.rs +++ b/wallet/wallet-controller/src/lib.rs @@ -220,7 +220,7 @@ impl Controller where N: NodeInterface + Clone + Send + Sync + 'static, W: WalletEvents, - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, { pub async fn new( chain_config: Arc, @@ -500,7 +500,7 @@ where } /// Delete the seed phrase if stored in the database - pub fn delete_seed_phrase(&self) -> Result, ControllerError> { + pub fn delete_seed_phrase(&mut self) -> Result, ControllerError> { self.wallet .delete_seed_phrase() .map(|opt| opt.map(SeedWithPassPhrase::from_serializable_seed_phrase)) diff --git a/wallet/wallet-controller/src/read.rs b/wallet/wallet-controller/src/read.rs index 1d1db58871..494a24cf86 100644 --- a/wallet/wallet-controller/src/read.rs +++ b/wallet/wallet-controller/src/read.rs @@ -71,7 +71,7 @@ pub struct AddressInfo { impl<'a, T, B> ReadOnlyController<'a, T, B> where T: NodeInterface, - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, { pub fn new( wallet: &'a RuntimeWallet, diff --git a/wallet/wallet-controller/src/runtime_wallet.rs b/wallet/wallet-controller/src/runtime_wallet.rs index cb583997a4..4b91db6617 100644 --- a/wallet/wallet-controller/src/runtime_wallet.rs +++ b/wallet/wallet-controller/src/runtime_wallet.rs @@ -74,7 +74,10 @@ pub enum RuntimeWallet { Trezor(Wallet), } -impl RuntimeWallet { +impl RuntimeWallet +where + B: 
storage::BackendWithSendableTransactions + 'static, +{ pub fn find_unspent_utxo_and_destination( &self, input: &UtxoOutPoint, @@ -126,7 +129,7 @@ impl RuntimeWallet { } } - pub fn delete_seed_phrase(&self) -> Result, WalletError> { + pub fn delete_seed_phrase(&mut self) -> Result, WalletError> { match self { RuntimeWallet::Software(w) => w.delete_seed_phrase(), #[cfg(feature = "trezor")] @@ -639,7 +642,7 @@ impl RuntimeWallet { } } - pub fn issue_new_token( + pub async fn issue_new_token( &mut self, account_index: U31, token_issuance: TokenIssuance, @@ -647,23 +650,29 @@ impl RuntimeWallet { consolidate_fee_rate: FeeRate, ) -> WalletResult<(TokenId, SignedTxWithFees)> { match self { - RuntimeWallet::Software(w) => w.issue_new_token( - account_index, - token_issuance, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Software(w) => { + w.issue_new_token( + account_index, + token_issuance, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.issue_new_token( - account_index, - token_issuance, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Trezor(w) => { + w.issue_new_token( + account_index, + token_issuance, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } } } - pub fn issue_new_nft( + pub async fn issue_new_nft( &mut self, account_index: U31, address: Address, @@ -672,25 +681,31 @@ impl RuntimeWallet { consolidate_fee_rate: FeeRate, ) -> WalletResult<(TokenId, SignedTxWithFees)> { match self { - RuntimeWallet::Software(w) => w.issue_new_nft( - account_index, - address, - metadata, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Software(w) => { + w.issue_new_nft( + account_index, + address, + metadata, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.issue_new_nft( - account_index, - address, - metadata, - current_fee_rate, - consolidate_fee_rate, - ), + 
RuntimeWallet::Trezor(w) => { + w.issue_new_nft( + account_index, + address, + metadata, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } } } - pub fn mint_tokens( + pub async fn mint_tokens( &mut self, account_index: U31, token_info: &UnconfirmedTokenInfo, @@ -700,27 +715,33 @@ impl RuntimeWallet { consolidate_fee_rate: FeeRate, ) -> Result { match self { - RuntimeWallet::Software(w) => w.mint_tokens( - account_index, - token_info, - amount, - address, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Software(w) => { + w.mint_tokens( + account_index, + token_info, + amount, + address, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.mint_tokens( - account_index, - token_info, - amount, - address, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Trezor(w) => { + w.mint_tokens( + account_index, + token_info, + amount, + address, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } } } - pub fn unmint_tokens( + pub async fn unmint_tokens( &mut self, account_index: U31, token_info: &UnconfirmedTokenInfo, @@ -729,25 +750,31 @@ impl RuntimeWallet { consolidate_fee_rate: FeeRate, ) -> Result { match self { - RuntimeWallet::Software(w) => w.unmint_tokens( - account_index, - token_info, - amount, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Software(w) => { + w.unmint_tokens( + account_index, + token_info, + amount, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.unmint_tokens( - account_index, - token_info, - amount, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Trezor(w) => { + w.unmint_tokens( + account_index, + token_info, + amount, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } } } - pub fn lock_token_supply( + pub async fn lock_token_supply( &mut self, account_index: U31, token_info: &UnconfirmedTokenInfo, @@ -755,23 
+782,29 @@ impl RuntimeWallet { consolidate_fee_rate: FeeRate, ) -> Result { match self { - RuntimeWallet::Software(w) => w.lock_token_supply( - account_index, - token_info, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Software(w) => { + w.lock_token_supply( + account_index, + token_info, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.lock_token_supply( - account_index, - token_info, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Trezor(w) => { + w.lock_token_supply( + account_index, + token_info, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } } } - pub fn freeze_token( + pub async fn freeze_token( &mut self, account_index: U31, token_info: &UnconfirmedTokenInfo, @@ -780,25 +813,31 @@ impl RuntimeWallet { consolidate_fee_rate: FeeRate, ) -> Result { match self { - RuntimeWallet::Software(w) => w.freeze_token( - account_index, - token_info, - is_token_unfreezable, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Software(w) => { + w.freeze_token( + account_index, + token_info, + is_token_unfreezable, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.freeze_token( - account_index, - token_info, - is_token_unfreezable, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Trezor(w) => { + w.freeze_token( + account_index, + token_info, + is_token_unfreezable, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } } } - pub fn unfreeze_token( + pub async fn unfreeze_token( &mut self, account_index: U31, token_info: &UnconfirmedTokenInfo, @@ -806,23 +845,29 @@ impl RuntimeWallet { consolidate_fee_rate: FeeRate, ) -> Result { match self { - RuntimeWallet::Software(w) => w.unfreeze_token( - account_index, - token_info, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Software(w) => { + w.unfreeze_token( + account_index, + 
token_info, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.unfreeze_token( - account_index, - token_info, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Trezor(w) => { + w.unfreeze_token( + account_index, + token_info, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } } } - pub fn change_token_authority( + pub async fn change_token_authority( &mut self, account_index: U31, token_info: &UnconfirmedTokenInfo, @@ -831,25 +876,31 @@ impl RuntimeWallet { consolidate_fee_rate: FeeRate, ) -> Result { match self { - RuntimeWallet::Software(w) => w.change_token_authority( - account_index, - token_info, - address, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Software(w) => { + w.change_token_authority( + account_index, + token_info, + address, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.change_token_authority( - account_index, - token_info, - address, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Trezor(w) => { + w.change_token_authority( + account_index, + token_info, + address, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } } } - pub fn change_token_metadata_uri( + pub async fn change_token_metadata_uri( &mut self, account_index: U31, token_info: &UnconfirmedTokenInfo, @@ -858,26 +909,32 @@ impl RuntimeWallet { consolidate_fee_rate: FeeRate, ) -> Result { match self { - RuntimeWallet::Software(w) => w.change_token_metadata_uri( - account_index, - token_info, - metadata_uri, - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Software(w) => { + w.change_token_metadata_uri( + account_index, + token_info, + metadata_uri, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.change_token_metadata_uri( - account_index, - token_info, - metadata_uri, - current_fee_rate, - 
consolidate_fee_rate, - ), + RuntimeWallet::Trezor(w) => { + w.change_token_metadata_uri( + account_index, + token_info, + metadata_uri, + current_fee_rate, + consolidate_fee_rate, + ) + .await + } } } #[allow(clippy::too_many_arguments)] - pub fn create_transaction_to_addresses( + pub async fn create_transaction_to_addresses( &mut self, account_index: U31, outputs: impl IntoIterator, @@ -888,29 +945,35 @@ impl RuntimeWallet { additional_info: TxAdditionalInfo, ) -> WalletResult { match self { - RuntimeWallet::Software(w) => w.create_transaction_to_addresses( - account_index, - outputs, - inputs, - change_addresses, - current_fee_rate, - consolidate_fee_rate, - additional_info, - ), + RuntimeWallet::Software(w) => { + w.create_transaction_to_addresses( + account_index, + outputs, + inputs, + change_addresses, + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.create_transaction_to_addresses( - account_index, - outputs, - inputs, - change_addresses, - current_fee_rate, - consolidate_fee_rate, - additional_info, - ), + RuntimeWallet::Trezor(w) => { + w.create_transaction_to_addresses( + account_index, + outputs, + inputs, + change_addresses, + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await + } } } - pub fn create_sweep_transaction( + pub async fn create_sweep_transaction( &mut self, account_index: U31, destination_address: Destination, @@ -919,21 +982,27 @@ impl RuntimeWallet { additional_info: TxAdditionalInfo, ) -> WalletResult { match self { - RuntimeWallet::Software(w) => w.create_sweep_transaction( - account_index, - destination_address, - filtered_inputs, - current_fee_rate, - additional_info, - ), + RuntimeWallet::Software(w) => { + w.create_sweep_transaction( + account_index, + destination_address, + filtered_inputs, + current_fee_rate, + additional_info, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => 
w.create_sweep_transaction( - account_index, - destination_address, - filtered_inputs, - current_fee_rate, - additional_info, - ), + RuntimeWallet::Trezor(w) => { + w.create_sweep_transaction( + account_index, + destination_address, + filtered_inputs, + current_fee_rate, + additional_info, + ) + .await + } } } @@ -949,7 +1018,7 @@ impl RuntimeWallet { } } - pub fn create_sweep_from_delegation_transaction( + pub async fn create_sweep_from_delegation_transaction( &mut self, account_index: U31, destination_address: Address, @@ -958,21 +1027,27 @@ impl RuntimeWallet { current_fee_rate: FeeRate, ) -> WalletResult { match self { - RuntimeWallet::Software(w) => w.create_sweep_from_delegation_transaction( - account_index, - destination_address, - delegation_id, - delegation_share, - current_fee_rate, - ), + RuntimeWallet::Software(w) => { + w.create_sweep_from_delegation_transaction( + account_index, + destination_address, + delegation_id, + delegation_share, + current_fee_rate, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.create_sweep_from_delegation_transaction( - account_index, - destination_address, - delegation_id, - delegation_share, - current_fee_rate, - ), + RuntimeWallet::Trezor(w) => { + w.create_sweep_from_delegation_transaction( + account_index, + destination_address, + delegation_id, + delegation_share, + current_fee_rate, + ) + .await + } } } @@ -1013,7 +1088,7 @@ impl RuntimeWallet { } } - pub fn create_delegation( + pub async fn create_delegation( &mut self, account_index: U31, output: TxOutput, @@ -1021,23 +1096,29 @@ impl RuntimeWallet { consolidate_fee_rate: FeeRate, ) -> WalletResult<(DelegationId, SignedTxWithFees)> { match self { - RuntimeWallet::Software(w) => w.create_delegation( - account_index, - vec![output], - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Software(w) => { + w.create_delegation( + account_index, + vec![output], + current_fee_rate, + consolidate_fee_rate, + ) + .await + } 
#[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.create_delegation( - account_index, - vec![output], - current_fee_rate, - consolidate_fee_rate, - ), + RuntimeWallet::Trezor(w) => { + w.create_delegation( + account_index, + vec![output], + current_fee_rate, + consolidate_fee_rate, + ) + .await + } } } - pub fn create_transaction_to_addresses_from_delegation( + pub async fn create_transaction_to_addresses_from_delegation( &mut self, account_index: U31, address: Address, @@ -1047,27 +1128,33 @@ impl RuntimeWallet { current_fee_rate: FeeRate, ) -> WalletResult { match self { - RuntimeWallet::Software(w) => w.create_transaction_to_addresses_from_delegation( - account_index, - address, - amount, - delegation_id, - delegation_share, - current_fee_rate, - ), + RuntimeWallet::Software(w) => { + w.create_transaction_to_addresses_from_delegation( + account_index, + address, + amount, + delegation_id, + delegation_share, + current_fee_rate, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.create_transaction_to_addresses_from_delegation( - account_index, - address, - amount, - delegation_id, - delegation_share, - current_fee_rate, - ), + RuntimeWallet::Trezor(w) => { + w.create_transaction_to_addresses_from_delegation( + account_index, + address, + amount, + delegation_id, + delegation_share, + current_fee_rate, + ) + .await + } } } - pub fn create_stake_pool( + pub async fn create_stake_pool( &mut self, account_index: U31, current_fee_rate: FeeRate, @@ -1075,23 +1162,29 @@ impl RuntimeWallet { stake_pool_arguments: StakePoolCreationArguments, ) -> WalletResult { match self { - RuntimeWallet::Software(w) => w.create_stake_pool( - account_index, - current_fee_rate, - consolidate_fee_rate, - stake_pool_arguments, - ), + RuntimeWallet::Software(w) => { + w.create_stake_pool( + account_index, + current_fee_rate, + consolidate_fee_rate, + stake_pool_arguments, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => 
w.create_stake_pool_with_vrf_key( - account_index, - current_fee_rate, - consolidate_fee_rate, - stake_pool_arguments, - ), + RuntimeWallet::Trezor(w) => { + w.create_stake_pool_with_vrf_key( + account_index, + current_fee_rate, + consolidate_fee_rate, + stake_pool_arguments, + ) + .await + } } } - pub fn decommission_stake_pool( + pub async fn decommission_stake_pool( &mut self, account_index: U31, pool_id: PoolId, @@ -1100,25 +1193,31 @@ impl RuntimeWallet { current_fee_rate: FeeRate, ) -> WalletResult { match self { - RuntimeWallet::Software(w) => w.decommission_stake_pool( - account_index, - pool_id, - staker_balance, - output_address, - current_fee_rate, - ), + RuntimeWallet::Software(w) => { + w.decommission_stake_pool( + account_index, + pool_id, + staker_balance, + output_address, + current_fee_rate, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.decommission_stake_pool( - account_index, - pool_id, - staker_balance, - output_address, - current_fee_rate, - ), + RuntimeWallet::Trezor(w) => { + w.decommission_stake_pool( + account_index, + pool_id, + staker_balance, + output_address, + current_fee_rate, + ) + .await + } } } - pub fn decommission_stake_pool_request( + pub async fn decommission_stake_pool_request( &mut self, account_index: U31, pool_id: PoolId, @@ -1127,25 +1226,31 @@ impl RuntimeWallet { current_fee_rate: FeeRate, ) -> WalletResult { match self { - RuntimeWallet::Software(w) => w.decommission_stake_pool_request( - account_index, - pool_id, - staker_balance, - output_address, - current_fee_rate, - ), + RuntimeWallet::Software(w) => { + w.decommission_stake_pool_request( + account_index, + pool_id, + staker_balance, + output_address, + current_fee_rate, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.decommission_stake_pool_request( - account_index, - pool_id, - staker_balance, - output_address, - current_fee_rate, - ), + RuntimeWallet::Trezor(w) => { + w.decommission_stake_pool_request( + 
account_index, + pool_id, + staker_balance, + output_address, + current_fee_rate, + ) + .await + } } } - pub fn create_htlc_tx( + pub async fn create_htlc_tx( &mut self, account_index: U31, output_value: OutputValue, @@ -1155,28 +1260,34 @@ impl RuntimeWallet { additional_info: TxAdditionalInfo, ) -> WalletResult { match self { - RuntimeWallet::Software(w) => w.create_htlc_tx( - account_index, - output_value, - htlc, - current_fee_rate, - consolidate_fee_rate, - additional_info, - ), + RuntimeWallet::Software(w) => { + w.create_htlc_tx( + account_index, + output_value, + htlc, + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.create_htlc_tx( - account_index, - output_value, - htlc, - current_fee_rate, - consolidate_fee_rate, - additional_info, - ), + RuntimeWallet::Trezor(w) => { + w.create_htlc_tx( + account_index, + output_value, + htlc, + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await + } } } #[allow(clippy::too_many_arguments)] - pub fn create_order_tx( + pub async fn create_order_tx( &mut self, account_index: U31, ask_value: OutputValue, @@ -1187,30 +1298,36 @@ impl RuntimeWallet { additional_info: TxAdditionalInfo, ) -> WalletResult<(OrderId, SignedTxWithFees)> { match self { - RuntimeWallet::Software(w) => w.create_order_tx( - account_index, - ask_value, - give_value, - conclude_key, - current_fee_rate, - consolidate_fee_rate, - additional_info, - ), + RuntimeWallet::Software(w) => { + w.create_order_tx( + account_index, + ask_value, + give_value, + conclude_key, + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.create_order_tx( - account_index, - ask_value, - give_value, - conclude_key, - current_fee_rate, - consolidate_fee_rate, - additional_info, - ), + RuntimeWallet::Trezor(w) => { + w.create_order_tx( + account_index, + ask_value, + give_value, + 
conclude_key, + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await + } } } #[allow(clippy::too_many_arguments)] - pub fn create_conclude_order_tx( + pub async fn create_conclude_order_tx( &mut self, account_index: U31, order_id: OrderId, @@ -1221,30 +1338,36 @@ impl RuntimeWallet { additional_info: TxAdditionalInfo, ) -> WalletResult { match self { - RuntimeWallet::Software(w) => w.create_conclude_order_tx( - account_index, - order_id, - order_info, - output_address, - current_fee_rate, - consolidate_fee_rate, - additional_info, - ), + RuntimeWallet::Software(w) => { + w.create_conclude_order_tx( + account_index, + order_id, + order_info, + output_address, + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.create_conclude_order_tx( - account_index, - order_id, - order_info, - output_address, - current_fee_rate, - consolidate_fee_rate, - additional_info, - ), + RuntimeWallet::Trezor(w) => { + w.create_conclude_order_tx( + account_index, + order_id, + order_info, + output_address, + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await + } } } #[allow(clippy::too_many_arguments)] - pub fn create_fill_order_tx( + pub async fn create_fill_order_tx( &mut self, account_index: U31, order_id: OrderId, @@ -1256,31 +1379,37 @@ impl RuntimeWallet { additional_info: TxAdditionalInfo, ) -> WalletResult { match self { - RuntimeWallet::Software(w) => w.create_fill_order_tx( - account_index, - order_id, - order_info, - fill_amount_in_ask_currency, - output_address, - current_fee_rate, - consolidate_fee_rate, - additional_info, - ), + RuntimeWallet::Software(w) => { + w.create_fill_order_tx( + account_index, + order_id, + order_info, + fill_amount_in_ask_currency, + output_address, + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.create_fill_order_tx( - 
account_index, - order_id, - order_info, - fill_amount_in_ask_currency, - output_address, - current_fee_rate, - consolidate_fee_rate, - additional_info, - ), + RuntimeWallet::Trezor(w) => { + w.create_fill_order_tx( + account_index, + order_id, + order_info, + fill_amount_in_ask_currency, + output_address, + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await + } } } - pub fn create_freeze_order_tx( + pub async fn create_freeze_order_tx( &mut self, account_index: U31, order_id: OrderId, @@ -1290,27 +1419,33 @@ impl RuntimeWallet { additional_info: TxAdditionalInfo, ) -> WalletResult { match self { - RuntimeWallet::Software(w) => w.create_freeze_order_tx( - account_index, - order_id, - order_info, - current_fee_rate, - consolidate_fee_rate, - additional_info, - ), + RuntimeWallet::Software(w) => { + w.create_freeze_order_tx( + account_index, + order_id, + order_info, + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.create_freeze_order_tx( - account_index, - order_id, - order_info, - current_fee_rate, - consolidate_fee_rate, - additional_info, - ), + RuntimeWallet::Trezor(w) => { + w.create_freeze_order_tx( + account_index, + order_id, + order_info, + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await + } } } - pub fn sign_raw_transaction( + pub async fn sign_raw_transaction( &mut self, account_index: U31, ptx: PartiallySignedTransaction, @@ -1322,30 +1457,34 @@ impl RuntimeWallet { )> { match self { RuntimeWallet::Software(w) => { - w.sign_raw_transaction(account_index, ptx, tokens_additional_info) + w.sign_raw_transaction(account_index, ptx, tokens_additional_info).await } #[cfg(feature = "trezor")] RuntimeWallet::Trezor(w) => { - w.sign_raw_transaction(account_index, ptx, tokens_additional_info) + w.sign_raw_transaction(account_index, ptx, tokens_additional_info).await } } } - pub fn sign_challenge( + pub async fn sign_challenge( 
&mut self, account_index: U31, challenge: &[u8], destination: &Destination, ) -> WalletResult { match self { - RuntimeWallet::Software(w) => w.sign_challenge(account_index, challenge, destination), + RuntimeWallet::Software(w) => { + w.sign_challenge(account_index, challenge, destination).await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.sign_challenge(account_index, challenge, destination), + RuntimeWallet::Trezor(w) => { + w.sign_challenge(account_index, challenge, destination).await + } } } #[allow(clippy::too_many_arguments)] - pub fn create_transaction_to_addresses_with_intent( + pub async fn create_transaction_to_addresses_with_intent( &mut self, account_index: U31, outputs: impl IntoIterator, @@ -1357,27 +1496,33 @@ impl RuntimeWallet { additional_info: TxAdditionalInfo, ) -> WalletResult<(SignedTxWithFees, SignedTransactionIntent)> { match self { - RuntimeWallet::Software(w) => w.create_transaction_to_addresses_with_intent( - account_index, - outputs, - inputs, - change_addresses, - intent, - current_fee_rate, - consolidate_fee_rate, - additional_info, - ), + RuntimeWallet::Software(w) => { + w.create_transaction_to_addresses_with_intent( + account_index, + outputs, + inputs, + change_addresses, + intent, + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await + } #[cfg(feature = "trezor")] - RuntimeWallet::Trezor(w) => w.create_transaction_to_addresses_with_intent( - account_index, - outputs, - inputs, - change_addresses, - intent, - current_fee_rate, - consolidate_fee_rate, - additional_info, - ), + RuntimeWallet::Trezor(w) => { + w.create_transaction_to_addresses_with_intent( + account_index, + outputs, + inputs, + change_addresses, + intent, + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await + } } } diff --git a/wallet/wallet-controller/src/sync/mod.rs b/wallet/wallet-controller/src/sync/mod.rs index 453f425209..bfe7c5af4b 100644 --- a/wallet/wallet-controller/src/sync/mod.rs +++ 
b/wallet/wallet-controller/src/sync/mod.rs @@ -55,7 +55,7 @@ pub trait SyncingWallet { impl SyncingWallet for Wallet where - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, P: SignerProvider, { fn syncing_state(&self) -> WalletSyncingState { diff --git a/wallet/wallet-controller/src/synced_controller.rs b/wallet/wallet-controller/src/synced_controller.rs index f11b5b7a17..22698e55f2 100644 --- a/wallet/wallet-controller/src/synced_controller.rs +++ b/wallet/wallet-controller/src/synced_controller.rs @@ -93,7 +93,7 @@ pub struct SyncedController<'a, T, W, B: storage::Backend + 'static> { impl<'a, T, W, B> SyncedController<'a, T, W, B> where - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, T: NodeInterface, W: WalletEvents, { @@ -297,23 +297,25 @@ where is_freezable: IsTokenFreezable, ) -> Result<(NewTransaction, TokenId), ControllerError> { self.create_and_send_tx_with_id( - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31| { - wallet.issue_new_token( - account_index, - TokenIssuance::V1(TokenIssuanceV1 { - token_ticker, - number_of_decimals, - metadata_uri, - total_supply: token_total_supply, - authority: address.into_object(), - is_freezable, - }), - current_fee_rate, - consolidate_fee_rate, - ) + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31| { + wallet + .issue_new_token( + account_index, + TokenIssuance::V1(TokenIssuanceV1 { + token_ticker, + number_of_decimals, + metadata_uri, + total_supply: token_total_supply, + authority: address.into_object(), + is_freezable, + }), + current_fee_rate, + consolidate_fee_rate, + ) + .await }, ) .await @@ -325,17 +327,19 @@ where metadata: Metadata, ) -> Result<(NewTransaction, TokenId), ControllerError> { self.create_and_send_tx_with_id( - move |current_fee_rate: FeeRate, - consolidate_fee_rate: 
FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31| { - wallet.issue_new_nft( - account_index, - address, - metadata, - current_fee_rate, - consolidate_fee_rate, - ) + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31| { + wallet + .issue_new_nft( + account_index, + address, + metadata, + current_fee_rate, + consolidate_fee_rate, + ) + .await }, ) .await @@ -349,20 +353,23 @@ where ) -> Result> { self.create_and_send_token_tx( token_info, - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31, - token_info: &UnconfirmedTokenInfo| { + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31, + token_info: &UnconfirmedTokenInfo| { token_info.check_can_be_used()?; - wallet.mint_tokens( - account_index, - token_info, - amount, - address, - current_fee_rate, - consolidate_fee_rate, - ) + + wallet + .mint_tokens( + account_index, + token_info, + amount, + address, + current_fee_rate, + consolidate_fee_rate, + ) + .await }, ) .await @@ -374,19 +381,22 @@ where ) -> Result> { self.create_and_send_token_tx( token_info, - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31, - token_info: &UnconfirmedTokenInfo| { + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31, + token_info: &UnconfirmedTokenInfo| { token_info.check_can_be_used()?; - wallet.unmint_tokens( - account_index, - token_info, - amount, - current_fee_rate, - consolidate_fee_rate, - ) + + wallet + .unmint_tokens( + account_index, + token_info, + amount, + current_fee_rate, + consolidate_fee_rate, + ) + .await }, ) .await @@ -398,18 +408,21 @@ where ) -> Result> { self.create_and_send_token_tx( token_info, - move |current_fee_rate: FeeRate, - consolidate_fee_rate: 
FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31, - token_info: &UnconfirmedTokenInfo| { + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31, + token_info: &UnconfirmedTokenInfo| { token_info.check_can_be_used()?; - wallet.lock_token_supply( - account_index, - token_info, - current_fee_rate, - consolidate_fee_rate, - ) + + wallet + .lock_token_supply( + account_index, + token_info, + current_fee_rate, + consolidate_fee_rate, + ) + .await }, ) .await @@ -424,18 +437,22 @@ where ) -> Result> { self.create_and_send_token_tx( token_info, - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31, - token_info: &UnconfirmedTokenInfo| { - wallet.freeze_token( - account_index, - token_info, - is_token_unfreezable, - current_fee_rate, - consolidate_fee_rate, - ) + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31, + token_info: &UnconfirmedTokenInfo| { + token_info.check_can_be_used()?; + + wallet + .freeze_token( + account_index, + token_info, + is_token_unfreezable, + current_fee_rate, + consolidate_fee_rate, + ) + .await }, ) .await @@ -448,17 +465,19 @@ where ) -> Result> { self.create_and_send_token_tx( token_info, - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31, - token_info: &UnconfirmedTokenInfo| { - wallet.unfreeze_token( - account_index, - token_info, - current_fee_rate, - consolidate_fee_rate, - ) + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31, + token_info: &UnconfirmedTokenInfo| { + wallet + .unfreeze_token( + account_index, + token_info, + current_fee_rate, + consolidate_fee_rate, + ) + .await }, ) .await @@ -473,18 +492,22 @@ where ) -> Result> { self.create_and_send_token_tx( token_info, - move 
|current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31, - token_info: &UnconfirmedTokenInfo| { - wallet.change_token_authority( - account_index, - token_info, - address, - current_fee_rate, - consolidate_fee_rate, - ) + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31, + token_info: &UnconfirmedTokenInfo| { + token_info.check_can_be_used()?; + + wallet + .change_token_authority( + account_index, + token_info, + address, + current_fee_rate, + consolidate_fee_rate, + ) + .await }, ) .await @@ -497,18 +520,22 @@ where ) -> Result> { self.create_and_send_token_tx( token_info, - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31, - token_info: &UnconfirmedTokenInfo| { - wallet.change_token_metadata_uri( - account_index, - token_info, - metadata_uri, - current_fee_rate, - consolidate_fee_rate, - ) + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31, + token_info: &UnconfirmedTokenInfo| { + token_info.check_can_be_used()?; + + wallet + .change_token_metadata_uri( + account_index, + token_info, + metadata_uri, + current_fee_rate, + consolidate_fee_rate, + ) + .await }, ) .await @@ -522,19 +549,21 @@ where let outputs = make_data_deposit_output(self.chain_config, data, best_block_height)?; self.create_and_send_tx( - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31| { - wallet.create_transaction_to_addresses( - account_index, - outputs, - SelectedInputs::Utxos(vec![]), - BTreeMap::new(), - current_fee_rate, - consolidate_fee_rate, - TxAdditionalInfo::new(), - ) + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31| { + wallet + .create_transaction_to_addresses( + 
account_index, + outputs, + SelectedInputs::Utxos(vec![]), + BTreeMap::new(), + current_fee_rate, + consolidate_fee_rate, + TxAdditionalInfo::new(), + ) + .await }, ) .await @@ -560,19 +589,21 @@ where let output = make_address_output(address.into_object(), amount); self.create_and_send_tx( - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31| { - wallet.create_transaction_to_addresses( - account_index, - [output], - SelectedInputs::Utxos(selected_utxos), - BTreeMap::new(), - current_fee_rate, - consolidate_fee_rate, - TxAdditionalInfo::new(), - ) + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31| { + wallet + .create_transaction_to_addresses( + account_index, + [output], + SelectedInputs::Utxos(selected_utxos), + BTreeMap::new(), + current_fee_rate, + consolidate_fee_rate, + TxAdditionalInfo::new(), + ) + .await }, ) .await @@ -604,20 +635,22 @@ where .collect::>(); self.create_and_send_tx( - move |current_fee_rate: FeeRate, - _consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31| { - wallet.create_sweep_transaction( - account_index, - destination_address, - filtered_inputs, - current_fee_rate, - TxAdditionalInfo { - ptx_additional_info: PtxAdditionalInfo::new(), - tokens_additional_info, - }, - ) + async move |current_fee_rate: FeeRate, + _consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31| { + wallet + .create_sweep_transaction( + account_index, + destination_address, + filtered_inputs, + current_fee_rate, + TxAdditionalInfo { + ptx_additional_info: PtxAdditionalInfo::new(), + tokens_additional_info, + }, + ) + .await }, ) .await @@ -646,17 +679,19 @@ where ))?; self.create_and_send_tx( - move |current_fee_rate: FeeRate, - _consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31| { - wallet.create_sweep_from_delegation_transaction( - 
account_index, - destination_address, - delegation_id, - delegation_share, - current_fee_rate, - ) + async move |current_fee_rate: FeeRate, + _consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31| { + wallet + .create_sweep_from_delegation_transaction( + account_index, + destination_address, + delegation_id, + delegation_share, + current_fee_rate, + ) + .await }, ) .await @@ -867,16 +902,18 @@ where ) -> Result<(NewTransaction, DelegationId), ControllerError> { let output = make_create_delegation_output(address, pool_id); self.create_and_send_tx_with_id( - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31| { - wallet.create_delegation( - account_index, - output, - current_fee_rate, - consolidate_fee_rate, - ) + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31| { + wallet + .create_delegation( + account_index, + output, + current_fee_rate, + consolidate_fee_rate, + ) + .await }, ) .await @@ -891,19 +928,21 @@ where ) -> Result> { let output = TxOutput::DelegateStaking(amount, delegation_id); self.create_and_send_tx( - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31| { - wallet.create_transaction_to_addresses( - account_index, - [output], - SelectedInputs::Utxos(vec![]), - BTreeMap::new(), - current_fee_rate, - consolidate_fee_rate, - TxAdditionalInfo::new(), - ) + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31| { + wallet + .create_transaction_to_addresses( + account_index, + [output], + SelectedInputs::Utxos(vec![]), + BTreeMap::new(), + current_fee_rate, + consolidate_fee_rate, + TxAdditionalInfo::new(), + ) + .await }, ) .await @@ -929,18 +968,20 @@ where ))?; self.create_and_send_tx( - move |current_fee_rate: FeeRate, - _consolidate_fee_rate: FeeRate, - 
wallet: &mut RuntimeWallet, - account_index: U31| { - wallet.create_transaction_to_addresses_from_delegation( - account_index, - address, - amount, - delegation_id, - delegation_share, - current_fee_rate, - ) + async move |current_fee_rate: FeeRate, + _consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31| { + wallet + .create_transaction_to_addresses_from_delegation( + account_index, + address, + amount, + delegation_id, + delegation_share, + current_fee_rate, + ) + .await }, ) .await @@ -958,11 +999,11 @@ where make_address_output_token(address.into_object(), amount, token_info.token_id()); self.create_and_send_token_tx( token_info, - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31, - token_info: &UnconfirmedTokenInfo| { + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31, + token_info: &UnconfirmedTokenInfo| { token_info.check_can_be_used()?; let additional_info = TxAdditionalInfo::new().with_token_info( token_info.token_id(), @@ -971,15 +1012,17 @@ where ticker: token_info.token_ticker().to_vec(), }, ); - wallet.create_transaction_to_addresses( - account_index, - [output], - SelectedInputs::Utxos(vec![]), - BTreeMap::new(), - current_fee_rate, - consolidate_fee_rate, - additional_info, - ) + wallet + .create_transaction_to_addresses( + account_index, + [output], + SelectedInputs::Utxos(vec![]), + BTreeMap::new(), + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await }, ) .await @@ -997,11 +1040,11 @@ where make_address_output_token(address.into_object(), amount, token_info.token_id()); self.create_token_tx( token_info, - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31, - token_info: &UnconfirmedTokenInfo| { + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut 
RuntimeWallet, + account_index: U31, + token_info: &UnconfirmedTokenInfo| { token_info.check_can_be_used()?; let additional_info = TxAdditionalInfo::new().with_token_info( token_info.token_id(), @@ -1010,16 +1053,18 @@ where ticker: token_info.token_ticker().to_vec(), }, ); - wallet.create_transaction_to_addresses_with_intent( - account_index, - [output], - SelectedInputs::Utxos(vec![]), - BTreeMap::new(), - intent, - current_fee_rate, - consolidate_fee_rate, - additional_info, - ) + wallet + .create_transaction_to_addresses_with_intent( + account_index, + [output], + SelectedInputs::Utxos(vec![]), + BTreeMap::new(), + intent, + current_fee_rate, + consolidate_fee_rate, + additional_info, + ) + .await }, ) .await @@ -1036,23 +1081,25 @@ where vrf_public_key: Option, ) -> Result> { self.create_and_send_tx( - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31| { - wallet.create_stake_pool( - account_index, - current_fee_rate, - consolidate_fee_rate, - StakePoolCreationArguments { - amount, - margin_ratio_per_thousand, - cost_per_block, - decommission_key, - staker_key, - vrf_public_key, - }, - ) + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31| { + wallet + .create_stake_pool( + account_index, + current_fee_rate, + consolidate_fee_rate, + StakePoolCreationArguments { + amount, + margin_ratio_per_thousand, + cost_per_block, + decommission_key, + staker_key, + vrf_public_key, + }, + ) + .await }, ) .await @@ -1074,17 +1121,19 @@ where )))?; self.create_and_send_tx( - move |current_fee_rate: FeeRate, - _consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31| { - wallet.decommission_stake_pool( - account_index, - pool_id, - staker_balance, - output_address, - current_fee_rate, - ) + async move |current_fee_rate: FeeRate, + _consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31| 
{ + wallet + .decommission_stake_pool( + account_index, + pool_id, + staker_balance, + output_address, + current_fee_rate, + ) + .await }, ) .await @@ -1115,6 +1164,7 @@ where output_address, current_fee_rate, ) + .await .map_err(ControllerError::WalletError) } @@ -1136,14 +1186,17 @@ where let (current_fee_rate, consolidate_fee_rate) = self.get_current_and_consolidation_fee_rate().await?; - let SignedTxWithFees { tx, fees } = self.wallet.create_htlc_tx( - self.account_index, - output_value, - htlc, - current_fee_rate, - consolidate_fee_rate, - tx_additional_info, - )?; + let SignedTxWithFees { tx, fees } = self + .wallet + .create_htlc_tx( + self.account_index, + output_value, + htlc, + current_fee_rate, + consolidate_fee_rate, + tx_additional_info, + ) + .await?; let fees = into_balances(&self.rpc_client, self.chain_config, fees).await?; @@ -1179,19 +1232,21 @@ where let give_value = convert_value(give_value).await?; self.create_and_send_tx_with_id( - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31| { - wallet.create_order_tx( - account_index, - ask_value, - give_value, - conclude_key, - current_fee_rate, - consolidate_fee_rate, - tx_additional_info, - ) + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31| { + wallet + .create_order_tx( + account_index, + ask_value, + give_value, + conclude_key, + current_fee_rate, + consolidate_fee_rate, + tx_additional_info, + ) + .await }, ) .await @@ -1207,19 +1262,21 @@ where self.additional_info_for_order_update_tx(order_id, &order_info).await?; self.create_and_send_tx( - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31| { - wallet.create_conclude_order_tx( - account_index, - order_id, - order_info, - output_address, - current_fee_rate, - consolidate_fee_rate, - tx_additional_info, - ) + async move |current_fee_rate: 
FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31| { + wallet + .create_conclude_order_tx( + account_index, + order_id, + order_info, + output_address, + current_fee_rate, + consolidate_fee_rate, + tx_additional_info, + ) + .await }, ) .await @@ -1250,20 +1307,22 @@ where .ok_or(ControllerError::InvalidCoinAmount)?; self.create_and_send_tx( - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31| { - wallet.create_fill_order_tx( - account_index, - order_id, - order_info, - fill_amount_in_ask_currency, - output_address, - current_fee_rate, - consolidate_fee_rate, - tx_additional_info, - ) + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31| { + wallet + .create_fill_order_tx( + account_index, + order_id, + order_info, + fill_amount_in_ask_currency, + output_address, + current_fee_rate, + consolidate_fee_rate, + tx_additional_info, + ) + .await }, ) .await @@ -1278,18 +1337,20 @@ where self.additional_info_for_order_update_tx(order_id, &order_info).await?; self.create_and_send_tx( - move |current_fee_rate: FeeRate, - consolidate_fee_rate: FeeRate, - wallet: &mut RuntimeWallet, - account_index: U31| { - wallet.create_freeze_order_tx( - account_index, - order_id, - order_info, - current_fee_rate, - consolidate_fee_rate, - tx_additional_info, - ) + async move |current_fee_rate: FeeRate, + consolidate_fee_rate: FeeRate, + wallet: &mut RuntimeWallet, + account_index: U31| { + wallet + .create_freeze_order_tx( + account_index, + order_id, + order_info, + current_fee_rate, + consolidate_fee_rate, + tx_additional_info, + ) + .await }, ) .await @@ -1396,16 +1457,18 @@ where self.wallet .sign_raw_transaction(self.account_index, ptx, &tokens_additional_info) + .await .map_err(ControllerError::WalletError) } - pub fn sign_challenge( + pub async fn sign_challenge( &mut self, challenge: &[u8], 
destination: &Destination, ) -> Result> { self.wallet .sign_challenge(self.account_index, challenge, destination) + .await .map_err(ControllerError::WalletError) } @@ -1464,12 +1527,17 @@ where } /// Create a transaction and broadcast it if needed - async fn create_and_send_tx( + async fn create_and_send_tx( &mut self, - tx_maker: F, + tx_maker: Fun, ) -> Result> where - F: FnOnce(FeeRate, FeeRate, &mut RuntimeWallet, U31) -> Result, + Fun: AsyncFnOnce( + FeeRate, + FeeRate, + &mut RuntimeWallet, + U31, + ) -> Result, ControllerError: From, { let (current_fee_rate, consolidate_fee_rate) = @@ -1480,7 +1548,8 @@ where consolidate_fee_rate, self.wallet, self.account_index, - )?; + ) + .await?; let (tx, broadcasted) = self.broadcast_to_mempool_if_needed(tx).await?; let fees = into_balances(&self.rpc_client, self.chain_config, fees).await?; @@ -1499,7 +1568,7 @@ where tx_maker: F, ) -> Result> where - F: FnOnce( + F: AsyncFnOnce( FeeRate, FeeRate, &mut RuntimeWallet, @@ -1519,6 +1588,7 @@ where self.account_index, &token_freezable_info, ) + .await .map_err(ControllerError::WalletError)?; Ok(tx) @@ -1526,19 +1596,20 @@ where /// Create and broadcast a transaction that uses a token, /// check if that token can be used i.e. not frozen. - async fn create_and_send_token_tx< - F: FnOnce( + async fn create_and_send_token_tx( + &mut self, + token_info: RPCTokenInfo, + tx_maker: F, + ) -> Result> + where + F: AsyncFnOnce( FeeRate, FeeRate, &mut RuntimeWallet, U31, &UnconfirmedTokenInfo, ) -> WalletResult, - >( - &mut self, - token_info: RPCTokenInfo, - tx_maker: F, - ) -> Result> { + { let SignedTxWithFees { tx, fees } = self.create_token_tx(token_info, tx_maker).await?; let (tx, broadcasted) = self.broadcast_to_mempool_if_needed(tx).await?; let fees = into_balances(&self.rpc_client, self.chain_config, fees).await?; @@ -1569,7 +1640,7 @@ where /// e.g. 
newly issued token, nft or delegation id async fn create_and_send_tx_with_id< ID, - F: FnOnce( + F: AsyncFnOnce( FeeRate, FeeRate, &mut RuntimeWallet, @@ -1588,6 +1659,7 @@ where self.wallet, self.account_index, ) + .await .map_err(ControllerError::WalletError)?; let (tx, broadcasted) = self.broadcast_to_mempool_if_needed(tx).await?; diff --git a/wallet/wallet-controller/src/tests/test_utils.rs b/wallet/wallet-controller/src/tests/test_utils.rs index 7f253a8108..9c1e4162a5 100644 --- a/wallet/wallet-controller/src/tests/test_utils.rs +++ b/wallet/wallet-controller/src/tests/test_utils.rs @@ -171,7 +171,7 @@ pub fn create_block_scan_wallet( block_height: u64, ) -> Block where - B: storage::Backend + 'static, + B: storage::BackendWithSendableTransactions + 'static, P: SignerProvider, { let block = Block::new( diff --git a/wallet/wallet-rpc-lib/src/rpc/mod.rs b/wallet/wallet-rpc-lib/src/rpc/mod.rs index 78b1c20e58..bbe69eb854 100644 --- a/wallet/wallet-rpc-lib/src/rpc/mod.rs +++ b/wallet/wallet-rpc-lib/src/rpc/mod.rs @@ -902,6 +902,7 @@ where .synced_controller(account_index, config) .await? .sign_challenge(&challenge, &destination) + .await .map_err(RpcError::Controller) }) })