From b4f421a512d17e6ae9415d961aa3fe538b253d6a Mon Sep 17 00:00:00 2001 From: LLFourn Date: Thu, 3 Nov 2022 15:59:38 +0800 Subject: [PATCH] =?UTF-8?q?bdk=5Fcore=20integration=20initial=20commit=20?= =?UTF-8?q?=F0=9F=94=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We prepare the BDK repo for a major restructuring. - database modules removed - blockchain module removed - minimal API changes. - Many macros removed. - no longer applicable examples removed. - Much conditional compilation removed. Can compile with --all-features. --- .github/workflows/code_coverage.yml | 8 +- .github/workflows/cont_integration.yml | 59 +- .github/workflows/nightly_docs.yml | 2 +- Cargo.toml | 105 +- README.md | 8 +- bdk_test_client/Cargo.toml | 19 + bdk_test_client/src/lib.rs | 295 ++ examples/compact_filters_balance.rs | 41 - examples/compiler.rs | 7 +- examples/esplora.rs | 112 + examples/hardware_signer.rs | 3 +- examples/psbt_signer.rs | 10 +- examples/rpcwallet.rs | 229 -- macros/Cargo.toml | 24 - macros/src/lib.rs | 146 - src/blockchain/any.rs | 248 -- src/blockchain/compact_filters/mod.rs | 594 ---- src/blockchain/compact_filters/peer.rs | 576 ---- src/blockchain/compact_filters/store.rs | 836 ------ src/blockchain/compact_filters/sync.rs | 297 -- src/blockchain/electrum.rs | 430 --- src/blockchain/esplora/async.rs | 250 -- src/blockchain/esplora/blocking.rs | 238 -- src/blockchain/esplora/mod.rs | 130 - src/blockchain/mod.rs | 393 --- src/blockchain/rpc.rs | 1000 ------- src/blockchain/script_sync.rs | 467 --- src/database/any.rs | 427 --- src/database/keyvalue.rs | 535 ---- src/database/memory.rs | 690 ----- src/database/mod.rs | 657 ----- src/database/sqlite.rs | 1147 -------- src/descriptor/mod.rs | 8 +- src/descriptor/spk_iter.rs | 63 + src/descriptor/template.rs | 24 +- src/error.rs | 58 - src/lib.rs | 77 +- src/psbt/mod.rs | 34 +- src/testutils/blockchain_tests.rs | 1480 ---------- .../configurable_blockchain_tests.rs | 257 
-- src/testutils/mod.rs | 233 -- src/types.rs | 143 +- src/wallet/coin_selection.rs | 255 +- src/wallet/export.rs | 129 +- src/wallet/mod.rs | 2612 +++++++---------- src/wallet/time.rs | 73 - src/wallet/tx_builder.rs | 63 +- 47 files changed, 1693 insertions(+), 13799 deletions(-) create mode 100644 bdk_test_client/Cargo.toml create mode 100644 bdk_test_client/src/lib.rs delete mode 100644 examples/compact_filters_balance.rs create mode 100644 examples/esplora.rs delete mode 100644 examples/rpcwallet.rs delete mode 100644 macros/Cargo.toml delete mode 100644 macros/src/lib.rs delete mode 100644 src/blockchain/any.rs delete mode 100644 src/blockchain/compact_filters/mod.rs delete mode 100644 src/blockchain/compact_filters/peer.rs delete mode 100644 src/blockchain/compact_filters/store.rs delete mode 100644 src/blockchain/compact_filters/sync.rs delete mode 100644 src/blockchain/electrum.rs delete mode 100644 src/blockchain/esplora/async.rs delete mode 100644 src/blockchain/esplora/blocking.rs delete mode 100644 src/blockchain/esplora/mod.rs delete mode 100644 src/blockchain/mod.rs delete mode 100644 src/blockchain/rpc.rs delete mode 100644 src/blockchain/script_sync.rs delete mode 100644 src/database/any.rs delete mode 100644 src/database/keyvalue.rs delete mode 100644 src/database/memory.rs delete mode 100644 src/database/mod.rs delete mode 100644 src/database/sqlite.rs create mode 100644 src/descriptor/spk_iter.rs delete mode 100644 src/testutils/blockchain_tests.rs delete mode 100644 src/testutils/configurable_blockchain_tests.rs delete mode 100644 src/testutils/mod.rs delete mode 100644 src/wallet/time.rs diff --git a/.github/workflows/code_coverage.yml b/.github/workflows/code_coverage.yml index b53c47a638..8236bedeae 100644 --- a/.github/workflows/code_coverage.yml +++ b/.github/workflows/code_coverage.yml @@ -38,13 +38,7 @@ jobs: - name: Install grcov run: if [[ ! 
-e ~/.cargo/bin/grcov ]]; then cargo install grcov; fi - name: Test - # WARNING: this is not testing the following features: test-esplora, test-hardware-signer, async-interface - # This is because some of our features are mutually exclusive, and generating various reports and - # merging them doesn't seem to be working very well. - # For more info, see: - # - https://github.com/bitcoindevkit/bdk/issues/696 - # - https://github.com/bitcoindevkit/bdk/pull/748#issuecomment-1242721040 - run: cargo test --features all-keys,compact_filters,compiler,key-value-db,sqlite,sqlite-bundled,test-electrum,test-rpc,verify + run: cargo test --all-features - name: Run grcov run: mkdir coverage; grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore '/*' -o ./coverage/lcov.info - name: Generate HTML coverage report diff --git a/.github/workflows/cont_integration.yml b/.github/workflows/cont_integration.yml index c82e8d1eb9..58a4dc94a0 100644 --- a/.github/workflows/cont_integration.yml +++ b/.github/workflows/cont_integration.yml @@ -14,21 +14,9 @@ jobs: clippy: true - version: 1.56.1 # MSRV features: - - default - - minimal - - all-keys - - minimal,use-esplora-blocking - - key-value-db - - electrum - - compact_filters - - use-esplora-blocking,key-value-db,electrum - - compiler - - rpc - - verify - - async-interface - - use-esplora-async - - sqlite - - sqlite-bundled + - --no-default-features + - --all-features + - --features=default steps: - name: checkout uses: actions/checkout@v2 @@ -82,47 +70,6 @@ jobs: - name: Test run: cargo test --features test-md-docs --no-default-features -- doctest::ReadmeDoctests - test-blockchains: - name: Blockchain ${{ matrix.blockchain.features }} - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - blockchain: - - name: electrum - testprefix: blockchain::electrum::test - features: test-electrum,verify - - name: rpc - testprefix: blockchain::rpc::test - features: test-rpc - - name: rpc-legacy - 
testprefix: blockchain::rpc::test - features: test-rpc-legacy - - name: esplora - testprefix: esplora - features: test-esplora,use-esplora-async,verify - - name: esplora - testprefix: esplora - features: test-esplora,use-esplora-blocking,verify - steps: - - name: Checkout - uses: actions/checkout@v2 - - name: Cache - uses: actions/cache@v2 - with: - path: | - ~/.cargo/registry - ~/.cargo/git - target - key: ${{ runner.os }}-cargo-${{ github.job }}-${{ hashFiles('**/Cargo.toml','**/Cargo.lock') }} - - name: Setup rust toolchain - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - name: Test - run: cargo test --no-default-features --features ${{ matrix.blockchain.features }} ${{ matrix.blockchain.testprefix }}::bdk_blockchain_tests - check-wasm: name: Check WASM runs-on: ubuntu-20.04 diff --git a/.github/workflows/nightly_docs.yml b/.github/workflows/nightly_docs.yml index 1b07937d96..1e3f613946 100644 --- a/.github/workflows/nightly_docs.yml +++ b/.github/workflows/nightly_docs.yml @@ -24,7 +24,7 @@ jobs: - name: Update toolchain run: rustup update - name: Build docs - run: cargo rustdoc --verbose --features=compiler,electrum,esplora,use-esplora-blocking,compact_filters,rpc,key-value-db,sqlite,all-keys,verify,hardware-signer -- --cfg docsrs -Dwarnings + run: cargo rustdoc --verbose --all-features -- --cfg docsrs -Dwarnings - name: Upload artifact uses: actions/upload-artifact@v2 with: diff --git a/Cargo.toml b/Cargo.toml index 337c47c677..6810da598b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,34 +10,21 @@ description = "A modern, lightweight, descriptor-based wallet library" keywords = ["bitcoin", "wallet", "descriptor", "psbt"] readme = "README.md" license = "MIT OR Apache-2.0" +# TODO: remove this when examples all work +autoexamples = false [dependencies] -bdk-macros = "^0.6" log = "^0.4" -miniscript = { version = "8.0", features = ["serde"] } -bitcoin = { version = "0.29.1", features = ["serde", "base64", "rand"] } +miniscript = { 
version = "9.0.0", features = ["serde"] } +bitcoin = { version = "0.29" , features = ["serde", "base64", "rand"]} serde = { version = "^1.0", features = ["derive"] } serde_json = { version = "^1.0" } +bdk_chain = { git = "https://github.com/LLFourn/bdk_core_staging.git", rev = "666ef9acdeb09af9d220a267f4784929886b09a6", features = ["miniscript", "serde"] } rand = "^0.8" # Optional dependencies -sled = { version = "0.34", optional = true } -electrum-client = { version = "0.12", optional = true } -esplora-client = { version = "0.3", default-features = false, optional = true } -rusqlite = { version = "0.27.0", optional = true } -ahash = { version = "0.7.6", optional = true } -futures = { version = "0.3", optional = true } -async-trait = { version = "0.1", optional = true } -rocksdb = { version = "0.14", default-features = false, features = ["snappy"], optional = true } -cc = { version = ">=1.0.64", optional = true } -socks = { version = "0.3", optional = true } hwi = { version = "0.4.0", optional = true, features = [ "use-miniscript"] } - bip39 = { version = "1.0.1", optional = true } -bitcoinconsensus = { version = "0.19.0-3", optional = true } - -# Needed by bdk_blockchain_tests macro and the `rpc` feature -bitcoincore-rpc = { version = "0.16", optional = true } # Platform-specific dependencies [target.'cfg(not(target_arch = "wasm32"))'.dependencies] @@ -45,57 +32,16 @@ tokio = { version = "1", features = ["rt", "macros"] } [target.'cfg(target_arch = "wasm32")'.dependencies] getrandom = "0.2" -async-trait = "0.1" js-sys = "0.3" [features] -minimal = [] compiler = ["miniscript/compiler"] -verify = ["bitcoinconsensus"] -default = ["key-value-db", "electrum"] -sqlite = ["rusqlite", "ahash"] -sqlite-bundled = ["sqlite", "rusqlite/bundled"] -compact_filters = ["rocksdb", "socks", "cc"] -key-value-db = ["sled"] all-keys = ["keys-bip39"] keys-bip39 = ["bip39"] -rpc = ["bitcoincore-rpc"] hardware-signer = ["hwi"] -# We currently provide mulitple implementations of 
`Blockchain`, all are -# blocking except for the `EsploraBlockchain` which can be either async or -# blocking, depending on the HTTP client in use. -# -# - Users wanting asynchronous HTTP calls should enable `async-interface` to get -# access to the asynchronous method implementations. Then, if Esplora is wanted, -# enable the `use-esplora-async` feature. -# - Users wanting blocking HTTP calls can use any of the other blockchain -# implementations (`compact_filters`, `electrum`, or `esplora`). Users wanting to -# use Esplora should enable the `use-esplora-blocking` feature. -# -# WARNING: Please take care with the features below, various combinations will -# fail to build. We cannot currently build `bdk` with `--all-features`. -async-interface = ["async-trait"] -electrum = ["electrum-client"] -# MUST ALSO USE `--no-default-features`. -use-esplora-async = ["esplora", "esplora-client/async", "futures"] -use-esplora-blocking = ["esplora", "esplora-client/blocking"] -# Deprecated aliases -use-esplora-reqwest = ["use-esplora-async"] -use-esplora-ureq = ["use-esplora-blocking"] -# Typical configurations will not need to use `esplora` feature directly. -esplora = [] - -# Use below feature with `use-esplora-async` to enable reqwest default TLS support -reqwest-default-tls = ["esplora-client/async-https"] - # Debug/Test features -test-blockchains = ["bitcoincore-rpc", "electrum-client"] -test-electrum = ["electrum", "electrsd/electrs_0_8_10", "electrsd/bitcoind_22_0", "test-blockchains"] -test-rpc = ["rpc", "electrsd/electrs_0_8_10", "electrsd/bitcoind_22_0", "test-blockchains"] -test-rpc-legacy = ["rpc", "electrsd/electrs_0_8_10", "electrsd/bitcoind_0_20_0", "test-blockchains"] -test-esplora = ["electrsd/legacy", "electrsd/esplora_a33e97e1", "electrsd/bitcoind_22_0", "test-blockchains"] -test-md-docs = ["electrum"] +test-md-docs = [] test-hardware-signer = ["hardware-signer"] # This feature is used to run `cargo check` in our CI targeting wasm. 
It's not recommended @@ -106,15 +52,10 @@ dev-getrandom-wasm = ["getrandom/js"] [dev-dependencies] lazy_static = "1.4" env_logger = "0.7" -electrsd = "0.21" # Move back to importing from rust-bitcoin once https://github.com/rust-bitcoin/rust-bitcoin/pull/1342 is released base64 = "^0.13" assert_matches = "1.5.0" -[[example]] -name = "compact_filters_balance" -required-features = ["compact_filters"] - [[example]] name = "miniscriptc" path = "examples/compiler.rs" @@ -124,44 +65,14 @@ required-features = ["compiler"] name = "policy" path = "examples/policy.rs" -[[example]] -name = "rpcwallet" -path = "examples/rpcwallet.rs" -required-features = ["keys-bip39", "key-value-db", "rpc", "electrsd/bitcoind_22_0"] - -[[example]] -name = "psbt_signer" -path = "examples/psbt_signer.rs" -required-features = ["electrum"] - -[[example]] -name = "hardware_signer" -path = "examples/hardware_signer.rs" -required-features = ["electrum", "hardware-signer"] - -[[example]] -name = "electrum_backend" -path = "examples/electrum_backend.rs" -required-features = ["electrum"] - -[[example]] -name = "esplora_backend_synchronous" -path = "examples/esplora_backend_synchronous.rs" -required-features = ["use-esplora-ureq"] - -[[example]] -name = "esplora_backend_asynchronous" -path = "examples/esplora_backend_asynchronous.rs" -required-features = ["use-esplora-reqwest", "reqwest-default-tls", "async-interface"] - [[example]] name = "mnemonic_to_descriptors" path = "examples/mnemonic_to_descriptors.rs" required-features = ["all-keys"] [workspace] -members = ["macros"] +members = ["bdk_test_client"] [package.metadata.docs.rs] -features = ["compiler", "electrum", "esplora", "use-esplora-blocking", "compact_filters", "rpc", "key-value-db", "sqlite", "all-keys", "verify", "hardware-signer"] +all-features = true # defines the configuration attribute `docsrs` rustdoc-args = ["--cfg", "docsrs"] diff --git a/README.md b/README.md index c68c73a801..aba16e2bbf 100644 --- a/README.md +++ b/README.md @@ 
-78,9 +78,9 @@ fn main() -> Result<(), bdk::Error> { MemoryDatabase::default(), )?; - println!("Address #0: {}", wallet.get_address(New)?); - println!("Address #1: {}", wallet.get_address(New)?); - println!("Address #2: {}", wallet.get_address(New)?); + println!("Address #0: {}", wallet.get_address(New)); + println!("Address #1: {}", wallet.get_address(New)); + println!("Address #2: {}", wallet.get_address(New)); Ok(()) } @@ -111,7 +111,7 @@ fn main() -> Result<(), bdk::Error> { wallet.sync(&blockchain, SyncOptions::default())?; - let send_to = wallet.get_address(New)?; + let send_to = wallet.get_address(New); let (psbt, details) = { let mut builder = wallet.build_tx(); builder diff --git a/bdk_test_client/Cargo.toml b/bdk_test_client/Cargo.toml new file mode 100644 index 0000000000..bc14b1e841 --- /dev/null +++ b/bdk_test_client/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "bdk_test_client" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +electrsd = { version = "0.22" } +bitcoincore-rpc = { version = "0.16"} +log = "^0.4" +bitcoin = { version = "0.29.1", features = ["serde", "base64", "rand"] } +electrum-client = "0.12" + + +[features] +bitcoind_22_0 = ["electrsd/bitcoind_22_0"] +electrs_0_8_10 = ["electrsd/electrs_0_8_10"] +esplora = ["electrsd/legacy", "electrsd/esplora_a33e97e1" ] diff --git a/bdk_test_client/src/lib.rs b/bdk_test_client/src/lib.rs new file mode 100644 index 0000000000..aebcb45778 --- /dev/null +++ b/bdk_test_client/src/lib.rs @@ -0,0 +1,295 @@ +use bitcoin::consensus::encode::serialize; +use bitcoin::hashes::hex::{FromHex, ToHex}; +use bitcoin::hashes::sha256d; +use bitcoin::{Address, PackedLockTime, Script, Sequence, Transaction, Txid, Witness}; +pub use bitcoincore_rpc::bitcoincore_rpc_json::AddressType; +use bitcoincore_rpc::jsonrpc::serde_json::{self, json}; +pub use bitcoincore_rpc::{Auth, Client as RpcClient, Error as RpcError, 
RpcApi}; +use core::str::FromStr; +use electrsd::bitcoind::BitcoinD; +use electrsd::{bitcoind, ElectrsD}; +pub use electrum_client::{Client as ElectrumClient, ElectrumApi}; +#[allow(unused_imports)] +use log::{debug, error, info, log_enabled, trace, Level}; +use std::env; +use std::ops::Deref; +use std::time::Duration; + +pub struct TestClient { + pub bitcoind: BitcoinD, + pub electrsd: ElectrsD, +} + +impl TestClient { + pub fn new(bitcoind_exe: String, electrs_exe: String) -> Self { + debug!("launching {} and {}", &bitcoind_exe, &electrs_exe); + + let mut conf = bitcoind::Conf::default(); + conf.view_stdout = log_enabled!(Level::Debug); + let bitcoind = BitcoinD::with_conf(bitcoind_exe, &conf).unwrap(); + + let mut conf = electrsd::Conf::default(); + conf.view_stderr = log_enabled!(Level::Debug); + conf.http_enabled = cfg!(feature = "esplora"); + + let electrsd = ElectrsD::with_conf(electrs_exe, &bitcoind, &conf).unwrap(); + + let node_address = bitcoind.client.get_new_address(None, None).unwrap(); + bitcoind + .client + .generate_to_address(101, &node_address) + .unwrap(); + + let mut test_client = TestClient { bitcoind, electrsd }; + TestClient::wait_for_block(&mut test_client, 101); + test_client + } + + fn wait_for_tx(&mut self, txid: Txid, monitor_script: &Script) { + // wait for electrs to index the tx + exponential_backoff_poll(|| { + self.electrsd.trigger().unwrap(); + trace!("wait_for_tx {}", txid); + + self.electrsd + .client + .script_get_history(monitor_script) + .unwrap() + .iter() + .position(|entry| entry.tx_hash == txid) + }); + } + + fn wait_for_block(&mut self, min_height: usize) { + self.electrsd.client.block_headers_subscribe().unwrap(); + + loop { + let header = exponential_backoff_poll(|| { + self.electrsd.trigger().unwrap(); + self.electrsd.client.ping().unwrap(); + self.electrsd.client.block_headers_pop().unwrap() + }); + if header.height >= min_height { + break; + } + } + } + + pub fn bump_fee(&mut self, txid: &Txid) -> Txid { + let tx = 
self.get_raw_transaction_info(txid, None).unwrap(); + assert!( + tx.confirmations.is_none(), + "Can't bump tx {} because it's already confirmed", + txid + ); + + let bumped: serde_json::Value = self.call("bumpfee", &[txid.to_string().into()]).unwrap(); + let new_txid = Txid::from_str(&bumped["txid"].as_str().unwrap().to_string()).unwrap(); + let monitor_script = Script::from_hex(&mut tx.vout[0].script_pub_key.hex.to_hex()).unwrap(); + self.wait_for_tx(new_txid, &monitor_script); + + debug!("Bumped {}, new txid {}", txid, new_txid); + + new_txid + } + + pub fn generate_manually(&mut self, txs: Vec) -> String { + use bitcoin::blockdata::block::{Block, BlockHeader}; + use bitcoin::blockdata::script::Builder; + use bitcoin::blockdata::transaction::{OutPoint, TxIn, TxOut}; + use bitcoin::hash_types::{BlockHash, TxMerkleNode}; + use bitcoin::hashes::Hash; + + let block_template: serde_json::Value = self + .call("getblocktemplate", &[json!({"rules": ["segwit"]})]) + .unwrap(); + trace!("getblocktemplate: {:#?}", block_template); + + let header = BlockHeader { + version: block_template["version"].as_i64().unwrap() as i32, + prev_blockhash: BlockHash::from_hex( + block_template["previousblockhash"].as_str().unwrap(), + ) + .unwrap(), + merkle_root: TxMerkleNode::all_zeros(), + time: block_template["curtime"].as_u64().unwrap() as u32, + bits: u32::from_str_radix(block_template["bits"].as_str().unwrap(), 16).unwrap(), + nonce: 0, + }; + debug!("header: {:#?}", header); + + let height = block_template["height"].as_u64().unwrap() as i64; + let witness_reserved_value: Vec = sha256d::Hash::all_zeros().as_ref().into(); + // burn block subsidy and fees, not a big deal + let mut coinbase_tx = Transaction { + version: 1, + lock_time: PackedLockTime(0), + input: vec![TxIn { + previous_output: OutPoint::null(), + script_sig: Builder::new().push_int(height).into_script(), + sequence: Sequence(0xFFFFFFFF), + witness: Witness::from_vec(vec![witness_reserved_value]), + }], + output: 
vec![], + }; + + let mut txdata = vec![coinbase_tx.clone()]; + txdata.extend_from_slice(&txs); + + let mut block = Block { header, txdata }; + + if let Some(witness_root) = block.witness_root() { + let witness_commitment = Block::compute_witness_commitment( + &witness_root, + &coinbase_tx.input[0] + .witness + .last() + .expect("Should contain the witness reserved value"), + ); + + // now update and replace the coinbase tx + let mut coinbase_witness_commitment_script = vec![0x6a, 0x24, 0xaa, 0x21, 0xa9, 0xed]; + coinbase_witness_commitment_script.extend_from_slice(&witness_commitment); + + coinbase_tx.output.push(TxOut { + value: 0, + script_pubkey: coinbase_witness_commitment_script.into(), + }); + } + + block.txdata[0] = coinbase_tx; + + // set merkle root + if let Some(merkle_root) = block.compute_merkle_root() { + block.header.merkle_root = merkle_root; + } + + assert!(block.check_merkle_root()); + assert!(block.check_witness_commitment()); + + // now do PoW :) + let target = block.header.target(); + while block.header.validate_pow(&target).is_err() { + block.header.nonce = block.header.nonce.checked_add(1).unwrap(); // panic if we run out of nonces + } + + let block_hex: String = serialize(&block).to_hex(); + debug!("generated block hex: {}", block_hex); + + self.electrsd.client.block_headers_subscribe().unwrap(); + + let submit_result: serde_json::Value = + self.call("submitblock", &[block_hex.into()]).unwrap(); + debug!("submitblock: {:?}", submit_result); + assert!( + submit_result.is_null(), + "submitblock error: {:?}", + submit_result.as_str() + ); + + self.wait_for_block(height as usize); + + block.header.block_hash().to_hex() + } + + pub fn generate(&mut self, num_blocks: u64, address: Option
) -> u32 { + let address = address.unwrap_or_else(|| self.get_new_address(None, None).unwrap()); + let hashes = self.generate_to_address(num_blocks, &address).unwrap(); + let best_hash = hashes.last().unwrap(); + let height = self.get_block_info(best_hash).unwrap().height; + + self.wait_for_block(height); + + debug!("Generated blocks to new height {}", height); + height as u32 + } + + pub fn invalidate(&mut self, num_blocks: u64) { + self.electrsd.client.block_headers_subscribe().unwrap(); + + let best_hash = self.get_best_block_hash().unwrap(); + let initial_height = self.get_block_info(&best_hash).unwrap().height; + + let mut to_invalidate = best_hash; + for i in 1..=num_blocks { + trace!( + "Invalidating block {}/{} ({})", + i, + num_blocks, + to_invalidate + ); + + self.invalidate_block(&to_invalidate).unwrap(); + to_invalidate = self.get_best_block_hash().unwrap(); + } + + self.wait_for_block(initial_height - num_blocks as usize); + + debug!( + "Invalidated {} blocks to new height of {}", + num_blocks, + initial_height - num_blocks as usize + ); + } + + pub fn reorg(&mut self, num_blocks: u64) { + self.invalidate(num_blocks); + self.generate(num_blocks, None); + } + + pub fn get_node_address(&self, address_type: Option) -> Address { + Address::from_str( + &self + .get_new_address(None, address_type) + .unwrap() + .to_string(), + ) + .unwrap() + } +} + +pub fn get_electrum_url() -> String { + env::var("BDK_ELECTRUM_URL").unwrap_or_else(|_| "tcp://127.0.0.1:50001".to_string()) +} + +impl Deref for TestClient { + type Target = RpcClient; + + fn deref(&self) -> &Self::Target { + &self.bitcoind.client + } +} + +impl Default for TestClient { + fn default() -> Self { + let bitcoind_exe = env::var("BITCOIND_EXE") + .ok() + .or(bitcoind::downloaded_exe_path().ok()) + .expect( + "you should provide env var BITCOIND_EXE or specifiy a bitcoind version feature", + ); + let electrs_exe = env::var("ELECTRS_EXE") + .ok() + .or(electrsd::downloaded_exe_path()) + .expect( + 
"you should provide env var ELECTRS_EXE or specifiy a electrsd version feature", + ); + Self::new(bitcoind_exe, electrs_exe) + } +} + +fn exponential_backoff_poll(mut poll: F) -> T +where + F: FnMut() -> Option, +{ + let mut delay = Duration::from_millis(64); + loop { + match poll() { + Some(data) => break data, + None if delay.as_millis() < 512 => delay = delay.mul_f32(2.0), + None => {} + } + + std::thread::sleep(delay); + } +} diff --git a/examples/compact_filters_balance.rs b/examples/compact_filters_balance.rs deleted file mode 100644 index ce875b4d59..0000000000 --- a/examples/compact_filters_balance.rs +++ /dev/null @@ -1,41 +0,0 @@ -// Bitcoin Dev Kit -// Written in 2020 by Alekos Filini -// -// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. -// You may not use this file except in accordance with one or both of these -// licenses. - -use bdk::blockchain::compact_filters::*; -use bdk::database::MemoryDatabase; -use bdk::*; -use bitcoin::*; -use blockchain::compact_filters::CompactFiltersBlockchain; -use blockchain::compact_filters::CompactFiltersError; -use log::info; -use std::sync::Arc; - -/// This will return wallet balance using compact filters -/// Requires a synced local bitcoin node 0.21 running on testnet with blockfilterindex=1 and peerblockfilters=1 -fn main() -> Result<(), CompactFiltersError> { - env_logger::init(); - info!("start"); - - let num_threads = 4; - let mempool = Arc::new(Mempool::default()); - let peers = (0..num_threads) - .map(|_| Peer::connect("localhost:18333", Arc::clone(&mempool), Network::Testnet)) - .collect::>()?; - let blockchain = CompactFiltersBlockchain::new(peers, "./wallet-filters", Some(500_000))?; - info!("done {:?}", blockchain); - let descriptor = "wpkh(tpubD6NzVbkrYhZ4X2yy78HWrr1M9NT8dKeWfzNiQqDdMqqa9UmmGztGGz6TaLFGsLfdft5iu32gxq1T4eMNxExNNWzVCpf9Y6JZi5TnqoC9wJq/*)"; - - let database = 
MemoryDatabase::default(); - let wallet = Arc::new(Wallet::new(descriptor, None, Network::Testnet, database).unwrap()); - wallet.sync(&blockchain, SyncOptions::default()).unwrap(); - info!("balance: {}", wallet.get_balance()?); - Ok(()) -} diff --git a/examples/compiler.rs b/examples/compiler.rs index 2ddabffd30..6b82ae4b8d 100644 --- a/examples/compiler.rs +++ b/examples/compiler.rs @@ -24,7 +24,6 @@ use bitcoin::Network; use miniscript::policy::Concrete; use miniscript::Descriptor; -use bdk::database::memory::MemoryDatabase; use bdk::wallet::AddressIndex::New; use bdk::{KeychainKind, Wallet}; @@ -54,14 +53,12 @@ fn main() -> Result<(), Box> { info!("Compiled into following Descriptor: \n{}", descriptor); - let database = MemoryDatabase::new(); - // Create a new wallet from this descriptor - let wallet = Wallet::new(&format!("{}", descriptor), None, Network::Regtest, database)?; + let wallet = Wallet::new(&format!("{}", descriptor), None, Network::Regtest)?; info!( "First derived address from the descriptor: \n{}", - wallet.get_address(New)? 
+ wallet.get_address(New) ); // BDK also has it's own `Policy` structure to represent the spending condition in a more diff --git a/examples/esplora.rs b/examples/esplora.rs new file mode 100644 index 0000000000..1659666418 --- /dev/null +++ b/examples/esplora.rs @@ -0,0 +1,112 @@ +use bdk::{ + blockchain::esplora::{esplora_client, BlockingClientExt}, + wallet::AddressIndex, + Wallet, +}; +use bdk_test_client::{RpcApi, TestClient}; +use bitcoin::{Amount, Network}; +use rand::Rng; +use std::error::Error; + +fn main() -> Result<(), Box> { + let _ = env_logger::init(); + const DESCRIPTOR: &'static str ="tr([73c5da0a/86'/0'/0']tprv8cSrHfiTQQWzKVejDHvBcvW4pdLEDLMvtVdbUXFfceQ4kbZKMsuFWbd3LUN3omNrQfafQaPwXUFXtcofkE9UjFZ3i9deezBHQTGvYV2xUzz/0/*)"; + const CHANGE_DESCRIPTOR: &'static str = "tr(tprv8ZgxMBicQKsPeQe98SGJ53vEJ7MNEFkQ4CkZmrr6PNom3vn6GqxuyoE78smkzpuP347zR9MXPg38PoZ8tbxLqSx4CufufHAGbQ9Hf7yTTwn/44'/0'/0'/1/*)#pxy2d75a"; + + let mut test_client = TestClient::default(); + let esplora_url = format!( + "http://{}", + test_client.electrsd.esplora_url.as_ref().unwrap() + ); + let client = esplora_client::Builder::new(&esplora_url).build_blocking()?; + + let wallet = Wallet::new(DESCRIPTOR, Some(CHANGE_DESCRIPTOR), Network::Regtest) + .expect("parsing descriptors failed"); + // note we don't *need* the Mutex for this example but it helps to show when the wallet does and + // doesn't need to be mutable + let wallet = std::sync::Mutex::new(wallet); + let n_initial_transactions = 10; + + let addresses = { + // we need it to be mutable to get a new address. + // This increments the derivation index of the keychain. 
+ let mut wallet = wallet.lock().unwrap(); + core::iter::repeat_with(|| wallet.get_address(AddressIndex::New)) + .filter(|_| rand::thread_rng().gen_bool(0.5)) + .take(n_initial_transactions) + .collect::>() + }; + + // get some coins for the internal node + test_client.generate(100, None); + + for address in addresses { + let exp_txid = test_client + .send_to_address( + &address, + Amount::from_sat(10_000), + None, + None, + None, + None, + None, + None, + ) + .expect("tx should send"); + eprintln!( + "💸 sending some coins to: {} (index {}) in tx {}", + address, address.index, exp_txid + ); + // sometimes generate a block after we send coins to the address + if rand::thread_rng().gen_bool(0.3) { + let height = test_client.generate(1, None); + eprintln!("📦 created a block at height {}", height); + } + } + + let wait_for_esplora_sync = std::time::Duration::from_secs(5); + + println!("⏳ waiting {}s for esplora to catch up..", wait_for_esplora_sync.as_secs()); + std::thread::sleep(wait_for_esplora_sync); + + + let wallet_scan_input = { + let wallet = wallet.lock().unwrap(); + wallet.start_wallet_scan() + }; + + let start = std::time::Instant::now(); + let stop_gap = 5; + eprintln!( + "🔎 starting scanning all keychains with stop gap of {}", + stop_gap + ); + let wallet_scan = client.wallet_scan(wallet_scan_input, stop_gap, &Default::default(), 5)?; + + // we've got an update so briefly take a lock on the wallet to apply it + { + let mut wallet = wallet.lock().unwrap(); + match wallet.apply_wallet_scan(wallet_scan) { + Ok(changes) => { + eprintln!("🎉 success! 
({}ms)", start.elapsed().as_millis()); + eprintln!("wallet balance after: {:?}", wallet.get_balance()); + //XXX: esplora is not indexing mempool transactions right now (or not doing it fast enough) + eprintln!( + "wallet found {} new transactions", + changes.tx_additions().count(), + ); + if changes.tx_additions().count() != n_initial_transactions { + eprintln!( + "(it should have found {} but maybe stop gap wasn't large enough?)", + n_initial_transactions + ); + } + } + Err(reason) => { + eprintln!("❌ esplora produced invalid wallet scan {}", reason); + } + } + } + + Ok(()) +} diff --git a/examples/hardware_signer.rs b/examples/hardware_signer.rs index d1c25f1ab8..514f99a64d 100644 --- a/examples/hardware_signer.rs +++ b/examples/hardware_signer.rs @@ -46,7 +46,6 @@ fn main() -> Result<(), Box> { descriptors.receive[0].clone(), Some(descriptors.internal[0].clone()), Network::Testnet, - MemoryDatabase::default(), )?; // Adding the hardware signer to the BDK wallet @@ -64,7 +63,7 @@ fn main() -> Result<(), Box> { wallet.sync(&blockchain, SyncOptions::default())?; // get deposit address - let deposit_address = wallet.get_address(AddressIndex::New)?; + let deposit_address = wallet.get_address(AddressIndex::New); let balance = wallet.get_balance()?; println!("Wallet balances in SATs: {}", balance); diff --git a/examples/psbt_signer.rs b/examples/psbt_signer.rs index 35c539dad5..c7d4156df5 100644 --- a/examples/psbt_signer.rs +++ b/examples/psbt_signer.rs @@ -47,26 +47,24 @@ fn main() -> Result<(), Box> { ElectrumBlockchain::from(Client::new("ssl://electrum.blockstream.info:60002")?); // create watch only wallet - let watch_only_wallet: Wallet = Wallet::new( + let watch_only_wallet: Wallet = Wallet::new( watch_only_external_descriptor, Some(watch_only_internal_descriptor), Network::Testnet, - MemoryDatabase::default(), )?; // create signing wallet - let signing_wallet: Wallet = Wallet::new( + let signing_wallet: Wallet = Wallet::new( signing_external_descriptor, 
Some(signing_internal_descriptor), Network::Testnet, - MemoryDatabase::default(), )?; println!("Syncing watch only wallet."); watch_only_wallet.sync(&blockchain, SyncOptions::default())?; // get deposit address - let deposit_address = watch_only_wallet.get_address(AddressIndex::New)?; + let deposit_address = watch_only_wallet.get_address(AddressIndex::New); let balance = watch_only_wallet.get_balance()?; println!("Watch only wallet balances in SATs: {}", balance); @@ -81,7 +79,7 @@ fn main() -> Result<(), Box> { "Wait for at least 10000 SATs of your wallet transactions to be confirmed...\nBe patient, this could take 10 mins or longer depending on how testnet is behaving." ); for tx_details in watch_only_wallet - .list_transactions(false)? + .transactions() .iter() .filter(|txd| txd.received > 0 && txd.confirmation_time.is_none()) { diff --git a/examples/rpcwallet.rs b/examples/rpcwallet.rs deleted file mode 100644 index 24a5559103..0000000000 --- a/examples/rpcwallet.rs +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. -// You may not use this file except in accordance with one or both of these -// licenses. 
- -use bdk::bitcoin::secp256k1::Secp256k1; -use bdk::bitcoin::Amount; -use bdk::bitcoin::Network; -use bdk::bitcoincore_rpc::RpcApi; - -use bdk::blockchain::rpc::{Auth, RpcBlockchain, RpcConfig}; -use bdk::blockchain::ConfigurableBlockchain; - -use bdk::keys::bip39::{Language, Mnemonic, WordCount}; -use bdk::keys::{DerivableKey, GeneratableKey, GeneratedKey}; - -use bdk::miniscript::miniscript::Segwitv0; - -use bdk::sled; -use bdk::template::Bip84; -use bdk::wallet::{signer::SignOptions, wallet_name_from_descriptor, AddressIndex, SyncOptions}; -use bdk::KeychainKind; -use bdk::Wallet; - -use bdk::blockchain::Blockchain; - -use electrsd; - -use std::error::Error; -use std::path::PathBuf; -use std::str::FromStr; - -/// This example demonstrates a typical way to create a wallet and work with bdk. -/// -/// This example bdk wallet is connected to a bitcoin core rpc regtest node, -/// and will attempt to receive, create and broadcast transactions. -/// -/// To start a bitcoind regtest node programmatically, this example uses -/// `electrsd` library, which is also a bdk dev-dependency. -/// -/// But you can start your own bitcoind backend, and the rest of the example should work fine. 
- -fn main() -> Result<(), Box> { - // -- Setting up background bitcoind process - - println!(">> Setting up bitcoind"); - - // Start the bitcoind process - let bitcoind_conf = electrsd::bitcoind::Conf::default(); - - // electrsd will automatically download the bitcoin core binaries - let bitcoind_exe = - electrsd::bitcoind::downloaded_exe_path().expect("We should always have downloaded path"); - - // Launch bitcoind and gather authentication access - let bitcoind = electrsd::bitcoind::BitcoinD::with_conf(bitcoind_exe, &bitcoind_conf).unwrap(); - let bitcoind_auth = Auth::Cookie { - file: bitcoind.params.cookie_file.clone(), - }; - - // Get a new core address - let core_address = bitcoind.client.get_new_address(None, None)?; - - // Generate 101 blocks and use the above address as coinbase - bitcoind.client.generate_to_address(101, &core_address)?; - - println!(">> bitcoind setup complete"); - println!( - "Available coins in Core wallet : {}", - bitcoind.client.get_balance(None, None)? - ); - - // -- Setting up the Wallet - - println!("\n>> Setting up BDK wallet"); - - // Get a random private key - let xprv = generate_random_ext_privkey()?; - - // Use the derived descriptors from the privatekey to - // create unique wallet name. 
- // This is a special utility function exposed via `bdk::wallet_name_from_descriptor()` - let wallet_name = wallet_name_from_descriptor( - Bip84(xprv.clone(), KeychainKind::External), - Some(Bip84(xprv.clone(), KeychainKind::Internal)), - Network::Regtest, - &Secp256k1::new(), - )?; - - // Create a database (using default sled type) to store wallet data - let mut datadir = PathBuf::from_str("/tmp/")?; - datadir.push(".bdk-example"); - let database = sled::open(datadir)?; - let database = database.open_tree(wallet_name.clone())?; - - // Create a RPC configuration of the running bitcoind backend we created in last step - // Note: If you are using custom regtest node, use the appropriate url and auth - let rpc_config = RpcConfig { - url: bitcoind.params.rpc_socket.to_string(), - auth: bitcoind_auth, - network: Network::Regtest, - wallet_name, - sync_params: None, - }; - - // Use the above configuration to create a RPC blockchain backend - let blockchain = RpcBlockchain::from_config(&rpc_config)?; - - // Combine Database + Descriptor to create the final wallet - let wallet = Wallet::new( - Bip84(xprv.clone(), KeychainKind::External), - Some(Bip84(xprv.clone(), KeychainKind::Internal)), - Network::Regtest, - database, - )?; - - // The `wallet` and the `blockchain` are independent structs. - // The wallet will be used to do all wallet level actions - // The blockchain can be used to do all blockchain level actions. - // For certain actions (like sync) the wallet will ask for a blockchain. - - // Sync the wallet - // The first sync is important as this will instantiate the - // wallet files. - wallet.sync(&blockchain, SyncOptions::default())?; - - println!(">> BDK wallet setup complete."); - println!( - "Available initial coins in BDK wallet : {} sats", - wallet.get_balance()? 
- ); - - // -- Wallet transaction demonstration - - println!("\n>> Sending coins: Core --> BDK, 10 BTC"); - // Get a new address to receive coins - let bdk_new_addr = wallet.get_address(AddressIndex::New)?.address; - - // Send 10 BTC from core wallet to bdk wallet - bitcoind.client.send_to_address( - &bdk_new_addr, - Amount::from_btc(10.0)?, - None, - None, - None, - None, - None, - None, - )?; - - // Confirm transaction by generating 1 block - bitcoind.client.generate_to_address(1, &core_address)?; - - // Sync the BDK wallet - // This time the sync will fetch the new transaction and update it in - // wallet database - wallet.sync(&blockchain, SyncOptions::default())?; - - println!(">> Received coins in BDK wallet"); - println!( - "Available balance in BDK wallet: {} sats", - wallet.get_balance()? - ); - - println!("\n>> Sending coins: BDK --> Core, 5 BTC"); - // Attempt to send back 5.0 BTC to core address by creating a transaction - // - // Transactions are created using a `TxBuilder`. - // This helps us to systematically build a transaction with all - // required customization. 
- // A full list of APIs offered by `TxBuilder` can be found at - // https://docs.rs/bdk/latest/bdk/wallet/tx_builder/struct.TxBuilder.html - let mut tx_builder = wallet.build_tx(); - - // For a regular transaction, just set the recipient and amount - tx_builder.set_recipients(vec![(core_address.script_pubkey(), 500000000)]); - - // Finalize the transaction and extract the PSBT - let (mut psbt, _) = tx_builder.finish()?; - - // Set signing option - let signopt = SignOptions { - assume_height: None, - ..Default::default() - }; - - // Sign the psbt - wallet.sign(&mut psbt, signopt)?; - - // Extract the signed transaction - let tx = psbt.extract_tx(); - - // Broadcast the transaction - blockchain.broadcast(&tx)?; - - // Confirm transaction by generating some blocks - bitcoind.client.generate_to_address(1, &core_address)?; - - // Sync the BDK wallet - wallet.sync(&blockchain, SyncOptions::default())?; - - println!(">> Coins sent to Core wallet"); - println!( - "Remaining BDK wallet balance: {} sats", - wallet.get_balance()? - ); - println!("\nCongrats!! you made your first test transaction with bdk and bitcoin core."); - - Ok(()) -} - -// Helper function demonstrating privatekey extraction using bip39 mnemonic -// The mnemonic can be shown to user to safekeeping and the same wallet -// private descriptors can be recreated from it. -fn generate_random_ext_privkey() -> Result + Clone, Box> { - // a Bip39 passphrase can be set optionally - let password = Some("random password".to_string()); - - // Generate a random mnemonic, and use that to create a "DerivableKey" - let mnemonic: GeneratedKey<_, _> = Mnemonic::generate((WordCount::Words12, Language::English)) - .map_err(|e| e.expect("Unknown Error"))?; - - // `Ok(mnemonic)` would also work if there's no passphrase and it would - // yield the same result as this construct with `password` = `None`. 
- Ok((mnemonic, password)) -} diff --git a/macros/Cargo.toml b/macros/Cargo.toml deleted file mode 100644 index d5b2f5ff3b..0000000000 --- a/macros/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "bdk-macros" -version = "0.6.0" -authors = ["Alekos Filini "] -edition = "2018" -homepage = "https://bitcoindevkit.org" -repository = "https://github.com/bitcoindevkit/bdk" -documentation = "https://docs.rs/bdk-macros" -description = "Supporting macros for `bdk`" -keywords = ["bdk"] -license = "MIT OR Apache-2.0" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -syn = { version = "1.0", features = ["parsing", "full"] } -proc-macro2 = "1.0" -quote = "1.0" - -[features] -debug = ["syn/extra-traits"] - -[lib] -proc-macro = true diff --git a/macros/src/lib.rs b/macros/src/lib.rs deleted file mode 100644 index 74eda5cf4e..0000000000 --- a/macros/src/lib.rs +++ /dev/null @@ -1,146 +0,0 @@ -// Bitcoin Dev Kit -// Written in 2020 by Alekos Filini -// -// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. -// You may not use this file except in accordance with one or both of these -// licenses. - -#[macro_use] -extern crate quote; - -use proc_macro::TokenStream; - -use syn::spanned::Spanned; -use syn::{parse, ImplItemMethod, ItemImpl, ItemTrait, Token}; - -fn add_async_trait(mut parsed: ItemTrait) -> TokenStream { - let output = quote! { - #[cfg(all(not(target_arch = "wasm32"), not(feature = "async-interface")))] - #parsed - }; - - for mut item in &mut parsed.items { - if let syn::TraitItem::Method(m) = &mut item { - m.sig.asyncness = Some(Token![async](m.span())); - } - } - - let output = quote! 
{ - #output - - #[cfg(any(target_arch = "wasm32", feature = "async-interface"))] - #[async_trait(?Send)] - #parsed - }; - - output.into() -} - -fn add_async_method(mut parsed: ImplItemMethod) -> TokenStream { - let output = quote! { - #[cfg(all(not(target_arch = "wasm32"), not(feature = "async-interface")))] - #parsed - }; - - parsed.sig.asyncness = Some(Token![async](parsed.span())); - - let output = quote! { - #output - - #[cfg(any(target_arch = "wasm32", feature = "async-interface"))] - #parsed - }; - - output.into() -} - -fn add_async_impl_trait(mut parsed: ItemImpl) -> TokenStream { - let output = quote! { - #[cfg(all(not(target_arch = "wasm32"), not(feature = "async-interface")))] - #parsed - }; - - for mut item in &mut parsed.items { - if let syn::ImplItem::Method(m) = &mut item { - m.sig.asyncness = Some(Token![async](m.span())); - } - } - - let output = quote! { - #output - - #[cfg(any(target_arch = "wasm32", feature = "async-interface"))] - #[async_trait(?Send)] - #parsed - }; - - output.into() -} - -/// Makes a method or every method of a trait "async" only if the target_arch is "wasm32" -/// -/// Requires the `async-trait` crate as a dependency whenever this attribute is used on a trait -/// definition or trait implementation. -#[proc_macro_attribute] -pub fn maybe_async(_attr: TokenStream, item: TokenStream) -> TokenStream { - if let Ok(parsed) = parse(item.clone()) { - add_async_trait(parsed) - } else if let Ok(parsed) = parse(item.clone()) { - add_async_method(parsed) - } else if let Ok(parsed) = parse(item) { - add_async_impl_trait(parsed) - } else { - (quote! { - compile_error!("#[maybe_async] can only be used on methods, trait or trait impl blocks") - }) - .into() - } -} - -/// Awaits if target_arch is "wasm32", does nothing otherwise -#[proc_macro] -pub fn maybe_await(expr: TokenStream) -> TokenStream { - let expr: proc_macro2::TokenStream = expr.into(); - let quoted = quote! 
{ - { - #[cfg(all(not(target_arch = "wasm32"), not(feature = "async-interface")))] - { - #expr - } - - #[cfg(any(target_arch = "wasm32", feature = "async-interface"))] - { - #expr.await - } - } - }; - - quoted.into() -} - -/// Awaits if target_arch is "wasm32", uses `tokio::Runtime::block_on()` otherwise -/// -/// Requires the `tokio` crate as a dependecy with `rt-core` or `rt-threaded` to build on non-wasm32 platforms. -#[proc_macro] -pub fn await_or_block(expr: TokenStream) -> TokenStream { - let expr: proc_macro2::TokenStream = expr.into(); - let quoted = quote! { - { - #[cfg(all(not(target_arch = "wasm32"), not(feature = "async-interface")))] - { - tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap().block_on(#expr) - } - - #[cfg(any(target_arch = "wasm32", feature = "async-interface"))] - { - #expr.await - } - } - }; - - quoted.into() -} diff --git a/src/blockchain/any.rs b/src/blockchain/any.rs deleted file mode 100644 index 1d1a407ddf..0000000000 --- a/src/blockchain/any.rs +++ /dev/null @@ -1,248 +0,0 @@ -// Bitcoin Dev Kit -// Written in 2020 by Alekos Filini -// -// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. -// You may not use this file except in accordance with one or both of these -// licenses. - -//! Runtime-checked blockchain types -//! -//! This module provides the implementation of [`AnyBlockchain`] which allows switching the -//! inner [`Blockchain`] type at runtime. -//! -//! ## Example -//! -//! When paired with the use of [`ConfigurableBlockchain`], it allows creating any -//! blockchain type supported using a single line of code: -//! -//! ```no_run -//! # use bitcoin::Network; -//! # use bdk::blockchain::*; -//! # #[cfg(all(feature = "esplora", feature = "ureq"))] -//! # { -//! let config = serde_json::from_str("...")?; -//! let blockchain = AnyBlockchain::from_config(&config)?; -//! 
let height = blockchain.get_height(); -//! # } -//! # Ok::<(), bdk::Error>(()) -//! ``` - -use super::*; - -macro_rules! impl_from { - ( boxed $from:ty, $to:ty, $variant:ident, $( $cfg:tt )* ) => { - $( $cfg )* - impl From<$from> for $to { - fn from(inner: $from) -> Self { - <$to>::$variant(Box::new(inner)) - } - } - }; - ( $from:ty, $to:ty, $variant:ident, $( $cfg:tt )* ) => { - $( $cfg )* - impl From<$from> for $to { - fn from(inner: $from) -> Self { - <$to>::$variant(inner) - } - } - }; -} - -macro_rules! impl_inner_method { - ( $self:expr, $name:ident $(, $args:expr)* ) => { - match $self { - #[cfg(feature = "electrum")] - AnyBlockchain::Electrum(inner) => inner.$name( $($args, )* ), - #[cfg(feature = "esplora")] - AnyBlockchain::Esplora(inner) => inner.$name( $($args, )* ), - #[cfg(feature = "compact_filters")] - AnyBlockchain::CompactFilters(inner) => inner.$name( $($args, )* ), - #[cfg(feature = "rpc")] - AnyBlockchain::Rpc(inner) => inner.$name( $($args, )* ), - } - } -} - -/// Type that can contain any of the [`Blockchain`] types defined by the library -/// -/// It allows switching backend at runtime -/// -/// See [this module](crate::blockchain::any)'s documentation for a usage example. 
-pub enum AnyBlockchain { - #[cfg(feature = "electrum")] - #[cfg_attr(docsrs, doc(cfg(feature = "electrum")))] - /// Electrum client - Electrum(Box), - #[cfg(feature = "esplora")] - #[cfg_attr(docsrs, doc(cfg(feature = "esplora")))] - /// Esplora client - Esplora(Box), - #[cfg(feature = "compact_filters")] - #[cfg_attr(docsrs, doc(cfg(feature = "compact_filters")))] - /// Compact filters client - CompactFilters(Box), - #[cfg(feature = "rpc")] - #[cfg_attr(docsrs, doc(cfg(feature = "rpc")))] - /// RPC client - Rpc(Box), -} - -#[maybe_async] -impl Blockchain for AnyBlockchain { - fn get_capabilities(&self) -> HashSet { - maybe_await!(impl_inner_method!(self, get_capabilities)) - } - - fn broadcast(&self, tx: &Transaction) -> Result<(), Error> { - maybe_await!(impl_inner_method!(self, broadcast, tx)) - } - - fn estimate_fee(&self, target: usize) -> Result { - maybe_await!(impl_inner_method!(self, estimate_fee, target)) - } -} - -#[maybe_async] -impl GetHeight for AnyBlockchain { - fn get_height(&self) -> Result { - maybe_await!(impl_inner_method!(self, get_height)) - } -} - -#[maybe_async] -impl GetTx for AnyBlockchain { - fn get_tx(&self, txid: &Txid) -> Result, Error> { - maybe_await!(impl_inner_method!(self, get_tx, txid)) - } -} - -#[maybe_async] -impl GetBlockHash for AnyBlockchain { - fn get_block_hash(&self, height: u64) -> Result { - maybe_await!(impl_inner_method!(self, get_block_hash, height)) - } -} - -#[maybe_async] -impl WalletSync for AnyBlockchain { - fn wallet_sync( - &self, - database: &mut D, - progress_update: Box, - ) -> Result<(), Error> { - maybe_await!(impl_inner_method!( - self, - wallet_sync, - database, - progress_update - )) - } - - fn wallet_setup( - &self, - database: &mut D, - progress_update: Box, - ) -> Result<(), Error> { - maybe_await!(impl_inner_method!( - self, - wallet_setup, - database, - progress_update - )) - } -} - -impl_from!(boxed electrum::ElectrumBlockchain, AnyBlockchain, Electrum, #[cfg(feature = "electrum")]); 
-impl_from!(boxed esplora::EsploraBlockchain, AnyBlockchain, Esplora, #[cfg(feature = "esplora")]); -impl_from!(boxed compact_filters::CompactFiltersBlockchain, AnyBlockchain, CompactFilters, #[cfg(feature = "compact_filters")]); -impl_from!(boxed rpc::RpcBlockchain, AnyBlockchain, Rpc, #[cfg(feature = "rpc")]); - -/// Type that can contain any of the blockchain configurations defined by the library -/// -/// This allows storing a single configuration that can be loaded into an [`AnyBlockchain`] -/// instance. Wallets that plan to offer users the ability to switch blockchain backend at runtime -/// will find this particularly useful. -/// -/// This type can be serialized from a JSON object like: -/// -/// ``` -/// # #[cfg(feature = "electrum")] -/// # { -/// use bdk::blockchain::{electrum::ElectrumBlockchainConfig, AnyBlockchainConfig}; -/// let config: AnyBlockchainConfig = serde_json::from_str( -/// r#"{ -/// "type" : "electrum", -/// "url" : "ssl://electrum.blockstream.info:50002", -/// "retry": 2, -/// "stop_gap": 20, -/// "validate_domain": true -/// }"#, -/// ) -/// .unwrap(); -/// assert_eq!( -/// config, -/// AnyBlockchainConfig::Electrum(ElectrumBlockchainConfig { -/// url: "ssl://electrum.blockstream.info:50002".into(), -/// retry: 2, -/// socks5: None, -/// timeout: None, -/// stop_gap: 20, -/// validate_domain: true, -/// }) -/// ); -/// # } -/// ``` -#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)] -#[serde(tag = "type", rename_all = "snake_case")] -pub enum AnyBlockchainConfig { - #[cfg(feature = "electrum")] - #[cfg_attr(docsrs, doc(cfg(feature = "electrum")))] - /// Electrum client - Electrum(electrum::ElectrumBlockchainConfig), - #[cfg(feature = "esplora")] - #[cfg_attr(docsrs, doc(cfg(feature = "esplora")))] - /// Esplora client - Esplora(esplora::EsploraBlockchainConfig), - #[cfg(feature = "compact_filters")] - #[cfg_attr(docsrs, doc(cfg(feature = "compact_filters")))] - /// Compact filters client - 
CompactFilters(compact_filters::CompactFiltersBlockchainConfig), - #[cfg(feature = "rpc")] - #[cfg_attr(docsrs, doc(cfg(feature = "rpc")))] - /// RPC client configuration - Rpc(rpc::RpcConfig), -} - -impl ConfigurableBlockchain for AnyBlockchain { - type Config = AnyBlockchainConfig; - - fn from_config(config: &Self::Config) -> Result { - Ok(match config { - #[cfg(feature = "electrum")] - AnyBlockchainConfig::Electrum(inner) => { - AnyBlockchain::Electrum(Box::new(electrum::ElectrumBlockchain::from_config(inner)?)) - } - #[cfg(feature = "esplora")] - AnyBlockchainConfig::Esplora(inner) => { - AnyBlockchain::Esplora(Box::new(esplora::EsploraBlockchain::from_config(inner)?)) - } - #[cfg(feature = "compact_filters")] - AnyBlockchainConfig::CompactFilters(inner) => AnyBlockchain::CompactFilters(Box::new( - compact_filters::CompactFiltersBlockchain::from_config(inner)?, - )), - #[cfg(feature = "rpc")] - AnyBlockchainConfig::Rpc(inner) => { - AnyBlockchain::Rpc(Box::new(rpc::RpcBlockchain::from_config(inner)?)) - } - }) - } -} - -impl_from!(electrum::ElectrumBlockchainConfig, AnyBlockchainConfig, Electrum, #[cfg(feature = "electrum")]); -impl_from!(esplora::EsploraBlockchainConfig, AnyBlockchainConfig, Esplora, #[cfg(feature = "esplora")]); -impl_from!(compact_filters::CompactFiltersBlockchainConfig, AnyBlockchainConfig, CompactFilters, #[cfg(feature = "compact_filters")]); -impl_from!(rpc::RpcConfig, AnyBlockchainConfig, Rpc, #[cfg(feature = "rpc")]); diff --git a/src/blockchain/compact_filters/mod.rs b/src/blockchain/compact_filters/mod.rs deleted file mode 100644 index 9b47df9cf0..0000000000 --- a/src/blockchain/compact_filters/mod.rs +++ /dev/null @@ -1,594 +0,0 @@ -// Bitcoin Dev Kit -// Written in 2020 by Alekos Filini -// -// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. 
-// You may not use this file except in accordance with one or both of these -// licenses. - -//! Compact Filters -//! -//! This module contains a multithreaded implementation of an [`Blockchain`] backend that -//! uses BIP157 (aka "Neutrino") to populate the wallet's [database](crate::database::Database) -//! by downloading compact filters from the P2P network. -//! -//! Since there are currently very few peers "in the wild" that advertise the required service -//! flag, this implementation requires that one or more known peers are provided by the user. -//! No dns or other kinds of peer discovery are done internally. -//! -//! Moreover, this module doesn't currently support detecting and resolving conflicts between -//! messages received by different peers. Thus, it's recommended to use this module by only -//! connecting to a single peer at a time, optionally by opening multiple connections if it's -//! desirable to use multiple threads at once to sync in parallel. -//! -//! This is an **EXPERIMENTAL** feature, API and other major changes are expected. -//! -//! ## Example -//! -//! ```no_run -//! # use std::sync::Arc; -//! # use bitcoin::*; -//! # use bdk::*; -//! # use bdk::blockchain::compact_filters::*; -//! let num_threads = 4; -//! -//! let mempool = Arc::new(Mempool::default()); -//! let peers = (0..num_threads) -//! .map(|_| { -//! Peer::connect( -//! "btcd-mainnet.lightning.computer:8333", -//! Arc::clone(&mempool), -//! Network::Bitcoin, -//! ) -//! }) -//! .collect::>()?; -//! let blockchain = CompactFiltersBlockchain::new(peers, "./wallet-filters", Some(500_000))?; -//! # Ok::<(), CompactFiltersError>(()) -//! 
``` - -use std::collections::HashSet; -use std::fmt; -use std::path::Path; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::{Arc, Mutex}; - -#[allow(unused_imports)] -use log::{debug, error, info, trace}; - -use bitcoin::network::message_blockdata::Inventory; -use bitcoin::{Network, OutPoint, Transaction, Txid}; - -use rocksdb::{Options, SliceTransform, DB}; - -mod peer; -mod store; -mod sync; - -use crate::blockchain::*; -use crate::database::{BatchDatabase, BatchOperations, DatabaseUtils}; -use crate::error::Error; -use crate::types::{KeychainKind, LocalUtxo, TransactionDetails}; -use crate::{BlockTime, FeeRate}; - -use peer::*; -use store::*; -use sync::*; - -pub use peer::{Mempool, Peer}; - -const SYNC_HEADERS_COST: f32 = 1.0; -const SYNC_FILTERS_COST: f32 = 11.6 * 1_000.0; -const PROCESS_BLOCKS_COST: f32 = 20_000.0; - -/// Structure implementing the required blockchain traits -/// -/// ## Example -/// See the [`blockchain::compact_filters`](crate::blockchain::compact_filters) module for a usage example. -#[derive(Debug)] -pub struct CompactFiltersBlockchain { - peers: Vec>, - headers: Arc>, - skip_blocks: Option, -} - -impl CompactFiltersBlockchain { - /// Construct a new instance given a list of peers, a path to store headers and block - /// filters downloaded during the sync and optionally a number of blocks to ignore starting - /// from the genesis while scanning for the wallet's outputs. - /// - /// For each [`Peer`] specified a new thread will be spawned to download and verify the filters - /// in parallel. It's currently recommended to only connect to a single peer to avoid - /// inconsistencies in the data returned, optionally with multiple connections in parallel to - /// speed-up the sync process. 
- pub fn new>( - peers: Vec, - storage_dir: P, - skip_blocks: Option, - ) -> Result { - if peers.is_empty() { - return Err(CompactFiltersError::NoPeers); - } - - let mut opts = Options::default(); - opts.create_if_missing(true); - opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(16)); - - let network = peers[0].get_network(); - - let cfs = DB::list_cf(&opts, &storage_dir).unwrap_or_else(|_| vec!["default".to_string()]); - let db = DB::open_cf(&opts, &storage_dir, &cfs)?; - let headers = Arc::new(ChainStore::new(db, network)?); - - // try to recover partial snapshots - for cf_name in &cfs { - if !cf_name.starts_with("_headers:") { - continue; - } - - info!("Trying to recover: {:?}", cf_name); - headers.recover_snapshot(cf_name)?; - } - - Ok(CompactFiltersBlockchain { - peers: peers.into_iter().map(Arc::new).collect(), - headers, - skip_blocks, - }) - } - - /// Process a transaction by looking for inputs that spend from a UTXO in the database or - /// outputs that send funds to a know script_pubkey. - fn process_tx( - &self, - database: &mut D, - tx: &Transaction, - height: Option, - timestamp: Option, - internal_max_deriv: &mut Option, - external_max_deriv: &mut Option, - ) -> Result<(), Error> { - let mut updates = database.begin_batch(); - - let mut incoming: u64 = 0; - let mut outgoing: u64 = 0; - - let mut inputs_sum: u64 = 0; - let mut outputs_sum: u64 = 0; - - // look for our own inputs - for (i, input) in tx.input.iter().enumerate() { - if let Some(previous_output) = database.get_previous_output(&input.previous_output)? { - inputs_sum += previous_output.value; - - // this output is ours, we have a path to derive it - if let Some((keychain, _)) = - database.get_path_from_script_pubkey(&previous_output.script_pubkey)? 
- { - outgoing += previous_output.value; - - debug!("{} input #{} is mine, setting utxo as spent", tx.txid(), i); - updates.set_utxo(&LocalUtxo { - outpoint: input.previous_output, - txout: previous_output.clone(), - keychain, - is_spent: true, - })?; - } - } - } - - for (i, output) in tx.output.iter().enumerate() { - // to compute the fees later - outputs_sum += output.value; - - // this output is ours, we have a path to derive it - if let Some((keychain, child)) = - database.get_path_from_script_pubkey(&output.script_pubkey)? - { - debug!("{} output #{} is mine, adding utxo", tx.txid(), i); - updates.set_utxo(&LocalUtxo { - outpoint: OutPoint::new(tx.txid(), i as u32), - txout: output.clone(), - keychain, - is_spent: false, - })?; - incoming += output.value; - - if keychain == KeychainKind::Internal - && (internal_max_deriv.is_none() || child > internal_max_deriv.unwrap_or(0)) - { - *internal_max_deriv = Some(child); - } else if keychain == KeychainKind::External - && (external_max_deriv.is_none() || child > external_max_deriv.unwrap_or(0)) - { - *external_max_deriv = Some(child); - } - } - } - - if incoming > 0 || outgoing > 0 { - let tx = TransactionDetails { - txid: tx.txid(), - transaction: Some(tx.clone()), - received: incoming, - sent: outgoing, - confirmation_time: BlockTime::new(height, timestamp), - fee: Some(inputs_sum.saturating_sub(outputs_sum)), - }; - - info!("Saving tx {}", tx.txid); - updates.set_tx(&tx)?; - } - - database.commit_batch(updates)?; - - Ok(()) - } -} - -impl Blockchain for CompactFiltersBlockchain { - fn get_capabilities(&self) -> HashSet { - vec![Capability::FullHistory].into_iter().collect() - } - - fn broadcast(&self, tx: &Transaction) -> Result<(), Error> { - self.peers[0].broadcast_tx(tx.clone())?; - - Ok(()) - } - - fn estimate_fee(&self, _target: usize) -> Result { - // TODO - Ok(FeeRate::default()) - } -} - -impl GetHeight for CompactFiltersBlockchain { - fn get_height(&self) -> Result { - Ok(self.headers.get_height()? 
as u32) - } -} - -impl GetTx for CompactFiltersBlockchain { - fn get_tx(&self, txid: &Txid) -> Result, Error> { - Ok(self.peers[0] - .get_mempool() - .get_tx(&Inventory::Transaction(*txid))) - } -} - -impl GetBlockHash for CompactFiltersBlockchain { - fn get_block_hash(&self, height: u64) -> Result { - self.headers - .get_block_hash(height as usize)? - .ok_or(Error::CompactFilters( - CompactFiltersError::BlockHashNotFound, - )) - } -} - -impl WalletSync for CompactFiltersBlockchain { - #[allow(clippy::mutex_atomic)] // Mutex is easier to understand than a CAS loop. - fn wallet_setup( - &self, - database: &mut D, - progress_update: Box, - ) -> Result<(), Error> { - let first_peer = &self.peers[0]; - - let skip_blocks = self.skip_blocks.unwrap_or(0); - - let cf_sync = Arc::new(CfSync::new(Arc::clone(&self.headers), skip_blocks, 0x00)?); - - let initial_height = self.headers.get_height()?; - let total_bundles = (first_peer.get_version().start_height as usize) - .checked_sub(skip_blocks) - .map(|x| x / 1000) - .unwrap_or(0) - + 1; - let expected_bundles_to_sync = total_bundles.saturating_sub(cf_sync.pruned_bundles()?); - - let headers_cost = (first_peer.get_version().start_height as usize) - .saturating_sub(initial_height) as f32 - * SYNC_HEADERS_COST; - let filters_cost = expected_bundles_to_sync as f32 * SYNC_FILTERS_COST; - - let total_cost = headers_cost + filters_cost + PROCESS_BLOCKS_COST; - - if let Some(snapshot) = sync::sync_headers( - Arc::clone(first_peer), - Arc::clone(&self.headers), - |new_height| { - let local_headers_cost = - new_height.saturating_sub(initial_height) as f32 * SYNC_HEADERS_COST; - progress_update.update( - local_headers_cost / total_cost * 100.0, - Some(format!("Synced headers to {}", new_height)), - ) - }, - )? { - if snapshot.work()? > self.headers.work()? 
{ - info!("Applying snapshot with work: {}", snapshot.work()?); - self.headers.apply_snapshot(snapshot)?; - } - } - - let synced_height = self.headers.get_height()?; - let buried_height = synced_height.saturating_sub(sync::BURIED_CONFIRMATIONS); - info!("Synced headers to height: {}", synced_height); - - cf_sync.prepare_sync(Arc::clone(first_peer))?; - - let all_scripts = Arc::new( - database - .iter_script_pubkeys(None)? - .into_iter() - .map(|s| s.to_bytes()) - .collect::>(), - ); - - #[allow(clippy::mutex_atomic)] - let last_synced_block = Arc::new(Mutex::new(synced_height)); - - let synced_bundles = Arc::new(AtomicUsize::new(0)); - let progress_update = Arc::new(Mutex::new(progress_update)); - - let mut threads = Vec::with_capacity(self.peers.len()); - for peer in &self.peers { - let cf_sync = Arc::clone(&cf_sync); - let peer = Arc::clone(peer); - let headers = Arc::clone(&self.headers); - let all_scripts = Arc::clone(&all_scripts); - let last_synced_block = Arc::clone(&last_synced_block); - let progress_update = Arc::clone(&progress_update); - let synced_bundles = Arc::clone(&synced_bundles); - - let thread = std::thread::spawn(move || { - cf_sync.capture_thread_for_sync( - peer, - |block_hash, filter| { - if !filter - .match_any(block_hash, &mut all_scripts.iter().map(AsRef::as_ref))? 
- { - return Ok(false); - } - - let block_height = headers.get_height_for(block_hash)?.unwrap_or(0); - let saved_correct_block = matches!(headers.get_full_block(block_height)?, Some(block) if &block.block_hash() == block_hash); - - if saved_correct_block { - Ok(false) - } else { - let mut last_synced_block = last_synced_block.lock().unwrap(); - - // If we download a block older than `last_synced_block`, we update it so that - // we know to delete and re-process all txs starting from that height - if block_height < *last_synced_block { - *last_synced_block = block_height; - } - - Ok(true) - } - }, - |index| { - let synced_bundles = synced_bundles.fetch_add(1, Ordering::SeqCst); - let local_filters_cost = synced_bundles as f32 * SYNC_FILTERS_COST; - progress_update.lock().unwrap().update( - (headers_cost + local_filters_cost) / total_cost * 100.0, - Some(format!( - "Synced filters {} - {}", - index * 1000 + 1, - (index + 1) * 1000 - )), - ) - }, - ) - }); - - threads.push(thread); - } - - for t in threads { - t.join().unwrap()?; - } - - progress_update.lock().unwrap().update( - (headers_cost + filters_cost) / total_cost * 100.0, - Some("Processing downloaded blocks and mempool".into()), - )?; - - // delete all txs newer than last_synced_block - let last_synced_block = *last_synced_block.lock().unwrap(); - log::debug!( - "Dropping transactions newer than `last_synced_block` = {}", - last_synced_block - ); - let mut updates = database.begin_batch(); - for details in database.iter_txs(false)? 
{ - match details.confirmation_time { - Some(c) if (c.height as usize) < last_synced_block => continue, - _ => updates.del_tx(&details.txid, false)?, - }; - } - database.commit_batch(updates)?; - - match first_peer.ask_for_mempool() { - Err(CompactFiltersError::PeerBloomDisabled) => { - log::warn!("Peer has BLOOM disabled, we can't ask for the mempool") - } - e => e?, - }; - - let mut internal_max_deriv = None; - let mut external_max_deriv = None; - - for (height, block) in self.headers.iter_full_blocks()? { - for tx in &block.txdata { - self.process_tx( - database, - tx, - Some(height as u32), - None, - &mut internal_max_deriv, - &mut external_max_deriv, - )?; - } - } - for tx in first_peer.get_mempool().iter_txs().iter() { - self.process_tx( - database, - tx, - None, - None, - &mut internal_max_deriv, - &mut external_max_deriv, - )?; - } - - let current_ext = database - .get_last_index(KeychainKind::External)? - .unwrap_or(0); - let first_ext_new = external_max_deriv.map(|x| x + 1).unwrap_or(0); - if first_ext_new > current_ext { - info!("Setting external index to {}", first_ext_new); - database.set_last_index(KeychainKind::External, first_ext_new)?; - } - - let current_int = database - .get_last_index(KeychainKind::Internal)? 
- .unwrap_or(0); - let first_int_new = internal_max_deriv.map(|x| x + 1).unwrap_or(0); - if first_int_new > current_int { - info!("Setting internal index to {}", first_int_new); - database.set_last_index(KeychainKind::Internal, first_int_new)?; - } - - info!("Dropping blocks until {}", buried_height); - self.headers.delete_blocks_until(buried_height)?; - - progress_update - .lock() - .unwrap() - .update(100.0, Some("Done".into()))?; - - Ok(()) - } -} - -/// Data to connect to a Bitcoin P2P peer -#[derive(Debug, serde::Deserialize, serde::Serialize, Clone, PartialEq, Eq)] -pub struct BitcoinPeerConfig { - /// Peer address such as 127.0.0.1:18333 - pub address: String, - /// Optional socks5 proxy - pub socks5: Option, - /// Optional socks5 proxy credentials - pub socks5_credentials: Option<(String, String)>, -} - -/// Configuration for a [`CompactFiltersBlockchain`] -#[derive(Debug, serde::Deserialize, serde::Serialize, Clone, PartialEq, Eq)] -pub struct CompactFiltersBlockchainConfig { - /// List of peers to try to connect to for asking headers and filters - pub peers: Vec, - /// Network used - pub network: Network, - /// Storage dir to save partially downloaded headers and full blocks. Should be a separate directory per descriptor. Consider using [crate::wallet::wallet_name_from_descriptor] for this. 
- pub storage_dir: String, - /// Optionally skip initial `skip_blocks` blocks (default: 0) - pub skip_blocks: Option, -} - -impl ConfigurableBlockchain for CompactFiltersBlockchain { - type Config = CompactFiltersBlockchainConfig; - - fn from_config(config: &Self::Config) -> Result { - let mempool = Arc::new(Mempool::default()); - let peers = config - .peers - .iter() - .map(|peer_conf| match &peer_conf.socks5 { - None => Peer::connect(&peer_conf.address, Arc::clone(&mempool), config.network), - Some(proxy) => Peer::connect_proxy( - peer_conf.address.as_str(), - proxy, - peer_conf - .socks5_credentials - .as_ref() - .map(|(a, b)| (a.as_str(), b.as_str())), - Arc::clone(&mempool), - config.network, - ), - }) - .collect::>()?; - - Ok(CompactFiltersBlockchain::new( - peers, - &config.storage_dir, - config.skip_blocks, - )?) - } -} - -/// An error that can occur during sync with a [`CompactFiltersBlockchain`] -#[derive(Debug)] -pub enum CompactFiltersError { - /// A peer sent an invalid or unexpected response - InvalidResponse, - /// The headers returned are invalid - InvalidHeaders, - /// The compact filter headers returned are invalid - InvalidFilterHeader, - /// The compact filter returned is invalid - InvalidFilter, - /// The peer is missing a block in the valid chain - MissingBlock, - /// Block hash at specified height not found - BlockHashNotFound, - /// The data stored in the block filters storage are corrupted - DataCorruption, - - /// A peer is not connected - NotConnected, - /// A peer took too long to reply to one of our messages - Timeout, - /// The peer doesn't advertise the [`BLOOM`](bitcoin::network::constants::ServiceFlags::BLOOM) service flag - PeerBloomDisabled, - - /// No peers have been specified - NoPeers, - - /// Internal database error - Db(rocksdb::Error), - /// Internal I/O error - Io(std::io::Error), - /// Invalid BIP158 filter - Bip158(bitcoin::util::bip158::Error), - /// Internal system time error - Time(std::time::SystemTimeError), - - /// 
Wrapper for [`crate::error::Error`] - Global(Box), -} - -impl fmt::Display for CompactFiltersError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -impl std::error::Error for CompactFiltersError {} - -impl_error!(rocksdb::Error, Db, CompactFiltersError); -impl_error!(std::io::Error, Io, CompactFiltersError); -impl_error!(bitcoin::util::bip158::Error, Bip158, CompactFiltersError); -impl_error!(std::time::SystemTimeError, Time, CompactFiltersError); - -impl From for CompactFiltersError { - fn from(err: crate::error::Error) -> Self { - CompactFiltersError::Global(Box::new(err)) - } -} diff --git a/src/blockchain/compact_filters/peer.rs b/src/blockchain/compact_filters/peer.rs deleted file mode 100644 index 665a033d4b..0000000000 --- a/src/blockchain/compact_filters/peer.rs +++ /dev/null @@ -1,576 +0,0 @@ -// Bitcoin Dev Kit -// Written in 2020 by Alekos Filini -// -// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. -// You may not use this file except in accordance with one or both of these -// licenses. 
- -use std::collections::HashMap; -use std::io::BufReader; -use std::net::{TcpStream, ToSocketAddrs}; -use std::sync::{Arc, Condvar, Mutex, RwLock}; -use std::thread; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; - -use socks::{Socks5Stream, ToTargetAddr}; - -use rand::{thread_rng, Rng}; - -use bitcoin::consensus::{Decodable, Encodable}; -use bitcoin::hash_types::BlockHash; -use bitcoin::network::constants::ServiceFlags; -use bitcoin::network::message::{NetworkMessage, RawNetworkMessage}; -use bitcoin::network::message_blockdata::*; -use bitcoin::network::message_filter::*; -use bitcoin::network::message_network::VersionMessage; -use bitcoin::network::Address; -use bitcoin::{Block, Network, Transaction, Txid, Wtxid}; - -use super::CompactFiltersError; - -type ResponsesMap = HashMap<&'static str, Arc<(Mutex>, Condvar)>>; - -pub(crate) const TIMEOUT_SECS: u64 = 30; - -/// Container for unconfirmed, but valid Bitcoin transactions -/// -/// It is normally shared between [`Peer`]s with the use of [`Arc`], so that transactions are not -/// duplicated in memory. -#[derive(Debug, Default)] -pub struct Mempool(RwLock); - -#[derive(Debug, Default)] -struct InnerMempool { - txs: HashMap, - wtxids: HashMap, -} - -#[derive(Debug, Clone, PartialEq, Eq)] -enum TxIdentifier { - Wtxid(Wtxid), - Txid(Txid), -} - -impl Mempool { - /// Create a new empty mempool - pub fn new() -> Self { - Self::default() - } - - /// Add a transaction to the mempool - /// - /// Note that this doesn't propagate the transaction to other - /// peers. To do that, [`broadcast`](crate::blockchain::Blockchain::broadcast) should be used. 
- pub fn add_tx(&self, tx: Transaction) { - let mut guard = self.0.write().unwrap(); - - guard.wtxids.insert(tx.wtxid(), tx.txid()); - guard.txs.insert(tx.txid(), tx); - } - - /// Look-up a transaction in the mempool given an [`Inventory`] request - pub fn get_tx(&self, inventory: &Inventory) -> Option { - let identifer = match inventory { - Inventory::Error - | Inventory::Block(_) - | Inventory::WitnessBlock(_) - | Inventory::CompactBlock(_) => return None, - Inventory::Transaction(txid) => TxIdentifier::Txid(*txid), - Inventory::WitnessTransaction(txid) => TxIdentifier::Txid(*txid), - Inventory::WTx(wtxid) => TxIdentifier::Wtxid(*wtxid), - Inventory::Unknown { inv_type, hash } => { - log::warn!( - "Unknown inventory request type `{}`, hash `{:?}`", - inv_type, - hash - ); - return None; - } - }; - - let txid = match identifer { - TxIdentifier::Txid(txid) => Some(txid), - TxIdentifier::Wtxid(wtxid) => self.0.read().unwrap().wtxids.get(&wtxid).cloned(), - }; - - txid.and_then(|txid| self.0.read().unwrap().txs.get(&txid).cloned()) - } - - /// Return whether or not the mempool contains a transaction with a given txid - pub fn has_tx(&self, txid: &Txid) -> bool { - self.0.read().unwrap().txs.contains_key(txid) - } - - /// Return the list of transactions contained in the mempool - pub fn iter_txs(&self) -> Vec { - self.0.read().unwrap().txs.values().cloned().collect() - } -} - -/// A Bitcoin peer -#[derive(Debug)] -#[allow(dead_code)] -pub struct Peer { - writer: Arc>, - responses: Arc>, - - reader_thread: thread::JoinHandle<()>, - connected: Arc>, - - mempool: Arc, - - version: VersionMessage, - network: Network, -} - -impl Peer { - /// Connect to a peer over a plaintext TCP connection - /// - /// This function internally spawns a new thread that will monitor incoming messages from the - /// peer, and optionally reply to some of them transparently, like [pings](bitcoin::network::message::NetworkMessage::Ping) - pub fn connect( - address: A, - mempool: Arc, - network: 
Network, - ) -> Result { - let stream = TcpStream::connect(address)?; - - Peer::from_stream(stream, mempool, network) - } - - /// Connect to a peer through a SOCKS5 proxy, optionally by using some credentials, specified - /// as a tuple of `(username, password)` - /// - /// This function internally spawns a new thread that will monitor incoming messages from the - /// peer, and optionally reply to some of them transparently, like [pings](NetworkMessage::Ping) - pub fn connect_proxy( - target: T, - proxy: P, - credentials: Option<(&str, &str)>, - mempool: Arc, - network: Network, - ) -> Result { - let socks_stream = if let Some((username, password)) = credentials { - Socks5Stream::connect_with_password(proxy, target, username, password)? - } else { - Socks5Stream::connect(proxy, target)? - }; - - Peer::from_stream(socks_stream.into_inner(), mempool, network) - } - - /// Create a [`Peer`] from an already connected TcpStream - fn from_stream( - stream: TcpStream, - mempool: Arc, - network: Network, - ) -> Result { - let writer = Arc::new(Mutex::new(stream.try_clone()?)); - let responses: Arc> = Arc::new(RwLock::new(HashMap::new())); - let connected = Arc::new(RwLock::new(true)); - - let mut locked_writer = writer.lock().unwrap(); - - let reader_thread_responses = Arc::clone(&responses); - let reader_thread_writer = Arc::clone(&writer); - let reader_thread_mempool = Arc::clone(&mempool); - let reader_thread_connected = Arc::clone(&connected); - let reader_thread = thread::spawn(move || { - Self::reader_thread( - network, - stream, - reader_thread_responses, - reader_thread_writer, - reader_thread_mempool, - reader_thread_connected, - ) - }); - - let timestamp = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs() as i64; - let nonce = thread_rng().gen(); - let receiver = Address::new(&locked_writer.peer_addr()?, ServiceFlags::NONE); - let sender = Address { - services: ServiceFlags::NONE, - address: [0u16; 8], - port: 0, - }; - - Self::_send( - &mut locked_writer, - 
network.magic(), - NetworkMessage::Version(VersionMessage::new( - ServiceFlags::WITNESS, - timestamp, - receiver, - sender, - nonce, - "MagicalBitcoinWallet".into(), - 0, - )), - )?; - let version = if let NetworkMessage::Version(version) = - Self::_recv(&responses, "version", None).unwrap() - { - version - } else { - return Err(CompactFiltersError::InvalidResponse); - }; - - if let NetworkMessage::Verack = Self::_recv(&responses, "verack", None).unwrap() { - Self::_send(&mut locked_writer, network.magic(), NetworkMessage::Verack)?; - } else { - return Err(CompactFiltersError::InvalidResponse); - } - - std::mem::drop(locked_writer); - - Ok(Peer { - writer, - responses, - reader_thread, - connected, - mempool, - version, - network, - }) - } - - /// Send a Bitcoin network message - fn _send( - writer: &mut TcpStream, - magic: u32, - payload: NetworkMessage, - ) -> Result<(), CompactFiltersError> { - log::trace!("==> {:?}", payload); - - let raw_message = RawNetworkMessage { magic, payload }; - - raw_message - .consensus_encode(writer) - .map_err(|_| CompactFiltersError::DataCorruption)?; - - Ok(()) - } - - /// Wait for a specific incoming Bitcoin message, optionally with a timeout - fn _recv( - responses: &Arc>, - wait_for: &'static str, - timeout: Option, - ) -> Option { - let message_resp = { - let mut lock = responses.write().unwrap(); - let message_resp = lock.entry(wait_for).or_default(); - Arc::clone(message_resp) - }; - - let (lock, cvar) = &*message_resp; - - let mut messages = lock.lock().unwrap(); - while messages.is_empty() { - match timeout { - None => messages = cvar.wait(messages).unwrap(), - Some(t) => { - let result = cvar.wait_timeout(messages, t).unwrap(); - if result.1.timed_out() { - return None; - } - messages = result.0; - } - } - } - - messages.pop() - } - - /// Return the [`VersionMessage`] sent by the peer - pub fn get_version(&self) -> &VersionMessage { - &self.version - } - - /// Return the Bitcoin [`Network`] in use - pub fn 
get_network(&self) -> Network { - self.network - } - - /// Return the mempool used by this peer - pub fn get_mempool(&self) -> Arc { - Arc::clone(&self.mempool) - } - - /// Return whether or not the peer is still connected - pub fn is_connected(&self) -> bool { - *self.connected.read().unwrap() - } - - /// Internal function called once the `reader_thread` is spawned - fn reader_thread( - network: Network, - connection: TcpStream, - reader_thread_responses: Arc>, - reader_thread_writer: Arc>, - reader_thread_mempool: Arc, - reader_thread_connected: Arc>, - ) { - macro_rules! check_disconnect { - ($call:expr) => { - match $call { - Ok(good) => good, - Err(e) => { - log::debug!("Error {:?}", e); - *reader_thread_connected.write().unwrap() = false; - - break; - } - } - }; - } - - let mut reader = BufReader::new(connection); - loop { - let raw_message: RawNetworkMessage = - check_disconnect!(Decodable::consensus_decode(&mut reader)); - - let in_message = if raw_message.magic != network.magic() { - continue; - } else { - raw_message.payload - }; - - log::trace!("<== {:?}", in_message); - - match in_message { - NetworkMessage::Ping(nonce) => { - check_disconnect!(Self::_send( - &mut reader_thread_writer.lock().unwrap(), - network.magic(), - NetworkMessage::Pong(nonce), - )); - - continue; - } - NetworkMessage::Alert(_) => continue, - NetworkMessage::GetData(ref inv) => { - let (found, not_found): (Vec<_>, Vec<_>) = inv - .iter() - .map(|item| (*item, reader_thread_mempool.get_tx(item))) - .partition(|(_, d)| d.is_some()); - for (_, found_tx) in found { - check_disconnect!(Self::_send( - &mut reader_thread_writer.lock().unwrap(), - network.magic(), - NetworkMessage::Tx(found_tx.unwrap()), - )); - } - - if !not_found.is_empty() { - check_disconnect!(Self::_send( - &mut reader_thread_writer.lock().unwrap(), - network.magic(), - NetworkMessage::NotFound( - not_found.into_iter().map(|(i, _)| i).collect(), - ), - )); - } - } - _ => {} - } - - let message_resp = { - let mut lock 
= reader_thread_responses.write().unwrap(); - let message_resp = lock.entry(in_message.cmd()).or_default(); - Arc::clone(message_resp) - }; - - let (lock, cvar) = &*message_resp; - let mut messages = lock.lock().unwrap(); - messages.push(in_message); - cvar.notify_all(); - } - } - - /// Send a raw Bitcoin message to the peer - pub fn send(&self, payload: NetworkMessage) -> Result<(), CompactFiltersError> { - let mut writer = self.writer.lock().unwrap(); - Self::_send(&mut writer, self.network.magic(), payload) - } - - /// Waits for a specific incoming Bitcoin message, optionally with a timeout - pub fn recv( - &self, - wait_for: &'static str, - timeout: Option, - ) -> Result, CompactFiltersError> { - Ok(Self::_recv(&self.responses, wait_for, timeout)) - } -} - -pub trait CompactFiltersPeer { - fn get_cf_checkpt( - &self, - filter_type: u8, - stop_hash: BlockHash, - ) -> Result; - fn get_cf_headers( - &self, - filter_type: u8, - start_height: u32, - stop_hash: BlockHash, - ) -> Result; - fn get_cf_filters( - &self, - filter_type: u8, - start_height: u32, - stop_hash: BlockHash, - ) -> Result<(), CompactFiltersError>; - fn pop_cf_filter_resp(&self) -> Result; -} - -impl CompactFiltersPeer for Peer { - fn get_cf_checkpt( - &self, - filter_type: u8, - stop_hash: BlockHash, - ) -> Result { - self.send(NetworkMessage::GetCFCheckpt(GetCFCheckpt { - filter_type, - stop_hash, - }))?; - - let response = self - .recv("cfcheckpt", Some(Duration::from_secs(TIMEOUT_SECS)))? 
- .ok_or(CompactFiltersError::Timeout)?; - let response = match response { - NetworkMessage::CFCheckpt(response) => response, - _ => return Err(CompactFiltersError::InvalidResponse), - }; - - if response.filter_type != filter_type { - return Err(CompactFiltersError::InvalidResponse); - } - - Ok(response) - } - - fn get_cf_headers( - &self, - filter_type: u8, - start_height: u32, - stop_hash: BlockHash, - ) -> Result { - self.send(NetworkMessage::GetCFHeaders(GetCFHeaders { - filter_type, - start_height, - stop_hash, - }))?; - - let response = self - .recv("cfheaders", Some(Duration::from_secs(TIMEOUT_SECS)))? - .ok_or(CompactFiltersError::Timeout)?; - let response = match response { - NetworkMessage::CFHeaders(response) => response, - _ => return Err(CompactFiltersError::InvalidResponse), - }; - - if response.filter_type != filter_type { - return Err(CompactFiltersError::InvalidResponse); - } - - Ok(response) - } - - fn pop_cf_filter_resp(&self) -> Result { - let response = self - .recv("cfilter", Some(Duration::from_secs(TIMEOUT_SECS)))? 
- .ok_or(CompactFiltersError::Timeout)?; - let response = match response { - NetworkMessage::CFilter(response) => response, - _ => return Err(CompactFiltersError::InvalidResponse), - }; - - Ok(response) - } - - fn get_cf_filters( - &self, - filter_type: u8, - start_height: u32, - stop_hash: BlockHash, - ) -> Result<(), CompactFiltersError> { - self.send(NetworkMessage::GetCFilters(GetCFilters { - filter_type, - start_height, - stop_hash, - }))?; - - Ok(()) - } -} - -pub trait InvPeer { - fn get_block(&self, block_hash: BlockHash) -> Result, CompactFiltersError>; - fn ask_for_mempool(&self) -> Result<(), CompactFiltersError>; - fn broadcast_tx(&self, tx: Transaction) -> Result<(), CompactFiltersError>; -} - -impl InvPeer for Peer { - fn get_block(&self, block_hash: BlockHash) -> Result, CompactFiltersError> { - self.send(NetworkMessage::GetData(vec![Inventory::WitnessBlock( - block_hash, - )]))?; - - match self.recv("block", Some(Duration::from_secs(TIMEOUT_SECS)))? { - None => Ok(None), - Some(NetworkMessage::Block(response)) => Ok(Some(response)), - _ => Err(CompactFiltersError::InvalidResponse), - } - } - - fn ask_for_mempool(&self) -> Result<(), CompactFiltersError> { - if !self.version.services.has(ServiceFlags::BLOOM) { - return Err(CompactFiltersError::PeerBloomDisabled); - } - - self.send(NetworkMessage::MemPool)?; - let inv = match self.recv("inv", Some(Duration::from_secs(5)))? { - None => return Ok(()), // empty mempool - Some(NetworkMessage::Inv(inv)) => inv, - _ => return Err(CompactFiltersError::InvalidResponse), - }; - - let getdata = inv - .iter() - .cloned() - .filter( - |item| matches!(item, Inventory::Transaction(txid) if !self.mempool.has_tx(txid)), - ) - .collect::>(); - let num_txs = getdata.len(); - self.send(NetworkMessage::GetData(getdata))?; - - for _ in 0..num_txs { - let tx = self - .recv("tx", Some(Duration::from_secs(TIMEOUT_SECS)))? 
- .ok_or(CompactFiltersError::Timeout)?; - let tx = match tx { - NetworkMessage::Tx(tx) => tx, - _ => return Err(CompactFiltersError::InvalidResponse), - }; - - self.mempool.add_tx(tx); - } - - Ok(()) - } - - fn broadcast_tx(&self, tx: Transaction) -> Result<(), CompactFiltersError> { - self.mempool.add_tx(tx.clone()); - self.send(NetworkMessage::Tx(tx))?; - - Ok(()) - } -} diff --git a/src/blockchain/compact_filters/store.rs b/src/blockchain/compact_filters/store.rs deleted file mode 100644 index 9d5731009d..0000000000 --- a/src/blockchain/compact_filters/store.rs +++ /dev/null @@ -1,836 +0,0 @@ -// Bitcoin Dev Kit -// Written in 2020 by Alekos Filini -// -// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. -// You may not use this file except in accordance with one or both of these -// licenses. - -use std::convert::TryInto; -use std::fmt; -use std::io::{Read, Write}; -use std::marker::PhantomData; -use std::sync::Arc; -use std::sync::RwLock; - -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; - -use rocksdb::{Direction, IteratorMode, ReadOptions, WriteBatch, DB}; - -use bitcoin::blockdata::constants::genesis_block; -use bitcoin::consensus::{deserialize, encode::VarInt, serialize, Decodable, Encodable}; -use bitcoin::hash_types::{FilterHash, FilterHeader}; -use bitcoin::hashes::Hash; -use bitcoin::util::bip158::BlockFilter; -use bitcoin::util::uint::Uint256; -use bitcoin::Block; -use bitcoin::BlockHash; -use bitcoin::BlockHeader; -use bitcoin::Network; - -use super::CompactFiltersError; - -pub trait StoreType: Default + fmt::Debug {} - -#[derive(Default, Debug)] -pub struct Full; -impl StoreType for Full {} -#[derive(Default, Debug)] -pub struct Snapshot; -impl StoreType for Snapshot {} - -pub enum StoreEntry { - BlockHeader(Option), - Block(Option), - BlockHeaderIndex(Option), - CFilterTable((u8, Option)), -} - -impl StoreEntry { - 
pub fn get_prefix(&self) -> Vec { - match self { - StoreEntry::BlockHeader(_) => b"z", - StoreEntry::Block(_) => b"x", - StoreEntry::BlockHeaderIndex(_) => b"i", - StoreEntry::CFilterTable(_) => b"t", - } - .to_vec() - } - - pub fn get_key(&self) -> Vec { - let mut prefix = self.get_prefix(); - match self { - StoreEntry::BlockHeader(Some(height)) => { - prefix.extend_from_slice(&height.to_be_bytes()) - } - StoreEntry::Block(Some(height)) => prefix.extend_from_slice(&height.to_be_bytes()), - StoreEntry::BlockHeaderIndex(Some(hash)) => { - prefix.extend_from_slice(&hash.into_inner()) - } - StoreEntry::CFilterTable((filter_type, bundle_index)) => { - prefix.push(*filter_type); - if let Some(bundle_index) = bundle_index { - prefix.extend_from_slice(&bundle_index.to_be_bytes()); - } - } - _ => {} - } - - prefix - } -} - -pub trait SerializeDb: Sized { - fn serialize(&self) -> Vec; - fn deserialize(data: &[u8]) -> Result; -} - -impl SerializeDb for T -where - T: Encodable + Decodable, -{ - fn serialize(&self) -> Vec { - serialize(self) - } - - fn deserialize(data: &[u8]) -> Result { - deserialize(data).map_err(|_| CompactFiltersError::DataCorruption) - } -} - -impl Encodable for BundleStatus { - fn consensus_encode(&self, e: &mut W) -> Result { - let mut written = 0; - - match self { - BundleStatus::Init => { - written += 0x00u8.consensus_encode(e)?; - } - BundleStatus::CfHeaders { cf_headers } => { - written += 0x01u8.consensus_encode(e)?; - written += VarInt(cf_headers.len() as u64).consensus_encode(e)?; - for header in cf_headers { - written += header.consensus_encode(e)?; - } - } - BundleStatus::CFilters { cf_filters } => { - written += 0x02u8.consensus_encode(e)?; - written += VarInt(cf_filters.len() as u64).consensus_encode(e)?; - for filter in cf_filters { - written += filter.consensus_encode(e)?; - } - } - BundleStatus::Processed { cf_filters } => { - written += 0x03u8.consensus_encode(e)?; - written += VarInt(cf_filters.len() as u64).consensus_encode(e)?; - for 
filter in cf_filters { - written += filter.consensus_encode(e)?; - } - } - BundleStatus::Pruned => { - written += 0x04u8.consensus_encode(e)?; - } - BundleStatus::Tip { cf_filters } => { - written += 0x05u8.consensus_encode(e)?; - written += VarInt(cf_filters.len() as u64).consensus_encode(e)?; - for filter in cf_filters { - written += filter.consensus_encode(e)?; - } - } - } - - Ok(written) - } -} - -impl Decodable for BundleStatus { - fn consensus_decode( - d: &mut D, - ) -> Result { - let byte_type = u8::consensus_decode(d)?; - match byte_type { - 0x00 => Ok(BundleStatus::Init), - 0x01 => { - let num = VarInt::consensus_decode(d)?; - let num = num.0 as usize; - - let mut cf_headers = Vec::with_capacity(num); - for _ in 0..num { - cf_headers.push(FilterHeader::consensus_decode(d)?); - } - - Ok(BundleStatus::CfHeaders { cf_headers }) - } - 0x02 => { - let num = VarInt::consensus_decode(d)?; - let num = num.0 as usize; - - let mut cf_filters = Vec::with_capacity(num); - for _ in 0..num { - cf_filters.push(Vec::::consensus_decode(d)?); - } - - Ok(BundleStatus::CFilters { cf_filters }) - } - 0x03 => { - let num = VarInt::consensus_decode(d)?; - let num = num.0 as usize; - - let mut cf_filters = Vec::with_capacity(num); - for _ in 0..num { - cf_filters.push(Vec::::consensus_decode(d)?); - } - - Ok(BundleStatus::Processed { cf_filters }) - } - 0x04 => Ok(BundleStatus::Pruned), - 0x05 => { - let num = VarInt::consensus_decode(d)?; - let num = num.0 as usize; - - let mut cf_filters = Vec::with_capacity(num); - for _ in 0..num { - cf_filters.push(Vec::::consensus_decode(d)?); - } - - Ok(BundleStatus::Tip { cf_filters }) - } - _ => Err(bitcoin::consensus::encode::Error::ParseFailed( - "Invalid byte type", - )), - } - } -} - -pub struct ChainStore { - store: Arc>, - cf_name: String, - min_height: usize, - network: Network, - phantom: PhantomData, -} - -impl ChainStore { - pub fn new(store: DB, network: Network) -> Result { - let genesis = genesis_block(network); - - let 
cf_name = "default".to_string(); - let cf_handle = store.cf_handle(&cf_name).unwrap(); - - let genesis_key = StoreEntry::BlockHeader(Some(0)).get_key(); - - if store.get_pinned_cf(cf_handle, &genesis_key)?.is_none() { - let mut batch = WriteBatch::default(); - batch.put_cf( - cf_handle, - genesis_key, - (genesis.header, genesis.header.work()).serialize(), - ); - batch.put_cf( - cf_handle, - StoreEntry::BlockHeaderIndex(Some(genesis.block_hash())).get_key(), - &0usize.to_be_bytes(), - ); - store.write(batch)?; - } - - Ok(ChainStore { - store: Arc::new(RwLock::new(store)), - cf_name, - min_height: 0, - network, - phantom: PhantomData, - }) - } - - pub fn get_locators(&self) -> Result, CompactFiltersError> { - let mut step = 1; - let mut index = self.get_height()?; - let mut answer = Vec::new(); - - let store_read = self.store.read().unwrap(); - let cf_handle = store_read.cf_handle(&self.cf_name).unwrap(); - - loop { - if answer.len() > 10 { - step *= 2; - } - - let (header, _): (BlockHeader, Uint256) = SerializeDb::deserialize( - &store_read - .get_pinned_cf(cf_handle, StoreEntry::BlockHeader(Some(index)).get_key())? 
- .unwrap(), - )?; - answer.push((header.block_hash(), index)); - - if let Some(new_index) = index.checked_sub(step) { - index = new_index; - } else { - break; - } - } - - Ok(answer) - } - - pub fn start_snapshot(&self, from: usize) -> Result, CompactFiltersError> { - let new_cf_name: String = thread_rng() - .sample_iter(&Alphanumeric) - .map(|byte| byte as char) - .take(16) - .collect(); - let new_cf_name = format!("_headers:{}", new_cf_name); - - let mut write_store = self.store.write().unwrap(); - - write_store.create_cf(&new_cf_name, &Default::default())?; - - let cf_handle = write_store.cf_handle(&self.cf_name).unwrap(); - let new_cf_handle = write_store.cf_handle(&new_cf_name).unwrap(); - - let (header, work): (BlockHeader, Uint256) = SerializeDb::deserialize( - &write_store - .get_pinned_cf(cf_handle, StoreEntry::BlockHeader(Some(from)).get_key())? - .ok_or(CompactFiltersError::DataCorruption)?, - )?; - - let mut batch = WriteBatch::default(); - batch.put_cf( - new_cf_handle, - StoreEntry::BlockHeaderIndex(Some(header.block_hash())).get_key(), - &from.to_be_bytes(), - ); - batch.put_cf( - new_cf_handle, - StoreEntry::BlockHeader(Some(from)).get_key(), - (header, work).serialize(), - ); - write_store.write(batch)?; - - let store = Arc::clone(&self.store); - Ok(ChainStore { - store, - cf_name: new_cf_name, - min_height: from, - network: self.network, - phantom: PhantomData, - }) - } - - pub fn recover_snapshot(&self, cf_name: &str) -> Result<(), CompactFiltersError> { - let mut write_store = self.store.write().unwrap(); - let snapshot_cf_handle = write_store.cf_handle(cf_name).unwrap(); - - let prefix = StoreEntry::BlockHeader(None).get_key(); - let mut iterator = write_store.prefix_iterator_cf(snapshot_cf_handle, prefix); - - let min_height = match iterator - .next() - .and_then(|(k, _)| k[1..].try_into().ok()) - .map(usize::from_be_bytes) - { - None => { - std::mem::drop(iterator); - write_store.drop_cf(cf_name).ok(); - - return Ok(()); - } - Some(x) => x, - 
}; - std::mem::drop(iterator); - std::mem::drop(write_store); - - let snapshot = ChainStore { - store: Arc::clone(&self.store), - cf_name: cf_name.into(), - min_height, - network: self.network, - phantom: PhantomData, - }; - if snapshot.work()? > self.work()? { - self.apply_snapshot(snapshot)?; - } - - Ok(()) - } - - pub fn apply_snapshot( - &self, - snaphost: ChainStore, - ) -> Result<(), CompactFiltersError> { - let mut batch = WriteBatch::default(); - - let read_store = self.store.read().unwrap(); - let cf_handle = read_store.cf_handle(&self.cf_name).unwrap(); - let snapshot_cf_handle = read_store.cf_handle(&snaphost.cf_name).unwrap(); - - let from_key = StoreEntry::BlockHeader(Some(snaphost.min_height)).get_key(); - let to_key = StoreEntry::BlockHeader(Some(usize::MAX)).get_key(); - - let mut opts = ReadOptions::default(); - opts.set_iterate_upper_bound(to_key.clone()); - - log::debug!("Removing items"); - batch.delete_range_cf(cf_handle, &from_key, &to_key); - for (_, v) in read_store.iterator_cf_opt( - cf_handle, - opts, - IteratorMode::From(&from_key, Direction::Forward), - ) { - let (header, _): (BlockHeader, Uint256) = SerializeDb::deserialize(&v)?; - - batch.delete_cf( - cf_handle, - StoreEntry::BlockHeaderIndex(Some(header.block_hash())).get_key(), - ); - } - - // Delete full blocks overridden by snapshot - let from_key = StoreEntry::Block(Some(snaphost.min_height)).get_key(); - let to_key = StoreEntry::Block(Some(usize::MAX)).get_key(); - batch.delete_range(&from_key, &to_key); - - log::debug!("Copying over new items"); - for (k, v) in read_store.iterator_cf(snapshot_cf_handle, IteratorMode::Start) { - batch.put_cf(cf_handle, k, v); - } - - read_store.write(batch)?; - std::mem::drop(read_store); - - self.store.write().unwrap().drop_cf(&snaphost.cf_name)?; - - Ok(()) - } - - pub fn get_height_for( - &self, - block_hash: &BlockHash, - ) -> Result, CompactFiltersError> { - let read_store = self.store.read().unwrap(); - let cf_handle = 
read_store.cf_handle(&self.cf_name).unwrap(); - - let key = StoreEntry::BlockHeaderIndex(Some(*block_hash)).get_key(); - let data = read_store.get_pinned_cf(cf_handle, key)?; - data.map(|data| { - Ok::<_, CompactFiltersError>(usize::from_be_bytes( - data.as_ref() - .try_into() - .map_err(|_| CompactFiltersError::DataCorruption)?, - )) - }) - .transpose() - } - - pub fn get_block_hash(&self, height: usize) -> Result, CompactFiltersError> { - let read_store = self.store.read().unwrap(); - let cf_handle = read_store.cf_handle(&self.cf_name).unwrap(); - - let key = StoreEntry::BlockHeader(Some(height)).get_key(); - let data = read_store.get_pinned_cf(cf_handle, key)?; - data.map(|data| { - let (header, _): (BlockHeader, Uint256) = - deserialize(&data).map_err(|_| CompactFiltersError::DataCorruption)?; - Ok::<_, CompactFiltersError>(header.block_hash()) - }) - .transpose() - } - - pub fn save_full_block(&self, block: &Block, height: usize) -> Result<(), CompactFiltersError> { - let key = StoreEntry::Block(Some(height)).get_key(); - self.store.read().unwrap().put(key, block.serialize())?; - - Ok(()) - } - - pub fn get_full_block(&self, height: usize) -> Result, CompactFiltersError> { - let read_store = self.store.read().unwrap(); - - let key = StoreEntry::Block(Some(height)).get_key(); - let opt_block = read_store.get_pinned(key)?; - - opt_block - .map(|data| deserialize(&data)) - .transpose() - .map_err(|_| CompactFiltersError::DataCorruption) - } - - pub fn delete_blocks_until(&self, height: usize) -> Result<(), CompactFiltersError> { - let from_key = StoreEntry::Block(Some(0)).get_key(); - let to_key = StoreEntry::Block(Some(height)).get_key(); - - let mut batch = WriteBatch::default(); - batch.delete_range(&from_key, &to_key); - - self.store.read().unwrap().write(batch)?; - - Ok(()) - } - - pub fn iter_full_blocks(&self) -> Result, CompactFiltersError> { - let read_store = self.store.read().unwrap(); - - let prefix = StoreEntry::Block(None).get_key(); - - let 
iterator = read_store.prefix_iterator(&prefix); - // FIXME: we have to filter manually because rocksdb sometimes returns stuff that doesn't - // have the right prefix - iterator - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(k, v)| { - let height: usize = usize::from_be_bytes( - k[1..] - .try_into() - .map_err(|_| CompactFiltersError::DataCorruption)?, - ); - let block = SerializeDb::deserialize(&v)?; - - Ok((height, block)) - }) - .collect::>() - } -} - -impl ChainStore { - pub fn work(&self) -> Result { - let read_store = self.store.read().unwrap(); - let cf_handle = read_store.cf_handle(&self.cf_name).unwrap(); - - let prefix = StoreEntry::BlockHeader(None).get_key(); - let iterator = read_store.prefix_iterator_cf(cf_handle, prefix); - - Ok(iterator - .last() - .map(|(_, v)| -> Result<_, CompactFiltersError> { - let (_, work): (BlockHeader, Uint256) = SerializeDb::deserialize(&v)?; - - Ok(work) - }) - .transpose()? - .unwrap_or_default()) - } - - pub fn get_height(&self) -> Result { - let read_store = self.store.read().unwrap(); - let cf_handle = read_store.cf_handle(&self.cf_name).unwrap(); - - let prefix = StoreEntry::BlockHeader(None).get_key(); - let iterator = read_store.prefix_iterator_cf(cf_handle, prefix); - - Ok(iterator - .last() - .map(|(k, _)| -> Result<_, CompactFiltersError> { - let height = usize::from_be_bytes( - k[1..] - .try_into() - .map_err(|_| CompactFiltersError::DataCorruption)?, - ); - - Ok(height) - }) - .transpose()? 
- .unwrap_or_default()) - } - - pub fn get_tip_hash(&self) -> Result, CompactFiltersError> { - let read_store = self.store.read().unwrap(); - let cf_handle = read_store.cf_handle(&self.cf_name).unwrap(); - - let prefix = StoreEntry::BlockHeader(None).get_key(); - let iterator = read_store.prefix_iterator_cf(cf_handle, prefix); - - iterator - .last() - .map(|(_, v)| -> Result<_, CompactFiltersError> { - let (header, _): (BlockHeader, Uint256) = SerializeDb::deserialize(&v)?; - - Ok(header.block_hash()) - }) - .transpose() - } - - pub fn apply( - &mut self, - from: usize, - headers: Vec, - ) -> Result { - let mut batch = WriteBatch::default(); - - let read_store = self.store.read().unwrap(); - let cf_handle = read_store.cf_handle(&self.cf_name).unwrap(); - - let (mut last_hash, mut accumulated_work) = read_store - .get_pinned_cf(cf_handle, StoreEntry::BlockHeader(Some(from)).get_key())? - .map(|result| { - let (header, work): (BlockHeader, Uint256) = SerializeDb::deserialize(&result)?; - Ok::<_, CompactFiltersError>((header.block_hash(), work)) - }) - .transpose()? 
- .ok_or(CompactFiltersError::DataCorruption)?; - - for (index, header) in headers.into_iter().enumerate() { - if header.prev_blockhash != last_hash { - return Err(CompactFiltersError::InvalidHeaders); - } - - last_hash = header.block_hash(); - accumulated_work = accumulated_work + header.work(); - - let height = from + index + 1; - batch.put_cf( - cf_handle, - StoreEntry::BlockHeaderIndex(Some(header.block_hash())).get_key(), - &(height).to_be_bytes(), - ); - batch.put_cf( - cf_handle, - StoreEntry::BlockHeader(Some(height)).get_key(), - (header, accumulated_work).serialize(), - ); - } - - std::mem::drop(read_store); - - self.store.write().unwrap().write(batch)?; - Ok(last_hash) - } -} - -impl fmt::Debug for ChainStore { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct(&format!("ChainStore<{:?}>", T::default())) - .field("cf_name", &self.cf_name) - .field("min_height", &self.min_height) - .field("network", &self.network) - .field("headers_height", &self.get_height()) - .field("tip_hash", &self.get_tip_hash()) - .finish() - } -} - -pub enum BundleStatus { - Init, - CfHeaders { cf_headers: Vec }, - CFilters { cf_filters: Vec> }, - Processed { cf_filters: Vec> }, - Tip { cf_filters: Vec> }, - Pruned, -} - -pub struct CfStore { - store: Arc>, - filter_type: u8, -} - -type BundleEntry = (BundleStatus, FilterHeader); - -impl CfStore { - pub fn new( - headers_store: &ChainStore, - filter_type: u8, - ) -> Result { - let cf_store = CfStore { - store: Arc::clone(&headers_store.store), - filter_type, - }; - - let genesis = genesis_block(headers_store.network); - - let filter = BlockFilter::new_script_filter(&genesis, |utxo| { - Err(bitcoin::util::bip158::Error::UtxoMissing(*utxo)) - })?; - let first_key = StoreEntry::CFilterTable((filter_type, Some(0))).get_key(); - - // Add the genesis' filter - { - let read_store = cf_store.store.read().unwrap(); - if read_store.get_pinned(&first_key)?.is_none() { - read_store.put( - &first_key, - ( - 
BundleStatus::Init, - filter.filter_header(&FilterHeader::from_hash(Hash::all_zeros())), - ) - .serialize(), - )?; - } - } - - Ok(cf_store) - } - - pub fn get_filter_type(&self) -> u8 { - self.filter_type - } - - pub fn get_bundles(&self) -> Result, CompactFiltersError> { - let read_store = self.store.read().unwrap(); - - let prefix = StoreEntry::CFilterTable((self.filter_type, None)).get_key(); - let iterator = read_store.prefix_iterator(&prefix); - - // FIXME: we have to filter manually because rocksdb sometimes returns stuff that doesn't - // have the right prefix - iterator - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, data)| BundleEntry::deserialize(&data)) - .collect::>() - } - - pub fn get_checkpoints(&self) -> Result, CompactFiltersError> { - let read_store = self.store.read().unwrap(); - - let prefix = StoreEntry::CFilterTable((self.filter_type, None)).get_key(); - let iterator = read_store.prefix_iterator(&prefix); - - // FIXME: we have to filter manually because rocksdb sometimes returns stuff that doesn't - // have the right prefix - iterator - .filter(|(k, _)| k.starts_with(&prefix)) - .skip(1) - .map(|(_, data)| Ok::<_, CompactFiltersError>(BundleEntry::deserialize(&data)?.1)) - .collect::>() - } - - pub fn replace_checkpoints( - &self, - checkpoints: Vec, - ) -> Result<(), CompactFiltersError> { - let current_checkpoints = self.get_checkpoints()?; - - let mut equal_bundles = 0; - for (index, (our, their)) in current_checkpoints - .iter() - .zip(checkpoints.iter()) - .enumerate() - { - equal_bundles = index; - - if our != their { - break; - } - } - - let read_store = self.store.read().unwrap(); - let mut batch = WriteBatch::default(); - - for (index, filter_hash) in checkpoints.iter().enumerate().skip(equal_bundles) { - let key = StoreEntry::CFilterTable((self.filter_type, Some(index + 1))).get_key(); // +1 to skip the genesis' filter - - if let Some((BundleStatus::Tip { .. }, _)) = read_store - .get_pinned(&key)? 
- .map(|data| BundleEntry::deserialize(&data)) - .transpose()? - { - println!("Keeping bundle #{} as Tip", index); - } else { - batch.put(&key, (BundleStatus::Init, *filter_hash).serialize()); - } - } - - read_store.write(batch)?; - - Ok(()) - } - - pub fn advance_to_cf_headers( - &self, - bundle: usize, - checkpoint: FilterHeader, - filter_hashes: Vec, - ) -> Result { - let cf_headers: Vec = filter_hashes - .into_iter() - .scan(checkpoint, |prev_header, filter_hash| { - let filter_header = filter_hash.filter_header(prev_header); - *prev_header = filter_header; - - Some(filter_header) - }) - .collect(); - - let read_store = self.store.read().unwrap(); - - let next_key = StoreEntry::CFilterTable((self.filter_type, Some(bundle + 1))).get_key(); // +1 to skip the genesis' filter - if let Some((_, next_checkpoint)) = read_store - .get_pinned(&next_key)? - .map(|data| BundleEntry::deserialize(&data)) - .transpose()? - { - // check connection with the next bundle if present - if cf_headers.iter().last() != Some(&next_checkpoint) { - return Err(CompactFiltersError::InvalidFilterHeader); - } - } - - let key = StoreEntry::CFilterTable((self.filter_type, Some(bundle))).get_key(); - let value = (BundleStatus::CfHeaders { cf_headers }, checkpoint); - - read_store.put(key, value.serialize())?; - - Ok(value.0) - } - - pub fn advance_to_cf_filters( - &self, - bundle: usize, - checkpoint: FilterHeader, - headers: Vec, - filters: Vec<(usize, Vec)>, - ) -> Result { - let cf_filters = filters - .into_iter() - .zip(headers.into_iter()) - .scan(checkpoint, |prev_header, ((_, filter_content), header)| { - let filter = BlockFilter::new(&filter_content); - if header != filter.filter_header(prev_header) { - return Some(Err(CompactFiltersError::InvalidFilter)); - } - *prev_header = header; - - Some(Ok::<_, CompactFiltersError>(filter_content)) - }) - .collect::>()?; - - let key = StoreEntry::CFilterTable((self.filter_type, Some(bundle))).get_key(); - let value = (BundleStatus::CFilters { 
cf_filters }, checkpoint); - - let read_store = self.store.read().unwrap(); - read_store.put(key, value.serialize())?; - - Ok(value.0) - } - - pub fn prune_filters( - &self, - bundle: usize, - checkpoint: FilterHeader, - ) -> Result { - let key = StoreEntry::CFilterTable((self.filter_type, Some(bundle))).get_key(); - let value = (BundleStatus::Pruned, checkpoint); - - let read_store = self.store.read().unwrap(); - read_store.put(key, value.serialize())?; - - Ok(value.0) - } - - pub fn mark_as_tip( - &self, - bundle: usize, - cf_filters: Vec>, - checkpoint: FilterHeader, - ) -> Result { - let key = StoreEntry::CFilterTable((self.filter_type, Some(bundle))).get_key(); - let value = (BundleStatus::Tip { cf_filters }, checkpoint); - - let read_store = self.store.read().unwrap(); - read_store.put(key, value.serialize())?; - - Ok(value.0) - } -} diff --git a/src/blockchain/compact_filters/sync.rs b/src/blockchain/compact_filters/sync.rs deleted file mode 100644 index ba4e004564..0000000000 --- a/src/blockchain/compact_filters/sync.rs +++ /dev/null @@ -1,297 +0,0 @@ -// Bitcoin Dev Kit -// Written in 2020 by Alekos Filini -// -// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. -// You may not use this file except in accordance with one or both of these -// licenses. 
- -use std::collections::{BTreeMap, HashMap, VecDeque}; -use std::sync::{Arc, Mutex}; -use std::time::Duration; - -use bitcoin::hash_types::{BlockHash, FilterHeader}; -use bitcoin::hashes::Hash; -use bitcoin::network::message::NetworkMessage; -use bitcoin::network::message_blockdata::GetHeadersMessage; -use bitcoin::util::bip158::BlockFilter; - -use super::peer::*; -use super::store::*; -use super::CompactFiltersError; -use crate::error::Error; - -pub(crate) const BURIED_CONFIRMATIONS: usize = 100; - -pub struct CfSync { - headers_store: Arc>, - cf_store: Arc, - skip_blocks: usize, - bundles: Mutex>, -} - -impl CfSync { - pub fn new( - headers_store: Arc>, - skip_blocks: usize, - filter_type: u8, - ) -> Result { - let cf_store = Arc::new(CfStore::new(&headers_store, filter_type)?); - - Ok(CfSync { - headers_store, - cf_store, - skip_blocks, - bundles: Mutex::new(VecDeque::new()), - }) - } - - pub fn pruned_bundles(&self) -> Result { - Ok(self - .cf_store - .get_bundles()? - .into_iter() - .skip(self.skip_blocks / 1000) - .fold(0, |acc, (status, _)| match status { - BundleStatus::Pruned => acc + 1, - _ => acc, - })) - } - - pub fn prepare_sync(&self, peer: Arc) -> Result<(), CompactFiltersError> { - let mut bundles_lock = self.bundles.lock().unwrap(); - - let resp = peer.get_cf_checkpt( - self.cf_store.get_filter_type(), - self.headers_store.get_tip_hash()?.unwrap(), - )?; - self.cf_store.replace_checkpoints(resp.filter_headers)?; - - bundles_lock.clear(); - for (index, (status, checkpoint)) in self.cf_store.get_bundles()?.into_iter().enumerate() { - bundles_lock.push_back((status, checkpoint, index)); - } - - Ok(()) - } - - pub fn capture_thread_for_sync( - &self, - peer: Arc, - process: F, - completed_bundle: Q, - ) -> Result<(), CompactFiltersError> - where - F: Fn(&BlockHash, &BlockFilter) -> Result, - Q: Fn(usize) -> Result<(), Error>, - { - let current_height = self.headers_store.get_height()?; // TODO: we should update it in case headers_store is also updated 
- - loop { - let (mut status, checkpoint, index) = match self.bundles.lock().unwrap().pop_front() { - None => break, - Some(x) => x, - }; - - log::debug!( - "Processing bundle #{} - height {} to {}", - index, - index * 1000 + 1, - (index + 1) * 1000 - ); - - let process_received_filters = - |expected_filters| -> Result>, CompactFiltersError> { - let mut filters_map = BTreeMap::new(); - for _ in 0..expected_filters { - let filter = peer.pop_cf_filter_resp()?; - if filter.filter_type != self.cf_store.get_filter_type() { - return Err(CompactFiltersError::InvalidResponse); - } - - match self.headers_store.get_height_for(&filter.block_hash)? { - Some(height) => filters_map.insert(height, filter.filter), - None => return Err(CompactFiltersError::InvalidFilter), - }; - } - - Ok(filters_map) - }; - - let start_height = index * 1000 + 1; - let mut already_processed = 0; - - if start_height < self.skip_blocks { - status = self.cf_store.prune_filters(index, checkpoint)?; - } - - let stop_height = std::cmp::min(current_height, start_height + 999); - let stop_hash = self.headers_store.get_block_hash(stop_height)?.unwrap(); - - if let BundleStatus::Init = status { - log::trace!("status: Init"); - - let resp = peer.get_cf_headers(0x00, start_height as u32, stop_hash)?; - - assert_eq!(resp.previous_filter_header, checkpoint); - status = - self.cf_store - .advance_to_cf_headers(index, checkpoint, resp.filter_hashes)?; - } - if let BundleStatus::Tip { cf_filters } = status { - log::trace!("status: Tip (beginning) "); - - already_processed = cf_filters.len(); - let headers_resp = peer.get_cf_headers(0x00, start_height as u32, stop_hash)?; - - let cf_headers = match self.cf_store.advance_to_cf_headers( - index, - checkpoint, - headers_resp.filter_hashes, - )? 
{ - BundleStatus::CfHeaders { cf_headers } => cf_headers, - _ => return Err(CompactFiltersError::InvalidResponse), - }; - - peer.get_cf_filters( - self.cf_store.get_filter_type(), - (start_height + cf_filters.len()) as u32, - stop_hash, - )?; - let expected_filters = stop_height - start_height + 1 - cf_filters.len(); - let filters_map = process_received_filters(expected_filters)?; - let filters = cf_filters - .into_iter() - .enumerate() - .chain(filters_map.into_iter()) - .collect(); - status = self - .cf_store - .advance_to_cf_filters(index, checkpoint, cf_headers, filters)?; - } - if let BundleStatus::CfHeaders { cf_headers } = status { - log::trace!("status: CFHeaders"); - - peer.get_cf_filters( - self.cf_store.get_filter_type(), - start_height as u32, - stop_hash, - )?; - let expected_filters = stop_height - start_height + 1; - let filters_map = process_received_filters(expected_filters)?; - status = self.cf_store.advance_to_cf_filters( - index, - checkpoint, - cf_headers, - filters_map.into_iter().collect(), - )?; - } - if let BundleStatus::CFilters { cf_filters } = status { - log::trace!("status: CFilters"); - - let last_sync_buried_height = - (start_height + already_processed).saturating_sub(BURIED_CONFIRMATIONS); - - for (filter_index, filter) in cf_filters.iter().enumerate() { - let height = filter_index + start_height; - - // do not download blocks that were already "buried" since the last sync - if height < last_sync_buried_height { - continue; - } - - let block_hash = self.headers_store.get_block_hash(height)?.unwrap(); - - // TODO: also download random blocks? - if process(&block_hash, &BlockFilter::new(filter))? { - log::debug!("Downloading block {}", block_hash); - - let block = peer - .get_block(block_hash)? 
- .ok_or(CompactFiltersError::MissingBlock)?; - self.headers_store.save_full_block(&block, height)?; - } - } - - status = BundleStatus::Processed { cf_filters }; - } - if let BundleStatus::Processed { cf_filters } = status { - log::trace!("status: Processed"); - - if current_height - stop_height > 1000 { - status = self.cf_store.prune_filters(index, checkpoint)?; - } else { - status = self.cf_store.mark_as_tip(index, cf_filters, checkpoint)?; - } - - completed_bundle(index)?; - } - if let BundleStatus::Pruned = status { - log::trace!("status: Pruned"); - } - if let BundleStatus::Tip { .. } = status { - log::trace!("status: Tip"); - } - } - - Ok(()) - } -} - -pub fn sync_headers( - peer: Arc, - store: Arc>, - sync_fn: F, -) -> Result>, CompactFiltersError> -where - F: Fn(usize) -> Result<(), Error>, -{ - let locators = store.get_locators()?; - let locators_vec = locators.iter().map(|(hash, _)| hash).cloned().collect(); - let locators_map: HashMap<_, _> = locators.into_iter().collect(); - - peer.send(NetworkMessage::GetHeaders(GetHeadersMessage::new( - locators_vec, - Hash::all_zeros(), - )))?; - let (mut snapshot, mut last_hash) = if let NetworkMessage::Headers(headers) = peer - .recv("headers", Some(Duration::from_secs(TIMEOUT_SECS)))? - .ok_or(CompactFiltersError::Timeout)? - { - if headers.is_empty() { - return Ok(None); - } - - match locators_map.get(&headers[0].prev_blockhash) { - None => return Err(CompactFiltersError::InvalidHeaders), - Some(from) => (store.start_snapshot(*from)?, headers[0].prev_blockhash), - } - } else { - return Err(CompactFiltersError::InvalidResponse); - }; - - let mut sync_height = store.get_height()?; - while sync_height < peer.get_version().start_height as usize { - peer.send(NetworkMessage::GetHeaders(GetHeadersMessage::new( - vec![last_hash], - Hash::all_zeros(), - )))?; - if let NetworkMessage::Headers(headers) = peer - .recv("headers", Some(Duration::from_secs(TIMEOUT_SECS)))? - .ok_or(CompactFiltersError::Timeout)? 
- { - let batch_len = headers.len(); - last_hash = snapshot.apply(sync_height, headers)?; - - sync_height += batch_len; - sync_fn(sync_height)?; - } else { - return Err(CompactFiltersError::InvalidResponse); - } - } - - Ok(Some(snapshot)) -} diff --git a/src/blockchain/electrum.rs b/src/blockchain/electrum.rs deleted file mode 100644 index 6cbeef5651..0000000000 --- a/src/blockchain/electrum.rs +++ /dev/null @@ -1,430 +0,0 @@ -// Bitcoin Dev Kit -// Written in 2020 by Alekos Filini -// -// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. -// You may not use this file except in accordance with one or both of these -// licenses. - -//! Electrum -//! -//! This module defines a [`Blockchain`] struct that wraps an [`electrum_client::Client`] -//! and implements the logic required to populate the wallet's [database](crate::database::Database) by -//! querying the inner client. -//! -//! ## Example -//! -//! ```no_run -//! # use bdk::blockchain::electrum::ElectrumBlockchain; -//! let client = electrum_client::Client::new("ssl://electrum.blockstream.info:50002")?; -//! let blockchain = ElectrumBlockchain::from(client); -//! # Ok::<(), bdk::Error>(()) -//! ``` - -use std::collections::{HashMap, HashSet}; -use std::ops::Deref; - -#[allow(unused_imports)] -use log::{debug, error, info, trace}; - -use bitcoin::{Transaction, Txid}; - -use electrum_client::{Client, ConfigBuilder, ElectrumApi, Socks5Config}; - -use super::script_sync::Request; -use super::*; -use crate::database::{BatchDatabase, Database}; -use crate::error::Error; -use crate::{BlockTime, FeeRate}; - -/// Wrapper over an Electrum Client that implements the required blockchain traits -/// -/// ## Example -/// See the [`blockchain::electrum`](crate::blockchain::electrum) module for a usage example. 
-pub struct ElectrumBlockchain { - client: Client, - stop_gap: usize, -} - -impl std::convert::From for ElectrumBlockchain { - fn from(client: Client) -> Self { - ElectrumBlockchain { - client, - stop_gap: 20, - } - } -} - -impl Blockchain for ElectrumBlockchain { - fn get_capabilities(&self) -> HashSet { - vec![ - Capability::FullHistory, - Capability::GetAnyTx, - Capability::AccurateFees, - ] - .into_iter() - .collect() - } - - fn broadcast(&self, tx: &Transaction) -> Result<(), Error> { - Ok(self.client.transaction_broadcast(tx).map(|_| ())?) - } - - fn estimate_fee(&self, target: usize) -> Result { - Ok(FeeRate::from_btc_per_kvb( - self.client.estimate_fee(target)? as f32 - )) - } -} - -impl Deref for ElectrumBlockchain { - type Target = Client; - - fn deref(&self) -> &Self::Target { - &self.client - } -} - -impl StatelessBlockchain for ElectrumBlockchain {} - -impl GetHeight for ElectrumBlockchain { - fn get_height(&self) -> Result { - // TODO: unsubscribe when added to the client, or is there a better call to use here? - - Ok(self - .client - .block_headers_subscribe() - .map(|data| data.height as u32)?) - } -} - -impl GetTx for ElectrumBlockchain { - fn get_tx(&self, txid: &Txid) -> Result, Error> { - Ok(self.client.transaction_get(txid).map(Option::Some)?) - } -} - -impl GetBlockHash for ElectrumBlockchain { - fn get_block_hash(&self, height: u64) -> Result { - let block_header = self.client.block_header(height as usize)?; - Ok(block_header.block_hash()) - } -} - -impl WalletSync for ElectrumBlockchain { - fn wallet_setup( - &self, - database: &mut D, - _progress_update: Box, - ) -> Result<(), Error> { - let mut request = script_sync::start(database, self.stop_gap)?; - let mut block_times = HashMap::::new(); - let mut txid_to_height = HashMap::::new(); - let mut tx_cache = TxCache::new(database, &self.client); - - // Set chunk_size to the smallest value capable of finding a gap greater than stop_gap. 
- let chunk_size = self.stop_gap + 1; - - // The electrum server has been inconsistent somehow in its responses during sync. For - // example, we do a batch request of transactions and the response contains less - // tranascations than in the request. This should never happen but we don't want to panic. - let electrum_goof = || Error::Generic("electrum server misbehaving".to_string()); - - let batch_update = loop { - request = match request { - Request::Script(script_req) => { - let scripts = script_req.request().take(chunk_size); - let txids_per_script: Vec> = self - .client - .batch_script_get_history(scripts) - .map_err(Error::Electrum)? - .into_iter() - .map(|txs| { - txs.into_iter() - .map(|tx| { - let tx_height = match tx.height { - none if none <= 0 => None, - height => { - txid_to_height.insert(tx.tx_hash, height as u32); - Some(height as u32) - } - }; - (tx.tx_hash, tx_height) - }) - .collect() - }) - .collect(); - - script_req.satisfy(txids_per_script)? - } - - Request::Conftime(conftime_req) => { - // collect up to chunk_size heights to fetch from electrum - let needs_block_height = conftime_req - .request() - .filter_map(|txid| txid_to_height.get(txid).cloned()) - .filter(|height| block_times.get(height).is_none()) - .take(chunk_size) - .collect::>(); - - let new_block_headers = self - .client - .batch_block_header(needs_block_height.iter().cloned())?; - - for (height, header) in needs_block_height.into_iter().zip(new_block_headers) { - block_times.insert(height, header.time); - } - - let conftimes = conftime_req - .request() - .take(chunk_size) - .map(|txid| { - let confirmation_time = txid_to_height - .get(txid) - .map(|height| { - let timestamp = - *block_times.get(height).ok_or_else(electrum_goof)?; - Result::<_, Error>::Ok(BlockTime { - height: *height, - timestamp: timestamp.into(), - }) - }) - .transpose()?; - Ok(confirmation_time) - }) - .collect::>()?; - - conftime_req.satisfy(conftimes)? 
- } - Request::Tx(tx_req) => { - let needs_full = tx_req.request().take(chunk_size); - tx_cache.save_txs(needs_full.clone())?; - let full_transactions = needs_full - .map(|txid| tx_cache.get(*txid).ok_or_else(electrum_goof)) - .collect::, _>>()?; - let input_txs = full_transactions.iter().flat_map(|tx| { - tx.input - .iter() - .filter(|input| !input.previous_output.is_null()) - .map(|input| &input.previous_output.txid) - }); - tx_cache.save_txs(input_txs)?; - - let full_details = full_transactions - .into_iter() - .map(|tx| { - let mut input_index = 0usize; - let prev_outputs = tx - .input - .iter() - .map(|input| { - if input.previous_output.is_null() { - return Ok(None); - } - let prev_tx = tx_cache - .get(input.previous_output.txid) - .ok_or_else(electrum_goof)?; - let txout = prev_tx - .output - .get(input.previous_output.vout as usize) - .ok_or_else(electrum_goof)?; - input_index += 1; - Ok(Some(txout.clone())) - }) - .collect::, Error>>()?; - Ok((prev_outputs, tx)) - }) - .collect::, Error>>()?; - - tx_req.satisfy(full_details)? - } - Request::Finish(batch_update) => break batch_update, - } - }; - - database.commit_batch(batch_update)?; - Ok(()) - } -} - -struct TxCache<'a, 'b, D> { - db: &'a D, - client: &'b Client, - cache: HashMap, -} - -impl<'a, 'b, D: Database> TxCache<'a, 'b, D> { - fn new(db: &'a D, client: &'b Client) -> Self { - TxCache { - db, - client, - cache: HashMap::default(), - } - } - fn save_txs<'c>(&mut self, txids: impl Iterator) -> Result<(), Error> { - let mut need_fetch = vec![]; - for txid in txids { - if self.cache.get(txid).is_some() { - continue; - } else if let Some(transaction) = self.db.get_raw_tx(txid)? 
{ - self.cache.insert(*txid, transaction); - } else { - need_fetch.push(txid); - } - } - - if !need_fetch.is_empty() { - let txs = self - .client - .batch_transaction_get(need_fetch.clone()) - .map_err(Error::Electrum)?; - let mut txs: HashMap<_, _> = txs.into_iter().map(|tx| (tx.txid(), tx)).collect(); - for txid in need_fetch { - if let Some(tx) = txs.remove(txid) { - self.cache.insert(*txid, tx); - } - } - } - - Ok(()) - } - - fn get(&self, txid: Txid) -> Option { - self.cache.get(&txid).map(Clone::clone) - } -} - -/// Configuration for an [`ElectrumBlockchain`] -#[derive(Debug, serde::Deserialize, serde::Serialize, Clone, PartialEq, Eq)] -pub struct ElectrumBlockchainConfig { - /// URL of the Electrum server (such as ElectrumX, Esplora, BWT) may start with `ssl://` or `tcp://` and include a port - /// - /// eg. `ssl://electrum.blockstream.info:60002` - pub url: String, - /// URL of the socks5 proxy server or a Tor service - pub socks5: Option, - /// Request retry count - pub retry: u8, - /// Request timeout (seconds) - pub timeout: Option, - /// Stop searching addresses for transactions after finding an unused gap of this length - pub stop_gap: usize, - /// Validate the domain when using SSL - pub validate_domain: bool, -} - -impl ConfigurableBlockchain for ElectrumBlockchain { - type Config = ElectrumBlockchainConfig; - - fn from_config(config: &Self::Config) -> Result { - let socks5 = config.socks5.as_ref().map(Socks5Config::new); - let electrum_config = ConfigBuilder::new() - .retry(config.retry) - .timeout(config.timeout)? - .socks5(socks5)? 
- .validate_domain(config.validate_domain) - .build(); - - Ok(ElectrumBlockchain { - client: Client::from_config(config.url.as_str(), electrum_config)?, - stop_gap: config.stop_gap, - }) - } -} - -#[cfg(test)] -#[cfg(feature = "test-electrum")] -mod test { - use std::sync::Arc; - - use super::*; - use crate::database::MemoryDatabase; - use crate::testutils::blockchain_tests::TestClient; - use crate::testutils::configurable_blockchain_tests::ConfigurableBlockchainTester; - use crate::wallet::{AddressIndex, Wallet}; - - crate::bdk_blockchain_tests! { - fn test_instance(test_client: &TestClient) -> ElectrumBlockchain { - ElectrumBlockchain::from(Client::new(&test_client.electrsd.electrum_url).unwrap()) - } - } - - fn get_factory() -> (TestClient, Arc) { - let test_client = TestClient::default(); - - let factory = Arc::new(ElectrumBlockchain::from( - Client::new(&test_client.electrsd.electrum_url).unwrap(), - )); - - (test_client, factory) - } - - #[test] - fn test_electrum_blockchain_factory() { - let (_test_client, factory) = get_factory(); - - let a = factory.build("aaaaaa", None).unwrap(); - let b = factory.build("bbbbbb", None).unwrap(); - - assert_eq!( - a.client.block_headers_subscribe().unwrap().height, - b.client.block_headers_subscribe().unwrap().height - ); - } - - #[test] - fn test_electrum_blockchain_factory_sync_wallet() { - let (mut test_client, factory) = get_factory(); - - let db = MemoryDatabase::new(); - let wallet = Wallet::new( - "wpkh(L5EZftvrYaSudiozVRzTqLcHLNDoVn7H5HSfM9BAN6tMJX8oTWz6)", - None, - bitcoin::Network::Regtest, - db, - ) - .unwrap(); - - let address = wallet.get_address(AddressIndex::New).unwrap(); - - let tx = testutils! 
{ - @tx ( (@addr address.address) => 50_000 ) - }; - test_client.receive(tx); - - factory - .sync_wallet(&wallet, None, Default::default()) - .unwrap(); - - assert_eq!(wallet.get_balance().unwrap().untrusted_pending, 50_000); - } - - #[test] - fn test_electrum_with_variable_configs() { - struct ElectrumTester; - - impl ConfigurableBlockchainTester for ElectrumTester { - const BLOCKCHAIN_NAME: &'static str = "Electrum"; - - fn config_with_stop_gap( - &self, - test_client: &mut TestClient, - stop_gap: usize, - ) -> Option { - Some(ElectrumBlockchainConfig { - url: test_client.electrsd.electrum_url.clone(), - socks5: None, - retry: 0, - timeout: None, - stop_gap: stop_gap, - validate_domain: true, - }) - } - } - - ElectrumTester.run(); - } -} diff --git a/src/blockchain/esplora/async.rs b/src/blockchain/esplora/async.rs deleted file mode 100644 index 900d95376d..0000000000 --- a/src/blockchain/esplora/async.rs +++ /dev/null @@ -1,250 +0,0 @@ -// Bitcoin Dev Kit -// Written in 2020 by Alekos Filini -// -// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. -// You may not use this file except in accordance with one or both of these -// licenses. - -//! Esplora by way of `reqwest` HTTP client. - -use std::collections::{HashMap, HashSet}; -use std::ops::Deref; - -use bitcoin::{Transaction, Txid}; - -#[allow(unused_imports)] -use log::{debug, error, info, trace}; - -use esplora_client::{convert_fee_rate, AsyncClient, Builder, Tx}; -use futures::stream::{FuturesOrdered, TryStreamExt}; - -use crate::blockchain::*; -use crate::database::BatchDatabase; -use crate::error::Error; -use crate::FeeRate; - -/// Structure that implements the logic to sync with Esplora -/// -/// ## Example -/// See the [`blockchain::esplora`](crate::blockchain::esplora) module for a usage example. 
-#[derive(Debug)] -pub struct EsploraBlockchain { - url_client: AsyncClient, - stop_gap: usize, - concurrency: u8, -} - -impl std::convert::From for EsploraBlockchain { - fn from(url_client: AsyncClient) -> Self { - EsploraBlockchain { - url_client, - stop_gap: 20, - concurrency: super::DEFAULT_CONCURRENT_REQUESTS, - } - } -} - -impl EsploraBlockchain { - /// Create a new instance of the client from a base URL and `stop_gap`. - pub fn new(base_url: &str, stop_gap: usize) -> Self { - let url_client = Builder::new(base_url) - .build_async() - .expect("Should never fail with no proxy and timeout"); - - Self::from_client(url_client, stop_gap) - } - - /// Build a new instance given a client - pub fn from_client(url_client: AsyncClient, stop_gap: usize) -> Self { - EsploraBlockchain { - url_client, - stop_gap, - concurrency: super::DEFAULT_CONCURRENT_REQUESTS, - } - } - - /// Set the concurrency to use when doing batch queries against the Esplora instance. - pub fn with_concurrency(mut self, concurrency: u8) -> Self { - self.concurrency = concurrency; - self - } -} - -#[maybe_async] -impl Blockchain for EsploraBlockchain { - fn get_capabilities(&self) -> HashSet { - vec![ - Capability::FullHistory, - Capability::GetAnyTx, - Capability::AccurateFees, - ] - .into_iter() - .collect() - } - - fn broadcast(&self, tx: &Transaction) -> Result<(), Error> { - Ok(await_or_block!(self.url_client.broadcast(tx))?) - } - - fn estimate_fee(&self, target: usize) -> Result { - let estimates = await_or_block!(self.url_client.get_fee_estimates())?; - Ok(FeeRate::from_sat_per_vb(convert_fee_rate( - target, estimates, - )?)) - } -} - -impl Deref for EsploraBlockchain { - type Target = AsyncClient; - - fn deref(&self) -> &Self::Target { - &self.url_client - } -} - -impl StatelessBlockchain for EsploraBlockchain {} - -#[maybe_async] -impl GetHeight for EsploraBlockchain { - fn get_height(&self) -> Result { - Ok(await_or_block!(self.url_client.get_height())?) 
- } -} - -#[maybe_async] -impl GetTx for EsploraBlockchain { - fn get_tx(&self, txid: &Txid) -> Result, Error> { - Ok(await_or_block!(self.url_client.get_tx(txid))?) - } -} - -#[maybe_async] -impl GetBlockHash for EsploraBlockchain { - fn get_block_hash(&self, height: u64) -> Result { - Ok(await_or_block!(self - .url_client - .get_block_hash(height as u32))?) - } -} - -#[maybe_async] -impl WalletSync for EsploraBlockchain { - fn wallet_setup( - &self, - database: &mut D, - _progress_update: Box, - ) -> Result<(), Error> { - use crate::blockchain::script_sync::Request; - let mut request = script_sync::start(database, self.stop_gap)?; - let mut tx_index: HashMap = HashMap::new(); - - let batch_update = loop { - request = match request { - Request::Script(script_req) => { - let futures: FuturesOrdered<_> = script_req - .request() - .take(self.concurrency as usize) - .map(|script| async move { - let mut related_txs: Vec = - self.url_client.scripthash_txs(script, None).await?; - - let n_confirmed = - related_txs.iter().filter(|tx| tx.status.confirmed).count(); - // esplora pages on 25 confirmed transactions. If there's 25 or more we - // keep requesting to see if there's more. - if n_confirmed >= 25 { - loop { - let new_related_txs: Vec = self - .url_client - .scripthash_txs( - script, - Some(related_txs.last().unwrap().txid), - ) - .await?; - let n = new_related_txs.len(); - related_txs.extend(new_related_txs); - // we've reached the end - if n < 25 { - break; - } - } - } - Result::<_, Error>::Ok(related_txs) - }) - .collect(); - let txs_per_script: Vec> = await_or_block!(futures.try_collect())?; - let mut satisfaction = vec![]; - - for txs in txs_per_script { - satisfaction.push( - txs.iter() - .map(|tx| (tx.txid, tx.status.block_height)) - .collect(), - ); - for tx in txs { - tx_index.insert(tx.txid, tx); - } - } - - script_req.satisfy(satisfaction)? 
- } - Request::Conftime(conftime_req) => { - let conftimes = conftime_req - .request() - .map(|txid| { - tx_index - .get(txid) - .expect("must be in index") - .confirmation_time() - .map(Into::into) - }) - .collect(); - conftime_req.satisfy(conftimes)? - } - Request::Tx(tx_req) => { - let full_txs = tx_req - .request() - .map(|txid| { - let tx = tx_index.get(txid).expect("must be in index"); - Ok((tx.previous_outputs(), tx.to_tx())) - }) - .collect::>()?; - tx_req.satisfy(full_txs)? - } - Request::Finish(batch_update) => break batch_update, - } - }; - - database.commit_batch(batch_update)?; - Ok(()) - } -} - -impl ConfigurableBlockchain for EsploraBlockchain { - type Config = super::EsploraBlockchainConfig; - - fn from_config(config: &Self::Config) -> Result { - let mut builder = Builder::new(config.base_url.as_str()); - - if let Some(timeout) = config.timeout { - builder = builder.timeout(timeout); - } - - if let Some(proxy) = &config.proxy { - builder = builder.proxy(proxy); - } - - let mut blockchain = - EsploraBlockchain::from_client(builder.build_async()?, config.stop_gap); - - if let Some(concurrency) = config.concurrency { - blockchain = blockchain.with_concurrency(concurrency); - } - - Ok(blockchain) - } -} diff --git a/src/blockchain/esplora/blocking.rs b/src/blockchain/esplora/blocking.rs deleted file mode 100644 index 768573c3f5..0000000000 --- a/src/blockchain/esplora/blocking.rs +++ /dev/null @@ -1,238 +0,0 @@ -// Bitcoin Dev Kit -// Written in 2020 by Alekos Filini -// -// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. -// You may not use this file except in accordance with one or both of these -// licenses. - -//! Esplora by way of `ureq` HTTP client. 
- -use std::collections::{HashMap, HashSet}; - -#[allow(unused_imports)] -use log::{debug, error, info, trace}; - -use bitcoin::{Transaction, Txid}; - -use esplora_client::{convert_fee_rate, BlockingClient, Builder, Tx}; - -use crate::blockchain::*; -use crate::database::BatchDatabase; -use crate::error::Error; -use crate::FeeRate; - -/// Structure that implements the logic to sync with Esplora -/// -/// ## Example -/// See the [`blockchain::esplora`](crate::blockchain::esplora) module for a usage example. -#[derive(Debug)] -pub struct EsploraBlockchain { - url_client: BlockingClient, - stop_gap: usize, - concurrency: u8, -} - -impl EsploraBlockchain { - /// Create a new instance of the client from a base URL and the `stop_gap`. - pub fn new(base_url: &str, stop_gap: usize) -> Self { - let url_client = Builder::new(base_url) - .build_blocking() - .expect("Should never fail with no proxy and timeout"); - - Self::from_client(url_client, stop_gap) - } - - /// Build a new instance given a client - pub fn from_client(url_client: BlockingClient, stop_gap: usize) -> Self { - EsploraBlockchain { - url_client, - concurrency: super::DEFAULT_CONCURRENT_REQUESTS, - stop_gap, - } - } - - /// Set the number of parallel requests the client can make. 
- pub fn with_concurrency(mut self, concurrency: u8) -> Self { - self.concurrency = concurrency; - self - } -} - -impl Blockchain for EsploraBlockchain { - fn get_capabilities(&self) -> HashSet { - vec![ - Capability::FullHistory, - Capability::GetAnyTx, - Capability::AccurateFees, - ] - .into_iter() - .collect() - } - - fn broadcast(&self, tx: &Transaction) -> Result<(), Error> { - self.url_client.broadcast(tx)?; - Ok(()) - } - - fn estimate_fee(&self, target: usize) -> Result { - let estimates = self.url_client.get_fee_estimates()?; - Ok(FeeRate::from_sat_per_vb(convert_fee_rate( - target, estimates, - )?)) - } -} - -impl Deref for EsploraBlockchain { - type Target = BlockingClient; - - fn deref(&self) -> &Self::Target { - &self.url_client - } -} - -impl StatelessBlockchain for EsploraBlockchain {} - -impl GetHeight for EsploraBlockchain { - fn get_height(&self) -> Result { - Ok(self.url_client.get_height()?) - } -} - -impl GetTx for EsploraBlockchain { - fn get_tx(&self, txid: &Txid) -> Result, Error> { - Ok(self.url_client.get_tx(txid)?) - } -} - -impl GetBlockHash for EsploraBlockchain { - fn get_block_hash(&self, height: u64) -> Result { - Ok(self.url_client.get_block_hash(height as u32)?) - } -} - -impl WalletSync for EsploraBlockchain { - fn wallet_setup( - &self, - database: &mut D, - _progress_update: Box, - ) -> Result<(), Error> { - use crate::blockchain::script_sync::Request; - let mut request = script_sync::start(database, self.stop_gap)?; - let mut tx_index: HashMap = HashMap::new(); - let batch_update = loop { - request = match request { - Request::Script(script_req) => { - let scripts = script_req - .request() - .take(self.concurrency as usize) - .cloned(); - - let mut handles = vec![]; - for script in scripts { - let client = self.url_client.clone(); - // make each request in its own thread. 
- handles.push(std::thread::spawn(move || { - let mut related_txs: Vec = client.scripthash_txs(&script, None)?; - - let n_confirmed = - related_txs.iter().filter(|tx| tx.status.confirmed).count(); - // esplora pages on 25 confirmed transactions. If there's 25 or more we - // keep requesting to see if there's more. - if n_confirmed >= 25 { - loop { - let new_related_txs: Vec = client.scripthash_txs( - &script, - Some(related_txs.last().unwrap().txid), - )?; - let n = new_related_txs.len(); - related_txs.extend(new_related_txs); - // we've reached the end - if n < 25 { - break; - } - } - } - Result::<_, Error>::Ok(related_txs) - })); - } - - let txs_per_script: Vec> = handles - .into_iter() - .map(|handle| handle.join().unwrap()) - .collect::>()?; - let mut satisfaction = vec![]; - - for txs in txs_per_script { - satisfaction.push( - txs.iter() - .map(|tx| (tx.txid, tx.status.block_height)) - .collect(), - ); - for tx in txs { - tx_index.insert(tx.txid, tx); - } - } - - script_req.satisfy(satisfaction)? - } - Request::Conftime(conftime_req) => { - let conftimes = conftime_req - .request() - .map(|txid| { - tx_index - .get(txid) - .expect("must be in index") - .confirmation_time() - .map(Into::into) - }) - .collect(); - conftime_req.satisfy(conftimes)? - } - Request::Tx(tx_req) => { - let full_txs = tx_req - .request() - .map(|txid| { - let tx = tx_index.get(txid).expect("must be in index"); - Ok((tx.previous_outputs(), tx.to_tx())) - }) - .collect::>()?; - tx_req.satisfy(full_txs)? 
- } - Request::Finish(batch_update) => break batch_update, - } - }; - - database.commit_batch(batch_update)?; - - Ok(()) - } -} - -impl ConfigurableBlockchain for EsploraBlockchain { - type Config = super::EsploraBlockchainConfig; - - fn from_config(config: &Self::Config) -> Result { - let mut builder = Builder::new(config.base_url.as_str()); - - if let Some(timeout) = config.timeout { - builder = builder.timeout(timeout); - } - - if let Some(proxy) = &config.proxy { - builder = builder.proxy(proxy); - } - - let mut blockchain = - EsploraBlockchain::from_client(builder.build_blocking()?, config.stop_gap); - - if let Some(concurrency) = config.concurrency { - blockchain = blockchain.with_concurrency(concurrency); - } - - Ok(blockchain) - } -} diff --git a/src/blockchain/esplora/mod.rs b/src/blockchain/esplora/mod.rs deleted file mode 100644 index c4308406b0..0000000000 --- a/src/blockchain/esplora/mod.rs +++ /dev/null @@ -1,130 +0,0 @@ -//! Esplora -//! -//! This module defines a [`EsploraBlockchain`] struct that can query an Esplora -//! backend populate the wallet's [database](crate::database::Database) by: -//! -//! ## Example -//! -//! ```no_run -//! # use bdk::blockchain::esplora::EsploraBlockchain; -//! let blockchain = EsploraBlockchain::new("https://blockstream.info/testnet/api", 20); -//! # Ok::<(), bdk::Error>(()) -//! ``` -//! -//! Esplora blockchain can use either `ureq` or `reqwest` for the HTTP client -//! depending on your needs (blocking or async respectively). -//! -//! Please note, to configure the Esplora HTTP client correctly use one of: -//! Blocking: --features='use-esplora-blocking' -//! 
Async: --features='async-interface,use-esplora-async' --no-default-features - -pub use esplora_client::Error as EsploraError; - -#[cfg(feature = "use-esplora-async")] -mod r#async; - -#[cfg(feature = "use-esplora-async")] -pub use self::r#async::*; - -#[cfg(feature = "use-esplora-blocking")] -mod blocking; - -#[cfg(feature = "use-esplora-blocking")] -pub use self::blocking::*; - -/// Configuration for an [`EsploraBlockchain`] -#[derive(Debug, serde::Deserialize, serde::Serialize, Clone, PartialEq, Eq)] -pub struct EsploraBlockchainConfig { - /// Base URL of the esplora service - /// - /// eg. `https://blockstream.info/api/` - pub base_url: String, - /// Optional URL of the proxy to use to make requests to the Esplora server - /// - /// The string should be formatted as: `://:@host:`. - /// - /// Note that the format of this value and the supported protocols change slightly between the - /// sync version of esplora (using `ureq`) and the async version (using `reqwest`). For more - /// details check with the documentation of the two crates. Both of them are compiled with - /// the `socks` feature enabled. - /// - /// The proxy is ignored when targeting `wasm32`. - #[serde(skip_serializing_if = "Option::is_none")] - pub proxy: Option, - /// Number of parallel requests sent to the esplora service (default: 4) - #[serde(skip_serializing_if = "Option::is_none")] - pub concurrency: Option, - /// Stop searching addresses for transactions after finding an unused gap of this length. - pub stop_gap: usize, - /// Socket timeout. 
- #[serde(skip_serializing_if = "Option::is_none")] - pub timeout: Option, -} - -impl EsploraBlockchainConfig { - /// create a config with default values given the base url and stop gap - pub fn new(base_url: String, stop_gap: usize) -> Self { - Self { - base_url, - proxy: None, - timeout: None, - stop_gap, - concurrency: None, - } - } -} - -impl From for crate::BlockTime { - fn from(esplora_client::BlockTime { timestamp, height }: esplora_client::BlockTime) -> Self { - Self { timestamp, height } - } -} - -#[cfg(test)] -#[cfg(feature = "test-esplora")] -crate::bdk_blockchain_tests! { - fn test_instance(test_client: &TestClient) -> EsploraBlockchain { - EsploraBlockchain::new(&format!("http://{}",test_client.electrsd.esplora_url.as_ref().unwrap()), 20) - } -} - -const DEFAULT_CONCURRENT_REQUESTS: u8 = 4; - -#[cfg(test)] -mod test { - #[test] - #[cfg(feature = "test-esplora")] - fn test_esplora_with_variable_configs() { - use super::*; - - use crate::testutils::{ - blockchain_tests::TestClient, - configurable_blockchain_tests::ConfigurableBlockchainTester, - }; - - struct EsploraTester; - - impl ConfigurableBlockchainTester for EsploraTester { - const BLOCKCHAIN_NAME: &'static str = "Esplora"; - - fn config_with_stop_gap( - &self, - test_client: &mut TestClient, - stop_gap: usize, - ) -> Option { - Some(EsploraBlockchainConfig { - base_url: format!( - "http://{}", - test_client.electrsd.esplora_url.as_ref().unwrap() - ), - proxy: None, - concurrency: None, - stop_gap: stop_gap, - timeout: None, - }) - } - } - - EsploraTester.run(); - } -} diff --git a/src/blockchain/mod.rs b/src/blockchain/mod.rs deleted file mode 100644 index 2502f61b0c..0000000000 --- a/src/blockchain/mod.rs +++ /dev/null @@ -1,393 +0,0 @@ -// Bitcoin Dev Kit -// Written in 2020 by Alekos Filini -// -// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. 
-// You may not use this file except in accordance with one or both of these -// licenses. - -//! Blockchain backends -//! -//! This module provides the implementation of a few commonly-used backends like -//! [Electrum](crate::blockchain::electrum), [Esplora](crate::blockchain::esplora) and -//! [Compact Filters/Neutrino](crate::blockchain::compact_filters), along with a generalized trait -//! [`Blockchain`] that can be implemented to build customized backends. - -use std::collections::HashSet; -use std::ops::Deref; -use std::sync::mpsc::{channel, Receiver, Sender}; -use std::sync::Arc; - -use bitcoin::{BlockHash, Transaction, Txid}; - -use crate::database::BatchDatabase; -use crate::error::Error; -use crate::wallet::{wallet_name_from_descriptor, Wallet}; -use crate::{FeeRate, KeychainKind}; - -#[cfg(any( - feature = "electrum", - feature = "esplora", - feature = "compact_filters", - feature = "rpc" -))] -pub mod any; -mod script_sync; - -#[cfg(any( - feature = "electrum", - feature = "esplora", - feature = "compact_filters", - feature = "rpc" -))] -pub use any::{AnyBlockchain, AnyBlockchainConfig}; - -#[cfg(feature = "electrum")] -#[cfg_attr(docsrs, doc(cfg(feature = "electrum")))] -pub mod electrum; -#[cfg(feature = "electrum")] -pub use self::electrum::ElectrumBlockchain; -#[cfg(feature = "electrum")] -pub use self::electrum::ElectrumBlockchainConfig; - -#[cfg(feature = "rpc")] -#[cfg_attr(docsrs, doc(cfg(feature = "rpc")))] -pub mod rpc; -#[cfg(feature = "rpc")] -pub use self::rpc::RpcBlockchain; -#[cfg(feature = "rpc")] -pub use self::rpc::RpcConfig; - -#[cfg(feature = "esplora")] -#[cfg_attr(docsrs, doc(cfg(feature = "esplora")))] -pub mod esplora; -#[cfg(feature = "esplora")] -pub use self::esplora::EsploraBlockchain; - -#[cfg(feature = "compact_filters")] -#[cfg_attr(docsrs, doc(cfg(feature = "compact_filters")))] -pub mod compact_filters; - -#[cfg(feature = "compact_filters")] -pub use self::compact_filters::CompactFiltersBlockchain; - -/// Capabilities 
that can be supported by a [`Blockchain`] backend -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub enum Capability { - /// Can recover the full history of a wallet and not only the set of currently spendable UTXOs - FullHistory, - /// Can fetch any historical transaction given its txid - GetAnyTx, - /// Can compute accurate fees for the transactions found during sync - AccurateFees, -} - -/// Trait that defines the actions that must be supported by a blockchain backend -#[maybe_async] -pub trait Blockchain: WalletSync + GetHeight + GetTx + GetBlockHash { - /// Return the set of [`Capability`] supported by this backend - fn get_capabilities(&self) -> HashSet; - /// Broadcast a transaction - fn broadcast(&self, tx: &Transaction) -> Result<(), Error>; - /// Estimate the fee rate required to confirm a transaction in a given `target` of blocks - fn estimate_fee(&self, target: usize) -> Result; -} - -/// Trait for getting the current height of the blockchain. -#[maybe_async] -pub trait GetHeight { - /// Return the current height - fn get_height(&self) -> Result; -} - -#[maybe_async] -/// Trait for getting a transaction by txid -pub trait GetTx { - /// Fetch a transaction given its txid - fn get_tx(&self, txid: &Txid) -> Result, Error>; -} - -#[maybe_async] -/// Trait for getting block hash by block height -pub trait GetBlockHash { - /// fetch block hash given its height - fn get_block_hash(&self, height: u64) -> Result; -} - -/// Trait for blockchains that can sync by updating the database directly. -#[maybe_async] -pub trait WalletSync { - /// Setup the backend and populate the internal database for the first time - /// - /// This method is the equivalent of [`Self::wallet_sync`], but it's guaranteed to only be - /// called once, at the first [`Wallet::sync`](crate::wallet::Wallet::sync). 
- /// - /// The rationale behind the distinction between `sync` and `setup` is that some custom backends - /// might need to perform specific actions only the first time they are synced. - /// - /// For types that do not have that distinction, only this method can be implemented, since - /// [`WalletSync::wallet_sync`] defaults to calling this internally if not overridden. - /// Populate the internal database with transactions and UTXOs - fn wallet_setup( - &self, - database: &mut D, - progress_update: Box, - ) -> Result<(), Error>; - - /// If not overridden, it defaults to calling [`Self::wallet_setup`] internally. - /// - /// This method should implement the logic required to iterate over the list of the wallet's - /// script_pubkeys using [`Database::iter_script_pubkeys`] and look for relevant transactions - /// in the blockchain to populate the database with [`BatchOperations::set_tx`] and - /// [`BatchOperations::set_utxo`]. - /// - /// This method should also take care of removing UTXOs that are seen as spent in the - /// blockchain, using [`BatchOperations::del_utxo`]. - /// - /// The `progress_update` object can be used to give the caller updates about the progress by using - /// [`Progress::update`]. 
- /// - /// [`Database::iter_script_pubkeys`]: crate::database::Database::iter_script_pubkeys - /// [`BatchOperations::set_tx`]: crate::database::BatchOperations::set_tx - /// [`BatchOperations::set_utxo`]: crate::database::BatchOperations::set_utxo - /// [`BatchOperations::del_utxo`]: crate::database::BatchOperations::del_utxo - fn wallet_sync( - &self, - database: &mut D, - progress_update: Box, - ) -> Result<(), Error> { - maybe_await!(self.wallet_setup(database, progress_update)) - } -} - -/// Trait for [`Blockchain`] types that can be created given a configuration -pub trait ConfigurableBlockchain: Blockchain + Sized { - /// Type that contains the configuration - type Config: std::fmt::Debug; - - /// Create a new instance given a configuration - fn from_config(config: &Self::Config) -> Result; -} - -/// Trait for blockchains that don't contain any state -/// -/// Statless blockchains can be used to sync multiple wallets with different descriptors. -/// -/// [`BlockchainFactory`] is automatically implemented for `Arc` where `T` is a stateless -/// blockchain. -pub trait StatelessBlockchain: Blockchain {} - -/// Trait for a factory of blockchains that share the underlying connection or configuration -#[cfg_attr( - not(feature = "async-interface"), - doc = r##" -## Example - -This example shows how to sync multiple walles and return the sum of their balances - -```no_run -# use bdk::Error; -# use bdk::blockchain::*; -# use bdk::database::*; -# use bdk::wallet::*; -# use bdk::*; -fn sum_of_balances(blockchain_factory: B, wallets: &[Wallet]) -> Result { - Ok(wallets - .iter() - .map(|w| -> Result<_, Error> { - blockchain_factory.sync_wallet(&w, None, SyncOptions::default())?; - w.get_balance() - }) - .collect::, _>>()? 
- .into_iter() - .sum()) -} -``` -"## -)] -pub trait BlockchainFactory { - /// The type returned when building a blockchain from this factory - type Inner: Blockchain; - - /// Build a new blockchain for the given descriptor wallet_name - /// - /// If `override_skip_blocks` is `None`, the returned blockchain will inherit the number of blocks - /// from the factory. Since it's not possible to override the value to `None`, set it to - /// `Some(0)` to rescan from the genesis. - fn build( - &self, - wallet_name: &str, - override_skip_blocks: Option, - ) -> Result; - - /// Build a new blockchain for a given wallet - /// - /// Internally uses [`wallet_name_from_descriptor`] to derive the name, and then calls - /// [`BlockchainFactory::build`] to create the blockchain instance. - fn build_for_wallet( - &self, - wallet: &Wallet, - override_skip_blocks: Option, - ) -> Result { - let wallet_name = wallet_name_from_descriptor( - wallet.public_descriptor(KeychainKind::External)?.unwrap(), - wallet.public_descriptor(KeychainKind::Internal)?, - wallet.network(), - wallet.secp_ctx(), - )?; - self.build(&wallet_name, override_skip_blocks) - } - - /// Use [`BlockchainFactory::build_for_wallet`] to get a blockchain, then sync the wallet - /// - /// This can be used when a new blockchain would only be used to sync a wallet and then - /// immediately dropped. Keep in mind that specific blockchain factories may perform slow - /// operations to build a blockchain for a given wallet, so if a wallet needs to be synced - /// often it's recommended to use [`BlockchainFactory::build_for_wallet`] to reuse the same - /// blockchain multiple times. 
- #[cfg(not(any(target_arch = "wasm32", feature = "async-interface")))] - #[cfg_attr( - docsrs, - doc(cfg(not(any(target_arch = "wasm32", feature = "async-interface")))) - )] - fn sync_wallet( - &self, - wallet: &Wallet, - override_skip_blocks: Option, - sync_options: crate::wallet::SyncOptions, - ) -> Result<(), Error> { - let blockchain = self.build_for_wallet(wallet, override_skip_blocks)?; - wallet.sync(&blockchain, sync_options) - } -} - -impl BlockchainFactory for Arc { - type Inner = Self; - - fn build(&self, _wallet_name: &str, _override_skip_blocks: Option) -> Result { - Ok(Arc::clone(self)) - } -} - -/// Data sent with a progress update over a [`channel`] -pub type ProgressData = (f32, Option); - -/// Trait for types that can receive and process progress updates during [`WalletSync::wallet_sync`] and -/// [`WalletSync::wallet_setup`] -pub trait Progress: Send + 'static + core::fmt::Debug { - /// Send a new progress update - /// - /// The `progress` value should be in the range 0.0 - 100.0, and the `message` value is an - /// optional text message that can be displayed to the user. 
- fn update(&self, progress: f32, message: Option) -> Result<(), Error>; -} - -/// Shortcut to create a [`channel`] (pair of [`Sender`] and [`Receiver`]) that can transport [`ProgressData`] -pub fn progress() -> (Sender, Receiver) { - channel() -} - -impl Progress for Sender { - fn update(&self, progress: f32, message: Option) -> Result<(), Error> { - if !(0.0..=100.0).contains(&progress) { - return Err(Error::InvalidProgressValue(progress)); - } - - self.send((progress, message)) - .map_err(|_| Error::ProgressUpdateError) - } -} - -/// Type that implements [`Progress`] and drops every update received -#[derive(Clone, Copy, Default, Debug)] -pub struct NoopProgress; - -/// Create a new instance of [`NoopProgress`] -pub fn noop_progress() -> NoopProgress { - NoopProgress -} - -impl Progress for NoopProgress { - fn update(&self, _progress: f32, _message: Option) -> Result<(), Error> { - Ok(()) - } -} - -/// Type that implements [`Progress`] and logs at level `INFO` every update received -#[derive(Clone, Copy, Default, Debug)] -pub struct LogProgress; - -/// Create a new instance of [`LogProgress`] -pub fn log_progress() -> LogProgress { - LogProgress -} - -impl Progress for LogProgress { - fn update(&self, progress: f32, message: Option) -> Result<(), Error> { - log::info!( - "Sync {:.3}%: `{}`", - progress, - message.unwrap_or_else(|| "".into()) - ); - - Ok(()) - } -} - -#[maybe_async] -impl Blockchain for Arc { - fn get_capabilities(&self) -> HashSet { - maybe_await!(self.deref().get_capabilities()) - } - - fn broadcast(&self, tx: &Transaction) -> Result<(), Error> { - maybe_await!(self.deref().broadcast(tx)) - } - - fn estimate_fee(&self, target: usize) -> Result { - maybe_await!(self.deref().estimate_fee(target)) - } -} - -#[maybe_async] -impl GetTx for Arc { - fn get_tx(&self, txid: &Txid) -> Result, Error> { - maybe_await!(self.deref().get_tx(txid)) - } -} - -#[maybe_async] -impl GetHeight for Arc { - fn get_height(&self) -> Result { - 
maybe_await!(self.deref().get_height()) - } -} - -#[maybe_async] -impl GetBlockHash for Arc { - fn get_block_hash(&self, height: u64) -> Result { - maybe_await!(self.deref().get_block_hash(height)) - } -} - -#[maybe_async] -impl WalletSync for Arc { - fn wallet_setup( - &self, - database: &mut D, - progress_update: Box, - ) -> Result<(), Error> { - maybe_await!(self.deref().wallet_setup(database, progress_update)) - } - - fn wallet_sync( - &self, - database: &mut D, - progress_update: Box, - ) -> Result<(), Error> { - maybe_await!(self.deref().wallet_sync(database, progress_update)) - } -} diff --git a/src/blockchain/rpc.rs b/src/blockchain/rpc.rs deleted file mode 100644 index e5940dec53..0000000000 --- a/src/blockchain/rpc.rs +++ /dev/null @@ -1,1000 +0,0 @@ -// Bitcoin Dev Kit -// Written in 2021 by Riccardo Casatta -// -// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. -// You may not use this file except in accordance with one or both of these -// licenses. - -//! Rpc Blockchain -//! -//! Backend that gets blockchain data from Bitcoin Core RPC -//! -//! This is an **EXPERIMENTAL** feature, API and other major changes are expected. -//! -//! ## Example -//! -//! ```no_run -//! # use bdk::blockchain::{RpcConfig, RpcBlockchain, ConfigurableBlockchain, rpc::Auth}; -//! let config = RpcConfig { -//! url: "127.0.0.1:18332".to_string(), -//! auth: Auth::Cookie { -//! file: "/home/user/.bitcoin/.cookie".into(), -//! }, -//! network: bdk::bitcoin::Network::Testnet, -//! wallet_name: "wallet_name".to_string(), -//! sync_params: None, -//! }; -//! let blockchain = RpcBlockchain::from_config(&config); -//! 
``` - -use crate::bitcoin::hashes::hex::ToHex; -use crate::bitcoin::{Network, OutPoint, Transaction, TxOut, Txid}; -use crate::blockchain::*; -use crate::database::{BatchDatabase, BatchOperations, DatabaseUtils}; -use crate::descriptor::calc_checksum; -use crate::error::MissingCachedScripts; -use crate::{BlockTime, Error, FeeRate, KeychainKind, LocalUtxo, TransactionDetails}; -use bitcoin::Script; -use bitcoincore_rpc::json::{ - GetTransactionResultDetailCategory, ImportMultiOptions, ImportMultiRequest, - ImportMultiRequestScriptPubkey, ImportMultiRescanSince, ListTransactionResult, - ListUnspentResultEntry, ScanningDetails, -}; -use bitcoincore_rpc::jsonrpc::serde_json::{json, Value}; -use bitcoincore_rpc::Auth as RpcAuth; -use bitcoincore_rpc::{Client, RpcApi}; -use log::{debug, info}; -use serde::{Deserialize, Serialize}; -use std::collections::{HashMap, HashSet}; -use std::ops::Deref; -use std::path::PathBuf; -use std::thread; -use std::time::Duration; - -/// The main struct for RPC backend implementing the [crate::blockchain::Blockchain] trait -#[derive(Debug)] -pub struct RpcBlockchain { - /// Rpc client to the node, includes the wallet name - client: Client, - /// Whether the wallet is a "descriptor" or "legacy" wallet in Core - is_descriptors: bool, - /// Blockchain capabilities, cached here at startup - capabilities: HashSet, - /// Sync parameters. 
- sync_params: RpcSyncParams, -} - -impl Deref for RpcBlockchain { - type Target = Client; - - fn deref(&self) -> &Self::Target { - &self.client - } -} - -/// RpcBlockchain configuration options -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] -pub struct RpcConfig { - /// The bitcoin node url - pub url: String, - /// The bitcoin node authentication mechanism - pub auth: Auth, - /// The network we are using (it will be checked the bitcoin node network matches this) - pub network: Network, - /// The wallet name in the bitcoin node, consider using [crate::wallet::wallet_name_from_descriptor] for this - pub wallet_name: String, - /// Sync parameters - pub sync_params: Option, -} - -/// Sync parameters for Bitcoin Core RPC. -/// -/// In general, BDK tries to sync `scriptPubKey`s cached in [`crate::database::Database`] with -/// `scriptPubKey`s imported in the Bitcoin Core Wallet. These parameters are used for determining -/// how the `importdescriptors` RPC calls are to be made. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] -pub struct RpcSyncParams { - /// The minimum number of scripts to scan for on initial sync. - pub start_script_count: usize, - /// Time in unix seconds in which initial sync will start scanning from (0 to start from genesis). - pub start_time: u64, - /// Forces every sync to use `start_time` as import timestamp. - pub force_start_time: bool, - /// RPC poll rate (in seconds) to get state updates. 
- pub poll_rate_sec: u64, -} - -impl Default for RpcSyncParams { - fn default() -> Self { - Self { - start_script_count: 100, - start_time: 0, - force_start_time: false, - poll_rate_sec: 3, - } - } -} - -/// This struct is equivalent to [bitcoincore_rpc::Auth] but it implements [serde::Serialize] -/// To be removed once upstream equivalent is implementing Serialize (json serialization format -/// should be the same), see [rust-bitcoincore-rpc/pull/181](https://github.com/rust-bitcoin/rust-bitcoincore-rpc/pull/181) -#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)] -#[serde(rename_all = "snake_case")] -#[serde(untagged)] -pub enum Auth { - /// None authentication - None, - /// Authentication with username and password, usually [Auth::Cookie] should be preferred - UserPass { - /// Username - username: String, - /// Password - password: String, - }, - /// Authentication with a cookie file - Cookie { - /// Cookie file - file: PathBuf, - }, -} - -impl From for RpcAuth { - fn from(auth: Auth) -> Self { - match auth { - Auth::None => RpcAuth::None, - Auth::UserPass { username, password } => RpcAuth::UserPass(username, password), - Auth::Cookie { file } => RpcAuth::CookieFile(file), - } - } -} - -impl Blockchain for RpcBlockchain { - fn get_capabilities(&self) -> HashSet { - self.capabilities.clone() - } - - fn broadcast(&self, tx: &Transaction) -> Result<(), Error> { - Ok(self.client.send_raw_transaction(tx).map(|_| ())?) - } - - fn estimate_fee(&self, target: usize) -> Result { - let sat_per_kb = self - .client - .estimate_smart_fee(target as u16, None)? - .fee_rate - .ok_or(Error::FeeRateUnavailable)? 
- .to_sat() as f64; - - Ok(FeeRate::from_sat_per_vb((sat_per_kb / 1000f64) as f32)) - } -} - -impl GetTx for RpcBlockchain { - fn get_tx(&self, txid: &Txid) -> Result, Error> { - Ok(Some(self.client.get_raw_transaction(txid, None)?)) - } -} - -impl GetHeight for RpcBlockchain { - fn get_height(&self) -> Result { - Ok(self.client.get_blockchain_info().map(|i| i.blocks as u32)?) - } -} - -impl GetBlockHash for RpcBlockchain { - fn get_block_hash(&self, height: u64) -> Result { - Ok(self.client.get_block_hash(height)?) - } -} - -impl WalletSync for RpcBlockchain { - fn wallet_setup(&self, db: &mut D, prog: Box) -> Result<(), Error> - where - D: BatchDatabase, - { - let batch = DbState::new(db, &self.sync_params, &*prog)? - .sync_with_core(&self.client, self.is_descriptors)? - .as_db_batch()?; - - db.commit_batch(batch) - } -} - -impl ConfigurableBlockchain for RpcBlockchain { - type Config = RpcConfig; - - /// Returns RpcBlockchain backend creating an RPC client to a specific wallet named as the descriptor's checksum - /// if it's the first time it creates the wallet in the node and upon return is granted the wallet is loaded - fn from_config(config: &Self::Config) -> Result { - let wallet_url = format!("{}/wallet/{}", config.url, &config.wallet_name); - - let client = Client::new(wallet_url.as_str(), config.auth.clone().into())?; - let rpc_version = client.version()?; - - info!("connected to '{}' with auth: {:?}", wallet_url, config.auth); - - if client.list_wallets()?.contains(&config.wallet_name) { - info!("wallet already loaded: {}", config.wallet_name); - } else if list_wallet_dir(&client)?.contains(&config.wallet_name) { - client.load_wallet(&config.wallet_name)?; - info!("wallet loaded: {}", config.wallet_name); - } else { - // pre-0.21 use legacy wallets - if rpc_version < 210_000 { - client.create_wallet(&config.wallet_name, Some(true), None, None, None)?; - } else { - // TODO: move back to api call when 
https://github.com/rust-bitcoin/rust-bitcoincore-rpc/issues/225 is closed - let args = [ - Value::String(config.wallet_name.clone()), - Value::Bool(true), - Value::Bool(false), - Value::Null, - Value::Bool(false), - Value::Bool(true), - ]; - let _: Value = client.call("createwallet", &args)?; - } - - info!("wallet created: {}", config.wallet_name); - } - - let is_descriptors = is_wallet_descriptor(&client)?; - - let blockchain_info = client.get_blockchain_info()?; - let network = match blockchain_info.chain.as_str() { - "main" => Network::Bitcoin, - "test" => Network::Testnet, - "regtest" => Network::Regtest, - "signet" => Network::Signet, - _ => return Err(Error::Generic("Invalid network".to_string())), - }; - if network != config.network { - return Err(Error::InvalidNetwork { - requested: config.network, - found: network, - }); - } - - let mut capabilities: HashSet<_> = vec![Capability::FullHistory].into_iter().collect(); - if rpc_version >= 210_000 { - let info: HashMap = client.call("getindexinfo", &[]).unwrap(); - if info.contains_key("txindex") { - capabilities.insert(Capability::GetAnyTx); - capabilities.insert(Capability::AccurateFees); - } - } - - Ok(RpcBlockchain { - client, - capabilities, - is_descriptors, - sync_params: config.sync_params.clone().unwrap_or_default(), - }) - } -} - -/// return the wallets available in default wallet directory -//TODO use bitcoincore_rpc method when PR #179 lands -fn list_wallet_dir(client: &Client) -> Result, Error> { - #[derive(Deserialize)] - struct Name { - name: String, - } - #[derive(Deserialize)] - struct CallResult { - wallets: Vec, - } - - let result: CallResult = client.call("listwalletdir", &[])?; - Ok(result.wallets.into_iter().map(|n| n.name).collect()) -} - -/// Represents the state of the [`crate::database::Database`]. -struct DbState<'a, D> { - db: &'a D, - params: &'a RpcSyncParams, - prog: &'a dyn Progress, - - ext_spks: Vec