diff --git a/crates/bdk/src/wallet/mod.rs b/crates/bdk/src/wallet/mod.rs index cbe1a8241..24e37884e 100644 --- a/crates/bdk/src/wallet/mod.rs +++ b/crates/bdk/src/wallet/mod.rs @@ -2419,6 +2419,7 @@ impl Wallet { /// start a blockchain sync with a spk based blockchain client. pub fn start_sync_with_revealed_spks(&self) -> SyncRequest { SyncRequest::from_chain_tip(self.chain.tip()) + .cache_graph_txs(self.tx_graph()) .populate_with_revealed_spks(&self.indexed_graph.index, ..) } @@ -2432,6 +2433,7 @@ impl Wallet { /// in which the list of used scripts is not known. pub fn start_full_scan(&self) -> FullScanRequest { FullScanRequest::from_keychain_txout_index(self.chain.tip(), &self.indexed_graph.index) + .cache_graph_txs(self.tx_graph()) } } diff --git a/crates/chain/src/spk_client.rs b/crates/chain/src/spk_client.rs index 6eb32da71..a44a4c2fb 100644 --- a/crates/chain/src/spk_client.rs +++ b/crates/chain/src/spk_client.rs @@ -1,11 +1,18 @@ //! Helper types for spk-based blockchain clients. +use crate::{ + collections::{BTreeMap, HashMap}, + local_chain::CheckPoint, + ConfirmationTimeHeightAnchor, TxGraph, +}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use bitcoin::{OutPoint, Script, ScriptBuf, Transaction, Txid}; use core::{fmt::Debug, marker::PhantomData, ops::RangeBounds}; -use alloc::{boxed::Box, collections::BTreeMap, vec::Vec}; -use bitcoin::{OutPoint, Script, ScriptBuf, Txid}; - -use crate::{local_chain::CheckPoint, ConfirmationTimeHeightAnchor, TxGraph}; +/// A cache of [`Arc`]-wrapped full transactions, identified by their [`Txid`]s. +/// +/// This is used by the chain-source to avoid re-fetching full transactions. +pub type TxCache = HashMap>; /// Data required to perform a spk-based blockchain client sync. /// @@ -17,6 +24,8 @@ pub struct SyncRequest { /// /// [`LocalChain::tip`]: crate::local_chain::LocalChain::tip pub chain_tip: CheckPoint, + /// Cache of full transactions, so the chain-source can avoid re-fetching. + pub tx_cache: TxCache, /// Transactions that spend from or to these indexed script pubkeys. pub spks: Box + Send>, /// Transactions with these txids. @@ -30,12 +39,37 @@ impl SyncRequest { pub fn from_chain_tip(cp: CheckPoint) -> Self { Self { chain_tip: cp, + tx_cache: TxCache::new(), spks: Box::new(core::iter::empty()), txids: Box::new(core::iter::empty()), outpoints: Box::new(core::iter::empty()), } } + /// Add to the [`Transaction`] cache that allows the chain source to avoid re-fetching full + /// transactions. + /// + /// This consumes the [`SyncRequest`] and returns the updated one. + #[must_use] + pub fn cache_txs(mut self, full_txs: impl IntoIterator) -> Self + where + T: Into>, + { + self.tx_cache = full_txs + .into_iter() + .map(|(txid, tx)| (txid, tx.into())) + .collect(); + self + } + + /// Add all transactions from [`TxGraph`] into the [`Transaction`] cache. + /// + /// This consumes the [`SyncRequest`] and returns the updated one. + #[must_use] + pub fn cache_graph_txs(self, graph: &TxGraph) -> Self { + self.cache_txs(graph.full_txs().map(|tx_node| (tx_node.txid, tx_node.tx))) + } + /// Set the [`Script`]s that will be synced against. /// /// This consumes the [`SyncRequest`] and returns the updated one. @@ -194,6 +228,8 @@ pub struct FullScanRequest { /// /// [`LocalChain::tip`]: crate::local_chain::LocalChain::tip pub chain_tip: CheckPoint, + /// Cache of full transactions, so the chain-source can avoid re-fetching. + pub tx_cache: TxCache, /// Iterators of script pubkeys indexed by the keychain index. 
pub spks_by_keychain: BTreeMap + Send>>, } @@ -204,10 +240,35 @@ impl FullScanRequest { pub fn from_chain_tip(chain_tip: CheckPoint) -> Self { Self { chain_tip, + tx_cache: TxCache::new(), spks_by_keychain: BTreeMap::new(), } } + /// Add to the [`Transaction`] cache that allows the chain source to avoid re-fetching full + /// transactions. + /// + /// This consumes the [`SyncRequest`] and returns the updated one. + #[must_use] + pub fn cache_txs(mut self, full_txs: impl IntoIterator) -> Self + where + T: Into>, + { + self.tx_cache = full_txs + .into_iter() + .map(|(txid, tx)| (txid, tx.into())) + .collect(); + self + } + + /// Add all transactions from [`TxGraph`] into the [`Transaction`] cache. + /// + /// This consumes the [`SyncRequest`] and returns the updated one. + #[must_use] + pub fn cache_graph_txs(self, graph: &TxGraph) -> Self { + self.cache_txs(graph.full_txs().map(|tx_node| (tx_node.txid, tx_node.tx))) + } + /// Construct a new [`FullScanRequest`] from a given `chain_tip` and `index`. /// /// Unbounded script pubkey iterators for each keychain (`K`) are extracted using @@ -316,9 +377,9 @@ impl FullScanRequest { /// Data returned from a spk-based blockchain client full scan. /// /// See also [`FullScanRequest`]. -pub struct FullScanResult { +pub struct FullScanResult { /// The update to apply to the receiving [`LocalChain`](crate::local_chain::LocalChain). - pub graph_update: TxGraph, + pub graph_update: TxGraph, /// The update to apply to the receiving [`TxGraph`]. pub chain_update: CheckPoint, /// Last active indices for the corresponding keychains (`K`). diff --git a/crates/chain/src/tx_graph.rs b/crates/chain/src/tx_graph.rs index f6144e7a2..565ef5d0d 100644 --- a/crates/chain/src/tx_graph.rs +++ b/crates/chain/src/tx_graph.rs @@ -516,12 +516,12 @@ impl TxGraph { /// Inserts the given transaction into [`TxGraph`]. /// /// The [`ChangeSet`] returned will be empty if `tx` already exists. 
- pub fn insert_tx(&mut self, tx: Transaction) -> ChangeSet { + pub fn insert_tx>>(&mut self, tx: T) -> ChangeSet { + let tx = tx.into(); let mut update = Self::default(); - update.txs.insert( - tx.txid(), - (TxNodeInternal::Whole(tx.into()), BTreeSet::new(), 0), - ); + update + .txs + .insert(tx.txid(), (TxNodeInternal::Whole(tx), BTreeSet::new(), 0)); self.apply_update(update) } diff --git a/crates/electrum/Cargo.toml b/crates/electrum/Cargo.toml index 4205f2294..7bdfeb0e0 100644 --- a/crates/electrum/Cargo.toml +++ b/crates/electrum/Cargo.toml @@ -12,7 +12,7 @@ readme = "README.md" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -bdk_chain = { path = "../chain", version = "0.13.0", default-features = false } +bdk_chain = { path = "../chain", version = "0.13.0" } electrum-client = { version = "0.19" } #rustls = { version = "=0.21.1", optional = true, features = ["dangerous_configuration"] } diff --git a/crates/electrum/src/electrum_ext.rs b/crates/electrum/src/electrum_ext.rs index 5a6c5d116..d0e0941d5 100644 --- a/crates/electrum/src/electrum_ext.rs +++ b/crates/electrum/src/electrum_ext.rs @@ -1,159 +1,44 @@ use bdk_chain::{ bitcoin::{OutPoint, ScriptBuf, Transaction, Txid}, + collections::{BTreeMap, HashMap, HashSet}, local_chain::CheckPoint, - tx_graph::{self, TxGraph}, - Anchor, BlockId, ConfirmationHeightAnchor, ConfirmationTimeHeightAnchor, -}; -use electrum_client::{Client, ElectrumApi, Error, HeaderNotification}; -use std::{ - collections::{BTreeMap, BTreeSet, HashMap, HashSet}, - fmt::Debug, - str::FromStr, + spk_client::{FullScanRequest, FullScanResult, SyncRequest, SyncResult, TxCache}, + tx_graph::TxGraph, + BlockId, ConfirmationHeightAnchor, ConfirmationTimeHeightAnchor, }; +use core::str::FromStr; +use electrum_client::{ElectrumApi, Error, HeaderNotification}; +use std::sync::Arc; /// We include a chain suffix of a certain length for the purpose of robustness. const CHAIN_SUFFIX_LENGTH: u32 = 8; -/// Represents updates fetched from an Electrum server, but excludes full transactions. -/// -/// To provide a complete update to [`TxGraph`], you'll need to call [`Self::missing_full_txs`] to -/// determine the full transactions missing from [`TxGraph`]. Then call [`Self::into_tx_graph`] to -/// fetch the full transactions from Electrum and finalize the update. -#[derive(Debug, Default, Clone)] -pub struct RelevantTxids(HashMap>); - -impl RelevantTxids { - /// Determine the full transactions that are missing from `graph`. - /// - /// Refer to [`RelevantTxids`] for more details. - pub fn missing_full_txs(&self, graph: &TxGraph) -> Vec { - self.0 - .keys() - .filter(move |&&txid| graph.as_ref().get_tx(txid).is_none()) - .cloned() - .collect() - } - - /// Finalizes the [`TxGraph`] update by fetching `missing` txids from the `client`. - /// - /// Refer to [`RelevantTxids`] for more details. - pub fn into_tx_graph( - self, - client: &Client, - missing: Vec, - ) -> Result, Error> { - let new_txs = client.batch_transaction_get(&missing)?; - let mut graph = TxGraph::::new(new_txs); - for (txid, anchors) in self.0 { - for anchor in anchors { - let _ = graph.insert_anchor(txid, anchor); - } - } - Ok(graph) - } - - /// Finalizes the update by fetching `missing` txids from the `client`, where the - /// resulting [`TxGraph`] has anchors of type [`ConfirmationTimeHeightAnchor`]. - /// - /// Refer to [`RelevantTxids`] for more details. 
- /// - /// **Note:** The confirmation time might not be precisely correct if there has been a reorg. - // Electrum's API intends that we use the merkle proof API, we should change `bdk_electrum` to - // use it. - pub fn into_confirmation_time_tx_graph( - self, - client: &Client, - missing: Vec, - ) -> Result, Error> { - let graph = self.into_tx_graph(client, missing)?; - - let relevant_heights = { - let mut visited_heights = HashSet::new(); - graph - .all_anchors() - .iter() - .map(|(a, _)| a.confirmation_height_upper_bound()) - .filter(move |&h| visited_heights.insert(h)) - .collect::>() - }; - - let height_to_time = relevant_heights - .clone() - .into_iter() - .zip( - client - .batch_block_header(relevant_heights)? - .into_iter() - .map(|bh| bh.time as u64), - ) - .collect::>(); - - let graph_changeset = { - let old_changeset = TxGraph::default().apply_update(graph); - tx_graph::ChangeSet { - txs: old_changeset.txs, - txouts: old_changeset.txouts, - last_seen: old_changeset.last_seen, - anchors: old_changeset - .anchors - .into_iter() - .map(|(height_anchor, txid)| { - let confirmation_height = height_anchor.confirmation_height; - let confirmation_time = height_to_time[&confirmation_height]; - let time_anchor = ConfirmationTimeHeightAnchor { - anchor_block: height_anchor.anchor_block, - confirmation_height, - confirmation_time, - }; - (time_anchor, txid) - }) - .collect(), - } - }; - - let mut new_graph = TxGraph::default(); - new_graph.apply_changeset(graph_changeset); - Ok(new_graph) - } -} - -/// Combination of chain and transactions updates from electrum -/// -/// We have to update the chain and the txids at the same time since we anchor the txids to -/// the same chain tip that we check before and after we gather the txids. -#[derive(Debug)] -pub struct ElectrumUpdate { - /// Chain update - pub chain_update: CheckPoint, - /// Transaction updates from electrum - pub relevant_txids: RelevantTxids, -} - -/// Trait to extend [`Client`] functionality. +/// Trait to extend [`electrum_client::Client`] functionality. pub trait ElectrumExt { /// Full scan the keychain scripts specified with the blockchain (via an Electrum client) and /// returns updates for [`bdk_chain`] data structures. /// /// - `prev_tip`: the most recent blockchain tip present locally /// - `keychain_spks`: keychains that we want to scan transactions for + /// - `full_txs`: [`TxGraph`] that contains all previously known transactions /// /// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated /// transactions. `batch_size` specifies the max number of script pubkeys to request for in a /// single batch request. fn full_scan( &self, - prev_tip: CheckPoint, - keychain_spks: BTreeMap>, + request: FullScanRequest, stop_gap: usize, batch_size: usize, - ) -> Result<(ElectrumUpdate, BTreeMap), Error>; + ) -> Result, Error>; /// Sync a set of scripts with the blockchain (via an Electrum client) for the data specified /// and returns updates for [`bdk_chain`] data structures. 
/// /// - `prev_tip`: the most recent blockchain tip present locally /// - `misc_spks`: an iterator of scripts we want to sync transactions for - /// - `txids`: transactions for which we want updated [`Anchor`]s + /// - `full_txs`: [`TxGraph`] that contains all previously known transactions + /// - `txids`: transactions for which we want updated [`bdk_chain::Anchor`]s /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we /// want to include in the update /// @@ -166,31 +51,34 @@ pub trait ElectrumExt { /// [`full_scan`]: ElectrumExt::full_scan fn sync( &self, - prev_tip: CheckPoint, - misc_spks: impl IntoIterator, - txids: impl IntoIterator, - outpoints: impl IntoIterator, + request: SyncRequest, batch_size: usize, - ) -> Result; + ) -> Result, Error>; } -impl ElectrumExt for A { +impl ElectrumExt for E { fn full_scan( &self, - prev_tip: CheckPoint, - keychain_spks: BTreeMap>, + mut request: FullScanRequest, stop_gap: usize, batch_size: usize, - ) -> Result<(ElectrumUpdate, BTreeMap), Error> { - let mut request_spks = keychain_spks - .into_iter() - .map(|(k, s)| (k, s.into_iter())) - .collect::>(); + ) -> Result, Error> { + let mut request_spks = request.spks_by_keychain; + // .into_iter() + // .map(|(k, s)| (k, s.into_iter())) + // .collect::>(); + + // We keep track of already-scanned spks just in case a reorg happens and we need to do a + // rescan. We need to keep track of this as iterators in `keychain_spks` are "unbounded" so + // cannot be collected. In addition, we keep track of whether an spk has an active tx + // history for determining the `last_active_index`. + // * key: (keychain, spk_index) that identifies the spk. + // * val: (script_pubkey, has_tx_history). let mut scanned_spks = BTreeMap::<(K, u32), (ScriptBuf, bool)>::new(); - let (electrum_update, keychain_update) = loop { - let (tip, _) = construct_update_tip(self, prev_tip.clone())?; - let mut relevant_txids = RelevantTxids::default(); + let update = loop { + let (tip, _) = construct_update_tip(self, request.chain_tip.clone())?; + let mut graph_update = TxGraph::::default(); let cps = tip .iter() .take(10) @@ -202,7 +90,8 @@ impl ElectrumExt for A { scanned_spks.append(&mut populate_with_spks( self, &cps, - &mut relevant_txids, + &mut request.tx_cache, + &mut graph_update, &mut scanned_spks .iter() .map(|(i, (spk, _))| (i.clone(), spk.clone())), @@ -215,7 +104,8 @@ impl ElectrumExt for A { populate_with_spks( self, &cps, - &mut relevant_txids, + &mut request.tx_cache, + &mut graph_update, keychain_spks, stop_gap, batch_size, @@ -245,54 +135,118 @@ impl ElectrumExt for A { }) .collect::>(); - break ( - ElectrumUpdate { - chain_update, - relevant_txids, - }, - keychain_update, - ); + break FullScanResult { + graph_update, + chain_update, + last_active_indices: keychain_update, + }; }; - Ok((electrum_update, keychain_update)) + Ok(update) } fn sync( &self, - prev_tip: CheckPoint, - misc_spks: impl IntoIterator, - txids: impl IntoIterator, - outpoints: impl IntoIterator, + request: SyncRequest, batch_size: usize, - ) -> Result { - let spk_iter = misc_spks - .into_iter() - .enumerate() - .map(|(i, spk)| (i as u32, spk)); + ) -> Result, Error> { + let mut tx_cache = request.tx_cache.clone(); - let (mut electrum_update, _) = self.full_scan( - prev_tip.clone(), - [((), spk_iter)].into(), - usize::MAX, - batch_size, - )?; + let full_scan_req = FullScanRequest::from_chain_tip(request.chain_tip.clone()) + .cache_txs(request.tx_cache) + .set_spks_for_keychain((), 
request.spks.enumerate().map(|(i, spk)| (i as u32, spk))); + let full_scan_res = self.full_scan(full_scan_req, usize::MAX, batch_size)?; - let (tip, _) = construct_update_tip(self, prev_tip)?; + let (tip, _) = construct_update_tip(self, request.chain_tip)?; let cps = tip .iter() .take(10) .map(|cp| (cp.height(), cp)) .collect::>(); - populate_with_txids(self, &cps, &mut electrum_update.relevant_txids, txids)?; + let mut tx_graph = TxGraph::::default(); + populate_with_txids(self, &cps, &mut tx_cache, &mut tx_graph, request.txids)?; + populate_with_outpoints(self, &cps, &mut tx_cache, &mut tx_graph, request.outpoints)?; + + Ok(SyncResult { + chain_update: full_scan_res.chain_update, + graph_update: full_scan_res.graph_update, + }) + } +} + +/// Trait that extends [`SyncResult`] and [`FullScanResult`] functionality. +/// +/// Currently, only a single method exists that converts the update [`TxGraph`] to have an anchor +/// type of [`ConfirmationTimeHeightAnchor`]. +pub trait ElectrumResultExt { + /// New result type with a [`TxGraph`] that contains the [`ConfirmationTimeHeightAnchor`]. + type NewResult; + + /// Convert result type to have an update [`TxGraph`] that contains the [`ConfirmationTimeHeightAnchor`] . + fn try_into_confirmation_time_result( + self, + client: &impl ElectrumApi, + ) -> Result; +} + +impl ElectrumResultExt for FullScanResult { + type NewResult = FullScanResult; + + fn try_into_confirmation_time_result( + self, + client: &impl ElectrumApi, + ) -> Result { + Ok(FullScanResult:: { + graph_update: try_into_confirmation_time_result(self.graph_update, client)?, + chain_update: self.chain_update, + last_active_indices: self.last_active_indices, + }) + } +} - let _txs = - populate_with_outpoints(self, &cps, &mut electrum_update.relevant_txids, outpoints)?; +impl ElectrumResultExt for SyncResult { + type NewResult = SyncResult; - Ok(electrum_update) + fn try_into_confirmation_time_result( + self, + client: &impl ElectrumApi, + ) -> Result { + Ok(SyncResult { + graph_update: try_into_confirmation_time_result(self.graph_update, client)?, + chain_update: self.chain_update, + }) } } +fn try_into_confirmation_time_result( + graph_update: TxGraph, + client: &impl ElectrumApi, +) -> Result, Error> { + let relevant_heights = graph_update + .all_anchors() + .iter() + .map(|(a, _)| a.confirmation_height) + .collect::>(); + + let height_to_time = relevant_heights + .clone() + .into_iter() + .zip( + client + .batch_block_header(relevant_heights)? + .into_iter() + .map(|bh| bh.time as u64), + ) + .collect::>(); + + Ok(graph_update.map_anchors(|a| ConfirmationTimeHeightAnchor { + anchor_block: a.anchor_block, + confirmation_height: a.confirmation_height, + confirmation_time: height_to_time[&a.confirmation_height], + })) +} + /// Return a [`CheckPoint`] of the latest tip, that connects with `prev_tip`. 
fn construct_update_tip( client: &impl ElectrumApi, @@ -411,10 +365,10 @@ fn determine_tx_anchor( fn populate_with_outpoints( client: &impl ElectrumApi, cps: &BTreeMap, - relevant_txids: &mut RelevantTxids, + tx_cache: &mut TxCache, + tx_graph: &mut TxGraph, outpoints: impl IntoIterator, -) -> Result, Error> { - let mut full_txs = HashMap::new(); +) -> Result<(), Error> { for outpoint in outpoints { let txid = outpoint.txid; let tx = client.transaction_get(&txid)?; @@ -437,17 +391,19 @@ fn populate_with_outpoints( continue; } has_residing = true; - full_txs.insert(res.tx_hash, tx.clone()); + if tx_graph.get_tx(res.tx_hash).is_none() { + let _ = tx_graph.insert_tx(tx.clone()); + } } else { if has_spending { continue; } - let res_tx = match full_txs.get(&res.tx_hash) { + let res_tx = match tx_graph.get_tx(res.tx_hash) { Some(tx) => tx, None => { - let res_tx = client.transaction_get(&res.tx_hash)?; - full_txs.insert(res.tx_hash, res_tx); - full_txs.get(&res.tx_hash).expect("just inserted") + let res_tx = fetch_tx(client, tx_cache, res.tx_hash)?; + let _ = tx_graph.insert_tx(Arc::clone(&res_tx)); + res_tx } }; has_spending = res_tx @@ -459,24 +415,23 @@ fn populate_with_outpoints( } }; - let anchor = determine_tx_anchor(cps, res.height, res.tx_hash); - let tx_entry = relevant_txids.0.entry(res.tx_hash).or_default(); - if let Some(anchor) = anchor { - tx_entry.insert(anchor); + if let Some(anchor) = determine_tx_anchor(cps, res.height, res.tx_hash) { + let _ = tx_graph.insert_anchor(res.tx_hash, anchor); } } } - Ok(full_txs) + Ok(()) } fn populate_with_txids( client: &impl ElectrumApi, cps: &BTreeMap, - relevant_txids: &mut RelevantTxids, + tx_cache: &mut TxCache, + graph_update: &mut TxGraph, txids: impl IntoIterator, ) -> Result<(), Error> { for txid in txids { - let tx = match client.transaction_get(&txid) { + let tx = match fetch_tx(client, tx_cache, txid) { Ok(tx) => tx, Err(electrum_client::Error::Protocol(_)) => continue, Err(other_err) => return Err(other_err), @@ -497,18 +452,36 @@ fn populate_with_txids( None => continue, }; - let tx_entry = relevant_txids.0.entry(txid).or_default(); + if graph_update.get_tx(txid).is_none() { + // TODO: We need to be able to insert an `Arc` of a transaction. 
+ let _ = graph_update.insert_tx(tx); + } if let Some(anchor) = anchor { - tx_entry.insert(anchor); + let _ = graph_update.insert_anchor(txid, anchor); } } Ok(()) } +fn fetch_tx( + client: &C, + tx_cache: &mut TxCache, + txid: Txid, +) -> Result, Error> { + use bdk_chain::collections::hash_map::Entry; + Ok(match tx_cache.entry(txid) { + Entry::Occupied(entry) => entry.get().clone(), + Entry::Vacant(entry) => entry + .insert(Arc::new(client.transaction_get(&txid)?)) + .clone(), + }) +} + fn populate_with_spks( client: &impl ElectrumApi, cps: &BTreeMap, - relevant_txids: &mut RelevantTxids, + tx_cache: &mut TxCache, + graph_update: &mut TxGraph, spks: &mut impl Iterator, stop_gap: usize, batch_size: usize, @@ -540,10 +513,10 @@ fn populate_with_spks( unused_spk_count = 0; } - for tx in spk_history { - let tx_entry = relevant_txids.0.entry(tx.tx_hash).or_default(); - if let Some(anchor) = determine_tx_anchor(cps, tx.height, tx.tx_hash) { - tx_entry.insert(anchor); + for tx_res in spk_history { + let _ = graph_update.insert_tx(fetch_tx(client, tx_cache, tx_res.tx_hash)?); + if let Some(anchor) = determine_tx_anchor(cps, tx_res.height, tx_res.tx_hash) { + let _ = graph_update.insert_anchor(tx_res.tx_hash, anchor); } } } diff --git a/crates/electrum/src/lib.rs b/crates/electrum/src/lib.rs index 87c0e4618..f645653e4 100644 --- a/crates/electrum/src/lib.rs +++ b/crates/electrum/src/lib.rs @@ -7,19 +7,10 @@ //! keychain where the range of possibly used scripts is not known. In this case it is necessary to //! scan all keychain scripts until a number (the "stop gap") of unused scripts is discovered. For a //! sync or full scan the user receives relevant blockchain data and output updates for -//! [`bdk_chain`] including [`RelevantTxids`]. -//! -//! The [`RelevantTxids`] only includes `txid`s and not full transactions. The caller is responsible -//! for obtaining full transactions before applying new data to their [`bdk_chain`]. This can be -//! done with these steps: -//! -//! 1. Determine which full transactions are missing. Use [`RelevantTxids::missing_full_txs`]. -//! -//! 2. Obtaining the full transactions. To do this via electrum use [`ElectrumApi::batch_transaction_get`]. +//! [`bdk_chain`] including [`bdk_chain::TxGraph`], which includes `txid`s and full transactions. //! //! Refer to [`example_electrum`] for a complete example. //! -//! [`ElectrumApi::batch_transaction_get`]: electrum_client::ElectrumApi::batch_transaction_get //! [`example_electrum`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_electrum #![warn(missing_docs)] diff --git a/crates/electrum/tests/test_electrum.rs b/crates/electrum/tests/test_electrum.rs index 8f77209fc..b627066e7 100644 --- a/crates/electrum/tests/test_electrum.rs +++ b/crates/electrum/tests/test_electrum.rs @@ -3,9 +3,10 @@ use bdk_chain::{ bitcoin::{hashes::Hash, Address, Amount, ScriptBuf, WScriptHash}, keychain::Balance, local_chain::LocalChain, + spk_client::SyncRequest, ConfirmationTimeHeightAnchor, IndexedTxGraph, SpkTxOutIndex, }; -use bdk_electrum::{ElectrumExt, ElectrumUpdate}; +use bdk_electrum::{ElectrumExt, ElectrumResultExt}; use bdk_testenv::TestEnv; use electrsd::bitcoind::bitcoincore_rpc::RpcApi; @@ -62,17 +63,23 @@ fn scan_detects_confirmed_tx() -> Result<()> { // Sync up to tip. 
env.wait_until_electrum_sees_block()?; - let ElectrumUpdate { - chain_update, - relevant_txids, - } = client.sync(recv_chain.tip(), [spk_to_track], None, None, 5)?; + let update = client + .sync( + SyncRequest::from_chain_tip(recv_chain.tip()) + .chain_spks(core::iter::once(spk_to_track)), + // &mut Default::default(), + // recv_chain.tip(), + // [spk_to_track], + // None, + // None, + 5, + )? + .try_into_confirmation_time_result(&client)?; - let missing = relevant_txids.missing_full_txs(recv_graph.graph()); - let graph_update = relevant_txids.into_confirmation_time_tx_graph(&client, missing)?; let _ = recv_chain - .apply_update(chain_update) + .apply_update(update.chain_update) .map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?; - let _ = recv_graph.apply_update(graph_update); + let _ = recv_graph.apply_update(update.graph_update); // Check to see if tx is confirmed. assert_eq!( @@ -128,20 +135,20 @@ fn tx_can_become_unconfirmed_after_reorg() -> Result<()> { // Sync up to tip. env.wait_until_electrum_sees_block()?; - let ElectrumUpdate { - chain_update, - relevant_txids, - } = client.sync(recv_chain.tip(), [spk_to_track.clone()], None, None, 5)?; + let update = client + .sync( + SyncRequest::from_chain_tip(recv_chain.tip()).chain_spks([spk_to_track.clone()]), + 5, + )? + .try_into_confirmation_time_result(&client)?; - let missing = relevant_txids.missing_full_txs(recv_graph.graph()); - let graph_update = relevant_txids.into_confirmation_time_tx_graph(&client, missing)?; let _ = recv_chain - .apply_update(chain_update) + .apply_update(update.chain_update) .map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?; - let _ = recv_graph.apply_update(graph_update.clone()); + let _ = recv_graph.apply_update(update.graph_update.clone()); // Retain a snapshot of all anchors before reorg process. - let initial_anchors = graph_update.all_anchors(); + let initial_anchors = update.graph_update.all_anchors(); // Check if initial balance is correct. assert_eq!( @@ -158,22 +165,22 @@ fn tx_can_become_unconfirmed_after_reorg() -> Result<()> { env.reorg_empty_blocks(depth)?; env.wait_until_electrum_sees_block()?; - let ElectrumUpdate { - chain_update, - relevant_txids, - } = client.sync(recv_chain.tip(), [spk_to_track.clone()], None, None, 5)?; + let update = client + .sync( + SyncRequest::from_chain_tip(recv_chain.tip()).chain_spks([spk_to_track.clone()]), + 5, + )? + .try_into_confirmation_time_result(&client)?; - let missing = relevant_txids.missing_full_txs(recv_graph.graph()); - let graph_update = relevant_txids.into_confirmation_time_tx_graph(&client, missing)?; let _ = recv_chain - .apply_update(chain_update) + .apply_update(update.chain_update) .map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?; // Check to see if a new anchor is added during current reorg. 
- if !initial_anchors.is_superset(graph_update.all_anchors()) { + if !initial_anchors.is_superset(update.graph_update.all_anchors()) { println!("New anchor added at reorg depth {}", depth); } - let _ = recv_graph.apply_update(graph_update); + let _ = recv_graph.apply_update(update.graph_update); assert_eq!( get_balance(&recv_chain, &recv_graph)?, diff --git a/example-crates/example_electrum/src/main.rs b/example-crates/example_electrum/src/main.rs index e3b758e74..40d639c5d 100644 --- a/example-crates/example_electrum/src/main.rs +++ b/example-crates/example_electrum/src/main.rs @@ -1,19 +1,20 @@ use std::{ - collections::BTreeMap, io::{self, Write}, sync::Mutex, }; use bdk_chain::{ - bitcoin::{constants::genesis_block, Address, Network, OutPoint, Txid}, + bitcoin::{constants::genesis_block, Address, Network, Txid}, + collections::BTreeSet, indexed_tx_graph::{self, IndexedTxGraph}, keychain, local_chain::{self, LocalChain}, + spk_client::{FullScanRequest, SyncRequest}, Append, ConfirmationHeightAnchor, }; use bdk_electrum::{ electrum_client::{self, Client, ElectrumApi}, - ElectrumExt, ElectrumUpdate, + ElectrumExt, }; use example_cli::{ anyhow::{self, Context}, @@ -147,42 +148,47 @@ fn main() -> anyhow::Result<()> { let client = electrum_cmd.electrum_args().client(args.network)?; - let response = match electrum_cmd.clone() { + let (chain_update, mut graph_update, keychain_update) = match electrum_cmd.clone() { ElectrumCommands::Scan { stop_gap, scan_options, .. } => { - let (keychain_spks, tip) = { + let request = { let graph = &*graph.lock().unwrap(); let chain = &*chain.lock().unwrap(); - let keychain_spks = graph - .index - .all_unbounded_spk_iters() - .into_iter() - .map(|(keychain, iter)| { - let mut first = true; - let spk_iter = iter.inspect(move |(i, _)| { - if first { - eprint!("\nscanning {}: ", keychain); - first = false; + FullScanRequest::from_chain_tip(chain.tip()) + .cache_graph_txs(graph.graph()) + .set_spks_for_keychain( + Keychain::External, + graph.index.unbounded_spk_iter(&Keychain::External), + ) + .set_spks_for_keychain( + Keychain::Internal, + graph.index.unbounded_spk_iter(&Keychain::Internal), + ) + .inspect_spks_for_all_keychains({ + let mut once = BTreeSet::new(); + move |k, spk_i, _| { + if once.insert(k) { + eprint!("\nScanning {}: ", k); + } else { + eprint!("{} ", spk_i); } - - eprint!("{} ", i); let _ = io::stdout().flush(); - }); - (keychain, spk_iter) + } }) - .collect::>(); - - let tip = chain.tip(); - (keychain_spks, tip) }; - client - .full_scan(tip, keychain_spks, stop_gap, scan_options.batch_size) - .context("scanning the blockchain")? + let res = client + .full_scan::<_>(request, stop_gap, scan_options.batch_size) + .context("scanning the blockchain")?; + ( + res.chain_update, + res.graph_update, + Some(res.last_active_indices), + ) } ElectrumCommands::Sync { mut unused_spks, @@ -195,7 +201,6 @@ fn main() -> anyhow::Result<()> { // Get a short lock on the tracker to get the spks we're interested in let graph = graph.lock().unwrap(); let chain = chain.lock().unwrap(); - let chain_tip = chain.tip().block_id(); if !(all_spks || unused_spks || utxos || unconfirmed) { unused_spks = true; @@ -205,18 +210,20 @@ fn main() -> anyhow::Result<()> { unused_spks = false; } - let mut spks: Box> = - Box::new(core::iter::empty()); + let chain_tip = chain.tip(); + let mut request = + SyncRequest::from_chain_tip(chain_tip.clone()).cache_graph_txs(graph.graph()); + if all_spks { let all_spks = graph .index .revealed_spks(..) 
.map(|(k, i, spk)| (k.to_owned(), i, spk.to_owned())) .collect::>(); - spks = Box::new(spks.chain(all_spks.into_iter().map(|(k, i, spk)| { - eprintln!("scanning {}:{}", k, i); + request = request.chain_spks(all_spks.into_iter().map(|(k, spk_i, spk)| { + eprintln!("scanning {}: {}", k, spk_i); spk - }))); + })); } if unused_spks { let unused_spks = graph @@ -224,82 +231,61 @@ fn main() -> anyhow::Result<()> { .unused_spks() .map(|(k, i, spk)| (k, i, spk.to_owned())) .collect::>(); - spks = Box::new(spks.chain(unused_spks.into_iter().map(|(k, i, spk)| { - eprintln!( - "Checking if address {} {}:{} has been used", - Address::from_script(&spk, args.network).unwrap(), - k, - i, - ); - spk - }))); + request = + request.chain_spks(unused_spks.into_iter().map(move |(k, spk_i, spk)| { + eprintln!( + "Checking if address {} {}:{} has been used", + Address::from_script(&spk, args.network).unwrap(), + k, + spk_i, + ); + spk + })); } - let mut outpoints: Box> = Box::new(core::iter::empty()); - if utxos { let init_outpoints = graph.index.outpoints().iter().cloned(); let utxos = graph .graph() - .filter_chain_unspents(&*chain, chain_tip, init_outpoints) + .filter_chain_unspents(&*chain, chain_tip.block_id(), init_outpoints) .map(|(_, utxo)| utxo) .collect::>(); - - outpoints = Box::new( - utxos - .into_iter() - .inspect(|utxo| { - eprintln!( - "Checking if outpoint {} (value: {}) has been spent", - utxo.outpoint, utxo.txout.value - ); - }) - .map(|utxo| utxo.outpoint), - ); + request = request.chain_outpoints(utxos.into_iter().map(|utxo| { + eprintln!( + "Checking if outpoint {} (value: {}) has been spent", + utxo.outpoint, utxo.txout.value + ); + utxo.outpoint + })); }; - let mut txids: Box> = Box::new(core::iter::empty()); - if unconfirmed { let unconfirmed_txids = graph .graph() - .list_chain_txs(&*chain, chain_tip) + .list_chain_txs(&*chain, chain_tip.block_id()) .filter(|canonical_tx| !canonical_tx.chain_position.is_confirmed()) .map(|canonical_tx| canonical_tx.tx_node.txid) .collect::>(); - txids = Box::new(unconfirmed_txids.into_iter().inspect(|txid| { - eprintln!("Checking if {} is confirmed yet", txid); - })); + request = request.chain_txids( + unconfirmed_txids + .into_iter() + .inspect(|txid| eprintln!("Checking if {} is confirmed yet", txid)), + ); } - let tip = chain.tip(); + let res = client + .sync(request, scan_options.batch_size) + .context("scanning the blockchain")?; // drop lock on graph and chain drop((graph, chain)); - let electrum_update = client - .sync(tip, spks, txids, outpoints, scan_options.batch_size) - .context("scanning the blockchain")?; - (electrum_update, BTreeMap::new()) + (res.chain_update, res.graph_update, None) } }; - let ( - ElectrumUpdate { - chain_update, - relevant_txids, - }, - keychain_update, - ) = response; - - let missing_txids = { - let graph = &*graph.lock().unwrap(); - relevant_txids.missing_full_txs(graph.graph()) - }; - - let mut graph_update = relevant_txids.into_tx_graph(&client, missing_txids)?; let now = std::time::UNIX_EPOCH .elapsed() .expect("must get time") @@ -310,21 +296,17 @@ fn main() -> anyhow::Result<()> { let mut chain = chain.lock().unwrap(); let mut graph = graph.lock().unwrap(); - let chain = chain.apply_update(chain_update)?; - - let indexed_tx_graph = { - let mut changeset = - indexed_tx_graph::ChangeSet::::default(); - let (_, indexer) = graph.index.reveal_to_target_multi(&keychain_update); - changeset.append(indexed_tx_graph::ChangeSet { - indexer, - ..Default::default() - }); - changeset.append(graph.apply_update(graph_update)); 
- changeset - }; - - (chain, indexed_tx_graph) + let chain_changeset = chain.apply_update(chain_update)?; + + let mut indexed_tx_graph_changeset = + indexed_tx_graph::ChangeSet::::default(); + if let Some(keychain_update) = keychain_update { + let (_, keychain_changeset) = graph.index.reveal_to_target_multi(&keychain_update); + indexed_tx_graph_changeset.append(keychain_changeset.into()); + } + indexed_tx_graph_changeset.append(graph.apply_update(graph_update)); + + (chain_changeset, indexed_tx_graph_changeset) }; let mut db = db.lock().unwrap(); diff --git a/example-crates/wallet_electrum/src/main.rs b/example-crates/wallet_electrum/src/main.rs index 4af9e71de..b4330148f 100644 --- a/example-crates/wallet_electrum/src/main.rs +++ b/example-crates/wallet_electrum/src/main.rs @@ -3,16 +3,16 @@ const SEND_AMOUNT: u64 = 5000; const STOP_GAP: usize = 50; const BATCH_SIZE: usize = 5; -use std::io::Write; use std::str::FromStr; use bdk::bitcoin::Address; -use bdk::wallet::Update; +use bdk::chain::collections::HashSet; use bdk::{bitcoin::Network, Wallet}; use bdk::{KeychainKind, SignOptions}; +use bdk_electrum::ElectrumResultExt; use bdk_electrum::{ electrum_client::{self, ElectrumApi}, - ElectrumExt, ElectrumUpdate, + ElectrumExt, }; use bdk_file_store::Store; @@ -38,44 +38,24 @@ fn main() -> Result<(), anyhow::Error> { print!("Syncing..."); let client = electrum_client::Client::new("ssl://electrum.blockstream.info:60002")?; - let prev_tip = wallet.latest_checkpoint(); - let keychain_spks = wallet - .all_unbounded_spk_iters() - .into_iter() - .map(|(k, k_spks)| { - let mut once = Some(()); - let mut stdout = std::io::stdout(); - let k_spks = k_spks - .inspect(move |(spk_i, _)| match once.take() { - Some(_) => print!("\nScanning keychain [{:?}]", k), - None => print!(" {:<3}", spk_i), - }) - .inspect(move |_| stdout.flush().expect("must flush")); - (k, k_spks) - }) - .collect(); - - let ( - ElectrumUpdate { - chain_update, - relevant_txids, - }, - keychain_update, - ) = client.full_scan(prev_tip, keychain_spks, STOP_GAP, BATCH_SIZE)?; + let request = wallet.start_full_scan().inspect_spks_for_all_keychains({ + let mut once = HashSet::::new(); + move |k, spk_i, _| match once.insert(k) { + true => print!("\nScanning keychain [{:?}]", k), + false => print!(" {:<3}", spk_i), + } + }); - println!(); + let mut update = client + .full_scan(request, STOP_GAP, BATCH_SIZE)? + .try_into_confirmation_time_result(&client)?; - let missing = relevant_txids.missing_full_txs(wallet.as_ref()); - let mut graph_update = relevant_txids.into_confirmation_time_tx_graph(&client, missing)?; let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs(); - let _ = graph_update.update_last_seen_unconfirmed(now); - - let wallet_update = Update { - last_active_indices: keychain_update, - graph: graph_update, - chain: Some(chain_update), - }; - wallet.apply_update(wallet_update)?; + let _ = update.graph_update.update_last_seen_unconfirmed(now); + + println!(); + + wallet.apply_update(update)?; wallet.commit()?; let balance = wallet.get_balance();
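
Taken together, the request/result types above replace the old `ElectrumUpdate`/`RelevantTxids` round-trip: the caller builds a `SyncRequest` (or `FullScanRequest`), optionally seeds its `TxCache` from an existing `TxGraph` via `cache_graph_txs`, and gets back a result whose `graph_update` already contains full transactions. The following is a minimal chain-level sketch condensed from the electrum tests and the `example_electrum` changes above; the function name, the use of `anyhow`, and the anchor type parameter on `TxGraph` are assumptions, and persistence is omitted.

    use bdk_chain::{
        bitcoin::ScriptBuf, local_chain::LocalChain, spk_client::SyncRequest,
        ConfirmationTimeHeightAnchor, TxGraph,
    };
    use bdk_electrum::{electrum_client::Client, ElectrumExt, ElectrumResultExt};

    const BATCH_SIZE: usize = 5;

    /// Sync a handful of already-revealed spks, re-using transactions we already hold.
    fn sync_spks(
        client: &Client,
        chain: &mut LocalChain,
        graph: &mut TxGraph<ConfirmationTimeHeightAnchor>,
        spks: Vec<ScriptBuf>,
    ) -> anyhow::Result<()> {
        let request = SyncRequest::from_chain_tip(chain.tip())
            // Seed the `TxCache` so the chain source does not re-fetch full
            // transactions that are already in our graph.
            .cache_graph_txs(graph)
            .chain_spks(spks);

        // `sync` returns a `SyncResult` anchored by confirmation height; convert the
        // anchors so they also carry the confirmation time before applying.
        let update = client
            .sync(request, BATCH_SIZE)?
            .try_into_confirmation_time_result(client)?;

        let _ = chain
            .apply_update(update.chain_update)
            .map_err(|err| anyhow::anyhow!("local chain update error: {:?}", err))?;
        let _ = graph.apply_update(update.graph_update);
        Ok(())
    }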
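
The widened `TxGraph::insert_tx` bound is what lets the electrum code above insert cached `Arc<Transaction>`s (see `fetch_tx` and `populate_with_outpoints`) as well as freshly downloaded owned transactions. A small sketch of the two call shapes, with the anchor type assumed to be `ConfirmationTimeHeightAnchor` and the function name illustrative:

    use std::sync::Arc;

    use bdk_chain::{bitcoin::Transaction, ConfirmationTimeHeightAnchor, TxGraph};

    fn insert_owned_and_shared(
        graph: &mut TxGraph<ConfirmationTimeHeightAnchor>,
        owned_tx: Transaction,
        shared_tx: Arc<Transaction>,
    ) {
        // An owned `Transaction` is wrapped into an `Arc` by the `Into` bound.
        let _ = graph.insert_tx(owned_tx);
        // A cached transaction can be inserted by cloning the `Arc`, which does not
        // copy the underlying transaction data.
        let _ = graph.insert_tx(Arc::clone(&shared_tx));
    }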