From 770b6e6db72a95de71f2b4776a8add82b6cdc251 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Wed, 28 Sep 2022 18:38:08 +0000
Subject: [PATCH 1/3] Generalizes LocatorCache into a TxIndex

In order to fix #130 we need to implement our own txindex. It turns out this
is almost identical to our `LocatorCache`, so we can generalize it and use it
for both purposes.
---
 teos/src/lib.rs        |   1 +
 teos/src/main.rs       |   2 +-
 teos/src/test_utils.rs |   2 +-
 teos/src/tx_index.rs   | 401 +++++++++++++++++++++++++++++++++++++++++
 teos/src/watcher.rs    | 263 ++------------------------
 5 files changed, 415 insertions(+), 254 deletions(-)
 create mode 100644 teos/src/tx_index.rs

diff --git a/teos/src/lib.rs b/teos/src/lib.rs
index e1073ea2..2e87bcf1 100644
--- a/teos/src/lib.rs
+++ b/teos/src/lib.rs
@@ -22,6 +22,7 @@ pub mod responder;
 #[doc(hidden)]
 mod rpc_errors;
 pub mod tls;
+mod tx_index;
 pub mod watcher;
 
 #[cfg(test)]
diff --git a/teos/src/main.rs b/teos/src/main.rs
index f0bb9a37..de9f4128 100644
--- a/teos/src/main.rs
+++ b/teos/src/main.rs
@@ -213,7 +213,7 @@ async fn main() {
     let watcher = Arc::new(Watcher::new(
         gatekeeper.clone(),
         responder.clone(),
-        last_n_blocks,
+        &last_n_blocks,
         tip.height,
         tower_sk,
         TowerId(tower_pk),
diff --git a/teos/src/test_utils.rs b/teos/src/test_utils.rs
index 61db7c7d..8d78e4df 100644
--- a/teos/src/test_utils.rs
+++ b/teos/src/test_utils.rs
@@ -419,7 +419,7 @@ pub(crate) async fn create_watcher(
     Watcher::new(
         gatekeeper,
         responder,
-        last_n_blocks,
+        &last_n_blocks,
         chain.get_block_count(),
         tower_sk,
         tower_id,
diff --git a/teos/src/tx_index.rs b/teos/src/tx_index.rs
new file mode 100644
index 00000000..1ff7052b
--- /dev/null
+++ b/teos/src/tx_index.rs
@@ -0,0 +1,401 @@
+use std::collections::{HashMap, VecDeque};
+use std::fmt;
+use std::hash::Hash;
+
+use bitcoin::hash_types::BlockHash;
+use bitcoin::{BlockHeader, Transaction, Txid};
+use lightning_block_sync::poll::ValidatedBlock;
+
+use teos_common::appointment::Locator;
+
+/// A trait implemented by types that can be used as key in a [TxIndex].
+pub trait Key: Hash {
+    fn from_txid(txid: Txid) -> Self;
+}
+
+impl Key for Txid {
+    fn from_txid(txid: Txid) -> Self {
+        txid
+    }
+}
+
+impl Key for Locator {
+    fn from_txid(txid: Txid) -> Self {
+        Locator::new(txid)
+    }
+}
+
+pub enum Type {
+    Transaction,
+    BlockHash,
+}
+
+pub enum Data {
+    Transaction(Transaction),
+    BlockHash(BlockHash),
+}
+
+impl fmt::Display for Data {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Data::Transaction(_) => write!(f, "Transaction"),
+            Data::BlockHash(_) => write!(f, "BlockHash"),
+        }
+    }
+}
+
+/// A trait implemented by types that can be used as value in a [TxIndex].
+pub trait Value {
+    fn get_type() -> Type;
+    fn from_data(d: Data) -> Self;
+}
+
+impl Value for BlockHash {
+    fn get_type() -> Type {
+        Type::BlockHash
+    }
+
+    fn from_data(d: Data) -> Self {
+        match d {
+            Data::BlockHash(b) => b,
+            other => panic!("Cannot build a BlockHash from {}", other),
+        }
+    }
+}
+
+impl Value for Transaction {
+    fn get_type() -> Type {
+        Type::Transaction
+    }
+
+    fn from_data(d: Data) -> Self {
+        match d {
+            Data::Transaction(t) => t,
+            other => panic!("Cannot build a Transaction from {}", other),
+        }
+    }
+}
+
+/// Data structure used to index locators computed from parsed blocks.
+///
+/// Holds up to `size` blocks with their corresponding computed [Locator]s.
+#[derive(Debug)]
+pub struct TxIndex<K, V> {
+    /// A [K]:[V] map.
+    index: HashMap<K, V>,
+    /// Vector of block hashes covered by the index.
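+    /// Ordered by height: the oldest block sits at the front of the queue and the current tip at the back.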
+    blocks: VecDeque<BlockHash>,
+    /// Map of [BlockHash]:[Vec<K>]. Used to remove data from the index.
+    tx_in_block: HashMap<BlockHash, Vec<K>>,
+    /// The height of the last block included in the index.
+    tip: u32,
+    /// Maximum size of the index.
+    size: usize,
+}
+
+impl<K, V> TxIndex<K, V>
+where
+    K: Key + std::cmp::Eq + Copy,
+    V: Value + Clone,
+    Self: Sized,
+{
+    pub fn new(last_n_blocks: &[ValidatedBlock], height: u32) -> Self {
+        let size = last_n_blocks.len();
+        let mut tx_index = Self {
+            index: HashMap::new(),
+            blocks: VecDeque::with_capacity(size),
+            tx_in_block: HashMap::new(),
+            tip: height,
+            size,
+        };
+
+        for block in last_n_blocks.iter().rev() {
+            if let Some(prev_block_hash) = tx_index.blocks.back() {
+                if block.header.prev_blockhash != *prev_block_hash {
+                    panic!("last_n_blocks contains unchained blocks");
+                }
+            };
+
+            let map = block
+                .txdata
+                .iter()
+                .map(|tx| {
+                    (
+                        K::from_txid(tx.txid()),
+                        match V::get_type() {
+                            Type::Transaction => V::from_data(Data::Transaction(tx.clone())),
+                            Type::BlockHash => {
+                                V::from_data(Data::BlockHash(block.header.block_hash()))
+                            }
+                        },
+                    )
+                })
+                .collect();
+
+            tx_index.update(block.header, &map);
+        }
+
+        tx_index
+    }
+
+    /// Gets an item from the index if present. [None] otherwise.
+    pub fn get<'a>(&'a self, k: &'a K) -> Option<&V> {
+        self.index.get(k)
+    }
+
+    /// Checks whether the index contains a certain key.
+    pub fn contains_key(&self, k: &K) -> bool {
+        self.index.contains_key(k)
+    }
+
+    /// Checks if the index is full.
+    pub fn is_full(&self) -> bool {
+        self.blocks.len() > self.size
+    }
+
+    /// Gets the height of a given block based on its position in the block queue.
+    pub fn get_height(&self, block_hash: &BlockHash) -> Option<usize> {
+        let pos = self.blocks.iter().position(|x| x == block_hash)?;
+        Some(self.tip as usize + pos + 1 - self.blocks.len())
+    }
+
+    /// Updates the index by adding data from a new block. Removes the oldest block if the index is full afterwards.
+    pub fn update(&mut self, block_header: BlockHeader, data: &HashMap<K, V>) {
+        self.blocks.push_back(block_header.block_hash());
+
+        let ks = data
+            .iter()
+            .map(|(k, v)| {
+                self.index.insert(*k, v.clone());
+                *k
+            })
+            .collect();
+
+        self.tx_in_block.insert(block_header.block_hash(), ks);
+
+        if self.is_full() {
+            // Avoid logging during bootstrap
+            log::info!("New block added to index: {}", block_header.block_hash());
+            self.tip += 1;
+            self.remove_oldest_block();
+        }
+    }
+
+    /// Fixes the index by removing disconnected data.
+    pub fn remove_disconnected_block(&mut self, block_hash: &BlockHash) {
+        if let Some(ks) = self.tx_in_block.remove(block_hash) {
+            self.index.retain(|k, _| !ks.contains(k));
+
+            // Blocks should be disconnected from last backwards. Log if that's not the case so we can revisit this and fix it.
+            if let Some(ref h) = self.blocks.pop_back() {
+                if h != block_hash {
+                    log::error!("Disconnected block does not match the most recent block stored in the TxIndex ({} != {})", block_hash, h);
+                }
+            }
+        } else {
+            log::warn!("The index is already empty");
+        }
+    }
+
+    /// Removes the oldest block from the index.
+    /// This removes data from `self.blocks`, `self.tx_in_block` and `self.index`.
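+    /// Panics if the index is empty.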
+ pub fn remove_oldest_block(&mut self) { + let h = self.blocks.pop_front().unwrap(); + let ks = self.tx_in_block.remove(&h).unwrap(); + self.index.retain(|k, _| !ks.contains(k)); + + log::info!("Oldest block removed from index: {}", h); + } +} + +impl fmt::Display for TxIndex { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "index: {:?}\n\nblocks: {:?}\n\ntx_in_block: {:?}\n\nsize: {}", + self.index, self.blocks, self.tx_in_block, self.size + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::ops::Deref; + + use crate::test_utils::{get_last_n_blocks, Blockchain}; + + use bitcoin::Block; + + impl TxIndex + where + K: Key + std::cmp::Eq + Copy, + V: Value + Clone, + Self: Sized, + { + pub fn index_mut(&mut self) -> &mut HashMap { + &mut self.index + } + + pub fn blocks(&self) -> &VecDeque { + &self.blocks + } + } + + #[tokio::test] + async fn test_new() { + let height = 10; + let mut chain = Blockchain::default().with_height(height as usize); + let last_six_blocks = get_last_n_blocks(&mut chain, 6).await; + let blocks: Vec = last_six_blocks + .iter() + .map(|block| block.deref().clone()) + .collect(); + + let cache: TxIndex = TxIndex::new(&last_six_blocks, height); + assert_eq!(blocks.len(), cache.size); + for block in blocks.iter() { + assert!(cache.blocks().contains(&block.block_hash())); + + let mut locators = Vec::new(); + for tx in block.txdata.iter() { + let locator = Locator::new(tx.txid()); + assert!(cache.contains_key(&locator)); + locators.push(locator); + } + + assert_eq!(cache.tx_in_block[&block.block_hash()], locators); + } + } + + #[tokio::test] + async fn test_get_height() { + let cache_size = 10; + let height = 50; + let mut chain = Blockchain::default().with_height_and_txs(height, 42); + let last_n_blocks = get_last_n_blocks(&mut chain, cache_size).await; + + // last_n_blocks is ordered from latest to earliest + let first_block = last_n_blocks.get(cache_size - 1).unwrap(); + let last_block = last_n_blocks.get(0).unwrap(); + let mid = last_n_blocks.get(cache_size / 2).unwrap(); + + let cache: TxIndex = TxIndex::new(&last_n_blocks, height as u32); + + assert_eq!( + cache.get_height(&first_block.block_hash()).unwrap(), + height - cache_size + 1 + ); + assert_eq!(cache.get_height(&last_block.block_hash()).unwrap(), height); + assert_eq!( + cache.get_height(&mid.block_hash()).unwrap(), + height - cache_size / 2 + ); + } + + #[tokio::test] + async fn test_get_height_not_found() { + let cache_size = 10; + let height = 50; + let mut chain = Blockchain::default().with_height_and_txs(height, 42); + let cache: TxIndex = TxIndex::new( + &get_last_n_blocks(&mut chain, cache_size).await, + height as u32, + ); + + let fake_hash = BlockHash::default(); + assert!(matches!(cache.get_height(&fake_hash), None)); + } + + #[tokio::test] + async fn test_update() { + let height = 10; + let mut chain = Blockchain::default().with_height(height as usize); + let mut last_n_blocks = get_last_n_blocks(&mut chain, 7).await; + + // Store the last block to use it for an update and the first to check eviction + // Notice that the list of blocks is ordered from last to first. 
+ let last_block = last_n_blocks.remove(0); + let first_block = last_n_blocks.last().unwrap().deref().clone(); + + // Init the cache with the 6 block before the last + let mut cache = TxIndex::new(&last_n_blocks, height); + + // Update the cache with the last block + let locator_tx_map = last_block + .txdata + .iter() + .map(|tx| (Locator::new(tx.txid()), tx.clone())) + .collect(); + + cache.update(last_block.deref().header, &locator_tx_map); + + // Check that the new data is in the cache + assert!(cache.blocks().contains(&last_block.block_hash())); + for (locator, _) in locator_tx_map.iter() { + assert!(cache.contains_key(locator)); + } + assert_eq!( + cache.tx_in_block[&last_block.block_hash()], + locator_tx_map.keys().cloned().collect::>() + ); + + // Check that the data from the first block has been evicted + assert!(!cache.blocks().contains(&first_block.block_hash())); + for tx in first_block.txdata.iter() { + assert!(!cache.contains_key(&Locator::new(tx.txid()))); + } + assert!(!cache.tx_in_block.contains_key(&first_block.block_hash())); + } + + #[tokio::test] + async fn test_remove_disconnected_block() { + let cache_size = 6; + let height = cache_size * 2; + let mut chain = Blockchain::default().with_height_and_txs(height, 42); + let mut cache: TxIndex = TxIndex::new( + &get_last_n_blocks(&mut chain, cache_size).await, + height as u32, + ); + + // TxIndex::fix removes the last connected block and removes all the associated data + for i in 0..cache_size { + let header = chain + .at_height(chain.get_block_count() as usize - i) + .deref() + .header; + let locators = cache.tx_in_block.get(&header.block_hash()).unwrap().clone(); + + // Make sure there's data regarding the target block in the cache before fixing it + assert_eq!(cache.blocks().len(), cache.size - i); + assert!(cache.blocks().contains(&header.block_hash())); + assert!(!locators.is_empty()); + for locator in locators.iter() { + assert!(cache.contains_key(locator)); + } + + cache.remove_disconnected_block(&header.block_hash()); + + // Check that the block data is not in the cache anymore + assert_eq!(cache.blocks().len(), cache.size - i - 1); + assert!(!cache.blocks().contains(&header.block_hash())); + assert!(cache.tx_in_block.get(&header.block_hash()).is_none()); + for locator in locators.iter() { + assert!(!cache.contains_key(locator)); + } + } + + // At this point the cache should be empty, fixing it further shouldn't do anything + for i in cache_size..cache_size * 2 { + assert!(cache.index.is_empty()); + assert!(cache.blocks().is_empty()); + assert!(cache.tx_in_block.is_empty()); + + let header = chain + .at_height(chain.get_block_count() as usize - i) + .deref() + .header; + cache.remove_disconnected_block(&header.block_hash()); + } + } +} diff --git a/teos/src/watcher.rs b/teos/src/watcher.rs index b1a1cbb4..d08050d9 100644 --- a/teos/src/watcher.rs +++ b/teos/src/watcher.rs @@ -4,12 +4,10 @@ use log; use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; -use std::fmt; use std::iter::FromIterator; use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::{Arc, Mutex}; -use bitcoin::hash_types::BlockHash; use bitcoin::secp256k1::SecretKey; use bitcoin::{BlockHeader, Transaction}; use lightning::chain; @@ -24,133 +22,7 @@ use crate::dbm::DBM; use crate::extended_appointment::{AppointmentSummary, ExtendedAppointment, UUID}; use crate::gatekeeper::{Gatekeeper, MaxSlotsReached, UserInfo}; use crate::responder::{ConfirmationStatus, Responder, TransactionTracker}; - -/// Data structure used to 
cache locators computed from parsed blocks. -/// -/// Holds up to `size` blocks with their corresponding computed [Locator]s. -#[derive(Debug)] -struct LocatorCache { - /// A [Locator]:[Transaction] map. - cache: HashMap, - /// Vector of block hashes corresponding to the cached blocks. - blocks: Vec, - /// Map of [BlockHash]:[Vec]. Used to remove data from the cache. - tx_in_block: HashMap>, - /// Maximum size of the cache. - size: usize, -} - -impl fmt::Display for LocatorCache { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "cache: {:?}\n\nblocks: {:?}\n\ntx_in_block: {:?}\n\nsize: {}", - self.cache, self.blocks, self.tx_in_block, self.size - ) - } -} - -impl LocatorCache { - /// Creates a new [LocatorCache] instance. - /// The cache is initialized using the provided vector of blocks. - /// The size of the cache is defined as the size of `last_n_blocks`. - /// - /// # Panics - /// - /// Panics if any of the blocks in `last_n_blocks` is unchained. That is, if the given blocks - /// are not linked in strict descending order. - fn new(last_n_blocks: Vec) -> LocatorCache { - let size = last_n_blocks.len(); - let mut cache = LocatorCache { - cache: HashMap::new(), - blocks: Vec::with_capacity(size), - tx_in_block: HashMap::new(), - size, - }; - - for block in last_n_blocks.into_iter().rev() { - if let Some(prev_block_hash) = cache.blocks.last() { - if block.header.prev_blockhash != *prev_block_hash { - panic!("last_n_blocks contains unchained blocks"); - } - }; - - let locator_tx_map = block - .txdata - .iter() - .map(|tx| (Locator::new(tx.txid()), tx.clone())) - .collect(); - - cache.update(block.header, &locator_tx_map); - } - - cache - } - - /// Gets a transaction from the cache if present. [None] otherwise. - fn get_tx(&self, locator: Locator) -> Option<&Transaction> { - self.cache.get(&locator) - } - - /// Checks if the cache if full. - fn is_full(&self) -> bool { - self.blocks.len() > self.size - } - - /// Updates the cache by adding data from a new block. Removes the oldest block if the cache is full afterwards. - fn update( - &mut self, - block_header: BlockHeader, - locator_tx_map: &HashMap, - ) { - self.blocks.push(block_header.block_hash()); - - let locators = locator_tx_map - .iter() - .map(|(l, tx)| { - self.cache.insert(*l, tx.clone()); - *l - }) - .collect(); - - self.tx_in_block.insert(block_header.block_hash(), locators); - - if self.is_full() { - // Avoid logging during bootstrap - log::info!("New block added to cache: {}", block_header.block_hash()); - self.remove_oldest_block(); - } - } - - /// Fixes the [LocatorCache] removing disconnected data. - fn fix(&mut self, header: &BlockHeader) { - if let Some(locators) = self.tx_in_block.remove(&header.block_hash()) { - for locator in locators.iter() { - self.cache.remove(locator); - } - - // Blocks should be disconnected from last backwards. Log if that's not the case so we can revisit this and fix it. - if let Some(h) = self.blocks.pop() { - if h != header.block_hash() { - log::error!("Disconnected block does not match the oldest block stored in the LocatorCache ({} != {})", header.block_hash(), h); - } - } - } else { - log::warn!("The cache is already empty"); - } - } - - /// Removes the oldest block from the cache. - /// This removes data from `self.blocks`, `self.tx_in_block` and `self.cache`. 
- fn remove_oldest_block(&mut self) { - let oldest_hash = self.blocks.remove(0); - for locator in self.tx_in_block.remove(&oldest_hash).unwrap() { - self.cache.remove(&locator); - } - - log::info!("Oldest block removed from cache: {}", oldest_hash); - } -} +use crate::tx_index::TxIndex; /// Structure holding data regarding a breach. /// @@ -241,7 +113,7 @@ pub struct Watcher { /// A map between [Locator]s (user identifiers for [Appointment]s) and [UUID]s (tower identifiers). locator_uuid_map: Mutex>>, /// A cache of the [Locator]s computed for the transactions in the last few blocks. - locator_cache: Mutex, + locator_cache: Mutex>, /// A [Responder] instance. Data will be passed to it once triggered (if valid). responder: Arc, /// A [Gatekeeper] instance. Data regarding users is requested to it. @@ -261,7 +133,7 @@ impl Watcher { pub fn new( gatekeeper: Arc, responder: Arc, - last_n_blocks: Vec, + last_n_blocks: &[ValidatedBlock], last_known_block_height: u32, signing_key: SecretKey, tower_id: TowerId, @@ -282,7 +154,7 @@ impl Watcher { Watcher { appointments: Mutex::new(appointments), locator_uuid_map: Mutex::new(locator_uuid_map), - locator_cache: Mutex::new(LocatorCache::new(last_n_blocks)), + locator_cache: Mutex::new(TxIndex::new(last_n_blocks, last_known_block_height)), responder, gatekeeper, last_known_block_height: AtomicU32::new(last_known_block_height), @@ -362,7 +234,7 @@ impl Watcher { .locator_cache .lock() .unwrap() - .get_tx(extended_appointment.locator()) + .get(&extended_appointment.locator()) { // Appointments that were triggered in blocks held in the cache Some(dispute_tx) => { @@ -879,7 +751,10 @@ impl chain::Listen for Watcher { /// Fixes the [LocatorCache] by removing the disconnected data and updates the last_known_block_height. fn block_disconnected(&self, header: &BlockHeader, height: u32) { log::warn!("Block disconnected: {}", header.block_hash()); - self.locator_cache.lock().unwrap().fix(header); + self.locator_cache + .lock() + .unwrap() + .remove_disconnected_block(&header.block_hash()); self.last_known_block_height .store(height - 1, Ordering::Release); } @@ -982,122 +857,6 @@ mod tests { assert_eq!(TowerId(recovered_pk), tower_id); } - #[tokio::test] - async fn test_cache_new() { - let mut chain = Blockchain::default().with_height(10); - let last_six_blocks = get_last_n_blocks(&mut chain, 6).await; - let blocks: Vec = last_six_blocks - .iter() - .map(|block| block.deref().clone()) - .collect(); - - let cache = LocatorCache::new(last_six_blocks); - assert_eq!(blocks.len(), cache.size); - for block in blocks.iter() { - assert!(cache.blocks.contains(&block.block_hash())); - - let mut locators = Vec::new(); - for tx in block.txdata.iter() { - let locator = Locator::new(tx.txid()); - assert!(cache.cache.contains_key(&locator)); - locators.push(locator); - } - - assert_eq!(cache.tx_in_block[&block.block_hash()], locators); - } - } - - #[tokio::test] - async fn test_cache_update() { - let mut chain = Blockchain::default().with_height(10); - let mut last_n_blocks = get_last_n_blocks(&mut chain, 7).await; - - // Safe the last block to use it for an update and the first to check eviction - // Notice that the list of blocks is ordered from last to first. 
- let last_block = last_n_blocks.remove(0); - let first_block = last_n_blocks.last().unwrap().deref().clone(); - - // Init the cache with the 6 block before the last - let mut cache = LocatorCache::new(last_n_blocks); - - // Update the cache with the last block - let locator_tx_map = last_block - .txdata - .iter() - .map(|tx| (Locator::new(tx.txid()), tx.clone())) - .collect(); - - cache.update(last_block.deref().header, &locator_tx_map); - - // Check that the new data is in the cache - assert!(cache.blocks.contains(&last_block.block_hash())); - for (locator, _) in locator_tx_map.iter() { - assert!(cache.cache.contains_key(locator)); - } - assert_eq!( - cache.tx_in_block[&last_block.block_hash()], - locator_tx_map.keys().cloned().collect::>() - ); - - // Check that the data from the first block has been evicted - assert!(!cache.blocks.contains(&first_block.block_hash())); - for tx in first_block.txdata.iter() { - assert!(!cache.cache.contains_key(&Locator::new(tx.txid()))); - } - assert!(!cache.tx_in_block.contains_key(&first_block.block_hash())); - } - - #[tokio::test] - async fn test_cache_fix() { - let cache_size = 6; - let mut chain = Blockchain::default().with_height_and_txs(cache_size * 2, 42); - - let last_n_blocks = get_last_n_blocks(&mut chain, cache_size).await; - - // Init the cache with the 6 block before the last - let mut cache = LocatorCache::new(last_n_blocks); - - // LocatorCache::fix removes the last connected block and removes all the associated data - for i in 0..cache_size { - let header = chain - .at_height(chain.get_block_count() as usize - i) - .deref() - .header; - let locators = cache.tx_in_block.get(&header.block_hash()).unwrap().clone(); - - // Make sure there's data regarding the target block in the cache before fixing it - assert_eq!(cache.blocks.len(), cache.size - i); - assert!(cache.blocks.contains(&header.block_hash())); - assert!(!locators.is_empty()); - for locator in locators.iter() { - assert!(cache.cache.contains_key(locator)); - } - - cache.fix(&header); - - // Check that the block data is not in the cache anymore - assert_eq!(cache.blocks.len(), cache.size - i - 1); - assert!(!cache.blocks.contains(&header.block_hash())); - assert!(cache.tx_in_block.get(&header.block_hash()).is_none()); - for locator in locators.iter() { - assert!(!cache.cache.contains_key(locator)); - } - } - - // At this point the cache should be empty, fixing it further shouldn't do anything - for i in cache_size..cache_size * 2 { - assert!(cache.cache.is_empty()); - assert!(cache.blocks.is_empty()); - assert!(cache.tx_in_block.is_empty()); - - let header = chain - .at_height(chain.get_block_count() as usize - i) - .deref() - .header; - cache.fix(&header); - } - } - #[tokio::test] async fn test_new() { // A fresh watcher has no associated data @@ -2122,7 +1881,7 @@ mod tests { .locator_cache .lock() .unwrap() - .blocks + .blocks() .contains(&last_block_header.block_hash())); watcher.block_disconnected(&last_block_header, start_height); @@ -2135,7 +1894,7 @@ mod tests { .locator_cache .lock() .unwrap() - .blocks + .blocks() .contains(&last_block_header.block_hash())); } } From 1788b4d7226e6f9d00dab987cac707bd0596aaf5 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 6 Oct 2022 13:37:40 +0200 Subject: [PATCH 2/3] Adds TxIndex to Responder --- teos/src/carrier.rs | 196 +++++++++++++--------------- teos/src/main.rs | 31 ++++- teos/src/responder.rs | 284 +++++++++++++++++++++++++++++------------ teos/src/test_utils.rs | 129 +++++++------------ teos/src/watcher.rs | 14 +- 
5 files changed, 376 insertions(+), 278 deletions(-) diff --git a/teos/src/carrier.rs b/teos/src/carrier.rs index 5ead0d89..6b576e14 100644 --- a/teos/src/carrier.rs +++ b/teos/src/carrier.rs @@ -6,7 +6,7 @@ use std::sync::{Arc, Condvar, Mutex}; use crate::responder::ConfirmationStatus; use crate::{errors, rpc_errors}; -use bitcoin::{BlockHash, Transaction, Txid}; +use bitcoin::{Transaction, Txid}; use bitcoincore_rpc::{ jsonrpc::error::Error::Rpc as RpcError, jsonrpc::error::Error::Transport as TransportError, Client as BitcoindClient, Error::JsonRpc as JsonRpcError, RpcApi, @@ -22,7 +22,7 @@ pub struct Carrier { /// A map of receipts already issued by the [Carrier]. /// Used to prevent potentially re-sending the same transaction over and over. issued_receipts: HashMap, - /// The last known block header. + /// The last known block height. block_height: u32, } @@ -41,6 +41,11 @@ impl Carrier { } } + /// The last known block height. + pub(crate) fn block_height(&self) -> u32 { + self.block_height + } + /// Clears the receipts cached by the [Carrier]. Should be called periodically to prevent it from /// growing unbounded. pub(crate) fn clear_receipts(&mut self) { @@ -100,11 +105,14 @@ impl Carrier { } rpc_errors::RPC_VERIFY_ALREADY_IN_CHAIN => { log::info!( - "Transaction is already in the blockchain: {}. Getting confirmation count", + "Transaction was confirmed long ago, not keeping track of it: {}", tx.txid() ); - ConfirmationStatus::ConfirmedIn(self.get_tx_height(&tx.txid()).unwrap()) + // Given we are not using txindex, if a transaction bounces we cannot get its confirmation count. However, [send_transaction] is guarded by + // checking whether the transaction id can be found in the [Responder]'s [TxIndex], meaning that if the transaction bounces it was confirmed long + // ago (> IRREVOCABLY_RESOLVED), so we don't need to worry about it. + ConfirmationStatus::IrrevocablyResolved } rpc_errors::RPC_DESERIALIZATION_ERROR => { // Adding this here just for completeness. We should never end up here. The Carrier only sends txs handed by the Responder, @@ -139,77 +147,44 @@ impl Carrier { receipt } - /// Gets the block height at where a given [Transaction] was confirmed at (if any). - fn get_tx_height(&self, txid: &Txid) -> Option { - if let Some(block_hash) = self.get_block_hash_for_tx(txid) { - self.get_block_height(&block_hash) - } else { - None - } - } - - /// Queries the height of a given [Block](bitcoin::Block). Returns it if the block can be found. Returns [None] otherwise. - fn get_block_height(&self, block_hash: &BlockHash) -> Option { - self.hang_until_bitcoind_reachable(); - - match self.bitcoin_cli.get_block_header_info(block_hash) { - Ok(header_data) => Some(header_data.height as u32), - Err(JsonRpcError(RpcError(rpcerr))) => match rpcerr.code { - rpc_errors::RPC_INVALID_ADDRESS_OR_KEY => { - log::info!("Block not found: {}", block_hash); - None - } - e => { - log::error!("Unexpected error code when calling getblockheader: {}", e); - None - } - }, - Err(JsonRpcError(TransportError(_))) => { - // Connection refused, bitcoind is down. - log::error!("Connection lost with bitcoind, retrying request when possible"); - self.flag_bitcoind_unreachable(); - self.get_block_height(block_hash) - } - // TODO: This may need finer catching. - Err(e) => { - log::error!("Unexpected JSONRPCError when calling getblockheader: {}", e); - None - } - } - } - - /// Gets the block hash where a given [Transaction] was confirmed at (if any). 
- pub(crate) fn get_block_hash_for_tx(&self, txid: &Txid) -> Option { + /// Checks whether a given transaction can be found in the mempool. + /// + /// This uses `getrawtransaction` under the hood and, therefore, its behavior depends on whether `txindex` is enabled in bitcoind. + /// If `txindex` is disabled (default), it will only pull data from the mempool. Otherwise, it will also pull data from the transaction + /// index. Hence, we need to check whether the returned struct has any of the block related datum set (such as `blockhash`). + pub(crate) fn in_mempool(&self, txid: &Txid) -> bool { self.hang_until_bitcoind_reachable(); match self.bitcoin_cli.get_raw_transaction_info(txid, None) { - Ok(tx_data) => tx_data.blockhash, + Ok(tx) => tx.blockhash.is_none(), Err(JsonRpcError(RpcError(rpcerr))) => match rpcerr.code { rpc_errors::RPC_INVALID_ADDRESS_OR_KEY => { - log::info!("Transaction not found in mempool nor blockchain: {}", txid); - None + log::info!("Transaction not found in mempool: {}", txid); + false } e => { + // DISCUSS: This could result in a silent error with unknown consequences log::error!( "Unexpected error code when calling getrawtransaction: {}", e ); - None + false } }, Err(JsonRpcError(TransportError(_))) => { // Connection refused, bitcoind is down. log::error!("Connection lost with bitcoind, retrying request when possible"); self.flag_bitcoind_unreachable(); - self.get_block_hash_for_tx(txid) + self.in_mempool(txid) } // TODO: This may need finer catching. Err(e) => { + // DISCUSS: This could result in a silent error with unknown consequences log::error!( "Unexpected JSONRPCError when calling getrawtransaction: {}", e ); - None + false } } } @@ -221,7 +196,7 @@ mod tests { use std::thread; use crate::test_utils::{get_random_tx, start_server, BitcoindMock, MockOptions, START_HEIGHT}; - use teos_common::test_utils::TX_HEX; + use teos_common::test_utils::{TXID_HEX, TX_HEX}; use bitcoin::consensus; use bitcoin::hashes::hex::FromHex; @@ -241,11 +216,10 @@ mod tests { #[test] fn test_clear_receipts() { - let bitcoind_mock = BitcoindMock::new(MockOptions::empty()); + let bitcoind_mock = BitcoindMock::new(MockOptions::default()); let bitcoind_reachable = Arc::new((Mutex::new(true), Condvar::new())); let bitcoin_cli = Arc::new(BitcoindClient::new(bitcoind_mock.url(), Auth::None).unwrap()); let start_height = START_HEIGHT as u32; - start_server(bitcoind_mock.server); let mut carrier = Carrier::new(bitcoin_cli, bitcoind_reachable, start_height); @@ -265,7 +239,25 @@ mod tests { #[test] fn test_send_transaction_ok() { - let bitcoind_mock = BitcoindMock::new(MockOptions::empty()); + let bitcoind_mock = BitcoindMock::new(MockOptions::default()); + let bitcoind_reachable = Arc::new((Mutex::new(true), Condvar::new())); + let bitcoin_cli = Arc::new(BitcoindClient::new(bitcoind_mock.url(), Auth::None).unwrap()); + let start_height = START_HEIGHT as u32; + start_server(bitcoind_mock.server); + + let mut carrier = Carrier::new(bitcoin_cli, bitcoind_reachable, start_height); + let tx = consensus::deserialize(&Vec::from_hex(TX_HEX).unwrap()).unwrap(); + let r = carrier.send_transaction(&tx); + + assert_eq!(r, ConfirmationStatus::InMempoolSince(start_height)); + + // Check the receipt is on the cache + assert_eq!(carrier.issued_receipts.get(&tx.txid()).unwrap(), &r); + } + + #[test] + fn test_send_transaction_ok_already_in_mempool() { + let bitcoind_mock = BitcoindMock::new(MockOptions::in_mempool()); let bitcoind_reachable = Arc::new((Mutex::new(true), Condvar::new())); let bitcoin_cli = 
Arc::new(BitcoindClient::new(bitcoind_mock.url(), Auth::None).unwrap()); let start_height = START_HEIGHT as u32; @@ -328,20 +320,19 @@ mod tests { #[test] fn test_send_transaction_verify_already_in_chain() { - let bitcoind_mock = BitcoindMock::new(MockOptions::new( + let bitcoind_mock = BitcoindMock::new(MockOptions::with_error( rpc_errors::RPC_VERIFY_ALREADY_IN_CHAIN as i64, - BlockHash::default(), - START_HEIGHT, )); let bitcoind_reachable = Arc::new((Mutex::new(true), Condvar::new())); let bitcoin_cli = Arc::new(BitcoindClient::new(bitcoind_mock.url(), Auth::None).unwrap()); let start_height = START_HEIGHT as u32; start_server(bitcoind_mock.server); + let mut carrier = Carrier::new(bitcoin_cli, bitcoind_reachable, start_height); let tx = consensus::deserialize(&Vec::from_hex(TX_HEX).unwrap()).unwrap(); let r = carrier.send_transaction(&tx); - assert_eq!(r, ConfirmationStatus::ConfirmedIn(start_height)); + assert_eq!(r, ConfirmationStatus::IrrevocablyResolved); // Check the receipt is on the cache assert_eq!(carrier.issued_receipts.get(&tx.txid()).unwrap(), &r); @@ -372,7 +363,7 @@ mod tests { #[test] fn test_send_transaction_connection_error() { // Try to connect to an offline bitcoind. - let bitcoind_mock = BitcoindMock::new(MockOptions::empty()); + let bitcoind_mock = BitcoindMock::new(MockOptions::default()); let bitcoind_reachable = Arc::new((Mutex::new(false), Condvar::new())); let bitcoin_cli = Arc::new(BitcoindClient::new(bitcoind_mock.url(), Auth::None).unwrap()); let start_height = START_HEIGHT as u32; @@ -399,91 +390,86 @@ mod tests { } #[test] - fn test_get_tx_height_ok() { - let target_height = 21; - let bitcoind_mock = - BitcoindMock::new(MockOptions::with_block(BlockHash::default(), target_height)); + fn test_in_mempool() { + let bitcoind_mock = BitcoindMock::new(MockOptions::in_mempool()); let bitcoind_reachable = Arc::new((Mutex::new(true), Condvar::new())); let bitcoin_cli = Arc::new(BitcoindClient::new(bitcoind_mock.url(), Auth::None).unwrap()); let start_height = START_HEIGHT as u32; start_server(bitcoind_mock.server); let carrier = Carrier::new(bitcoin_cli, bitcoind_reachable, start_height); - let tx = consensus::deserialize::(&Vec::from_hex(TX_HEX).unwrap()).unwrap(); - assert_eq!( - carrier.get_tx_height(&tx.txid()), - Some(target_height as u32) - ); + let txid = Txid::from_hex(TXID_HEX).unwrap(); + assert!(carrier.in_mempool(&txid)); } #[test] - fn test_get_tx_height_not_found() { - // Hee we are not testing the case where the block hash is unknown (which will also return None). This is because we only - // learn block hashes from bitcoind, and once a block is known, it cannot disappear (ir can be disconnected, but not banish). 
- let bitcoind_mock = BitcoindMock::new(MockOptions::empty()); + fn test_not_in_mempool() { + let bitcoind_mock = BitcoindMock::new(MockOptions::default()); let bitcoind_reachable = Arc::new((Mutex::new(true), Condvar::new())); let bitcoin_cli = Arc::new(BitcoindClient::new(bitcoind_mock.url(), Auth::None).unwrap()); let start_height = START_HEIGHT as u32; start_server(bitcoind_mock.server); let carrier = Carrier::new(bitcoin_cli, bitcoind_reachable, start_height); - let tx = consensus::deserialize::(&Vec::from_hex(TX_HEX).unwrap()).unwrap(); - assert_eq!(carrier.get_tx_height(&tx.txid()), None); + let txid = Txid::from_hex(TXID_HEX).unwrap(); + assert!(!carrier.in_mempool(&txid)); } #[test] - fn test_get_block_height_ok() { - let target_height = 21; - let block_hash = BlockHash::default(); - let bitcoind_mock = BitcoindMock::new(MockOptions::with_block(block_hash, target_height)); + fn test_not_in_mempool_via_error() { + let bitcoind_mock = BitcoindMock::new(MockOptions::with_error( + rpc_errors::RPC_INVALID_ADDRESS_OR_KEY as i64, + )); let bitcoind_reachable = Arc::new((Mutex::new(true), Condvar::new())); let bitcoin_cli = Arc::new(BitcoindClient::new(bitcoind_mock.url(), Auth::None).unwrap()); let start_height = START_HEIGHT as u32; start_server(bitcoind_mock.server); let carrier = Carrier::new(bitcoin_cli, bitcoind_reachable, start_height); - assert_eq!( - carrier.get_block_height(&block_hash), - Some(target_height as u32) - ); + let txid = Txid::from_hex(TXID_HEX).unwrap(); + assert!(!carrier.in_mempool(&txid)); } #[test] - fn test_get_block_height_not_found() { - let bitcoind_mock = BitcoindMock::new(MockOptions::empty()); + fn test_in_mempool_unexpected_error() { + let bitcoind_mock = + BitcoindMock::new(MockOptions::with_error(rpc_errors::RPC_MISC_ERROR as i64)); let bitcoind_reachable = Arc::new((Mutex::new(true), Condvar::new())); let bitcoin_cli = Arc::new(BitcoindClient::new(bitcoind_mock.url(), Auth::None).unwrap()); let start_height = START_HEIGHT as u32; start_server(bitcoind_mock.server); let carrier = Carrier::new(bitcoin_cli, bitcoind_reachable, start_height); - assert_eq!(carrier.get_block_height(&BlockHash::default()), None); + let txid = Txid::from_hex(TXID_HEX).unwrap(); + assert!(!carrier.in_mempool(&txid)); } #[test] - fn test_get_block_hash_for_tx_ok() { - let block_hash = BlockHash::default(); - let bitcoind_mock = BitcoindMock::new(MockOptions::with_block(block_hash, 21)); - let bitcoind_reachable = Arc::new((Mutex::new(true), Condvar::new())); + fn test_in_mempool_connection_error() { + // Try to connect to an offline bitcoind. 
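+        // `bitcoind_reachable` is initialized to false below, so the `in_mempool` call blocks in `hang_until_bitcoind_reachable` until the spawned thread flags bitcoind as reachable again.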
+ let bitcoind_mock = BitcoindMock::new(MockOptions::default()); + let bitcoind_reachable = Arc::new((Mutex::new(false), Condvar::new())); let bitcoin_cli = Arc::new(BitcoindClient::new(bitcoind_mock.url(), Auth::None).unwrap()); let start_height = START_HEIGHT as u32; - start_server(bitcoind_mock.server); + let carrier = Carrier::new(bitcoin_cli, bitcoind_reachable.clone(), start_height); - let tx = consensus::deserialize::(&Vec::from_hex(TX_HEX).unwrap()).unwrap(); - let carrier = Carrier::new(bitcoin_cli, bitcoind_reachable, start_height); - assert_eq!(carrier.get_block_hash_for_tx(&tx.txid()), Some(block_hash)); - } + let txid = Txid::from_hex(TXID_HEX).unwrap(); + let delay = std::time::Duration::new(3, 0); - #[test] - fn test_get_block_hash_for_tx_not_found() { - let bitcoind_mock = BitcoindMock::new(MockOptions::empty()); - let bitcoind_reachable = Arc::new((Mutex::new(true), Condvar::new())); - let bitcoin_cli = Arc::new(BitcoindClient::new(bitcoind_mock.url(), Auth::None).unwrap()); - let start_height = START_HEIGHT as u32; - start_server(bitcoind_mock.server); + thread::spawn(move || { + thread::sleep(delay); + let (reachable, notifier) = &*bitcoind_reachable; + *reachable.lock().unwrap() = true; + notifier.notify_all(); + }); - let tx = consensus::deserialize::(&Vec::from_hex(TX_HEX).unwrap()).unwrap(); - let carrier = Carrier::new(bitcoin_cli, bitcoind_reachable, start_height); - assert_eq!(carrier.get_block_hash_for_tx(&tx.txid()), None); + let before = std::time::Instant::now(); + carrier.in_mempool(&txid); + + // Check the request has hanged for ~delay + assert_eq!( + (std::time::Instant::now() - before).as_secs(), + delay.as_secs() + ); } } diff --git a/teos/src/main.rs b/teos/src/main.rs index de9f4128..cb0bfca7 100644 --- a/teos/src/main.rs +++ b/teos/src/main.rs @@ -32,6 +32,7 @@ use teos::responder::Responder; use teos::tls::tls_init; use teos::watcher::Watcher; +use teos_common::constants::IRREVOCABLY_RESOLVED; use teos_common::cryptography::get_random_keypair; use teos_common::TowerId; @@ -44,7 +45,7 @@ where B: DerefMut + Sized + Send + Sync, T: BlockSource, { - let mut last_n_blocks = Vec::new(); + let mut last_n_blocks = Vec::with_capacity(n); for _ in 0..n { let block = poller.fetch_block(&last_known_block).await.unwrap(); last_known_block = poller @@ -187,6 +188,20 @@ async fn main() { } else { validate_best_block_header(&mut derefed).await.unwrap() }; + + // DISCUSS: This is not really required (and only triggered in regtest). This is only in place so the caches can be + // populated with enough blocks mainly because the size of the cache is based on the amount of blocks passed when initializing. + // However, we could add an additional parameter to specify the size of the cache, and initialize with however may blocks we + // could pull from the backend. Adding this functionality just for regtest seemed unnecessary though, hence the check. + if tip.height < IRREVOCABLY_RESOLVED { + log::error!( + "Not enough blocks to start teosd (required: {}). Mine at least {} more", + IRREVOCABLY_RESOLVED, + IRREVOCABLY_RESOLVED - tip.height + ); + std::process::exit(1); + } + log::info!("Last known block: {}", tip.header.block_hash()); // This is how chain poller names bitcoin networks. 
@@ -197,7 +212,7 @@ async fn main() { }; let mut poller = ChainPoller::new(&mut derefed, Network::from_str(btc_network).unwrap()); - let last_n_blocks = get_last_n_blocks(&mut poller, tip, 6).await; + let last_n_blocks = get_last_n_blocks(&mut poller, tip, IRREVOCABLY_RESOLVED as usize).await; // Build components let gatekeeper = Arc::new(Gatekeeper::new( @@ -208,12 +223,18 @@ async fn main() { dbm.clone(), )); - let carrier = Carrier::new(rpc, bitcoind_reachable.clone(), tip.deref().height); - let responder = Arc::new(Responder::new(carrier, gatekeeper.clone(), dbm.clone())); + let carrier = Carrier::new(rpc, bitcoind_reachable.clone(), tip.height); + let responder = Arc::new(Responder::new( + &last_n_blocks, + tip.height, + carrier, + gatekeeper.clone(), + dbm.clone(), + )); let watcher = Arc::new(Watcher::new( gatekeeper.clone(), responder.clone(), - &last_n_blocks, + &last_n_blocks[0..6], tip.height, tower_sk, TowerId(tower_pk), diff --git a/teos/src/responder.rs b/teos/src/responder.rs index f2ba7a0f..6616949e 100644 --- a/teos/src/responder.rs +++ b/teos/src/responder.rs @@ -4,9 +4,10 @@ use std::collections::{HashMap, HashSet}; use std::iter::FromIterator; use std::sync::{Arc, Mutex}; -use bitcoin::consensus; +use bitcoin::{consensus, BlockHash}; use bitcoin::{BlockHeader, Transaction, Txid}; use lightning::chain; +use lightning_block_sync::poll::ValidatedBlock; use teos_common::constants; use teos_common::protos as common_msgs; @@ -16,6 +17,7 @@ use crate::carrier::Carrier; use crate::dbm::DBM; use crate::extended_appointment::UUID; use crate::gatekeeper::{Gatekeeper, UserInfo}; +use crate::tx_index::TxIndex; use crate::watcher::Breach; /// Number of missed confirmations to wait before rebroadcasting a transaction. @@ -26,6 +28,7 @@ const CONFIRMATIONS_BEFORE_RETRY: u8 = 6; pub enum ConfirmationStatus { ConfirmedIn(u32), InMempoolSince(u32), + IrrevocablyResolved, Rejected(i32), ReorgedOut, } @@ -59,6 +62,14 @@ impl ConfirmationStatus { None } } + + /// Whether the transaction was accepted by the underlying node. + pub fn accepted(&self) -> bool { + matches!( + self, + ConfirmationStatus::ConfirmedIn(_) | &ConfirmationStatus::InMempoolSince(_) + ) + } } /// Minimal data required in memory to keep track of transaction trackers. @@ -130,6 +141,8 @@ pub struct Responder { trackers: Mutex>, /// A map between [Txid]s and [UUID]s. tx_tracker_map: Mutex>>, + /// A local, pruned, [TxIndex] used to avoid the need of `txindex=1`. + tx_index: Mutex>, /// A [Carrier] instance. Data is sent to the `bitcoind` through it. carrier: Mutex, /// A [Gatekeeper] instance. Data regarding users is requested to it. @@ -140,7 +153,13 @@ pub struct Responder { impl Responder { /// Creates a new [Responder] instance. - pub fn new(carrier: Carrier, gatekeeper: Arc, dbm: Arc>) -> Self { + pub fn new( + last_n_blocs: &[ValidatedBlock], + last_known_block_height: u32, + carrier: Carrier, + gatekeeper: Arc, + dbm: Arc>, + ) -> Self { let mut trackers = HashMap::new(); let mut tx_tracker_map: HashMap> = HashMap::new(); @@ -158,6 +177,7 @@ impl Responder { carrier: Mutex::new(carrier), trackers: Mutex::new(trackers), tx_tracker_map: Mutex::new(tx_tracker_map), + tx_index: Mutex::new(TxIndex::new(last_n_blocs, last_known_block_height)), dbm, gatekeeper, } @@ -189,12 +209,20 @@ impl Responder { return tracker.status; } - let status = self - .carrier - .lock() - .unwrap() - .send_transaction(&breach.penalty_tx); - if !matches!(status, ConfirmationStatus::Rejected { .. 
}) { + let mut carrier = self.carrier.lock().unwrap(); + let tx_index = self.tx_index.lock().unwrap(); + + // Check whether the transaction is in mempool or part of our internal txindex. Send it to our node otherwise. + let status = if carrier.in_mempool(&breach.penalty_tx.txid()) { + // If it's in mempool we assume it was just included + ConfirmationStatus::InMempoolSince(carrier.block_height()) + } else if let Some(block_hash) = tx_index.get(&breach.penalty_tx.txid()) { + ConfirmationStatus::ConfirmedIn(tx_index.get_height(block_hash).unwrap() as u32) + } else { + carrier.send_transaction(&breach.penalty_tx) + }; + + if status.accepted() { self.add_tracker(uuid, breach, user_id, status); } @@ -377,12 +405,15 @@ impl Responder { let mut trackers = self.trackers.lock().unwrap(); let mut carrier = self.carrier.lock().unwrap(); + let tx_index = self.tx_index.lock().unwrap(); for (uuid, (penalty_tx, dispute_tx)) in txs.into_iter() { let status = if let Some(dispute_tx) = dispute_tx { - // The tracker was reorged out, and the dispute may potentially not be in the chain anymore. - if carrier.get_block_hash_for_tx(&dispute_tx.txid()).is_some() { - // Dispute tx is on chain, so we only need to care about the penalty + // The tracker was reorged out, and the dispute may potentially not be in the chain (or mempool) anymore. + if tx_index.contains_key(&dispute_tx.txid()) + | carrier.in_mempool(&dispute_tx.txid()) + { + // Dispute tx is on chain (or mempool), so we only need to care about the penalty carrier.send_transaction(&penalty_tx) } else { // Dispute tx has also been reorged out, meaning that both transactions need to be broadcast. @@ -503,7 +534,13 @@ impl chain::Listen for Responder { log::info!("New block received: {}", header.block_hash()); self.carrier.lock().unwrap().update_height(height); - if self.trackers.lock().unwrap().len() > 0 { + let txs = txdata + .iter() + .map(|(_, tx)| (tx.txid(), header.block_hash())) + .collect(); + self.tx_index.lock().unwrap().update(*header, &txs); + + if !self.trackers.lock().unwrap().is_empty() { // Complete those appointments that are due at this height let completed_trackers = self.check_confirmations( &txdata.iter().map(|(_, tx)| tx.txid()).collect::>(), @@ -555,6 +592,10 @@ impl chain::Listen for Responder { fn block_disconnected(&self, header: &BlockHeader, height: u32) { log::warn!("Block disconnected: {}", header.block_hash()); self.carrier.lock().unwrap().update_height(height); + self.tx_index + .lock() + .unwrap() + .remove_disconnected_block(&header.block_hash()); for tracker in self.trackers.lock().unwrap().values_mut() { // The transaction has been unconfirmed. Flag it as reorged out so we can rebroadcast it. 
@@ -570,19 +611,19 @@ mod tests { use super::*; use lightning::chain::Listen; - use std::ops::Deref; use std::sync::{Arc, Mutex}; use crate::dbm::DBM; use crate::gatekeeper::UserInfo; use crate::rpc_errors; use crate::test_utils::{ - create_carrier, generate_dummy_appointment_with_user, generate_uuid, get_random_breach, - get_random_tracker, get_random_tx, store_appointment_and_fks_to_db, BitcoindStopper, - Blockchain, MockedServerQuery, AVAILABLE_SLOTS, DURATION, EXPIRY_DELTA, SLOTS, - START_HEIGHT, SUBSCRIPTION_EXPIRY, SUBSCRIPTION_START, + create_carrier, generate_dummy_appointment_with_user, generate_uuid, get_last_n_blocks, + get_random_breach, get_random_tracker, get_random_tx, store_appointment_and_fks_to_db, + BitcoindStopper, Blockchain, MockedServerQuery, AVAILABLE_SLOTS, DURATION, EXPIRY_DELTA, + SLOTS, START_HEIGHT, SUBSCRIPTION_EXPIRY, SUBSCRIPTION_START, }; + use teos_common::constants::IRREVOCABLY_RESOLVED; use teos_common::dbm::Error as DBError; use teos_common::test_utils::get_random_user_id; @@ -640,20 +681,30 @@ mod tests { } } - fn create_responder( - chain: &Blockchain, + async fn create_responder( + chain: &mut Blockchain, gatekeeper: Arc, dbm: Arc>, query: MockedServerQuery, ) -> (Responder, BitcoindStopper) { - let tip = chain.tip(); - let (carrier, bitcoind_stopper) = create_carrier(query, tip.deref().height); - (Responder::new(carrier, gatekeeper, dbm), bitcoind_stopper) + let height = if chain.tip().height < IRREVOCABLY_RESOLVED { + chain.tip().height + } else { + IRREVOCABLY_RESOLVED + }; + + let last_n_blocks = get_last_n_blocks(chain, height as usize).await; + + let (carrier, bitcoind_stopper) = create_carrier(query, chain.tip().height); + ( + Responder::new(&last_n_blocks, chain.tip().height, carrier, gatekeeper, dbm), + bitcoind_stopper, + ) } - fn init_responder_with_chain_and_dbm( + async fn init_responder_with_chain_and_dbm( mocked_query: MockedServerQuery, - chain: &Blockchain, + chain: &mut Blockchain, dbm: Arc>, ) -> (Responder, BitcoindStopper) { let gk = Gatekeeper::new( @@ -663,13 +714,13 @@ mod tests { EXPIRY_DELTA, dbm.clone(), ); - create_responder(chain, Arc::new(gk), dbm, mocked_query) + create_responder(chain, Arc::new(gk), dbm, mocked_query).await } - fn init_responder(mocked_query: MockedServerQuery) -> (Responder, BitcoindStopper) { + async fn init_responder(mocked_query: MockedServerQuery) -> (Responder, BitcoindStopper) { let dbm = Arc::new(Mutex::new(DBM::in_memory().unwrap())); - let chain = Blockchain::default().with_height_and_txs(START_HEIGHT, 10); - init_responder_with_chain_and_dbm(mocked_query, &chain, dbm) + let mut chain = Blockchain::default().with_height_and_txs(START_HEIGHT, 10); + init_responder_with_chain_and_dbm(mocked_query, &mut chain, dbm).await } #[test] @@ -712,13 +763,14 @@ mod tests { assert_eq!(ConfirmationStatus::ReorgedOut.to_db_data(), None); } - #[test] - fn test_responder_new() { + #[tokio::test] + async fn test_responder_new() { // A fresh responder has no associated data - let chain = Blockchain::default().with_height(START_HEIGHT); + let mut chain = Blockchain::default().with_height(START_HEIGHT); let dbm = Arc::new(Mutex::new(DBM::in_memory().unwrap())); let (responder, _s) = - init_responder_with_chain_and_dbm(MockedServerQuery::Regular, &chain, dbm.clone()); + init_responder_with_chain_and_dbm(MockedServerQuery::Regular, &mut chain, dbm.clone()) + .await; assert!(responder.is_fresh()); // If we add some trackers to the system and create a new Responder reusing the same db @@ -740,15 +792,15 @@ mod tests 
{ // Create a new Responder reusing the same DB and check that the data is loaded let (another_r, _) = - init_responder_with_chain_and_dbm(MockedServerQuery::Regular, &chain, dbm); + init_responder_with_chain_and_dbm(MockedServerQuery::Regular, &mut chain, dbm).await; assert!(!responder.is_fresh()); assert_eq!(responder, another_r); } - #[test] - fn test_handle_breach_delivered() { + #[tokio::test] + async fn test_handle_breach_accepted() { let start_height = START_HEIGHT as u32; - let (responder, _s) = init_responder(MockedServerQuery::Regular); + let (responder, _s) = init_responder(MockedServerQuery::Regular).await; let user_id = get_random_user_id(); let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); @@ -792,11 +844,82 @@ mod tests { .contains_key(&another_breach.penalty_tx.txid())); } - #[test] - fn test_handle_breach_rejected() { + #[tokio::test] + async fn test_handle_breach_accepted_in_mempool() { + let start_height = START_HEIGHT as u32; + let (responder, _s) = init_responder(MockedServerQuery::InMempoool).await; + + let user_id = get_random_user_id(); + let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); + store_appointment_and_fks_to_db(&responder.dbm.lock().unwrap(), uuid, &appointment); + + let breach = get_random_breach(); + let penalty_txid = breach.penalty_tx.txid(); + + assert_eq!( + responder.handle_breach(uuid, breach, user_id), + ConfirmationStatus::InMempoolSince(start_height) + ); + assert!(responder.trackers.lock().unwrap().contains_key(&uuid)); + assert_eq!( + responder.trackers.lock().unwrap()[&uuid].status, + ConfirmationStatus::InMempoolSince(start_height) + ); + assert!(responder + .tx_tracker_map + .lock() + .unwrap() + .contains_key(&penalty_txid)); + } + + #[tokio::test] + async fn test_handle_breach_accepted_in_txindex() { + let (responder, _s) = init_responder(MockedServerQuery::Regular).await; + + let user_id = get_random_user_id(); + let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); + store_appointment_and_fks_to_db(&responder.dbm.lock().unwrap(), uuid, &appointment); + + let breach = get_random_breach(); + let penalty_txid = breach.penalty_tx.txid(); + + // Add the tx to our txindex + let target_block_hash = *responder.tx_index.lock().unwrap().blocks().get(2).unwrap(); + responder + .tx_index + .lock() + .unwrap() + .index_mut() + .insert(penalty_txid, target_block_hash); + let target_height = responder + .tx_index + .lock() + .unwrap() + .get_height(&target_block_hash) + .unwrap() as u32; + + assert_eq!( + responder.handle_breach(uuid, breach, user_id), + ConfirmationStatus::ConfirmedIn(target_height) + ); + assert!(responder.trackers.lock().unwrap().contains_key(&uuid)); + assert_eq!( + responder.trackers.lock().unwrap()[&uuid].status, + ConfirmationStatus::ConfirmedIn(target_height) + ); + assert!(responder + .tx_tracker_map + .lock() + .unwrap() + .contains_key(&penalty_txid)); + } + + #[tokio::test] + async fn test_handle_breach_rejected() { let (responder, _s) = init_responder(MockedServerQuery::Error( rpc_errors::RPC_VERIFY_ERROR as i64, - )); + )) + .await; let user_id = get_random_user_id(); let uuid = generate_uuid(); @@ -815,9 +938,9 @@ mod tests { .contains_key(&penalty_txid)); } - #[test] - fn test_add_tracker() { - let (responder, _s) = init_responder(MockedServerQuery::Regular); + #[tokio::test] + async fn test_add_tracker() { + let (responder, _s) = init_responder(MockedServerQuery::Regular).await; let start_height = START_HEIGHT as u32; // Add the 
necessary FKs in the database @@ -937,12 +1060,12 @@ mod tests { ); } - #[test] - fn test_has_tracker() { + #[tokio::test] + async fn test_has_tracker() { // Has tracker should return true as long as the given tracker is held by the Responder. // As long as the tracker is in Responder.trackers and Responder.tx_tracker_map, the return // must be true. - let (responder, _s) = init_responder(MockedServerQuery::Regular); + let (responder, _s) = init_responder(MockedServerQuery::Regular).await; // Add a new tracker let user_id = get_random_user_id(); @@ -968,11 +1091,11 @@ mod tests { assert!(!responder.has_tracker(uuid)); } - #[test] - fn test_get_tracker() { + #[tokio::test] + async fn test_get_tracker() { // Should return a tracker as long as it exists let start_height = START_HEIGHT as u32; - let (responder, _s) = init_responder(MockedServerQuery::Regular); + let (responder, _s) = init_responder(MockedServerQuery::Regular).await; // Store the user and the appointment in the database so we can add the tracker later on (due to FK restrictions) let user_id = get_random_user_id(); @@ -1008,9 +1131,9 @@ mod tests { assert_eq!(responder.get_tracker(uuid), None); } - #[test] - fn test_check_confirmations() { - let (responder, _s) = init_responder(MockedServerQuery::Regular); + #[tokio::test] + async fn test_check_confirmations() { + let (responder, _s) = init_responder(MockedServerQuery::Regular).await; let target_height = (START_HEIGHT * 2) as u32; // Unconfirmed transactions that miss a confirmation will be added to missed_confirmations (if not there) or their missed confirmation count till be increased @@ -1114,9 +1237,9 @@ mod tests { } } - #[test] - fn test_get_txs_to_rebroadcast() { - let (responder, _s) = init_responder(MockedServerQuery::Regular); + #[tokio::test] + async fn test_get_txs_to_rebroadcast() { + let (responder, _s) = init_responder(MockedServerQuery::Regular).await; let current_height = 100; let user_id = get_random_user_id(); @@ -1161,10 +1284,10 @@ mod tests { assert_eq!(responder.get_txs_to_rebroadcast(current_height), txs); } - #[test] - fn test_get_txs_to_rebroadcast_reorged() { + #[tokio::test] + async fn test_get_txs_to_rebroadcast_reorged() { // For reorged transactions this works a bit different, the dispute transaction will also be returned here - let (responder, _s) = init_responder(MockedServerQuery::Regular); + let (responder, _s) = init_responder(MockedServerQuery::Regular).await; let current_height = 100; let user_id = get_random_user_id(); @@ -1202,7 +1325,7 @@ mod tests { // Since we are adding trackers using add_trackers we'll need to manually change the state of the transaction // (reorged transactions are not passed to add_tracker, they are detected after they are already there). - // Not doing should will trigger an error in the dbm since reorged transactions are not stored in the db. + // Not doing so will trigger an error in the dbm since reorged transactions are not stored in the db. if i % 2 == 0 { responder .trackers @@ -1223,9 +1346,9 @@ mod tests { assert_eq!(responder.get_txs_to_rebroadcast(current_height), txs); } - #[test] - fn test_get_outdated_trackers() { - let (responder, _s) = init_responder(MockedServerQuery::Regular); + #[tokio::test] + async fn test_get_outdated_trackers() { + let (responder, _s) = init_responder(MockedServerQuery::Regular).await; // Outdated trackers are those whose associated subscription is outdated and have not been confirmed yet (they don't have // a single confirmation). 
@@ -1271,11 +1394,11 @@ mod tests { ); } - #[test] - fn test_rebroadcast_accepted() { + #[tokio::test] + async fn test_rebroadcast_accepted() { // This test positive rebroadcast cases, including reorgs. However, complex reorg logic is not tested here, it will need a // dedicated test (against bitcoind, not mocked). - let (responder, _s) = init_responder(MockedServerQuery::Regular); + let (responder, _s) = init_responder(MockedServerQuery::Regular).await; let current_height = 100; // Add user to the database @@ -1339,13 +1462,14 @@ mod tests { assert!(rejected.is_empty()); } - #[test] - fn test_rebroadcast_rejected() { + #[tokio::test] + async fn test_rebroadcast_rejected() { // This test negative rebroadcast cases, including reorgs. However, complex reorg logic is not tested here, it will need a // dedicated test (against bitcoind, not mocked). let (responder, _s) = init_responder(MockedServerQuery::Error( rpc_errors::RPC_VERIFY_ERROR as i64, - )); + )) + .await; let current_height = 100; // Add user to the database @@ -1408,9 +1532,9 @@ mod tests { assert!(accepted.is_empty()); } - #[test] - fn test_delete_trackers_from_memory() { - let (responder, _s) = init_responder(MockedServerQuery::Regular); + #[tokio::test] + async fn test_delete_trackers_from_memory() { + let (responder, _s) = init_responder(MockedServerQuery::Regular).await; // Add user to the database let user_id = get_random_user_id(); @@ -1465,9 +1589,9 @@ mod tests { } } - #[test] - fn test_delete_trackers() { - let (responder, _s) = init_responder(MockedServerQuery::Regular); + #[tokio::test] + async fn test_delete_trackers() { + let (responder, _s) = init_responder(MockedServerQuery::Regular).await; // Add user to the database let user_id = get_random_user_id(); @@ -1601,13 +1725,13 @@ mod tests { } } - #[test] - fn test_filtered_block_connected() { + #[tokio::test] + async fn test_filtered_block_connected() { let dbm = Arc::new(Mutex::new(DBM::in_memory().unwrap())); let start_height = START_HEIGHT * 2; let mut chain = Blockchain::default().with_height(start_height); let (responder, _s) = - init_responder_with_chain_and_dbm(MockedServerQuery::Regular, &chain, dbm); + init_responder_with_chain_and_dbm(MockedServerQuery::Regular, &mut chain, dbm).await; // block_connected is used to keep track of the confirmation received (or missed) by the trackers the Responder // is keeping track of. 
@@ -1846,12 +1970,12 @@ mod tests {
         );
     }
 
-    #[test]
-    fn test_block_disconnected() {
+    #[tokio::test]
+    async fn test_block_disconnected() {
         let dbm = Arc::new(Mutex::new(DBM::in_memory().unwrap()));
-        let chain = Blockchain::default().with_height_and_txs(START_HEIGHT, 10);
+        let mut chain = Blockchain::default().with_height_and_txs(START_HEIGHT, 10);
         let (responder, _s) =
-            init_responder_with_chain_and_dbm(MockedServerQuery::Regular, &chain, dbm);
+            init_responder_with_chain_and_dbm(MockedServerQuery::Regular, &mut chain, dbm).await;
 
         // Add user to the database
         let user_id = get_random_user_id();
diff --git a/teos/src/test_utils.rs b/teos/src/test_utils.rs
index 8d78e4df..f8cdf7ac 100644
--- a/teos/src/test_utils.rs
+++ b/teos/src/test_utils.rs
@@ -8,7 +8,6 @@
 */
 
 use rand::Rng;
-use std::ops::Deref;
 use std::sync::{Arc, Condvar, Mutex};
 use std::thread;
 
@@ -36,6 +35,7 @@ use lightning_block_sync::{
     AsyncBlockSourceResult, BlockHeaderData, BlockSource, BlockSourceError, UnboundedCache,
 };
 
+use teos_common::constants::IRREVOCABLY_RESOLVED;
 use teos_common::cryptography::{get_random_bytes, get_random_keypair};
 use teos_common::test_utils::{generate_random_appointment, get_random_user_id, TXID_HEX, TX_HEX};
 use teos_common::UserId;
@@ -46,6 +46,7 @@ use crate::dbm::DBM;
 use crate::extended_appointment::{ExtendedAppointment, UUID};
 use crate::gatekeeper::{Gatekeeper, UserInfo};
 use crate::responder::{ConfirmationStatus, Responder, TransactionTracker};
+use crate::rpc_errors;
 use crate::watcher::{Breach, Watcher};
 
 pub(crate) const SLOTS: u32 = 21;
@@ -353,18 +354,15 @@ pub(crate) fn store_appointment_and_fks_to_db(
 }
 
 pub(crate) async fn get_last_n_blocks(chain: &mut Blockchain, n: usize) -> Vec<ValidatedBlock> {
-    let tip = chain.tip();
-    let poller = ChainPoller::new(chain, Network::Bitcoin);
+    let mut last_n_blocks = Vec::with_capacity(n);
+    let mut last_known_block = Ok(chain.tip());
+    let poller = ChainPoller::new(chain, Network::Regtest);
 
-    let mut last_n_blocks = Vec::new();
-    let mut last_known_block = tip;
     for _ in 0..n {
-        let block = poller.fetch_block(&last_known_block).await.unwrap();
-        last_known_block = poller
-            .look_up_previous_header(&last_known_block)
-            .await
-            .unwrap();
+        let header = last_known_block.unwrap();
+        let block = poller.fetch_block(&header).await.unwrap();
         last_n_blocks.push(block);
+        last_known_block = poller.look_up_previous_header(&header).await;
     }
 
     last_n_blocks
@@ -372,12 +370,14 @@ pub(crate) async fn get_last_n_blocks(chain: &mut Blockchain, n: usize) -> Vec
 pub(crate) fn create_carrier(query: MockedServerQuery, height: u32) -> (Carrier, BitcoindStopper) {
     let bitcoind_mock = match query {
-        MockedServerQuery::Regular => BitcoindMock::new(MockOptions::empty()),
+        MockedServerQuery::Regular => BitcoindMock::new(MockOptions::default()),
+        MockedServerQuery::InMempoool => BitcoindMock::new(MockOptions::in_mempool()),
         MockedServerQuery::Error(x) => BitcoindMock::new(MockOptions::with_error(x)),
     };
     let bitcoin_cli = Arc::new(BitcoindClient::new(bitcoind_mock.url(), Auth::None).unwrap());
@@ -390,17 +390,23 @@ pub(crate) fn create_carrier(query: MockedServerQuery, height: u32) -> (Carrier,
     )
 }
 
-pub(crate) fn create_responder(
-    tip: ValidatedBlockHeader,
+pub(crate) async fn create_responder(
+    chain: &mut Blockchain,
     gatekeeper: Arc<Gatekeeper>,
     dbm: Arc<Mutex<DBM>>,
     server_url: &str,
 ) -> Responder {
+    let height = chain.tip().height;
+    // For the local TxIndex logic to be sound, our index needs to have, at least, IRREVOCABLY_RESOLVED blocks
+    debug_assert!(height >= IRREVOCABLY_RESOLVED);
+
+    let last_n_blocks = get_last_n_blocks(chain, IRREVOCABLY_RESOLVED as usize).await;
+
     let bitcoin_cli = Arc::new(BitcoindClient::new(server_url, Auth::None).unwrap());
     let bitcoind_reachable = Arc::new((Mutex::new(true), Condvar::new()));
-    let carrier = Carrier::new(bitcoin_cli, bitcoind_reachable, tip.deref().height);
+    let carrier = Carrier::new(bitcoin_cli, bitcoind_reachable, height);
 
-    Responder::new(carrier, gatekeeper, dbm)
+    Responder::new(&last_n_blocks, height, carrier, gatekeeper, dbm)
 }
 
 pub(crate) async fn create_watcher(
@@ -463,7 +469,7 @@ impl Default for ApiConfig {
 pub(crate) async fn create_api_with_config(
     api_config: ApiConfig,
 ) -> (Arc, BitcoindStopper) {
-    let bitcoind_mock = BitcoindMock::new(MockOptions::empty());
+    let bitcoind_mock = BitcoindMock::new(MockOptions::default());
     let mut chain = Blockchain::default().with_height(START_HEIGHT);
     let dbm = Arc::new(Mutex::new(DBM::in_memory().unwrap()));
 
@@ -474,7 +480,8 @@ pub(crate) async fn create_api_with_config(
         EXPIRY_DELTA,
         dbm.clone(),
     ));
-    let responder = create_responder(chain.tip(), gk.clone(), dbm.clone(), bitcoind_mock.url());
+    let responder =
+        create_responder(&mut chain, gk.clone(), dbm.clone(), bitcoind_mock.url()).await;
     let (watcher, stopper) = create_watcher(
         &mut chain,
         Arc::new(responder),
@@ -527,43 +534,24 @@ pub(crate) struct BitcoindMock {
     stopper: BitcoindStopper,
 }
 
+#[derive(Default)]
 pub(crate) struct MockOptions {
     error_code: Option<i64>,
-    block_hash: Option<BlockHash>,
-    height: Option<usize>,
+    in_mempool: bool,
 }
 
 impl MockOptions {
-    pub fn new(error_code: i64, block_hash: BlockHash, height: usize) -> Self {
-        Self {
-            error_code: Some(error_code),
-            block_hash: Some(block_hash),
-            height: Some(height),
-        }
-    }
-
-    pub fn empty() -> Self {
-        Self {
-            error_code: None,
-            block_hash: None,
-            height: None,
-        }
-    }
-
     pub fn with_error(error_code: i64) -> Self {
         Self {
             error_code: Some(error_code),
-            block_hash: None,
-            height: None,
+            in_mempool: false,
         }
     }
 
-    #[allow(dead_code)]
-    pub fn with_block(block_hash: BlockHash, height: usize) -> Self {
+    pub fn in_mempool() -> Self {
         Self {
             error_code: None,
-            block_hash: Some(block_hash),
-            height: Some(height),
+            in_mempool: true,
         }
     }
 }
@@ -577,15 +565,10 @@ impl BitcoindMock {
                 Err(JsonRpcError::new(JsonRpcErrorCode::ServerError(error)))
             });
             io.add_alias("sendrawtransaction", "error");
+            io.add_alias("getrawtransaction", "error");
         } else {
             BitcoindMock::add_sendrawtransaction(&mut io);
-        }
-
-        if let Some(block_hash) = options.block_hash {
-            BitcoindMock::add_getrawtransaction(&mut io, block_hash.to_string());
-            if let Some(height) = options.height {
-                BitcoindMock::add_getblockheader(&mut io, block_hash.to_string(), height);
-            }
+            BitcoindMock::add_getrawtransaction(&mut io, options.in_mempool);
         }
 
         let server = ServerBuilder::new(io)
@@ -606,41 +589,25 @@ impl BitcoindMock {
         });
     }
 
-    fn add_getrawtransaction(io: &mut IoHandler, block_hash: String) {
+    fn add_getrawtransaction(io: &mut IoHandler, in_mempool: bool) {
         io.add_sync_method("getrawtransaction", move |_params: Params| {
-            match _params {
-                Params::Array(x) => match x[1] {
-                    Value::Bool(x) => {
-                        if x {
-                            Ok(serde_json::json!({"hex": TX_HEX, "txid": TXID_HEX, "hash": TXID_HEX, "size": 0,
-                            "vsize": 0, "version": 1, "locktime": 0, "vin": [], "vout": [], "blockhash": block_hash }))
-                        } else {
-                            Ok(Value::String(TX_HEX.to_owned()))
-                        }
-                    }
-                    _ => panic!("Boolean param not found"),
-                },
-                _ => panic!("No params found"),
-            }
-        })
-    }
-
-    fn add_getblockheader(io: &mut IoHandler, block_hash: String, height: usize) {
-        io.add_sync_method("getblockheader", move |_params: Params| {
-            match _params {
-                Params::Array(x) => match x[1] {
-                    Value::Bool(x) => {
-                        if x {
-                            Ok(serde_json::json!({"hash": block_hash, "confirmations": 1, "height": height, "version": 1,
-                            "merkleroot": "4eca41cf0fa551346842eb317564a403e39553444790a65f949f95bc18d24643", "time": 1645719068, "nonce": 2, "bits": "207fffff",
-                            "difficulty": 0.0, "chainwork": "0000000000000000000000000000000000000000000000000000000000001146", "nTx": 1}))
-                        } else {
-                            Ok(Value::String(TX_HEX.to_owned()))
+            if !in_mempool {
+                Err(JsonRpcError::new(JsonRpcErrorCode::ServerError(rpc_errors::RPC_INVALID_ADDRESS_OR_KEY as i64)))
+            } else {
+                match _params {
+                    Params::Array(x) => match x[1] {
+                        Value::Bool(x) => {
+                            if x {
+                                Ok(serde_json::json!({"hex": TX_HEX, "txid": TXID_HEX, "hash": TXID_HEX, "size": 0,
+                                "vsize": 0, "version": 1, "locktime": 0, "vin": [], "vout": [] }))
+                            } else {
+                                Ok(Value::String(TX_HEX.to_owned()))
+                            }
                         }
-                    }
-                    _ => panic!("Boolean param not found"),
-                },
-                _ => panic!("No params found"),
+                        _ => panic!("Boolean param not found"),
+                    },
+                    _ => panic!("No params found"),
+                }
             }
         })
     }
diff --git a/teos/src/watcher.rs b/teos/src/watcher.rs
index d08050d9..5d8b9587 100644
--- a/teos/src/watcher.rs
+++ b/teos/src/watcher.rs
@@ -771,10 +771,10 @@ mod tests {
     use crate::rpc_errors;
     use crate::test_utils::{
         create_carrier, create_responder, create_watcher, generate_dummy_appointment,
-        generate_dummy_appointment_with_user, generate_uuid, get_last_n_blocks, get_random_breach,
-        get_random_tx, store_appointment_and_fks_to_db, BitcoindMock, BitcoindStopper, Blockchain,
-        MockOptions, MockedServerQuery, AVAILABLE_SLOTS, DURATION, EXPIRY_DELTA, SLOTS,
-        START_HEIGHT, SUBSCRIPTION_EXPIRY, SUBSCRIPTION_START,
+        generate_dummy_appointment_with_user, generate_uuid, get_random_breach, get_random_tx,
+        store_appointment_and_fks_to_db, BitcoindMock, BitcoindStopper, Blockchain, MockOptions,
+        MockedServerQuery, AVAILABLE_SLOTS, DURATION, EXPIRY_DELTA, SLOTS, START_HEIGHT,
+        SUBSCRIPTION_EXPIRY, SUBSCRIPTION_START,
     };
     use teos_common::cryptography::{get_random_bytes, get_random_keypair};
     use teos_common::dbm::Error as DBError;
@@ -782,7 +782,7 @@ mod tests {
     use bitcoin::hash_types::Txid;
     use bitcoin::hashes::Hash;
     use bitcoin::secp256k1::{PublicKey, Secp256k1};
-    use bitcoin::Block;
+
     use lightning::chain::Listen;
 
     impl PartialEq for Watcher {
@@ -820,7 +820,7 @@ mod tests {
         chain: &mut Blockchain,
         dbm: Arc<Mutex<DBM>>,
     ) -> (Watcher, BitcoindStopper) {
-        let bitcoind_mock = BitcoindMock::new(MockOptions::empty());
+        let bitcoind_mock = BitcoindMock::new(MockOptions::default());
 
         let gk = Arc::new(Gatekeeper::new(
             chain.get_block_count(),
@@ -829,7 +829,7 @@ mod tests {
             EXPIRY_DELTA,
             dbm.clone(),
         ));
-        let responder = create_responder(chain.tip(), gk.clone(), dbm.clone(), bitcoind_mock.url());
+        let responder = create_responder(chain, gk.clone(), dbm.clone(), bitcoind_mock.url()).await;
         create_watcher(
             chain,
             Arc::new(responder),

From 221358d6d453670c4fc3a1bf5d651db2c21930cb Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Fri, 7 Oct 2022 13:06:05 +0200
Subject: [PATCH 3/3] Updates docs regarding txindex

---
 DEPENDENCIES.md | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/DEPENDENCIES.md b/DEPENDENCIES.md
index 8b1c12fe..4efa2041 100644
--- a/DEPENDENCIES.md
+++ b/DEPENDENCIES.md
@@ -19,7 +19,6 @@ You can get Bitcoin Core from [bitcoincore.org](https://bitcoincore.org/en/downl
 
 Bitcoin needs to be running with the following options enabled:
 
-- `txindex` to be able to look for non-wallet transactions
 - `server` to run rpc commands
 
 Here's an example of a `bitcoin.conf` you can use for mainnet. **DO NOT USE THE PROVIDED RPC USER AND PASSWORD.**
@@ -31,9 +30,6 @@ rpcuser=user
 rpcpassword=passwd
 rpcservertimeout=600
 
-# [blockchain]
-txindex=1
-
 # [others]
 daemon=1
 debug=1
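Note on the test changes in the second patch above: the bulk of the churn is mechanical, since helpers such as `init_responder` and `create_responder` are now `async` and must be awaited, every test calling them switches from `#[test]` to `#[tokio::test]`. A minimal, self-contained sketch of that pattern follows; the helper and test names are illustrative only (not part of the patches), and it assumes a `tokio` dev-dependency with the `macros` and `rt` features enabled:

```rust
// Illustrative only: mirrors the #[test] -> #[tokio::test] conversion above.
async fn init_helper() -> u32 {
    // Stand-in for an async setup helper such as init_responder().
    42
}

#[tokio::test]
async fn test_uses_async_helper() {
    // The helper now has to be awaited, hence the async test and the tokio runtime.
    assert_eq!(init_helper().await, 42);
}
```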