diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index 674be81d823..97a74871ea4 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -195,7 +195,7 @@ struct Peer<'a> { peers_connected: &'a RefCell<[bool; 256]>, } impl<'a> SocketDescriptor for Peer<'a> { - fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize { + fn send_data(&mut self, data: &[u8], _continue_read: bool) -> usize { data.len() } fn disconnect_socket(&mut self) { @@ -695,7 +695,7 @@ pub fn do_test(mut data: &[u8], logger: &Arc) { } let mut peer = Peer { id: peer_id, peers_connected: &peers }; match loss_detector.handler.read_event(&mut peer, get_slice!(get_slice!(1)[0])) { - Ok(res) => assert!(!res), + Ok(()) => {}, Err(_) => { peers.borrow_mut()[peer_id as usize] = false; }, diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 2b8bfb0c1a2..47a731fa30e 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -300,7 +300,7 @@ where /// Updates scorer based on event and returns whether an update occurred so we can decide whether /// to persist. -fn update_scorer<'a, S: Deref + Send + Sync, SC: 'a + WriteableScore<'a>>( +fn update_scorer<'a, S: Deref, SC: 'a + WriteableScore<'a>>( scorer: &'a S, event: &Event, duration_since_epoch: Duration, ) -> bool { match event { @@ -774,7 +774,7 @@ use futures_util::{dummy_waker, Joiner, OptionalSelector, Selector, SelectorOutp /// # #[derive(Eq, PartialEq, Clone, Hash)] /// # struct SocketDescriptor {} /// # impl lightning::ln::peer_handler::SocketDescriptor for SocketDescriptor { -/// # fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 } +/// # fn send_data(&mut self, _data: &[u8], _continue_read: bool) -> usize { 0 } /// # fn disconnect_socket(&mut self) {} /// # } /// # type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor, Arc, Arc, Arc, Arc, Arc>; @@ -887,10 +887,8 @@ pub async fn process_events_async< P: Deref, EventHandlerFuture: core::future::Future>, EventHandler: Fn(Event) -> EventHandlerFuture, - ES: Deref + Send, - M: Deref::Signer, CF, T, F, L, P, ES>> - + Send - + Sync, + ES: Deref, + M: Deref::Signer, CF, T, F, L, P, ES>>, CM: Deref, OM: Deref, PGS: Deref>, @@ -901,7 +899,7 @@ pub async fn process_events_async< O: Deref, K: Deref, OS: Deref>, - S: Deref + Send + Sync, + S: Deref, SC: for<'b> WriteableScore<'b>, SleepFuture: core::future::Future + core::marker::Unpin, Sleeper: Fn(Duration) -> SleepFuture, @@ -1356,15 +1354,13 @@ pub async fn process_events_async_with_kv_store_sync< T: Deref, F: Deref, G: Deref>, - L: Deref + Send + Sync, + L: Deref, P: Deref, EventHandlerFuture: core::future::Future>, EventHandler: Fn(Event) -> EventHandlerFuture, - ES: Deref + Send, - M: Deref::Signer, CF, T, F, L, P, ES>> - + Send - + Sync, - CM: Deref + Send + Sync, + ES: Deref, + M: Deref::Signer, CF, T, F, L, P, ES>>, + CM: Deref, OM: Deref, PGS: Deref>, RGS: Deref>, @@ -1374,7 +1370,7 @@ pub async fn process_events_async_with_kv_store_sync< O: Deref, K: Deref, OS: Deref>, - S: Deref + Send + Sync, + S: Deref, SC: for<'b> WriteableScore<'b>, SleepFuture: core::future::Future + core::marker::Unpin, Sleeper: Fn(Duration) -> SleepFuture, @@ -1878,7 +1874,7 @@ mod tests { #[derive(Clone, Hash, PartialEq, Eq)] struct TestDescriptor {} impl SocketDescriptor for TestDescriptor { - fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { + fn send_data(&mut self, _data: &[u8], _continue_read: bool) -> usize { 0 } 
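The hunks above reflect the updated peer-handling contract: `PeerManager::read_event` now returns `Ok(())` rather than a pause-read flag, and the second argument to `SocketDescriptor::send_data` is named `continue_read` (the lightning-net-tokio change below sets `read_paused = !continue_read` accordingly, so each call tells the descriptor whether to keep reading). A minimal sketch of a descriptor under the new signature, using only the trait methods shown in this diff; the always-accept behaviour is illustrative, not part of the PR:

use lightning::ln::peer_handler::SocketDescriptor;

#[derive(Clone, Hash, PartialEq, Eq)]
struct NoopDescriptor {
    id: u64,
}

impl SocketDescriptor for NoopDescriptor {
    fn send_data(&mut self, data: &[u8], _continue_read: bool) -> usize {
        // Pretend the full buffer was written. A real implementation would write to its
        // socket, return how many bytes it actually queued, and pause or resume its read
        // loop based on `continue_read` rather than on a `read_event` return value.
        data.len()
    }
    fn disconnect_socket(&mut self) {}
}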
diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 2ec69de3f5d..068f77a84bb 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -243,13 +243,8 @@ impl Connection { Ok(len) => { let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]); - let mut us_lock = us.lock().unwrap(); match read_res { - Ok(pause_read) => { - if pause_read { - us_lock.read_paused = true; - } - }, + Ok(()) => {}, Err(_) => break Disconnect::CloseConnection, } }, @@ -533,7 +528,7 @@ impl SocketDescriptor { } } impl peer_handler::SocketDescriptor for SocketDescriptor { - fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize { + fn send_data(&mut self, data: &[u8], continue_read: bool) -> usize { // To send data, we take a lock on our Connection to access the TcpStream, writing to it if // there's room in the kernel buffer, or otherwise create a new Waker with a // SocketDescriptor in it which can wake up the write_avail Sender, waking up the @@ -544,13 +539,16 @@ impl peer_handler::SocketDescriptor for SocketDescriptor { return 0; } - if resume_read && us.read_paused { + let read_was_paused = us.read_paused; + us.read_paused = !continue_read; + + if continue_read && read_was_paused { // The schedule_read future may go to lock up but end up getting woken up by there // being more room in the write buffer, dropping the other end of this Sender // before we get here, so we ignore any failures to wake it up. - us.read_paused = false; let _ = us.read_waker.try_send(()); } + if data.is_empty() { return 0; } @@ -576,16 +574,7 @@ impl peer_handler::SocketDescriptor for SocketDescriptor { } }, task::Poll::Ready(Err(_)) => return written_len, - task::Poll::Pending => { - // We're queued up for a write event now, but we need to make sure we also - // pause read given we're now waiting on the remote end to ACK (and in - // accordance with the send_data() docs). - us.read_paused = true; - // Further, to avoid any current pending read causing a `read_event` call, wake - // up the read_waker and restart its loop. - let _ = us.read_waker.try_send(()); - return written_len; - }, + task::Poll::Pending => return written_len, } } } diff --git a/lightning-tests/Cargo.toml b/lightning-tests/Cargo.toml index 23c81fae4a3..36b03a2756a 100644 --- a/lightning-tests/Cargo.toml +++ b/lightning-tests/Cargo.toml @@ -14,7 +14,7 @@ lightning-types = { path = "../lightning-types", features = ["_test_utils"] } lightning-invoice = { path = "../lightning-invoice", default-features = false } lightning-macros = { path = "../lightning-macros" } lightning = { path = "../lightning", features = ["_test_utils"] } -lightning_0_1 = { package = "lightning", version = "0.1.1", features = ["_test_utils"] } +lightning_0_1 = { package = "lightning", version = "0.1.7", features = ["_test_utils"] } lightning_0_0_125 = { package = "lightning", version = "0.0.125", features = ["_test_utils"] } bitcoin = { version = "0.32.2", default-features = false } diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs index 0b3f5dff427..d1153b7a8fa 100644 --- a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -10,9 +10,15 @@ //! Tests which test upgrading from previous versions of LDK or downgrading to previous versions of //! LDK. 
+use lightning_0_1::commitment_signed_dance as commitment_signed_dance_0_1; use lightning_0_1::events::ClosureReason as ClosureReason_0_1; +use lightning_0_1::expect_pending_htlcs_forwardable_ignore as expect_pending_htlcs_forwardable_ignore_0_1; use lightning_0_1::get_monitor as get_monitor_0_1; +use lightning_0_1::ln::channelmanager::PaymentId as PaymentId_0_1; +use lightning_0_1::ln::channelmanager::RecipientOnionFields as RecipientOnionFields_0_1; use lightning_0_1::ln::functional_test_utils as lightning_0_1_utils; +use lightning_0_1::ln::msgs::ChannelMessageHandler as _; +use lightning_0_1::routing::router as router_0_1; use lightning_0_1::util::ser::Writeable as _; use lightning_0_0_125::chain::ChannelMonitorUpdateStatus as ChannelMonitorUpdateStatus_0_0_125; @@ -29,16 +35,23 @@ use lightning_0_0_125::ln::msgs::ChannelMessageHandler as _; use lightning_0_0_125::routing::router as router_0_0_125; use lightning_0_0_125::util::ser::Writeable as _; -use lightning::chain::channelmonitor::ANTI_REORG_DELAY; -use lightning::events::{ClosureReason, Event}; +use lightning::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER}; +use lightning::events::bump_transaction::sync::WalletSourceSync; +use lightning::events::{ClosureReason, Event, HTLCHandlingFailureType}; use lightning::ln::functional_test_utils::*; +use lightning::ln::funding::SpliceContribution; +use lightning::ln::msgs::BaseMessageHandler as _; +use lightning::ln::msgs::ChannelMessageHandler as _; +use lightning::ln::msgs::MessageSendEvent; +use lightning::ln::splicing_tests::*; +use lightning::ln::types::ChannelId; use lightning::sign::OutputSpender; -use lightning_types::payment::PaymentPreimage; +use lightning_types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; -use bitcoin::opcodes; use bitcoin::script::Builder; use bitcoin::secp256k1::Secp256k1; +use bitcoin::{opcodes, Amount, TxOut}; use std::sync::Arc; @@ -299,3 +312,188 @@ fn test_0_1_legacy_remote_key_derivation() { panic!("Wrong event"); } } + +fn do_test_0_1_htlc_forward_after_splice(fail_htlc: bool) { + // Test what happens if an HTLC set to be forwarded in 0.1 is forwarded after the inbound + // channel is spliced. In the initial splice code, this could have led to a dangling HTLC if + // the HTLC is failed as the backwards-failure would use the channel's original SCID which is + // no longer valid. + // In some later splice code, this also failed because the `KeysManager` would have tried to + // rotate the `to_remote` key, which we aren't able to do in the splicing protocol. 
+ let (node_a_ser, node_b_ser, node_c_ser, mon_a_1_ser, mon_b_1_ser, mon_b_2_ser, mon_c_1_ser); + let (node_a_id, node_b_id, node_c_id); + let (chan_id_bytes_a, chan_id_bytes_b); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + let (node_a_blocks, node_b_blocks, node_c_blocks); + + const EXTRA_BLOCKS_BEFORE_FAIL: u32 = 145; + + { + let chanmon_cfgs = lightning_0_1_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_1_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_0_1_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_0_1_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + node_c_id = nodes[2].node.get_our_node_id(); + let chan_id_a = lightning_0_1_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + chan_id_bytes_a = chan_id_a.0; + + let chan_id_b = lightning_0_1_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + chan_id_bytes_b = chan_id_b.0; + + // Ensure all nodes are at the same initial height. + let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_0_1_utils::connect_blocks(node, blocks_to_mine); + } + } + + let (preimage, hash, secret) = + lightning_0_1_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + let pay_params = router_0_1::PaymentParameters::from_node_id( + node_c_id, + lightning_0_1_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_1::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let mut route = lightning_0_1_utils::get_route(&nodes[0], &route_params).unwrap(); + route.paths[0].hops[1].cltv_expiry_delta = + EXTRA_BLOCKS_BEFORE_FAIL + HTLC_FAIL_BACK_BUFFER + 1; + if fail_htlc { + // Pay more than the channel's value (and probably not enough fee) + route.paths[0].hops[1].fee_msat = 50_000_000; + } + + let onion = RecipientOnionFields_0_1::secret_only(secret); + let id = PaymentId_0_1(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_1_utils::check_added_monitors(&nodes[0], 1); + let send_event = lightning_0_1_utils::SendEvent::from_node(&nodes[0]); + + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + commitment_signed_dance_0_1!(nodes[1], nodes[0], send_event.commitment_msg, false); + expect_pending_htlcs_forwardable_ignore_0_1!(nodes[1]); + + // We now have an HTLC pending in node B's forwarding queue with the original channel's + // SCID as the source. + // We now upgrade to 0.2 and splice before forwarding that HTLC... 
+ node_a_ser = nodes[0].node.encode(); + node_b_ser = nodes[1].node.encode(); + node_c_ser = nodes[2].node.encode(); + mon_a_1_ser = get_monitor_0_1!(nodes[0], chan_id_a).encode(); + mon_b_1_ser = get_monitor_0_1!(nodes[1], chan_id_a).encode(); + mon_b_2_ser = get_monitor_0_1!(nodes[1], chan_id_b).encode(); + mon_c_1_ser = get_monitor_0_1!(nodes[2], chan_id_b).encode(); + + node_a_blocks = Arc::clone(&nodes[0].blocks); + node_b_blocks = Arc::clone(&nodes[1].blocks); + node_c_blocks = Arc::clone(&nodes[2].blocks); + } + + // Create a dummy node to reload over with the 0.1 state + let mut chanmon_cfgs = create_chanmon_cfgs(3); + + // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + chanmon_cfgs[0].tx_broadcaster.blocks = node_a_blocks; + chanmon_cfgs[1].tx_broadcaster.blocks = node_b_blocks; + chanmon_cfgs[2].tx_broadcaster.blocks = node_c_blocks; + + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let config = test_default_channel_config(); + let a_mons = &[&mon_a_1_ser[..]]; + reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; + reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + let c_mons = &[&mon_c_1_ser[..]]; + reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + + reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + reconnect_nodes(reconnect_b_c_args); + + let contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }], + }; + let splice_tx = splice_channel(&nodes[0], &nodes[1], ChannelId(chan_id_bytes_a), contribution); + for node in nodes.iter() { + mine_transaction(node, &splice_tx); + connect_blocks(node, ANTI_REORG_DELAY - 1); + } + + let splice_locked = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceLocked, node_b_id); + lock_splice(&nodes[0], &nodes[1], &splice_locked, false); + + for node in nodes.iter() { + connect_blocks(node, EXTRA_BLOCKS_BEFORE_FAIL - ANTI_REORG_DELAY); + } + + // Now release the HTLC to be failed back to node A + nodes[1].node.process_pending_htlc_forwards(); + + let pay_secret = PaymentSecret(payment_secret_bytes); + let pay_hash = PaymentHash(payment_hash_bytes); + let pay_preimage = PaymentPreimage(payment_preimage_bytes); + + if fail_htlc { + let failure = HTLCHandlingFailureType::Forward { + node_id: Some(node_c_id), + channel_id: ChannelId(chan_id_bytes_b), + }; + expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[failure]); + check_added_monitors(&nodes[1], 1); + + let updates = get_htlc_update_msgs(&nodes[1], &node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); + 
commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); + let conditions = PaymentFailedConditions::new(); + expect_payment_failed_conditions(&nodes[0], pay_hash, false, conditions); + } else { + check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + commitment_signed_dance!(nodes[2], nodes[1], forward_event.commitment_msg, false); + + expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); + } +} + +#[test] +fn test_0_1_htlc_forward_after_splice() { + do_test_0_1_htlc_forward_after_splice(true); + do_test_0_1_htlc_forward_after_splice(false); +} diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index 1f945a5c5d1..5dc46d5a055 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -36,9 +36,12 @@ pub trait BroadcasterInterface { /// In some cases LDK may attempt to broadcast a transaction which double-spends another /// and this isn't a bug and can be safely ignored. /// - /// If more than one transaction is given, these transactions should be considered to be a - /// package and broadcast together. Some of the transactions may or may not depend on each other, - /// be sure to manage both cases correctly. + /// If more than one transaction is given, these transactions MUST consist of a + /// single child and its parents, and MUST be broadcast together as a package + /// (see the [`submitpackage`](https://bitcoincore.org/en/doc/30.0.0/rpc/rawtransactions/submitpackage) + /// Bitcoin Core RPC). + /// + /// Implementations MUST NOT assume any topological order on the transactions. /// /// Bitcoin transaction packages are defined in BIP 331 and here: /// diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index c6143af985b..ab695ee3530 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -327,7 +327,7 @@ pub const ARCHIVAL_DELAY_BLOCKS: u32 = 4032; /// (2) is the same, but with an additional buffer to avoid accepting an HTLC which is immediately /// in a race condition between the user connecting a block (which would fail it) and the user /// providing us the preimage (which would claim it). -pub(crate) const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS; +pub const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS; // Deprecated, use [`HolderCommitment`] or [`HolderCommitmentTransaction`]. #[derive(Clone, PartialEq, Eq)] @@ -1202,6 +1202,19 @@ pub(crate) struct ChannelMonitorImpl { funding: FundingScope, pending_funding: Vec, + /// True if this channel was configured for manual funding broadcasts. Monitors written by + /// versions prior to LDK 0.2 load with `false` until a new update persists it. + is_manual_broadcast: bool, + /// True once we've observed the channel's funding transaction (or any pending splice funding + /// transaction) on-chain. Monitors written prior to LDK 0.2 lack this field, so it is read as + /// `true` on upgrade so that holder broadcasts aren't gated unexpectedly. + /// In manual-broadcast channels we also use this to trigger deferred holder + /// broadcasts once the funding transaction finally appears on-chain. + /// + /// Note: This tracks whether the funding transaction was ever seen on-chain, not whether it is currently confirmed.
It is never reset, even if the funding transaction is unconfirmed due + /// to a reorg. + funding_seen_onchain: bool, + latest_update_id: u64, commitment_transaction_number_obscure_factor: u64, @@ -1740,6 +1753,8 @@ pub(crate) fn write_chanmon_internal( (32, channel_monitor.pending_funding, optional_vec), (33, channel_monitor.htlcs_resolved_to_user, required), (34, channel_monitor.alternative_funding_confirmed, option), + (35, channel_monitor.is_manual_broadcast, required), + (37, channel_monitor.funding_seen_onchain, required), }); Ok(()) @@ -1868,6 +1883,7 @@ impl ChannelMonitor { commitment_transaction_number_obscure_factor: u64, initial_holder_commitment_tx: HolderCommitmentTransaction, best_block: BestBlock, counterparty_node_id: PublicKey, channel_id: ChannelId, + is_manual_broadcast: bool, ) -> ChannelMonitor { assert!(commitment_transaction_number_obscure_factor <= (1 << 48)); @@ -1914,6 +1930,9 @@ impl ChannelMonitor { }, pending_funding: vec![], + is_manual_broadcast, + funding_seen_onchain: false, + latest_update_id: 0, commitment_transaction_number_obscure_factor, @@ -2327,19 +2346,33 @@ impl ChannelMonitor { /// close channel with their commitment transaction after a substantial amount of time. Best /// may be to contact the other node operator out-of-band to coordinate other options available /// to you. - #[rustfmt::skip] + /// + /// Note: For channels using manual funding broadcast (see + /// [`crate::ln::channelmanager::ChannelManager::funding_transaction_generated_manual_broadcast`]), + /// automatic broadcasts are suppressed until the funding transaction has been observed on-chain. + /// Calling this method overrides that suppression and queues the latest holder commitment + /// transaction for broadcast even if the funding has not yet been seen on-chain. This may result + /// in unconfirmable transactions being broadcast or [`Event::BumpTransaction`] notifications for + /// transactions that cannot be confirmed until the funding transaction is visible. + /// + /// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction pub fn broadcast_latest_holder_commitment_txn( - &self, broadcaster: &B, fee_estimator: &F, logger: &L - ) - where + &self, broadcaster: &B, fee_estimator: &F, logger: &L, + ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, - L::Target: Logger + L::Target: Logger, { let mut inner = self.inner.lock().unwrap(); let fee_estimator = LowerBoundedFeeEstimator::new(&**fee_estimator); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); - inner.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &fee_estimator, &logger); + + inner.queue_latest_holder_commitment_txn_for_broadcast( + broadcaster, + &fee_estimator, + &logger, + false, + ); } /// Unsafe test-only version of `broadcast_latest_holder_commitment_txn` used by our test framework @@ -3900,6 +3933,7 @@ impl ChannelMonitorImpl { #[rustfmt::skip] fn generate_claimable_outpoints_and_watch_outputs( &mut self, generate_monitor_event_with_reason: Option, + require_funding_seen: bool, ) -> (Vec, Vec) { let funding = get_confirmed_funding_scope!(self); let holder_commitment_tx = &funding.current_holder_commitment_tx; @@ -3927,6 +3961,13 @@ impl ChannelMonitorImpl { // in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject // new channel updates. self.holder_tx_signed = true; + + // In manual-broadcast mode, if we have not yet observed the funding transaction on-chain, + // return empty vectors rather than triggering a broadcast. 
+ if require_funding_seen && self.is_manual_broadcast && !self.funding_seen_onchain { + return (Vec::new(), Vec::new()); + } + let mut watch_outputs = Vec::new(); // In CSV anchor channels, we can't broadcast our HTLC transactions while the commitment transaction is // unconfirmed. @@ -3956,8 +3997,17 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] + /// Note: For channels where the funding transaction is being manually managed (see + /// [`crate::ln::channelmanager::ChannelManager::funding_transaction_generated_manual_broadcast`]), + /// this method returns without queuing any transactions until the funding transaction has been + /// observed on-chain, unless `require_funding_seen` is `false`. This prevents attempting to + /// broadcast unconfirmable holder commitment transactions before the funding is visible. + /// See also [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]. + /// + /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]: crate::chain::channelmonitor::ChannelMonitor::broadcast_latest_holder_commitment_txn pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast( - &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithChannelMonitor + &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithChannelMonitor, + require_funding_seen: bool, ) where B::Target: BroadcasterInterface, @@ -3968,7 +4018,14 @@ impl ChannelMonitorImpl { broadcasted_latest_txn: Some(true), message: "ChannelMonitor-initiated commitment transaction broadcast".to_owned(), }; - let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs(Some(reason)); + let (claimable_outpoints, _) = + self.generate_claimable_outpoints_and_watch_outputs(Some(reason), require_funding_seen); + // In manual-broadcast mode, if `require_funding_seen` is true and we have not yet observed + // the funding transaction on-chain, do not queue any transactions. + if require_funding_seen && self.is_manual_broadcast && !self.funding_seen_onchain { + log_info!(logger, "Not broadcasting holder commitment for manual-broadcast channel before funding appears on-chain"); + return; + } let conf_target = self.closure_conf_target(); self.onchain_tx_handler.update_claims_view_from_requests( claimable_outpoints, self.best_block.height, self.best_block.height, broadcaster, @@ -4118,6 +4175,14 @@ impl ChannelMonitorImpl { self.funding.prev_holder_commitment_tx.clone(), ); + // It's possible that no commitment updates happened during the lifecycle of the pending + // splice's `FundingScope` that was promoted. If so, our `prev_holder_htlc_data` is + // now irrelevant, since there's no valid previous commitment that exists for the current + // funding transaction that could be broadcast. + if self.funding.prev_holder_commitment_tx.is_none() { + self.prev_holder_htlc_data.take(); + } + let no_further_updates_allowed = self.no_further_updates_allowed(); // The swap above places the previous `FundingScope` into `pending_funding`. 
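The `require_funding_seen` checks added above (and at the further call sites below) all reduce to one predicate over the new monitor fields. A condensed sketch of that rule, not part of the PR, reusing the field and parameter names from this diff:

/// Whether a holder-commitment broadcast may be queued right now. `require_funding_seen` is
/// `false` only for the explicit `broadcast_latest_holder_commitment_txn` override above, which
/// is allowed to queue a broadcast even before a manual-broadcast funding tx appears on-chain.
fn may_queue_holder_commitment(
    require_funding_seen: bool, is_manual_broadcast: bool, funding_seen_onchain: bool,
) -> bool {
    !(require_funding_seen && is_manual_broadcast && !funding_seen_onchain)
}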
@@ -4283,7 +4348,7 @@ impl ChannelMonitorImpl { log_trace!(logger, "Avoiding commitment broadcast, already detected confirmed spend onchain"); continue; } - self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger); + self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger, true); } else if !self.holder_tx_signed { log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast"); log_error!(logger, " in channel monitor for channel {}!", &self.channel_id()); @@ -5302,7 +5367,21 @@ impl ChannelMonitorImpl { F::Target: FeeEstimator, L::Target: Logger, { + let funding_seen_before = self.funding_seen_onchain; let txn_matched = self.filter_block(txdata); + + if !self.funding_seen_onchain { + for &(_, tx) in txdata.iter() { + let txid = tx.compute_txid(); + if txid == self.funding.funding_txid() || + self.pending_funding.iter().any(|f| f.funding_txid() == txid) + { + self.funding_seen_onchain = true; + break; + } + } + } + for tx in &txn_matched { let mut output_val = Amount::ZERO; for out in tx.output.iter() { @@ -5323,6 +5402,11 @@ impl ChannelMonitorImpl { let mut watch_outputs = Vec::new(); let mut claimable_outpoints = Vec::new(); + + if self.is_manual_broadcast && !funding_seen_before && self.funding_seen_onchain && self.holder_tx_signed + { + should_broadcast_commitment = true; + } 'tx_iter: for tx in &txn_matched { let txid = tx.compute_txid(); log_trace!(logger, "Transaction {} confirmed in block {}", txid , block_hash); @@ -5538,7 +5622,7 @@ impl ChannelMonitorImpl { if should_broadcast_commitment { let (mut claimables, mut outputs) = - self.generate_claimable_outpoints_and_watch_outputs(None); + self.generate_claimable_outpoints_and_watch_outputs(None, false); claimable_outpoints.append(&mut claimables); watch_outputs.append(&mut outputs); } @@ -5574,13 +5658,20 @@ impl ChannelMonitorImpl { log_trace!(logger, "Processing {} matched transactions for block at height {}.", txn_matched.len(), conf_height); debug_assert!(self.best_block.height >= conf_height); - let should_broadcast = self.should_broadcast_holder_commitment_txn(logger); - if let Some(payment_hash) = should_broadcast { - let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }; - let (mut new_outpoints, mut new_outputs) = - self.generate_claimable_outpoints_and_watch_outputs(Some(reason)); - claimable_outpoints.append(&mut new_outpoints); - watch_outputs.append(&mut new_outputs); + // Only generate claims if we haven't already done so (e.g., in transactions_confirmed). + if claimable_outpoints.is_empty() { + let should_broadcast = self.should_broadcast_holder_commitment_txn(logger); + if let Some(payment_hash) = should_broadcast { + let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }; + let (mut new_outpoints, mut new_outputs) = + self.generate_claimable_outpoints_and_watch_outputs(Some(reason), false); + if !self.is_manual_broadcast || self.funding_seen_onchain { + claimable_outpoints.append(&mut new_outpoints); + watch_outputs.append(&mut new_outputs); + } else { + log_info!(logger, "Not broadcasting holder commitment for manual-broadcast channel before funding appears on-chain"); + } + } } // Find which on-chain events have reached their confirmation threshold. 
@@ -5818,7 +5909,7 @@ impl ChannelMonitorImpl { // Only attempt to broadcast the new commitment after the `block_disconnected` call above so that // it doesn't get removed from the set of pending claims. if should_broadcast_commitment { - self.queue_latest_holder_commitment_txn_for_broadcast(&broadcaster, &bounded_fee_estimator, logger); + self.queue_latest_holder_commitment_txn_for_broadcast(&broadcaster, &bounded_fee_estimator, logger, true); } self.best_block = fork_point; @@ -5879,7 +5970,7 @@ impl ChannelMonitorImpl { // Only attempt to broadcast the new commitment after the `transaction_unconfirmed` call above so // that it doesn't get removed from the set of pending claims. if should_broadcast_commitment { - self.queue_latest_holder_commitment_txn_for_broadcast(&broadcaster, fee_estimator, logger); + self.queue_latest_holder_commitment_txn_for_broadcast(&broadcaster, fee_estimator, logger, true); } } @@ -6567,6 +6658,8 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let mut channel_parameters = None; let mut pending_funding = None; let mut alternative_funding_confirmed = None; + let mut is_manual_broadcast = RequiredWrapper(None); + let mut funding_seen_onchain = RequiredWrapper(None); read_tlv_fields!(reader, { (1, funding_spend_confirmed, option), (3, htlcs_resolved_on_chain, optional_vec), @@ -6587,6 +6680,8 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP (32, pending_funding, optional_vec), (33, htlcs_resolved_to_user, option), (34, alternative_funding_confirmed, option), + (35, is_manual_broadcast, (default_value, false)), + (37, funding_seen_onchain, (default_value, true)), }); // Note that `payment_preimages_with_info` was added (and is always written) in LDK 0.1, so // we can use it to determine if this monitor was last written by LDK 0.1 or later. @@ -6700,6 +6795,10 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP prev_holder_commitment_tx, }, pending_funding: pending_funding.unwrap_or(vec![]), + is_manual_broadcast: is_manual_broadcast.0.unwrap(), + // Older monitors prior to LDK 0.2 assume this is `true` when absent + // during upgrade so holder broadcasts aren't gated unexpectedly. 
+ funding_seen_onchain: funding_seen_onchain.0.unwrap(), latest_update_id, commitment_transaction_number_obscure_factor, @@ -7016,7 +7115,7 @@ mod tests { let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::MAX }; let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint); let channel_parameters = ChannelTransactionParameters { - holder_pubkeys: keys.new_pubkeys(None, &secp_ctx), + holder_pubkeys: keys.pubkeys(&secp_ctx), holder_selected_contest_delay: 66, is_outbound_from_holder: true, counterparty_parameters: Some(CounterpartyChannelTransactionParameters { @@ -7036,7 +7135,7 @@ mod tests { let monitor = ChannelMonitor::new( Secp256k1::new(), keys, Some(shutdown_script.into_inner()), 0, &ScriptBuf::new(), &channel_parameters, true, 0, HolderCommitmentTransaction::dummy(0, funding_outpoint, Vec::new()), - best_block, dummy_key, channel_id, + best_block, dummy_key, channel_id, false, ); let nondust_htlcs = preimages_slice_to_htlcs!(preimages[0..10]); @@ -7279,7 +7378,7 @@ mod tests { let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::MAX }; let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint); let channel_parameters = ChannelTransactionParameters { - holder_pubkeys: keys.new_pubkeys(None, &secp_ctx), + holder_pubkeys: keys.pubkeys(&secp_ctx), holder_selected_contest_delay: 66, is_outbound_from_holder: true, counterparty_parameters: Some(CounterpartyChannelTransactionParameters { @@ -7297,7 +7396,7 @@ mod tests { let monitor = ChannelMonitor::new( Secp256k1::new(), keys, Some(shutdown_script.into_inner()), 0, &ScriptBuf::new(), &channel_parameters, true, 0, HolderCommitmentTransaction::dummy(0, funding_outpoint, Vec::new()), - best_block, dummy_key, channel_id, + best_block, dummy_key, channel_id, false, ); let chan_id = monitor.inner.lock().unwrap().channel_id(); diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index 2a6d3d23e80..b4cc6a302ae 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -12,6 +12,8 @@ use bitcoin::block::{Block, Header}; use bitcoin::constants::genesis_block; use bitcoin::hash_types::{BlockHash, Txid}; +use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::{Hash, HashEngine}; use bitcoin::network::Network; use bitcoin::script::{Script, ScriptBuf}; use bitcoin::secp256k1::PublicKey; @@ -21,6 +23,7 @@ use crate::chain::transaction::{OutPoint, TransactionData}; use crate::impl_writeable_tlv_based; use crate::ln::types::ChannelId; use crate::sign::ecdsa::EcdsaChannelSigner; +use crate::sign::HTLCDescriptor; #[allow(unused_imports)] use crate::prelude::*; @@ -442,3 +445,20 @@ where /// This is not exported to bindings users as we just use [u8; 32] directly. 
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub struct ClaimId(pub [u8; 32]); + +impl ClaimId { + pub(crate) fn from_htlcs(htlcs: &[HTLCDescriptor]) -> ClaimId { + let mut engine = Sha256::engine(); + for htlc in htlcs { + engine.input(&htlc.commitment_txid.to_byte_array()); + engine.input(&htlc.htlc.transaction_output_index.unwrap().to_be_bytes()); + } + ClaimId(Sha256::from_engine(engine).to_byte_array()) + } + pub(crate) fn step_with_bytes(&self, bytes: &[u8]) -> ClaimId { + let mut engine = Sha256::engine(); + engine.input(&self.0); + engine.input(bytes); + ClaimId(Sha256::from_engine(engine).to_byte_array()) + } +} diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 48bd40a8347..fb65aa0f157 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -14,8 +14,7 @@ use bitcoin::amount::Amount; use bitcoin::hash_types::{BlockHash, Txid}; -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::{Hash, HashEngine}; +use bitcoin::hashes::Hash; use bitcoin::locktime::absolute::LockTime; use bitcoin::script::{Script, ScriptBuf}; use bitcoin::secp256k1; @@ -882,12 +881,7 @@ impl OnchainTxHandler { // claim, which will always be unique per request. Once a claim ID // is generated, it is assigned and remains unchanged, even if the // underlying set of HTLCs changes. - let mut engine = Sha256::engine(); - for htlc in htlcs { - engine.input(&htlc.commitment_txid.to_byte_array()); - engine.input(&htlc.htlc.transaction_output_index.unwrap().to_be_bytes()); - } - ClaimId(Sha256::from_engine(engine).to_byte_array()) + ClaimId::from_htlcs(htlcs) }, }; debug_assert!(self.pending_claim_requests.get(&claim_id).is_none()); @@ -1339,7 +1333,7 @@ mod tests { // Use non-anchor channels so that HTLC-Timeouts are broadcast immediately instead of sent // to the user for external funding. 
let chan_params = ChannelTransactionParameters { - holder_pubkeys: signer.new_pubkeys(None, &secp_ctx), + holder_pubkeys: signer.pubkeys(&secp_ctx), holder_selected_contest_delay: 66, is_outbound_from_holder: true, counterparty_parameters: Some(CounterpartyChannelTransactionParameters { diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 6ba46a4de6b..872087899df 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -24,9 +24,10 @@ use crate::io_extras::sink; use crate::ln::chan_utils; use crate::ln::chan_utils::{ shared_anchor_script_pubkey, HTLCOutputInCommitment, ANCHOR_INPUT_WITNESS_WEIGHT, + BASE_INPUT_WEIGHT, BASE_TX_SIZE, EMPTY_SCRIPT_SIG_WEIGHT, EMPTY_WITNESS_WEIGHT, HTLC_SUCCESS_INPUT_KEYED_ANCHOR_WITNESS_WEIGHT, HTLC_SUCCESS_INPUT_P2A_ANCHOR_WITNESS_WEIGHT, HTLC_TIMEOUT_INPUT_KEYED_ANCHOR_WITNESS_WEIGHT, HTLC_TIMEOUT_INPUT_P2A_ANCHOR_WITNESS_WEIGHT, - P2A_ANCHOR_INPUT_WITNESS_WEIGHT, + P2WSH_TXOUT_WEIGHT, SEGWIT_MARKER_FLAG_WEIGHT, TRUC_CHILD_MAX_WEIGHT, TRUC_MAX_WEIGHT, }; use crate::ln::types::ChannelId; use crate::prelude::*; @@ -42,6 +43,7 @@ use bitcoin::amount::Amount; use bitcoin::consensus::Encodable; use bitcoin::constants::WITNESS_SCALE_FACTOR; use bitcoin::locktime::absolute::LockTime; +use bitcoin::policy::MAX_STANDARD_TX_WEIGHT; use bitcoin::secp256k1; use bitcoin::secp256k1::ecdsa::Signature; use bitcoin::secp256k1::{PublicKey, Secp256k1}; @@ -50,13 +52,6 @@ use bitcoin::{ OutPoint, Psbt, PubkeyHash, ScriptBuf, Sequence, Transaction, TxIn, TxOut, WPubkeyHash, Witness, }; -pub(crate) const EMPTY_SCRIPT_SIG_WEIGHT: u64 = - 1 /* empty script_sig */ * WITNESS_SCALE_FACTOR as u64; - -const BASE_INPUT_SIZE: u64 = 32 /* txid */ + 4 /* vout */ + 4 /* sequence */; - -pub(crate) const BASE_INPUT_WEIGHT: u64 = BASE_INPUT_SIZE * WITNESS_SCALE_FACTOR as u64; - /// A descriptor used to sign for a commitment transaction's anchor output. #[derive(Clone, Debug, PartialEq, Eq)] pub struct AnchorDescriptor { @@ -132,12 +127,21 @@ pub enum BumpTransactionEvent { /// feerate of the commitment transaction is already sufficient, in which case the child anchor /// transaction is not needed and only the commitment transaction should be broadcast. /// + /// In zero-fee commitment channels, the commitment transaction and the anchor transaction + /// form a 1-parent-1-child package that conforms to BIP 431 (known as TRUC transactions). + /// The anchor transaction must be version 3, and its size must be no more than 1000 vB. + /// The anchor transaction is usually needed to bump the fee of the commitment transaction + /// as the commitment transaction is not explicitly assigned any fees. In those cases the + /// anchor transaction must be broadcast together with the commitment transaction as a + /// `child-with-parents` package (usually using the Bitcoin Core `submitpackage` RPC). + /// /// The consumer should be able to sign for any of the additional inputs included within the - /// child anchor transaction. To sign its anchor input, an [`EcdsaChannelSigner`] should be - /// re-derived through [`SignerProvider::derive_channel_signer`]. The anchor input signature + /// child anchor transaction. To sign its keyed-anchor input, an [`EcdsaChannelSigner`] should + /// be re-derived through [`SignerProvider::derive_channel_signer`]. 
The anchor input signature /// can be computed with [`EcdsaChannelSigner::sign_holder_keyed_anchor_input`], which can then /// be provided to [`build_keyed_anchor_input_witness`] along with the `funding_pubkey` to - /// obtain the full witness required to spend. + /// obtain the full witness required to spend. Note that no signature or witness data is + /// required to spend the keyless anchor used in zero-fee commitment channels. /// /// It is possible to receive more than one instance of this event if a valid child anchor /// transaction is never broadcast or is but not with a sufficient fee to be mined. Care should @@ -188,14 +192,25 @@ pub enum BumpTransactionEvent { pending_htlcs: Vec, }, /// Indicates that a channel featuring anchor outputs has unilaterally closed on-chain by a - /// holder commitment transaction and its HTLC(s) need to be resolved on-chain. With the - /// zero-HTLC-transaction-fee variant of anchor outputs, the pre-signed HTLC - /// transactions have a zero fee, thus requiring additional inputs and/or outputs to be attached - /// for a timely confirmation within the chain. These additional inputs and/or outputs must be - /// appended to the resulting HTLC transaction to meet the target feerate. Failure to meet the - /// target feerate decreases the confirmation odds of the transaction, possibly resulting in a - /// loss of funds. Once the transaction meets the target feerate, it must be signed for and - /// broadcast by the consumer of the event. + /// holder commitment transaction and its HTLC(s) need to be resolved on-chain. In all such + /// channels, the pre-signed HTLC transactions have a zero fee, thus requiring additional + /// inputs and/or outputs to be attached for a timely confirmation within the chain. These + /// additional inputs and/or outputs must be appended to the resulting HTLC transaction to + /// meet the target feerate. Failure to meet the target feerate decreases the confirmation + /// odds of the transaction, possibly resulting in a loss of funds. Once the transaction + /// meets the target feerate, it must be signed for and broadcast by the consumer of the + /// event. + /// + /// In zero-fee commitment channels, you must set the version of the HTLC claim transaction + /// to version 3 as the counterparty's signature commits to the version of + /// the transaction. You must also make sure that this claim transaction does not grow + /// bigger than 10,000 vB, the maximum vsize of any TRUC transaction as specified in + /// BIP 431. It is possible for [`htlc_descriptors`] to be long enough such + /// that claiming all the HTLCs therein in a single transaction would exceed this limit. + /// In this case, you must claim all the HTLCs in [`htlc_descriptors`] using multiple + /// transactions. Finally, note that while HTLCs in zero-fee commitment channels no + /// longer have the 1 CSV lock, LDK will still emit this event only after the commitment + /// transaction has 1 confirmation. /// /// The consumer should be able to sign for any of the non-HTLC inputs added to the resulting /// HTLC transaction. 
To sign HTLC inputs, an [`EcdsaChannelSigner`] should be re-derived @@ -216,6 +231,7 @@ pub enum BumpTransactionEvent { /// /// [`EcdsaChannelSigner`]: crate::sign::ecdsa::EcdsaChannelSigner /// [`EcdsaChannelSigner::sign_holder_htlc_transaction`]: crate::sign::ecdsa::EcdsaChannelSigner::sign_holder_htlc_transaction + /// [`htlc_descriptors`]: `BumpTransactionEvent::HTLCResolution::htlc_descriptors` HTLCResolution { /// The `channel_id` of the channel which has been closed. channel_id: ChannelId, @@ -351,6 +367,12 @@ pub trait CoinSelectionSource { /// provided, in which case a zero-value empty OP_RETURN output can be used instead. /// 3. Enough inputs must be selected/contributed for the resulting transaction (including the /// inputs and outputs noted above) to meet `target_feerate_sat_per_1000_weight`. + /// 4. The final transaction must have a weight smaller than `max_tx_weight`; if this + /// constraint can't be met, return an `Err`. In the case of counterparty-signed HTLC + /// transactions, we will remove a chunk of HTLCs and try your algorithm again. As for + /// anchor transactions, we will try your coin selection again with the same input-output + /// set when you call [`ChannelMonitor::rebroadcast_pending_claims`], as anchor transactions + /// cannot be downsized. /// /// Implementations must take note that [`Input::satisfaction_weight`] only tracks the weight of /// the input's `script_sig` and `witness`. Some wallets, like Bitcoin Core's, may require @@ -364,16 +386,18 @@ pub trait CoinSelectionSource { /// other claims, implementations must be willing to double spend their UTXOs. The choice of /// which UTXOs to double spend is left to the implementation, but it must strive to keep the /// set of other claims being double spent to a minimum. + /// + /// [`ChannelMonitor::rebroadcast_pending_claims`]: crate::chain::channelmonitor::ChannelMonitor::rebroadcast_pending_claims fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], - target_feerate_sat_per_1000_weight: u32, - ) -> AsyncResult<'a, CoinSelection>; + target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, + ) -> AsyncResult<'a, CoinSelection, ()>; /// Signs and provides the full witness for all inputs within the transaction known to the /// trait (i.e., any provided via [`CoinSelectionSource::select_confirmed_utxos`]). /// /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the /// unsigned transaction and then sign it with your wallet. - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction>; + fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()>; } /// An alternative to [`CoinSelectionSource`] that can be implemented and used along [`Wallet`] to @@ -382,17 +406,17 @@ pub trait CoinSelectionSource { /// For a synchronous version of this trait, see [`sync::WalletSourceSync`]. pub trait WalletSource { /// Returns all UTXOs, with at least 1 confirmation each, that are available to spend. - fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec>; + fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec, ()>; /// Returns a script to use for change above dust resulting from a successful coin selection /// attempt. 
- fn get_change_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf>; + fn get_change_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()>; /// Signs and provides the full [`TxIn::script_sig`] and [`TxIn::witness`] for all inputs within /// the transaction known to the wallet (i.e., any provided via /// [`WalletSource::list_confirmed_utxos`]). /// /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the /// unsigned transaction and then sign it with your wallet. - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction>; + fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()>; } /// A wrapper over [`WalletSource`] that implements [`CoinSelection`] by preferring UTXOs that would @@ -436,7 +460,18 @@ where &self, utxos: &[Utxo], claim_id: ClaimId, force_conflicting_utxo_spend: bool, tolerate_high_network_feerates: bool, target_feerate_sat_per_1000_weight: u32, preexisting_tx_weight: u64, input_amount_sat: Amount, target_amount_sat: Amount, + max_tx_weight: u64, ) -> Result { + // P2WSH and P2TR outputs are both the heaviest-weight standard outputs at 34 bytes + let max_coin_selection_weight = max_tx_weight + .checked_sub(preexisting_tx_weight + P2WSH_TXOUT_WEIGHT) + .ok_or_else(|| { + log_debug!( + self.logger, + "max_tx_weight is too small to accommodate the preexisting tx weight plus a P2WSH/P2TR output" + ); + })?; + let mut selected_amount; let mut total_fees; let mut selected_utxos; @@ -476,31 +511,67 @@ where } }) .collect::>(); - eligible_utxos.sort_unstable_by_key(|(utxo, _)| utxo.output.value); + eligible_utxos.sort_unstable_by_key(|(utxo, fee_to_spend_utxo)| { + utxo.output.value - *fee_to_spend_utxo + }); selected_amount = input_amount_sat; total_fees = Amount::from_sat(fee_for_weight( target_feerate_sat_per_1000_weight, preexisting_tx_weight, )); - selected_utxos = Vec::new(); + selected_utxos = VecDeque::new(); + // Invariant: `selected_utxos_weight` is never greater than `max_coin_selection_weight` + let mut selected_utxos_weight = 0; for (utxo, fee_to_spend_utxo) in eligible_utxos { if selected_amount >= target_amount_sat + total_fees { break; } + // First skip any UTXOs with prohibitive satisfaction weights + if BASE_INPUT_WEIGHT + utxo.satisfaction_weight > max_coin_selection_weight { + continue; + } + // If adding this UTXO to `selected_utxos` would push us over the + // `max_coin_selection_weight`, remove UTXOs from the front to make room + // for this new UTXO. 
+ while selected_utxos_weight + BASE_INPUT_WEIGHT + utxo.satisfaction_weight + > max_coin_selection_weight + && !selected_utxos.is_empty() + { + let (smallest_value_after_spend_utxo, fee_to_spend_utxo): (Utxo, Amount) = + selected_utxos.pop_front().unwrap(); + selected_amount -= smallest_value_after_spend_utxo.output.value; + total_fees -= fee_to_spend_utxo; + selected_utxos_weight -= + BASE_INPUT_WEIGHT + smallest_value_after_spend_utxo.satisfaction_weight; + } selected_amount += utxo.output.value; total_fees += fee_to_spend_utxo; - selected_utxos.push(utxo.clone()); + selected_utxos_weight += BASE_INPUT_WEIGHT + utxo.satisfaction_weight; + selected_utxos.push_back((utxo.clone(), fee_to_spend_utxo)); } if selected_amount < target_amount_sat + total_fees { log_debug!( self.logger, - "Insufficient funds to meet target feerate {} sat/kW", - target_feerate_sat_per_1000_weight + "Insufficient funds to meet target feerate {} sat/kW while remaining under {} WU", + target_feerate_sat_per_1000_weight, + max_coin_selection_weight, ); return Err(()); } - for utxo in &selected_utxos { + // Once we've selected enough UTXOs to cover `target_amount_sat + total_fees`, + // we may be able to remove some small-value ones while still covering + // `target_amount_sat + total_fees`. + while !selected_utxos.is_empty() + && selected_amount - selected_utxos.front().unwrap().0.output.value + >= target_amount_sat + total_fees - selected_utxos.front().unwrap().1 + { + let (smallest_value_after_spend_utxo, fee_to_spend_utxo) = + selected_utxos.pop_front().unwrap(); + selected_amount -= smallest_value_after_spend_utxo.output.value; + total_fees -= fee_to_spend_utxo; + } + for (utxo, _) in &selected_utxos { locked_utxos.insert(utxo.outpoint, claim_id); } } @@ -521,7 +592,10 @@ where Some(TxOut { script_pubkey: change_script, value: change_output_amount }) }; - Ok(CoinSelection { confirmed_utxos: selected_utxos, change_output }) + Ok(CoinSelection { + confirmed_utxos: selected_utxos.into_iter().map(|(utxo, _)| utxo).collect(), + change_output, + }) } } @@ -533,12 +607,11 @@ where { fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], - target_feerate_sat_per_1000_weight: u32, - ) -> AsyncResult<'a, CoinSelection> { + target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, + ) -> AsyncResult<'a, CoinSelection, ()> { Box::pin(async move { let utxos = self.source.list_confirmed_utxos().await?; // TODO: Use fee estimation utils when we upgrade to bitcoin v0.30.0. 
- const BASE_TX_SIZE: u64 = 4 /* version */ + 1 /* input count */ + 1 /* output count */ + 4 /* locktime */; let total_output_size: u64 = must_pay_to .iter() .map( @@ -550,8 +623,9 @@ where let total_input_weight = (BASE_INPUT_WEIGHT * must_spend.len() as u64) + total_satisfaction_weight; - let preexisting_tx_weight = 2 /* segwit marker & flag */ + total_input_weight + - ((BASE_TX_SIZE + total_output_size) * WITNESS_SCALE_FACTOR as u64); + let preexisting_tx_weight = SEGWIT_MARKER_FLAG_WEIGHT + + total_input_weight + + ((BASE_TX_SIZE + total_output_size) * WITNESS_SCALE_FACTOR as u64); let input_amount_sat = must_spend.iter().map(|input| input.previous_utxo.value).sum(); let target_amount_sat = must_pay_to.iter().map(|output| output.value).sum(); @@ -574,6 +648,7 @@ where preexisting_tx_weight, input_amount_sat, target_amount_sat, + max_tx_weight, ) .await; if attempt.is_ok() { @@ -584,7 +659,7 @@ where }) } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction> { + fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { self.source.sign_psbt(psbt) } } @@ -676,7 +751,7 @@ where .transaction_parameters .channel_type_features; let anchor_input_witness_weight = if channel_type.supports_anchor_zero_fee_commitments() { - P2A_ANCHOR_INPUT_WITNESS_WEIGHT + EMPTY_WITNESS_WEIGHT } else { ANCHOR_INPUT_WITNESS_WEIGHT }; @@ -699,9 +774,10 @@ where // the anchor input. let mut anchor_utxo = anchor_descriptor.previous_utxo(); let commitment_tx_fee_sat = Amount::from_sat(commitment_tx_fee_sat); + let commitment_tx_weight = commitment_tx.weight().to_wu(); anchor_utxo.value += commitment_tx_fee_sat; let starting_package_and_fixed_input_satisfaction_weight = - commitment_tx.weight().to_wu() + anchor_input_witness_weight + EMPTY_SCRIPT_SIG_WEIGHT; + commitment_tx_weight + anchor_input_witness_weight + EMPTY_SCRIPT_SIG_WEIGHT; let mut package_and_fixed_input_satisfaction_weight = starting_package_and_fixed_input_satisfaction_weight; @@ -723,6 +799,14 @@ where must_spend, &[], package_target_feerate_sat_per_1000_weight, + if channel_type.supports_anchor_zero_fee_commitments() { + TRUC_CHILD_MAX_WEIGHT + } else { + MAX_STANDARD_TX_WEIGHT as u64 + } + // We added the commitment tx weight to the input satisfaction weight above, so + // increase the max_tx_weight by the same delta here. 
+ + commitment_tx_weight, ) .await?; @@ -833,6 +917,15 @@ where assert!(package_fee >= expected_package_fee); } + #[cfg(debug_assertions)] + if channel_type.supports_anchor_zero_fee_commitments() { + assert!(commitment_tx.weight().to_wu() < TRUC_MAX_WEIGHT); + assert!(anchor_tx.weight().to_wu() < TRUC_CHILD_MAX_WEIGHT); + } else { + assert!(commitment_tx.weight().to_wu() < MAX_STANDARD_TX_WEIGHT as u64); + assert!(anchor_tx.weight().to_wu() < MAX_STANDARD_TX_WEIGHT as u64); + } + log_info!( self.logger, "Broadcasting anchor transaction {} to bump channel close with txid {}", @@ -854,17 +947,6 @@ where .channel_derivation_parameters .transaction_parameters .channel_type_features; - let mut htlc_tx = Transaction { - version: if channel_type.supports_anchor_zero_fee_commitments() { - Version::non_standard(3) - } else { - Version::TWO - }, - lock_time: tx_lock_time, - input: vec![], - output: vec![], - }; - let mut must_spend = Vec::with_capacity(htlc_descriptors.len()); let (htlc_success_witness_weight, htlc_timeout_witness_weight) = if channel_type.supports_anchor_zero_fee_commitments() { ( @@ -879,123 +961,214 @@ where } else { panic!("channel type should be either zero-fee HTLCs, or zero-fee commitments"); }; - for htlc_descriptor in htlc_descriptors { - let htlc_input = htlc_descriptor.unsigned_tx_input(); - must_spend.push(Input { - outpoint: htlc_input.previous_output.clone(), - previous_utxo: htlc_descriptor.previous_utxo(&self.secp), - satisfaction_weight: EMPTY_SCRIPT_SIG_WEIGHT - + if htlc_descriptor.preimage.is_some() { - htlc_success_witness_weight - } else { - htlc_timeout_witness_weight - }, - }); - htlc_tx.input.push(htlc_input); - let htlc_output = htlc_descriptor.tx_output(&self.secp); - htlc_tx.output.push(htlc_output); - } - log_debug!( - self.logger, - "Performing coin selection for HTLC transaction targeting {} sat/kW", - target_feerate_sat_per_1000_weight - ); + let max_tx_weight = if channel_type.supports_anchor_zero_fee_commitments() { + // Cap the size of transactions claiming `HolderHTLCOutput` in 0FC channels. + // Otherwise, we could hit the max 10_000vB size limit on V3 transactions + // (BIP 431 rule 4). + TRUC_MAX_WEIGHT + } else { + // We should never hit this because HTLC-timeout transactions have a signed + // locktime, HTLC-success transactions do not, and we never aggregate + // packages with a signed locktime with packages that do not have a signed + // locktime. + // Hence in the worst case, we aggregate 483 success HTLC transactions, + // and 483 * 705 ~= 341_000, and 341_000 < 400_000. + MAX_STANDARD_TX_WEIGHT as u64 + }; + // A 1-input 1-output transaction, both p2wpkh is 438 WU. + // This is just an initial budget, we increase it further below in case the user can't satisfy it. 
+ const USER_COINS_WEIGHT_BUDGET: u64 = 1000; + + let mut broadcasted_htlcs = 0; + let mut batch_size = htlc_descriptors.len() - broadcasted_htlcs; + let mut utxo_id = claim_id; + + while broadcasted_htlcs < htlc_descriptors.len() { + let mut htlc_tx = Transaction { + version: if channel_type.supports_anchor_zero_fee_commitments() { + Version::non_standard(3) + } else { + Version::TWO + }, + lock_time: tx_lock_time, + input: vec![], + output: vec![], + }; + let mut must_spend = Vec::with_capacity(htlc_descriptors.len() - broadcasted_htlcs); + let mut htlc_weight_sum = 0; + for htlc_descriptor in + &htlc_descriptors[broadcasted_htlcs..broadcasted_htlcs + batch_size] + { + let input_output_weight = if htlc_descriptor.preimage.is_some() { + chan_utils::aggregated_htlc_success_input_output_pair_weight(channel_type) + } else { + chan_utils::aggregated_htlc_timeout_input_output_pair_weight(channel_type) + }; + if htlc_weight_sum + input_output_weight >= max_tx_weight - USER_COINS_WEIGHT_BUDGET + { + break; + } + htlc_weight_sum += input_output_weight; + let htlc_input = htlc_descriptor.unsigned_tx_input(); + must_spend.push(Input { + outpoint: htlc_input.previous_output.clone(), + previous_utxo: htlc_descriptor.previous_utxo(&self.secp), + satisfaction_weight: EMPTY_SCRIPT_SIG_WEIGHT + + if htlc_descriptor.preimage.is_some() { + htlc_success_witness_weight + } else { + htlc_timeout_witness_weight + }, + }); + htlc_tx.input.push(htlc_input); + let htlc_output = htlc_descriptor.tx_output(&self.secp); + htlc_tx.output.push(htlc_output); + } + batch_size = htlc_tx.input.len(); + let selected_htlcs = + &htlc_descriptors[broadcasted_htlcs..broadcasted_htlcs + batch_size]; - #[cfg(debug_assertions)] - let must_spend_satisfaction_weight = - must_spend.iter().map(|input| input.satisfaction_weight).sum::(); - #[cfg(debug_assertions)] - let must_spend_amount = - must_spend.iter().map(|input| input.previous_utxo.value.to_sat()).sum::(); + log_info!( + self.logger, + "Batch transaction assigned to UTXO id {} contains {} HTLCs: {}", + log_bytes!(utxo_id.0), + batch_size, + log_iter!(selected_htlcs.iter().map(|d| d.outpoint())) + ); - let coin_selection: CoinSelection = self - .utxo_source - .select_confirmed_utxos( - claim_id, - must_spend, - &htlc_tx.output, - target_feerate_sat_per_1000_weight, - ) - .await?; - - #[cfg(debug_assertions)] - let input_satisfaction_weight: u64 = - coin_selection.confirmed_utxos.iter().map(|utxo| utxo.satisfaction_weight).sum(); - #[cfg(debug_assertions)] - let total_satisfaction_weight = must_spend_satisfaction_weight + input_satisfaction_weight; - #[cfg(debug_assertions)] - let input_value: u64 = - coin_selection.confirmed_utxos.iter().map(|utxo| utxo.output.value.to_sat()).sum(); - #[cfg(debug_assertions)] - let total_input_amount = must_spend_amount + input_value; - - self.process_coin_selection(&mut htlc_tx, &coin_selection); - - // construct psbt - let mut htlc_psbt = Psbt::from_unsigned_tx(htlc_tx).unwrap(); - // add witness_utxo to htlc inputs - for (i, htlc_descriptor) in htlc_descriptors.iter().enumerate() { - debug_assert_eq!( - htlc_psbt.unsigned_tx.input[i].previous_output, - htlc_descriptor.outpoint() + log_debug!( + self.logger, + "Performing coin selection for HTLC transaction targeting {} sat/kW", + target_feerate_sat_per_1000_weight ); - htlc_psbt.inputs[i].witness_utxo = Some(htlc_descriptor.previous_utxo(&self.secp)); - } - // add witness_utxo to remaining inputs - for (idx, utxo) in coin_selection.confirmed_utxos.into_iter().enumerate() { - // offset to 
skip the htlc inputs - let index = idx + htlc_descriptors.len(); - debug_assert_eq!(htlc_psbt.unsigned_tx.input[index].previous_output, utxo.outpoint); - if utxo.output.script_pubkey.is_witness_program() { - htlc_psbt.inputs[index].witness_utxo = Some(utxo.output); + + #[cfg(debug_assertions)] + let must_spend_satisfaction_weight = + must_spend.iter().map(|input| input.satisfaction_weight).sum::(); + #[cfg(debug_assertions)] + let must_spend_amount = + must_spend.iter().map(|input| input.previous_utxo.value.to_sat()).sum::(); + + let coin_selection: CoinSelection = match self + .utxo_source + .select_confirmed_utxos( + utxo_id, + must_spend, + &htlc_tx.output, + target_feerate_sat_per_1000_weight, + max_tx_weight, + ) + .await + { + Ok(selection) => selection, + Err(()) => { + let pair_weight = + chan_utils::aggregated_htlc_timeout_input_output_pair_weight(channel_type); + let htlcs_to_remove = + (USER_COINS_WEIGHT_BUDGET + pair_weight - 1) / pair_weight; + batch_size = batch_size.checked_sub(htlcs_to_remove as usize).ok_or(())?; + if batch_size == 0 { + return Err(()); + } + continue; + }, + }; + broadcasted_htlcs += batch_size; + batch_size = htlc_descriptors.len() - broadcasted_htlcs; + utxo_id = claim_id.step_with_bytes(&broadcasted_htlcs.to_be_bytes()); + + #[cfg(debug_assertions)] + let input_satisfaction_weight: u64 = + coin_selection.confirmed_utxos.iter().map(|utxo| utxo.satisfaction_weight).sum(); + #[cfg(debug_assertions)] + let total_satisfaction_weight = must_spend_satisfaction_weight + input_satisfaction_weight; + #[cfg(debug_assertions)] + let input_value: u64 = + coin_selection.confirmed_utxos.iter().map(|utxo| utxo.output.value.to_sat()).sum(); + #[cfg(debug_assertions)] + let total_input_amount = must_spend_amount + input_value; + + self.process_coin_selection(&mut htlc_tx, &coin_selection); + + // construct psbt + let mut htlc_psbt = Psbt::from_unsigned_tx(htlc_tx).unwrap(); + // add witness_utxo to htlc inputs + for (i, htlc_descriptor) in selected_htlcs.iter().enumerate() { + debug_assert_eq!( + htlc_psbt.unsigned_tx.input[i].previous_output, + htlc_descriptor.outpoint() + ); + htlc_psbt.inputs[i].witness_utxo = Some(htlc_descriptor.previous_utxo(&self.secp)); } - } - #[cfg(debug_assertions)] - let unsigned_tx_weight = htlc_psbt.unsigned_tx.weight().to_wu() - - (htlc_psbt.unsigned_tx.input.len() as u64 * EMPTY_SCRIPT_SIG_WEIGHT); + // add witness_utxo to remaining inputs + for (idx, utxo) in coin_selection.confirmed_utxos.into_iter().enumerate() { + // offset to skip the htlc inputs + let index = idx + selected_htlcs.len(); + debug_assert_eq!(htlc_psbt.unsigned_tx.input[index].previous_output, utxo.outpoint); + if utxo.output.script_pubkey.is_witness_program() { + htlc_psbt.inputs[index].witness_utxo = Some(utxo.output); + } + } - log_debug!( - self.logger, - "Signing HTLC transaction {}", - htlc_psbt.unsigned_tx.compute_txid() - ); - htlc_tx = self.utxo_source.sign_psbt(htlc_psbt).await?; - - let mut signers = BTreeMap::new(); - for (idx, htlc_descriptor) in htlc_descriptors.iter().enumerate() { - let keys_id = htlc_descriptor.channel_derivation_parameters.keys_id; - let signer = signers - .entry(keys_id) - .or_insert_with(|| self.signer_provider.derive_channel_signer(keys_id)); - let htlc_sig = - signer.sign_holder_htlc_transaction(&htlc_tx, idx, htlc_descriptor, &self.secp)?; - let witness_script = htlc_descriptor.witness_script(&self.secp); - htlc_tx.input[idx].witness = - htlc_descriptor.tx_input_witness(&htlc_sig, &witness_script); - } + #[cfg(debug_assertions)] 
+ let unsigned_tx_weight = htlc_psbt.unsigned_tx.weight().to_wu() + - (htlc_psbt.unsigned_tx.input.len() as u64 * EMPTY_SCRIPT_SIG_WEIGHT); - #[cfg(debug_assertions)] - { - let signed_tx_weight = htlc_tx.weight().to_wu(); - let expected_signed_tx_weight = unsigned_tx_weight + total_satisfaction_weight; - // Our estimate should be within a 1% error margin of the actual weight and we should - // never underestimate. - assert!(expected_signed_tx_weight >= signed_tx_weight); - assert!(expected_signed_tx_weight * 99 / 100 <= signed_tx_weight); - - let expected_signed_tx_fee = - fee_for_weight(target_feerate_sat_per_1000_weight, signed_tx_weight); - let signed_tx_fee = total_input_amount - - htlc_tx.output.iter().map(|output| output.value.to_sat()).sum::(); - // Our feerate should always be at least what we were seeking. It may overshoot if - // the coin selector burned funds to an OP_RETURN without a change output. - assert!(signed_tx_fee >= expected_signed_tx_fee); + log_debug!( + self.logger, + "Signing HTLC transaction {}", + htlc_psbt.unsigned_tx.compute_txid() + ); + htlc_tx = self.utxo_source.sign_psbt(htlc_psbt).await?; + + let mut signers = BTreeMap::new(); + for (idx, htlc_descriptor) in selected_htlcs.iter().enumerate() { + let keys_id = htlc_descriptor.channel_derivation_parameters.keys_id; + let signer = signers + .entry(keys_id) + .or_insert_with(|| self.signer_provider.derive_channel_signer(keys_id)); + let htlc_sig = signer.sign_holder_htlc_transaction( + &htlc_tx, + idx, + htlc_descriptor, + &self.secp, + )?; + let witness_script = htlc_descriptor.witness_script(&self.secp); + htlc_tx.input[idx].witness = + htlc_descriptor.tx_input_witness(&htlc_sig, &witness_script); + } + + #[cfg(debug_assertions)] + { + let signed_tx_weight = htlc_tx.weight().to_wu(); + let expected_signed_tx_weight = unsigned_tx_weight + total_satisfaction_weight; + // Our estimate should be within a 2% error margin of the actual weight and we should + // never underestimate. + assert!(expected_signed_tx_weight >= signed_tx_weight); + assert!(expected_signed_tx_weight * 98 / 100 <= signed_tx_weight); + + let expected_signed_tx_fee = + fee_for_weight(target_feerate_sat_per_1000_weight, signed_tx_weight); + let signed_tx_fee = total_input_amount + - htlc_tx.output.iter().map(|output| output.value.to_sat()).sum::(); + // Our feerate should always be at least what we were seeking. It may overshoot if + // the coin selector burned funds to an OP_RETURN without a change output. 
+ assert!(signed_tx_fee >= expected_signed_tx_fee); + } + + #[cfg(debug_assertions)] + if channel_type.supports_anchor_zero_fee_commitments() { + assert!(htlc_tx.weight().to_wu() < TRUC_MAX_WEIGHT); + } else { + assert!(htlc_tx.weight().to_wu() < MAX_STANDARD_TX_WEIGHT as u64); + } + + log_info!(self.logger, "Broadcasting {}", log_tx!(htlc_tx)); + self.broadcaster.broadcast_transactions(&[&htlc_tx]); } - log_info!(self.logger, "Broadcasting {}", log_tx!(htlc_tx)); - self.broadcaster.broadcast_transactions(&[&htlc_tx]); Ok(()) } @@ -1090,7 +1263,7 @@ mod tests { impl CoinSelectionSourceSync for TestCoinSelectionSource { fn select_confirmed_utxos( &self, _claim_id: ClaimId, must_spend: Vec, _must_pay_to: &[TxOut], - target_feerate_sat_per_1000_weight: u32, + target_feerate_sat_per_1000_weight: u32, _max_tx_weight: u64, ) -> Result { let mut expected_selects = self.expected_selects.lock().unwrap(); let (weight, value, feerate, res) = expected_selects.remove(0); @@ -1135,6 +1308,7 @@ mod tests { let commitment_tx_bytes = Vec::::from_hex("02000000000101cc6b0a9dd84b52c07340fff6fab002fc37b4bdccfdce9f39c5ec8391a56b652907000000009b948b80044a01000000000000220020b4182433fdfdfbf894897c98f84d92cec815cee222755ffd000ae091c9dadc2d4a01000000000000220020f83f7dbf90e2de325b5bb6bab0ae370151278c6964739242b2e7ce0cb68a5d81cb4a02000000000022002024add256b3dccee772610caef82a601045ab6f98fd6d5df608cc756b891ccfe63ffa490000000000220020894bf32b37906a643625e87131897c3714c71b3ac9b161862c9aa6c8d468b4c70400473044022060abd347bff2cca0212b660e6addff792b3356bd4a1b5b26672dc2e694c3c5f002202b40b7e346b494a7b1d048b4ec33ba99c90a09ab48eb1df64ccdc768066c865c014730440220554d8361e04dc0ee178dcb23d2d23f53ec7a1ae4312a5be76bd9e83ab8981f3d0220501f23ffb18cb81ccea72d30252f88d5e69fd28ba4992803d03c00d06fa8899e0147522102817f6ce189ab7114f89e8d5df58cdbbaf272dc8e71b92982d47456a0b6a0ceee2102c9b4d2f24aca54f65e13f4c83e2a8d8e877e12d3c71a76e81f28a5cabc652aa352ae626c7620").unwrap(); let commitment_tx: Transaction = Readable::read(&mut Cursor::new(&commitment_tx_bytes)).unwrap(); + let commitment_txid = commitment_tx.compute_txid(); let total_commitment_weight = commitment_tx.weight().to_wu() + ANCHOR_INPUT_WITNESS_WEIGHT + EMPTY_SCRIPT_SIG_WEIGHT; let commitment_and_anchor_fee = 930 + 330; @@ -1191,7 +1365,7 @@ mod tests { keys_id: [42; 32], transaction_parameters, }, - outpoint: OutPoint { txid: Txid::from_byte_array([42; 32]), vout: 0 }, + outpoint: OutPoint { txid: commitment_txid, vout: 0 }, value: Amount::from_sat(ANCHOR_OUTPUT_VALUE_SATOSHI), }, pending_htlcs: Vec::new(), diff --git a/lightning/src/events/bump_transaction/sync.rs b/lightning/src/events/bump_transaction/sync.rs index 7d407f4bb8d..c987b871868 100644 --- a/lightning/src/events/bump_transaction/sync.rs +++ b/lightning/src/events/bump_transaction/sync.rs @@ -58,17 +58,17 @@ impl WalletSource for WalletSourceSyncWrapper where T::Target: WalletSourceSync, { - fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec> { + fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec, ()> { let utxos = self.0.list_confirmed_utxos(); Box::pin(async move { utxos }) } - fn get_change_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf> { + fn get_change_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()> { let script = self.0.get_change_script(); Box::pin(async move { script }) } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction> { + fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { let signed_psbt = self.0.sign_psbt(psbt); Box::pin(async move { 
signed_psbt }) } @@ -102,13 +102,14 @@ where { fn select_confirmed_utxos( &self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &[TxOut], - target_feerate_sat_per_1000_weight: u32, + target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, ) -> Result { let mut fut = self.wallet.select_confirmed_utxos( claim_id, must_spend, must_pay_to, target_feerate_sat_per_1000_weight, + max_tx_weight, ); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -140,7 +141,7 @@ pub trait CoinSelectionSourceSync { /// A synchronous version of [`CoinSelectionSource::select_confirmed_utxos`]. fn select_confirmed_utxos( &self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &[TxOut], - target_feerate_sat_per_1000_weight: u32, + target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, ) -> Result; /// A synchronous version of [`CoinSelectionSource::sign_psbt`]. @@ -169,18 +170,19 @@ where { fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], - target_feerate_sat_per_1000_weight: u32, - ) -> AsyncResult<'a, CoinSelection> { + target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, + ) -> AsyncResult<'a, CoinSelection, ()> { let coins = self.0.select_confirmed_utxos( claim_id, must_spend, must_pay_to, target_feerate_sat_per_1000_weight, + max_tx_weight, ); Box::pin(async move { coins }) } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction> { + fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { let psbt = self.0.sign_psbt(psbt); Box::pin(async move { psbt }) } diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index d8a1a181b99..1b9850c442e 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -1669,8 +1669,10 @@ pub enum Event { /// Indicates that a transaction originating from LDK needs to have its fee bumped. This event /// requires confirmed external funds to be readily available to spend. /// - /// LDK does not currently generate this event unless the - /// [`ChannelHandshakeConfig::negotiate_anchors_zero_fee_htlc_tx`] config flag is set to true. + /// LDK does not currently generate this event unless either the + /// [`ChannelHandshakeConfig::negotiate_anchors_zero_fee_htlc_tx`] or the + /// [`ChannelHandshakeConfig::negotiate_anchor_zero_fee_commitments`] config flags are set to + /// true. /// It is limited to the scope of channels with anchor outputs. /// /// # Failure Behavior and Persistence @@ -1678,6 +1680,7 @@ pub enum Event { /// returning `Err(ReplayEvent ())`), but will only be regenerated as needed after restarts. /// /// [`ChannelHandshakeConfig::negotiate_anchors_zero_fee_htlc_tx`]: crate::util::config::ChannelHandshakeConfig::negotiate_anchors_zero_fee_htlc_tx + /// [`ChannelHandshakeConfig::negotiate_anchor_zero_fee_commitments`]: crate::util::config::ChannelHandshakeConfig::negotiate_anchor_zero_fee_commitments BumpTransaction(BumpTransactionEvent), /// We received an onion message that is intended to be forwarded to a peer /// that is currently offline. 
This event will only be generated if the diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index de5103aeba9..71821081094 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -596,7 +596,7 @@ fn do_test_async_raa_peer_disconnect( } // Expect the RAA - let (_, revoke_and_ack, commitment_signed, resend_order, _, _) = + let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _) = handle_chan_reestablish_msgs!(dst, src); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { assert!(revoke_and_ack.is_none()); @@ -612,14 +612,14 @@ fn do_test_async_raa_peer_disconnect( dst.node.signer_unblocked(Some((src_node_id, chan_id))); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { - let (_, revoke_and_ack, commitment_signed, resend_order, _, _) = + let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_some()); assert!(commitment_signed.is_some()); assert!(resend_order == RAACommitmentOrder::RevokeAndACKFirst); } else { // Make sure we don't double send the RAA. - let (_, revoke_and_ack, commitment_signed, _, _, _) = + let (_, revoke_and_ack, commitment_signed, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_none()); assert!(commitment_signed.is_none()); @@ -746,7 +746,8 @@ fn do_test_async_commitment_signature_peer_disconnect( } // Expect the RAA - let (_, revoke_and_ack, commitment_signed, _, _, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, revoke_and_ack, commitment_signed, _, _, _, _) = + handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_some()); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { assert!(commitment_signed.is_none()); @@ -759,11 +760,11 @@ fn do_test_async_commitment_signature_peer_disconnect( dst.node.signer_unblocked(Some((src_node_id, chan_id))); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { - let (_, _, commitment_signed, _, _, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, _, commitment_signed, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(commitment_signed.is_some()); } else { // Make sure we don't double send the CS. - let (_, _, commitment_signed, _, _, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, _, commitment_signed, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(commitment_signed.is_none()); } } @@ -880,6 +881,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.2.is_none()); assert!(as_resp.4.is_none()); assert!(as_resp.5.is_none()); + assert!(as_resp.6.is_none()); if monitor_update_failure { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); @@ -901,6 +903,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.2.is_none()); assert!(as_resp.4.is_none()); assert!(as_resp.5.is_none()); + assert!(as_resp.6.is_none()); nodes[0].enable_channel_signer_op(&node_b_id, &chan_id, SignerOp::SignCounterpartyCommitment); nodes[0].node.signer_unblocked(Some((node_b_id, chan_id))); @@ -923,6 +926,9 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.5.is_none()); assert!(bs_resp.5.is_none()); + assert!(as_resp.6.is_none()); + assert!(bs_resp.6.is_none()); + // Now that everything is restored, get the CS + RAA and handle them. 
nodes[1] .node diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index 72cb34657fb..51bfc95e7e9 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -11,6 +11,7 @@ //! largely of interest for those implementing the traits on [`crate::sign`] by hand. use bitcoin::amount::Amount; +use bitcoin::constants::WITNESS_SCALE_FACTOR; use bitcoin::opcodes; use bitcoin::script::{Builder, Script, ScriptBuf}; use bitcoin::sighash; @@ -89,12 +90,18 @@ pub const ANCHOR_INPUT_WITNESS_WEIGHT: u64 = 114; #[cfg(not(feature = "grind_signatures"))] pub const ANCHOR_INPUT_WITNESS_WEIGHT: u64 = 115; -/// The weight of a P2A anchor witness. -pub const P2A_ANCHOR_INPUT_WITNESS_WEIGHT: u64 = 1; +/// The weight of an empty witness; used to spend a P2A output. +pub const EMPTY_WITNESS_WEIGHT: u64 = 1; /// The maximum value of a P2A anchor. pub const P2A_MAX_VALUE: u64 = 240; +/// The maximum weight of a TRUC transaction, see BIP431. +pub const TRUC_MAX_WEIGHT: u64 = 10_000 * WITNESS_SCALE_FACTOR as u64; + +/// The maximum weight of a TRUC transaction with an unconfirmed TRUC ancestor, see BIP431. +pub const TRUC_CHILD_MAX_WEIGHT: u64 = 1000 * WITNESS_SCALE_FACTOR as u64; + /// The upper bound weight of an HTLC timeout input from a commitment transaction with keyed anchor outputs. pub const HTLC_TIMEOUT_INPUT_KEYED_ANCHOR_WITNESS_WEIGHT: u64 = 288; /// The upper bound weight of an HTLC timeout input from a commitment transaction with a p2a anchor output. @@ -125,6 +132,15 @@ pub const FUNDING_TRANSACTION_WITNESS_WEIGHT: u64 = 1 + // number_of_witness_ele 1 + // witness_script_length MULTISIG_SCRIPT_SIZE; +pub(crate) const BASE_TX_SIZE: u64 = 4 /* version */ + 1 /* input count */ + 1 /* output count */ + 4 /* locktime */; +pub(crate) const SEGWIT_MARKER_FLAG_WEIGHT: u64 = 2; +pub(crate) const EMPTY_SCRIPT_SIG_WEIGHT: u64 = + 1 /* empty script_sig */ * WITNESS_SCALE_FACTOR as u64; +pub(crate) const BASE_INPUT_SIZE: u64 = 32 /* txid */ + 4 /* vout */ + 4 /* sequence */; +pub(crate) const BASE_INPUT_WEIGHT: u64 = BASE_INPUT_SIZE * WITNESS_SCALE_FACTOR as u64; +pub(crate) const P2WSH_TXOUT_WEIGHT: u64 = + (8 /* value */ + 1 /* var_int */ + 34/* p2wsh spk */) * WITNESS_SCALE_FACTOR as u64; + /// Gets the weight for an HTLC-Success transaction. #[inline] #[rustfmt::skip] @@ -134,6 +150,18 @@ pub fn htlc_success_tx_weight(channel_type_features: &ChannelTypeFeatures) -> u6 if channel_type_features.supports_anchors_zero_fee_htlc_tx() { HTLC_SUCCESS_ANCHOR_TX_WEIGHT } else { HTLC_SUCCESS_TX_WEIGHT } } +/// Gets the weight of a single input-output pair in externally funded HTLC-success transactions +pub fn aggregated_htlc_success_input_output_pair_weight( + channel_type_features: &ChannelTypeFeatures, +) -> u64 { + let satisfaction_weight = if channel_type_features.supports_anchors_zero_fee_htlc_tx() { + EMPTY_SCRIPT_SIG_WEIGHT + HTLC_SUCCESS_INPUT_KEYED_ANCHOR_WITNESS_WEIGHT + } else { + EMPTY_SCRIPT_SIG_WEIGHT + HTLC_SUCCESS_INPUT_P2A_ANCHOR_WITNESS_WEIGHT + }; + BASE_INPUT_WEIGHT + P2WSH_TXOUT_WEIGHT + satisfaction_weight +} + /// Gets the weight for an HTLC-Timeout transaction. 
#[inline] #[rustfmt::skip] @@ -143,6 +171,18 @@ pub fn htlc_timeout_tx_weight(channel_type_features: &ChannelTypeFeatures) -> u6 if channel_type_features.supports_anchors_zero_fee_htlc_tx() { HTLC_TIMEOUT_ANCHOR_TX_WEIGHT } else { HTLC_TIMEOUT_TX_WEIGHT } } +/// Gets the weight of a single input-output pair in externally funded HTLC-timeout transactions +pub fn aggregated_htlc_timeout_input_output_pair_weight( + channel_type_features: &ChannelTypeFeatures, +) -> u64 { + let satisfaction_weight = if channel_type_features.supports_anchors_zero_fee_htlc_tx() { + EMPTY_SCRIPT_SIG_WEIGHT + HTLC_TIMEOUT_INPUT_KEYED_ANCHOR_WITNESS_WEIGHT + } else { + EMPTY_SCRIPT_SIG_WEIGHT + HTLC_TIMEOUT_INPUT_P2A_ANCHOR_WITNESS_WEIGHT + }; + BASE_INPUT_WEIGHT + P2WSH_TXOUT_WEIGHT + satisfaction_weight +} + /// Describes the type of HTLC claim as determined by analyzing the witness. #[derive(PartialEq, Eq)] pub enum HTLCClaim { @@ -1018,11 +1058,11 @@ pub struct ChannelTransactionParameters { /// If a channel was funded with transaction A, and later spliced with transaction B, this field /// tracks the txid of transaction A. /// - /// See [`compute_funding_key_tweak`] and [`ChannelSigner::new_pubkeys`] for more context on how - /// this may be used. + /// See [`compute_funding_key_tweak`] and [`ChannelSigner::new_funding_pubkey`] for more context + /// on how this may be used. /// /// [`compute_funding_key_tweak`]: crate::sign::compute_funding_key_tweak - /// [`ChannelSigner::new_pubkeys`]: crate::sign::ChannelSigner::new_pubkeys + /// [`ChannelSigner::new_funding_pubkey`]: crate::sign::ChannelSigner::new_funding_pubkey pub splice_parent_funding_txid: Option, /// This channel's type, as negotiated during channel open. For old objects where this field /// wasn't serialized, it will default to static_remote_key at deserialization. 
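For readers cross-checking the new chan_utils weight constants above, the arithmetic they encode works out as in the standalone spot-check below; the only assumption is that WITNESS_SCALE_FACTOR is 4, as in rust-bitcoin:

    // Standalone spot-check of the values encoded by the constants above.
    const WITNESS_SCALE_FACTOR: u64 = 4; // non-witness bytes count 4 WU each

    fn main() {
        // BIP 431 TRUC limits, expressed in weight units:
        assert_eq!(10_000 * WITNESS_SCALE_FACTOR, 40_000); // TRUC_MAX_WEIGHT
        assert_eq!(1_000 * WITNESS_SCALE_FACTOR, 4_000); // TRUC_CHILD_MAX_WEIGHT

        // Per-input/output building blocks used by the aggregated HTLC pair helpers:
        let base_input_weight = (32 + 4 + 4) * WITNESS_SCALE_FACTOR; // txid + vout + sequence
        let p2wsh_txout_weight = (8 + 1 + 34) * WITNESS_SCALE_FACTOR; // value + var_int + p2wsh spk
        let empty_script_sig_weight = 1 * WITNESS_SCALE_FACTOR; // single zero length byte
        assert_eq!(base_input_weight, 160);
        assert_eq!(p2wsh_txout_weight, 172);
        assert_eq!(empty_script_sig_weight, 4);
    }

The aggregated helpers then add the empty script_sig weight plus the witness weight for the relevant claim (success vs. timeout, keyed anchor vs. P2A), which is the only part that varies by channel type.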
@@ -2245,8 +2285,8 @@ mod tests { let counterparty_signer = keys_provider.derive_channel_signer(keys_provider.generate_channel_keys_id(true, 1)); let per_commitment_secret = SecretKey::from_slice(&>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap(); let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret); - let holder_pubkeys = signer.new_pubkeys(None, &secp_ctx); - let counterparty_pubkeys = counterparty_signer.new_pubkeys(None, &secp_ctx).clone(); + let holder_pubkeys = signer.pubkeys(&secp_ctx); + let counterparty_pubkeys = counterparty_signer.pubkeys(&secp_ctx).clone(); let channel_parameters = ChannelTransactionParameters { holder_pubkeys: holder_pubkeys.clone(), holder_selected_contest_delay: 0, diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 81166d3d962..3c901d5e073 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -36,7 +36,6 @@ use crate::chain::channelmonitor::{ }; use crate::chain::transaction::{OutPoint, TransactionData}; use crate::chain::BestBlock; -use crate::events::bump_transaction::{BASE_INPUT_WEIGHT, EMPTY_SCRIPT_SIG_WEIGHT}; use crate::events::{ClosureReason, FundingInfo}; use crate::ln::chan_utils; use crate::ln::chan_utils::{ @@ -44,7 +43,7 @@ use crate::ln::chan_utils::{ selected_commitment_sat_per_1000_weight, ChannelPublicKeys, ChannelTransactionParameters, ClosingTransaction, CommitmentTransaction, CounterpartyChannelTransactionParameters, CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HolderCommitmentTransaction, - FUNDING_TRANSACTION_WITNESS_WEIGHT, + BASE_INPUT_WEIGHT, EMPTY_SCRIPT_SIG_WEIGHT, FUNDING_TRANSACTION_WITNESS_WEIGHT, }; use crate::ln::channel_state::{ ChannelShutdownState, CounterpartyForwardingInfo, InboundHTLCDetails, InboundHTLCStateDetails, @@ -1373,7 +1372,28 @@ pub(crate) const COINBASE_MATURITY: u32 = 100; /// The number of blocks to wait for a channel_announcement to propagate such that payments using an /// older SCID can still be relayed. Once the spend of the previous funding transaction has reached /// this number of confirmations, the corresponding SCID will be forgotten. -const CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY: u32 = 144; +/// +/// Because HTLCs added prior to 0.1 which were waiting to be failed may reference a channel's +/// pre-splice SCID, we need to ensure this is at least the maximum number of blocks before an HTLC +/// gets failed-back due to a time-out. Luckily, in LDK prior to 0.2, this is enforced directly +/// when checking the incoming HTLC, and compared against `CLTV_FAR_FAR_AWAY` (which prior to LDK +/// 0.2, and still at the time of writing, is 14 * 24 * 6, i.e. two weeks). +/// +/// Here we use four times that value to give us more time to fail an HTLC back (which does require +/// the user call [`ChannelManager::process_pending_htlc_forwards`]) just in case (if an HTLC has +/// been expired for 3 * 2 weeks our counterparty really should have closed the channel by now). +/// Holding on to stale SCIDs doesn't really cost us much as each one costs an on-chain splice to +/// generate anyway, so we might as well make this nearly arbitrarily long. 
+/// +/// [`ChannelManager::process_pending_htlc_forwards`]: crate::ln::channelmanager::ChannelManager::process_pending_htlc_forwards +#[cfg(not(test))] +pub(crate) const CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY: u32 = 14 * 24 * 6 * 4; + +/// In test (not `_test_utils`, though, since that tests actual upgrading), we deliberately break +/// the above condition so that we can ensure that HTLCs forwarded in 0.2 or later are handled +/// correctly even if this constant is reduced and an HTLC can outlive the original channel's SCID. +#[cfg(test)] +pub(crate) const CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY: u32 = 144; struct PendingChannelMonitorUpdate { update: ChannelMonitorUpdate, @@ -2470,6 +2490,7 @@ impl FundingScope { fn for_splice( prev_funding: &Self, context: &ChannelContext, our_funding_contribution: SignedAmount, their_funding_contribution: SignedAmount, counterparty_funding_pubkey: PublicKey, + our_new_holder_keys: ChannelPublicKeys, ) -> Self where SP::Target: SignerProvider, @@ -2489,19 +2510,15 @@ impl FundingScope { debug_assert!(post_value_to_self_msat.is_some()); let post_value_to_self_msat = post_value_to_self_msat.unwrap(); - // Rotate the pubkeys using the prev_funding_txid as a tweak - let prev_funding_txid = prev_funding.get_funding_txid(); - let holder_pubkeys = context.new_holder_pubkeys(prev_funding_txid); - let channel_parameters = &prev_funding.channel_transaction_parameters; let mut post_channel_transaction_parameters = ChannelTransactionParameters { - holder_pubkeys, + holder_pubkeys: our_new_holder_keys, holder_selected_contest_delay: channel_parameters.holder_selected_contest_delay, // The 'outbound' attribute doesn't change, even if the splice initiator is the other node is_outbound_from_holder: channel_parameters.is_outbound_from_holder, counterparty_parameters: channel_parameters.counterparty_parameters.clone(), funding_outpoint: None, // filled later - splice_parent_funding_txid: prev_funding_txid, + splice_parent_funding_txid: prev_funding.get_funding_txid(), channel_type_features: channel_parameters.channel_type_features.clone(), channel_value_satoshis: post_channel_value, }; @@ -2637,6 +2654,7 @@ impl_writeable_tlv_based!(PendingFunding, { enum FundingNegotiation { AwaitingAck { context: FundingNegotiationContext, + new_holder_funding_key: PublicKey, }, ConstructingTransaction { funding: FundingScope, @@ -2667,7 +2685,7 @@ impl FundingNegotiation { fn is_initiator(&self) -> bool { match self { - FundingNegotiation::AwaitingAck { context } => context.is_initiator, + FundingNegotiation::AwaitingAck { context, .. } => context.is_initiator, FundingNegotiation::ConstructingTransaction { interactive_tx_constructor, .. } => { interactive_tx_constructor.is_initiator() }, @@ -3203,6 +3221,7 @@ where funding.get_holder_selected_contest_delay(), &context.destination_script, &funding.channel_transaction_parameters, funding.is_outbound(), obscure_factor, holder_commitment_tx, best_block, context.counterparty_node_id, context.channel_id(), + context.is_manual_broadcast, ); channel_monitor.provide_initial_counterparty_commitment_tx( counterparty_initial_commitment_tx.clone(), @@ -3510,7 +3529,7 @@ where // TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`? 
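Tying back to the `CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY` constant introduced above: outside of tests it is now four times `CLTV_FAR_FAR_AWAY`, i.e. roughly eight weeks of blocks, while the test build keeps the old one-day value. The arithmetic, as a standalone check using the figures quoted in the comment above:

    fn main() {
        // Two weeks of blocks, the value of `CLTV_FAR_FAR_AWAY` cited in the comment above.
        let cltv_far_far_away: u32 = 14 * 24 * 6;
        assert_eq!(cltv_far_far_away, 2_016);

        // Non-test value of the propagation delay: four times that, ~8 weeks of blocks.
        let channel_announcement_propagation_delay: u32 = 14 * 24 * 6 * 4;
        assert_eq!(channel_announcement_propagation_delay, 8_064);
        assert_eq!(channel_announcement_propagation_delay, cltv_far_far_away * 4);

        // Test builds keep the old one-day value so stale-SCID handling stays exercised.
        let test_value: u32 = 144;
        assert!(test_value < cltv_far_far_away);
    }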
- let pubkeys = holder_signer.new_pubkeys(None, &secp_ctx); + let pubkeys = holder_signer.pubkeys(&secp_ctx); let funding = FundingScope { value_to_self_msat, @@ -3748,7 +3767,7 @@ where Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}), }; - let pubkeys = holder_signer.new_pubkeys(None, &secp_ctx); + let pubkeys = holder_signer.pubkeys(&secp_ctx); let temporary_channel_id = temporary_channel_id_fn.map(|f| f(&pubkeys)) .unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source)); @@ -4111,16 +4130,6 @@ where return &mut self.holder_signer; } - /// Returns holder pubkeys to use for the channel. - fn new_holder_pubkeys(&self, prev_funding_txid: Option) -> ChannelPublicKeys { - match &self.holder_signer { - ChannelSignerType::Ecdsa(ecdsa) => ecdsa.new_pubkeys(prev_funding_txid, &self.secp_ctx), - // TODO (taproot|arik) - #[cfg(taproot)] - _ => todo!(), - } - } - /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0, /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases /// or prior to any channel actions during `Channel` initialization. @@ -6264,7 +6273,7 @@ where commitment_number, &commitment_point, false, - false, + true, logger, ); let counterparty_initial_commitment_tx = commitment_data.tx; @@ -6846,6 +6855,9 @@ pub struct FundingTxSigned { /// Information about the completed funding negotiation. pub splice_negotiated: Option, + + /// A `splice_locked` to send to the counterparty when the splice requires 0 confirmations. + pub splice_locked: Option, } /// Information about a splice funding negotiation that has been completed. @@ -6888,7 +6900,7 @@ macro_rules! maybe_create_splice_funding_failed { .map(|funding| funding.get_channel_type().clone()); let (contributed_inputs, contributed_outputs) = match funding_negotiation { - FundingNegotiation::AwaitingAck { context } => { + FundingNegotiation::AwaitingAck { context, .. 
} => { context.$contributed_inputs_and_outputs() }, FundingNegotiation::ConstructingTransaction { @@ -8877,9 +8889,13 @@ where } } - fn on_tx_signatures_exchange( - &mut self, funding_tx: Transaction, - ) -> Option { + fn on_tx_signatures_exchange<'a, L: Deref>( + &mut self, funding_tx: Transaction, best_block_height: u32, + logger: &WithChannelContext<'a, L>, + ) -> (Option, Option) + where + L::Target: Logger, + { debug_assert!(!self.context.channel_state.is_monitor_update_in_progress()); debug_assert!(!self.context.channel_state.is_awaiting_remote_revoke()); @@ -8901,22 +8917,42 @@ where channel_type, }; - Some(splice_negotiated) + let splice_locked = pending_splice.check_get_splice_locked( + &self.context, + pending_splice.negotiated_candidates.len() - 1, + best_block_height, + ); + if let Some(splice_txid) = + splice_locked.as_ref().map(|splice_locked| splice_locked.splice_txid) + { + log_info!( + logger, + "Sending 0conf splice_locked txid {} to our peer for channel {}", + splice_txid, + &self.context.channel_id + ); + } + + (Some(splice_negotiated), splice_locked) } else { debug_assert!(false); - None + (None, None) } } else { self.funding.funding_transaction = Some(funding_tx); self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); - None + (None, None) } } - pub fn funding_transaction_signed( - &mut self, funding_txid_signed: Txid, witnesses: Vec, - ) -> Result { + pub fn funding_transaction_signed( + &mut self, funding_txid_signed: Txid, witnesses: Vec, best_block_height: u32, + logger: &L, + ) -> Result + where + L::Target: Logger, + { let signing_session = if let Some(signing_session) = self.context.interactive_tx_signing_session.as_mut() { if let Some(pending_splice) = self.pending_splice.as_ref() { @@ -8937,6 +8973,7 @@ where tx_signatures: None, funding_tx: None, splice_negotiated: None, + splice_locked: None, }); } @@ -8949,6 +8986,7 @@ where tx_signatures: None, funding_tx: None, splice_negotiated: None, + splice_locked: None, }); } let err = @@ -8991,19 +9029,30 @@ where .provide_holder_witnesses(tx_signatures, &self.context.secp_ctx) .map_err(|err| APIError::APIMisuseError { err })?; - let splice_negotiated = if let Some(funding_tx) = funding_tx.clone() { + let logger = WithChannelContext::from(logger, &self.context, None); + if tx_signatures.is_some() { + log_info!( + logger, + "Sending tx_signatures for interactive funding transaction {funding_txid_signed}" + ); + } + + let (splice_negotiated, splice_locked) = if let Some(funding_tx) = funding_tx.clone() { debug_assert!(tx_signatures.is_some()); - self.on_tx_signatures_exchange(funding_tx) + self.on_tx_signatures_exchange(funding_tx, best_block_height, &logger) } else { - None + (None, None) }; - Ok(FundingTxSigned { tx_signatures, funding_tx, splice_negotiated }) + Ok(FundingTxSigned { tx_signatures, funding_tx, splice_negotiated, splice_locked }) } - pub fn tx_signatures( - &mut self, msg: &msgs::TxSignatures, - ) -> Result { + pub fn tx_signatures( + &mut self, msg: &msgs::TxSignatures, best_block_height: u32, logger: &L, + ) -> Result + where + L::Target: Logger, + { let signing_session = if let Some(signing_session) = self.context.interactive_tx_signing_session.as_mut() { @@ -9049,13 +9098,25 @@ where let (holder_tx_signatures, funding_tx) = signing_session.received_tx_signatures(msg).map_err(|msg| ChannelError::Warn(msg))?; - let splice_negotiated = if let Some(funding_tx) = funding_tx.clone() { - self.on_tx_signatures_exchange(funding_tx) + let logger = 
WithChannelContext::from(logger, &self.context, None); + log_info!( + logger, + "Received tx_signatures for interactive funding transaction {}", + msg.tx_hash + ); + + let (splice_negotiated, splice_locked) = if let Some(funding_tx) = funding_tx.clone() { + self.on_tx_signatures_exchange(funding_tx, best_block_height, &logger) } else { - None + (None, None) }; - Ok(FundingTxSigned { tx_signatures: holder_tx_signatures, funding_tx, splice_negotiated }) + Ok(FundingTxSigned { + tx_signatures: holder_tx_signatures, + funding_tx, + splice_negotiated, + splice_locked, + }) } /// Queues up an outbound update fee by placing it in the holding cell. You should call @@ -11128,6 +11189,12 @@ where let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, block_height, logger); + if let Some(quiescent_action) = self.quiescent_action.as_ref() { + if matches!(quiescent_action, QuiescentAction::Splice(_)) { + self.context.channel_state.set_awaiting_quiescence(); + } + } + Some(SpliceFundingPromotion { funding_txo, monitor_update, @@ -11362,7 +11429,11 @@ where confirmed_funding_index, height, ) { - log_info!(logger, "Sending a splice_locked to our peer for channel {}", &self.context.channel_id); + log_info!( + logger, "Sending splice_locked txid {} to our peer for channel {}", + splice_locked.splice_txid, + &self.context.channel_id + ); let (funding_txo, monitor_update, announcement_sigs, discarded_funding) = chain_node_signer .and_then(|(chain_hash, node_signer, user_config)| { @@ -11795,10 +11866,10 @@ where }); } - if !self.context.is_live() { + if !self.context.is_usable() { return Err(APIError::APIMisuseError { err: format!( - "Channel {} cannot be spliced, as channel is not live", + "Channel {} cannot be spliced as it is either pending open/close", self.context.channel_id() ), }); @@ -11900,17 +11971,29 @@ where change_script, }; + // Rotate the funding pubkey using the prev_funding_txid as a tweak + let prev_funding_txid = self.funding.get_funding_txid(); + let funding_pubkey = match (prev_funding_txid, &self.context.holder_signer) { + (None, _) => { + debug_assert!(false); + self.funding.get_holder_pubkeys().funding_pubkey + }, + (Some(prev_funding_txid), ChannelSignerType::Ecdsa(ecdsa)) => { + ecdsa.new_funding_pubkey(prev_funding_txid, &self.context.secp_ctx) + }, + #[cfg(taproot)] + _ => todo!(), + }; + + let funding_negotiation = + FundingNegotiation::AwaitingAck { context, new_holder_funding_key: funding_pubkey }; self.pending_splice = Some(PendingFunding { - funding_negotiation: Some(FundingNegotiation::AwaitingAck { context }), + funding_negotiation: Some(funding_negotiation), negotiated_candidates: vec![], sent_funding_txid: None, received_funding_txid: None, }); - // Rotate the pubkeys using the prev_funding_txid as a tweak - let prev_funding_txid = self.funding.get_funding_txid(); - let funding_pubkey = self.context.new_holder_pubkeys(prev_funding_txid).funding_pubkey; - msgs::SpliceInit { channel_id: self.context.channel_id, funding_contribution_satoshis: adjusted_funding_contribution.to_sat(), @@ -11994,12 +12077,29 @@ where self.validate_splice_contributions(our_funding_contribution, their_funding_contribution) .map_err(|e| ChannelError::WarnAndDisconnect(e))?; + // Rotate the pubkeys using the prev_funding_txid as a tweak + let prev_funding_txid = self.funding.get_funding_txid(); + let funding_pubkey = match (prev_funding_txid, &self.context.holder_signer) { + (None, _) => { + debug_assert!(false); + self.funding.get_holder_pubkeys().funding_pubkey + 
}, + (Some(prev_funding_txid), ChannelSignerType::Ecdsa(ecdsa)) => { + ecdsa.new_funding_pubkey(prev_funding_txid, &self.context.secp_ctx) + }, + #[cfg(taproot)] + _ => todo!(), + }; + let mut new_keys = self.funding.get_holder_pubkeys().clone(); + new_keys.funding_pubkey = funding_pubkey; + Ok(FundingScope::for_splice( &self.funding, &self.context, our_funding_contribution, their_funding_contribution, msg.funding_pubkey, + new_keys, )) } @@ -12144,8 +12244,7 @@ where // optimization, but for often-offline nodes it may be, as we may connect and immediately // go into splicing from both sides. - let funding_pubkey = splice_funding.get_holder_pubkeys().funding_pubkey; - + let new_funding_pubkey = splice_funding.get_holder_pubkeys().funding_pubkey; self.pending_splice = Some(PendingFunding { funding_negotiation: Some(FundingNegotiation::ConstructingTransaction { funding: splice_funding, @@ -12159,7 +12258,7 @@ where Ok(msgs::SpliceAck { channel_id: self.context.channel_id, funding_contribution_satoshis: our_funding_contribution.to_sat(), - funding_pubkey, + funding_pubkey: new_funding_pubkey, require_confirmed_inputs: None, }) } @@ -12185,13 +12284,14 @@ where let pending_splice = self.pending_splice.as_mut().expect("We should have returned an error earlier!"); // TODO: Good candidate for a let else statement once MSRV >= 1.65 - let funding_negotiation_context = if let Some(FundingNegotiation::AwaitingAck { context }) = - pending_splice.funding_negotiation.take() - { - context - } else { - panic!("We should have returned an error earlier!"); - }; + let funding_negotiation_context = + if let Some(FundingNegotiation::AwaitingAck { context, .. }) = + pending_splice.funding_negotiation.take() + { + context + } else { + panic!("We should have returned an error earlier!"); + }; let mut interactive_tx_constructor = funding_negotiation_context .into_interactive_tx_constructor( @@ -12222,13 +12322,17 @@ where fn validate_splice_ack(&self, msg: &msgs::SpliceAck) -> Result { // TODO(splicing): Add check that we are the splice (quiescence) initiator - let funding_negotiation_context = match &self + let pending_splice = self .pending_splice .as_ref() - .ok_or(ChannelError::Ignore("Channel is not in pending splice".to_owned()))? + .ok_or_else(|| ChannelError::Ignore("Channel is not in pending splice".to_owned()))?; + + let (funding_negotiation_context, new_holder_funding_key) = match &pending_splice .funding_negotiation { - Some(FundingNegotiation::AwaitingAck { context }) => context, + Some(FundingNegotiation::AwaitingAck { context, new_holder_funding_key }) => { + (context, new_holder_funding_key) + }, Some(FundingNegotiation::ConstructingTransaction { .. }) | Some(FundingNegotiation::AwaitingSignatures { .. 
}) => { return Err(ChannelError::WarnAndDisconnect( @@ -12247,12 +12351,16 @@ where self.validate_splice_contributions(our_funding_contribution, their_funding_contribution) .map_err(|e| ChannelError::WarnAndDisconnect(e))?; + let mut new_keys = self.funding.get_holder_pubkeys().clone(); + new_keys.funding_pubkey = *new_holder_funding_key; + Ok(FundingScope::for_splice( &self.funding, &self.context, our_funding_contribution, their_funding_contribution, msg.funding_pubkey, + new_keys, )) } @@ -12961,6 +13069,7 @@ where || self.context.channel_state.is_awaiting_quiescence() || self.context.channel_state.is_local_stfu_sent() { + log_debug!(logger, "Channel is either pending quiescence or already quiescent"); return Ok(None); } @@ -12968,6 +13077,7 @@ where if self.context.is_live() { Ok(Some(self.send_stfu(logger)?)) } else { + log_debug!(logger, "Waiting for peer reconnection to send stfu"); Ok(None) } } @@ -13169,18 +13279,18 @@ where let end = self .funding .get_short_channel_id() - .and_then(|current_scid| { + .map(|current_scid| { let historical_scids = &self.context.historical_scids; historical_scids .iter() .zip(historical_scids.iter().skip(1).chain(core::iter::once(¤t_scid))) - .map(|(_, next_scid)| { - let funding_height = block_from_scid(*next_scid); - let retain_scid = - funding_height + CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY - 1 > height; - retain_scid + .filter(|(_, next_scid)| { + let funding_height = block_from_scid(**next_scid); + let drop_scid = + funding_height + CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY - 1 <= height; + drop_scid }) - .position(|retain_scid| retain_scid) + .count() }) .unwrap_or(0); @@ -16411,7 +16521,7 @@ mod tests { [0; 32], ); - let holder_pubkeys = signer.new_pubkeys(None, &secp_ctx); + let holder_pubkeys = signer.pubkeys(&secp_ctx); assert_eq!(holder_pubkeys.funding_pubkey.serialize()[..], >::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]); let keys_provider = Keys { signer: signer.clone() }; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 5354761875d..4d96f8ad7da 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4730,14 +4730,6 @@ where // Look for the channel match peer_state.channel_by_id.entry(*channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - if !chan_phase_entry.get().context().is_connected() { - // TODO: We should probably support this, but right now `splice_channel` refuses when - // the peer is disconnected, so we just check it here. 
- return Err(APIError::ChannelUnavailable { - err: "Cannot initiate splice while peer is disconnected".to_owned(), - }); - } - let locktime = locktime.unwrap_or_else(|| self.current_best_block().height); if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); @@ -6433,14 +6425,25 @@ where .map(|input| input.witness) .filter(|witness| !witness.is_empty()) .collect(); - match chan.funding_transaction_signed(txid, witnesses) { + let best_block_height = self.best_block.read().unwrap().height; + match chan.funding_transaction_signed( + txid, + witnesses, + best_block_height, + &self.logger, + ) { Ok(FundingTxSigned { tx_signatures: Some(tx_signatures), funding_tx, splice_negotiated, + splice_locked, }) => { if let Some(funding_tx) = funding_tx { - self.broadcast_interactive_funding(chan, &funding_tx); + self.broadcast_interactive_funding( + chan, + &funding_tx, + &self.logger, + ); } if let Some(splice_negotiated) = splice_negotiated { self.pending_events.lock().unwrap().push_back(( @@ -6460,6 +6463,14 @@ where msg: tx_signatures, }, ); + if let Some(splice_locked) = splice_locked { + peer_state.pending_msg_events.push( + MessageSendEvent::SendSpliceLocked { + node_id: *counterparty_node_id, + msg: splice_locked, + }, + ); + } return NotifyOption::DoPersist; }, Err(err) => { @@ -6470,9 +6481,11 @@ where tx_signatures: None, funding_tx, splice_negotiated, + splice_locked, }) => { debug_assert!(funding_tx.is_none()); debug_assert!(splice_negotiated.is_none()); + debug_assert!(splice_locked.is_none()); return NotifyOption::SkipPersistNoEvents; }, } @@ -6503,8 +6516,14 @@ where } fn broadcast_interactive_funding( - &self, channel: &mut FundedChannel, funding_tx: &Transaction, + &self, channel: &mut FundedChannel, funding_tx: &Transaction, logger: &L, ) { + let logger = WithChannelContext::from(logger, channel.context(), None); + log_info!( + logger, + "Broadcasting signed interactive funding transaction {}", + funding_tx.compute_txid() + ); self.tx_broadcaster.broadcast_transactions(&[funding_tx]); { let mut pending_events = self.pending_events.lock().unwrap(); @@ -9570,10 +9589,16 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } else { let txid = signing_session.unsigned_tx().compute_txid(); - match channel.funding_transaction_signed(txid, vec![]) { - Ok(FundingTxSigned { tx_signatures: Some(tx_signatures), funding_tx, splice_negotiated }) => { + let best_block_height = self.best_block.read().unwrap().height; + match channel.funding_transaction_signed(txid, vec![], best_block_height, &self.logger) { + Ok(FundingTxSigned { + tx_signatures: Some(tx_signatures), + funding_tx, + splice_negotiated, + splice_locked, + }) => { if let Some(funding_tx) = funding_tx { - self.broadcast_interactive_funding(channel, &funding_tx); + self.broadcast_interactive_funding(channel, &funding_tx, &self.logger); } if let Some(splice_negotiated) = splice_negotiated { @@ -9594,6 +9619,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ node_id: counterparty_node_id, msg: tx_signatures, }); + if let Some(splice_locked) = splice_locked { + pending_msg_events.push(MessageSendEvent::SendSpliceLocked { + node_id: counterparty_node_id, + msg: splice_locked, + }); + } } }, Ok(FundingTxSigned { tx_signatures: None, .. }) => { @@ -10572,20 +10603,32 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ hash_map::Entry::Occupied(mut chan_entry) => { match chan_entry.get_mut().as_funded_mut() { Some(chan) => { - let FundingTxSigned { tx_signatures, funding_tx, splice_negotiated } = - try_channel_entry!(self, peer_state, chan.tx_signatures(msg), chan_entry); + let best_block_height = self.best_block.read().unwrap().height; + let FundingTxSigned { + tx_signatures, + funding_tx, + splice_negotiated, + splice_locked, + } = try_channel_entry!( + self, + peer_state, + chan.tx_signatures(msg, best_block_height, &self.logger), + chan_entry + ); if let Some(tx_signatures) = tx_signatures { peer_state.pending_msg_events.push(MessageSendEvent::SendTxSignatures { node_id: *counterparty_node_id, msg: tx_signatures, }); } + if let Some(splice_locked) = splice_locked { + peer_state.pending_msg_events.push(MessageSendEvent::SendSpliceLocked { + node_id: *counterparty_node_id, + msg: splice_locked, + }); + } if let Some(ref funding_tx) = funding_tx { - self.tx_broadcaster.broadcast_transactions(&[funding_tx]); - { - let mut pending_events = self.pending_events.lock().unwrap(); - emit_channel_pending_event!(pending_events, chan); - } + self.broadcast_interactive_funding(chan, funding_tx, &self.logger); } if let Some(splice_negotiated) = splice_negotiated { self.pending_events.lock().unwrap().push_back(( diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 9845d6de738..78378978695 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -21,7 +21,9 @@ use crate::events::{ ClaimedHTLC, ClosureReason, Event, HTLCHandlingFailureType, PaidBolt12Invoice, PathFailure, PaymentFailureReason, PaymentPurpose, }; -use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC}; +use crate::ln::chan_utils::{ + commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, TRUC_MAX_WEIGHT, +}; use crate::ln::channelmanager::{ AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder, RecipientOnionFields, MIN_CLTV_EXPIRY_DELTA, @@ -58,6 +60,7 @@ use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash as _; use bitcoin::locktime::absolute::{LockTime, LOCK_TIME_THRESHOLD}; use bitcoin::network::Network; +use bitcoin::policy::MAX_STANDARD_TX_WEIGHT; use bitcoin::pow::CompactTarget; use bitcoin::script::ScriptBuf; use bitcoin::secp256k1::{PublicKey, SecretKey}; @@ -400,12 +403,18 @@ fn do_connect_block_without_consistency_checks<'a, 'b, 'c, 'd>( } pub fn provide_anchor_reserves<'a, 'b, 'c>(nodes: &[Node<'a, 'b, 'c>]) -> Transaction { + provide_anchor_utxo_reserves(nodes, 1, Amount::ONE_BTC) +} + +pub fn provide_anchor_utxo_reserves<'a, 'b, 'c>( + nodes: &[Node<'a, 'b, 'c>], utxos: usize, amount: Amount, +) -> Transaction { let mut output = Vec::with_capacity(nodes.len()); for node in nodes { - output.push(TxOut { - value: Amount::ONE_BTC, - script_pubkey: node.wallet_source.get_change_script().unwrap(), - }); + let script_pubkey = node.wallet_source.get_change_script().unwrap(); + for _ in 0..utxos { + output.push(TxOut { value: amount, script_pubkey: script_pubkey.clone() }); + } } let tx = Transaction { version: TxVersion::TWO, @@ -1573,15 +1582,19 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, initiator_config: Option, ) -> (bitcoin::Transaction, ChannelId) { - let initiator_channels = initiator.node.list_usable_channels().len(); - 
let receiver_channels = receiver.node.list_usable_channels().len(); + open_zero_conf_channel_with_value(initiator, receiver, initiator_config, 100_000, 10_001) +} +pub fn exchange_open_accept_zero_conf_chan<'a, 'b, 'c, 'd>( + initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, + initiator_config: Option, channel_value_sat: u64, push_msat: u64, +) -> ChannelId { let receiver_node_id = receiver.node.get_our_node_id(); let initiator_node_id = initiator.node.get_our_node_id(); initiator .node - .create_channel(receiver_node_id, 100_000, 10_001, 42, None, initiator_config) + .create_channel(receiver_node_id, channel_value_sat, push_msat, 42, None, initiator_config) .unwrap(); let open_channel = get_event_msg!(initiator, MessageSendEvent::SendOpenChannel, receiver_node_id); @@ -1609,8 +1622,30 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( assert_eq!(accept_channel.common_fields.minimum_depth, 0); initiator.node.handle_accept_channel(receiver_node_id, &accept_channel); + accept_channel.common_fields.temporary_channel_id +} + +// Receiver must have been initialized with manually_accept_inbound_channels set to true. +pub fn open_zero_conf_channel_with_value<'a, 'b, 'c, 'd>( + initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, + initiator_config: Option, channel_value_sat: u64, push_msat: u64, +) -> (bitcoin::Transaction, ChannelId) { + let initiator_channels = initiator.node.list_usable_channels().len(); + let receiver_channels = receiver.node.list_usable_channels().len(); + + let receiver_node_id = receiver.node.get_our_node_id(); + let initiator_node_id = initiator.node.get_our_node_id(); + + exchange_open_accept_zero_conf_chan( + initiator, + receiver, + initiator_config, + channel_value_sat, + push_msat, + ); + let (temporary_channel_id, tx, _) = - create_funding_transaction(&initiator, &receiver_node_id, 100_000, 42); + create_funding_transaction(&initiator, &receiver_node_id, channel_value_sat, 42); initiator .node .funding_transaction_generated(temporary_channel_id, receiver_node_id, tx.clone()) @@ -1796,6 +1831,111 @@ pub fn create_chan_between_nodes_with_value_a<'a, 'b, 'c: 'd, 'd>( (msgs, chan_id, tx) } +pub fn create_channel_manual_funding<'a, 'b, 'c: 'd, 'd>( + nodes: &'a Vec>, initiator: usize, counterparty: usize, channel_value: u64, + push_msat: u64, zero_conf: bool, +) -> (ChannelId, Transaction, OutPoint) { + let node_a = &nodes[initiator]; + let node_b = &nodes[counterparty]; + let node_a_id = node_a.node.get_our_node_id(); + let node_b_id = node_b.node.get_our_node_id(); + + let temp_channel_id = if zero_conf { + exchange_open_accept_zero_conf_chan(node_a, node_b, None, channel_value, push_msat) + } else { + exchange_open_accept_chan(node_a, node_b, channel_value, push_msat) + }; + + let (funding_temp_id, funding_tx, funding_outpoint) = + create_funding_transaction(node_a, &node_b_id, channel_value, 42); + assert_eq!(temp_channel_id, funding_temp_id); + + node_a + .node + .funding_transaction_generated_manual_broadcast( + funding_temp_id, + node_b_id, + funding_tx.clone(), + ) + .unwrap(); + check_added_monitors!(node_a, 0); + + let funding_created = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b_id); + node_b.node.handle_funding_created(node_a_id, &funding_created); + check_added_monitors!(node_b, 1); + let channel_id_b = expect_channel_pending_event(node_b, &node_a_id); + + if zero_conf { + let bs_signed_locked = node_b.node.get_and_clear_pending_msg_events(); + assert_eq!(bs_signed_locked.len(), 2); + match 
&bs_signed_locked[0] { + MessageSendEvent::SendFundingSigned { node_id, msg } => { + assert_eq!(*node_id, node_a_id); + node_a.node.handle_funding_signed(node_b_id, &msg); + check_added_monitors(node_a, 1); + + assert!(node_a.tx_broadcaster.txn_broadcast().is_empty()); + + let as_channel_ready = + get_event_msg!(node_a, MessageSendEvent::SendChannelReady, node_b_id); + node_b.node.handle_channel_ready(node_a_id, &as_channel_ready); + expect_channel_ready_event(node_b, &node_a_id); + }, + _ => panic!("Unexpected event"), + } + match &bs_signed_locked[1] { + MessageSendEvent::SendChannelReady { node_id, msg } => { + assert_eq!(*node_id, node_a_id); + node_a.node.handle_channel_ready(node_b_id, &msg); + }, + _ => panic!("Unexpected event"), + } + } else { + let funding_signed = get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a_id); + node_a.node.handle_funding_signed(node_b_id, &funding_signed); + check_added_monitors(node_a, 1) + } + + let events = node_a.node.get_and_clear_pending_events(); + assert_eq!(events.len(), if zero_conf { 3 } else { 2 }); + let funding_txid = funding_tx.compute_txid(); + let mut channel_id = None; + for event in events { + match event { + Event::FundingTxBroadcastSafe { funding_txo, counterparty_node_id, .. } => { + assert_eq!(counterparty_node_id, node_b_id); + assert_eq!(funding_txo.txid, funding_txid); + assert_eq!(funding_txo.vout, u32::from(funding_outpoint.index)); + }, + Event::ChannelPending { channel_id: pending_id, counterparty_node_id, .. } => { + assert_eq!(counterparty_node_id, node_b_id); + channel_id = Some(pending_id); + }, + Event::ChannelReady { channel_id: pending_id, counterparty_node_id, .. } => { + assert_eq!(counterparty_node_id, node_b_id); + channel_id = Some(pending_id); + }, + _ => panic!("Unexpected event"), + } + } + let channel_id = channel_id.expect("channel pending event missing"); + assert_eq!(channel_id, channel_id_b); + + assert!(node_a.tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty()); + + if zero_conf { + let as_channel_update = + get_event_msg!(node_a, MessageSendEvent::SendChannelUpdate, node_b_id); + let bs_channel_update = + get_event_msg!(node_b, MessageSendEvent::SendChannelUpdate, node_a_id); + + node_a.node.handle_channel_update(node_b_id, &bs_channel_update); + node_b.node.handle_channel_update(node_a_id, &as_channel_update); + } + + (channel_id, funding_tx, funding_outpoint) +} + pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>( node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, as_funding_msgs: &(msgs::ChannelReady, msgs::AnnouncementSignatures), @@ -1964,6 +2104,11 @@ pub fn update_nodes_with_chan_announce<'a, 'b, 'c, 'd>( pub fn do_check_spends Option>( tx: &Transaction, get_output: F, ) { + if tx.version == TxVersion::non_standard(3) { + assert!(tx.weight().to_wu() <= TRUC_MAX_WEIGHT); + } else { + assert!(tx.weight().to_wu() <= MAX_STANDARD_TX_WEIGHT as u64); + } let mut p2a_output_below_dust = false; let mut has_p2a_output = false; for outp in tx.output.iter() { @@ -2486,6 +2631,15 @@ pub fn expect_and_process_pending_htlcs(node: &Node<'_, '_, '_>, process_twice: assert!(!node.node.needs_pending_htlc_processing()); } +/// Processes an HTLC which is pending forward but will fail to forward when we process it here. 
+pub fn expect_htlc_forwarding_fails( + node: &Node<'_, '_, '_>, expected_failure: &[HTLCHandlingFailureType], +) { + expect_and_process_pending_htlcs(node, false); + let events = node.node.get_and_clear_pending_events(); + expect_htlc_failure_conditions(events, expected_failure); +} + #[macro_export] /// Performs the "commitment signed dance" - the series of message exchanges which occur after a /// commitment update. @@ -4871,6 +5025,13 @@ macro_rules! handle_chan_reestablish_msgs { had_channel_update = true; } + let mut stfu = None; + if let Some(&MessageSendEvent::SendStfu { ref node_id, ref msg }) = msg_events.get(idx) { + idx += 1; + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + stfu = Some(msg.clone()); + } + let mut revoke_and_ack = None; let mut commitment_update = None; let order = if let Some(ev) = msg_events.get(idx) { @@ -4946,7 +5107,15 @@ macro_rules! handle_chan_reestablish_msgs { assert_eq!(msg_events.len(), idx, "{msg_events:?}"); - (channel_ready, revoke_and_ack, commitment_update, order, announcement_sigs, tx_signatures) + ( + channel_ready, + revoke_and_ack, + commitment_update, + order, + announcement_sigs, + tx_signatures, + stfu, + ) }}; } @@ -4955,6 +5124,7 @@ pub struct ReconnectArgs<'a, 'b, 'c, 'd> { pub node_b: &'a Node<'b, 'c, 'd>, pub send_channel_ready: (bool, bool), pub send_announcement_sigs: (bool, bool), + pub send_stfu: (bool, bool), pub send_interactive_tx_commit_sig: (bool, bool), pub send_interactive_tx_sigs: (bool, bool), pub expect_renegotiated_funding_locked_monitor_update: (bool, bool), @@ -4977,6 +5147,7 @@ impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> { node_b, send_channel_ready: (false, false), send_announcement_sigs: (false, false), + send_stfu: (false, false), send_interactive_tx_commit_sig: (false, false), send_interactive_tx_sigs: (false, false), expect_renegotiated_funding_locked_monitor_update: (false, false), @@ -5000,6 +5171,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_b, send_channel_ready, send_announcement_sigs, + send_stfu, send_interactive_tx_commit_sig, send_interactive_tx_sigs, expect_renegotiated_funding_locked_monitor_update, @@ -5118,6 +5290,12 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { } else { assert!(chan_msgs.4.is_none()); } + if send_stfu.0 { + let stfu = chan_msgs.6.take().unwrap(); + node_a.node.handle_stfu(node_b_id, &stfu); + } else { + assert!(chan_msgs.6.is_none()); + } if send_interactive_tx_commit_sig.0 { assert!(chan_msgs.1.is_none()); let commitment_update = chan_msgs.2.take().unwrap(); @@ -5224,6 +5402,12 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { } else { assert!(chan_msgs.4.is_none()); } + if send_stfu.1 { + let stfu = chan_msgs.6.take().unwrap(); + node_b.node.handle_stfu(node_a_id, &stfu); + } else { + assert!(chan_msgs.6.is_none()); + } if send_interactive_tx_commit_sig.1 { assert!(chan_msgs.1.is_none()); let commitment_update = chan_msgs.2.take().unwrap(); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index d79b3074cc7..876324737a6 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -9628,6 +9628,120 @@ pub fn test_remove_expired_inbound_unfunded_channels() { check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } +fn do_test_manual_broadcast_skips_commitment_until_funding( + force_broadcast: bool, close_by_timeout: bool, zero_conf_open: bool, +) { + // Checks that 
commitment (and HTLC) transactions will not be broadcast for manual-funded + // channels until either the funding transaction is seen on-chain or the channel is manually + // forced to broadcast using `ChannelMonitor::broadcast_latest_holder_commitment_txn`. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut chan_config = test_default_channel_config(); + if zero_conf_open { + chan_config.manually_accept_inbound_channels = true; + } + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let (channel_id, funding_tx, funding_outpoint) = + create_channel_manual_funding(&nodes, 0, 1, 100_000, 10_000, zero_conf_open); + + if close_by_timeout { + if !zero_conf_open { + panic!("Can't send a payment if we didn't open 0-conf"); + } + let (_payment_preimage, payment_hash, _payment_secret, _payment_id) = + route_payment(&nodes[0], &[&nodes[1]], 10_000_000); + nodes[1].node.get_and_clear_pending_events(); + + connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); + let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100_000); + + // On timeout, B will try to fail the HTLC back, but it's too late - A has already FC'd. + connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); + let failure = HTLCHandlingFailureType::Receive { payment_hash }; + expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[failure]); + get_htlc_update_msgs(&nodes[1], &node_a_id); + check_added_monitors(&nodes[1], 1); + } else { + let msg = "manual close".to_owned(); + nodes[0] + .node + .force_close_broadcasting_latest_txn(&channel_id, &node_b_id, msg.clone()) + .unwrap(); + let reason = + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message: msg }; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100_000); + } + check_added_monitors(&nodes[0], 1); + assert_eq!(get_err_msg(&nodes[0], &node_b_id).channel_id, channel_id); + assert!(nodes[0].tx_broadcaster.txn_broadcast().is_empty()); + + let monitor_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert!(monitor_events.is_empty()); + + // The commitment tx should only be broadcast after either a manual broadcast call is made or + // the funding transaction appears on-chain. 
+ if force_broadcast { + let monitor = get_monitor!(&nodes[0], channel_id); + monitor.broadcast_latest_holder_commitment_txn( + &nodes[0].tx_broadcaster, + &nodes[0].fee_estimator, + &nodes[0].logger, + ); + } else { + mine_transaction(&nodes[0], &funding_tx); + mine_transaction(&nodes[1], &funding_tx); + } + + let funding_txid = funding_tx.compute_txid(); + let broadcasts = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(broadcasts.len(), if close_by_timeout { 2 } else { 1 }); + let commitment_tx = broadcasts + .iter() + .find(|tx| { + tx.input.iter().any(|input| { + input.previous_output.txid == funding_txid + && input.previous_output.vout == u32::from(funding_outpoint.index) + }) + }) + .expect("commitment transaction not broadcast"); + check_spends!(commitment_tx, funding_tx); + assert_eq!(commitment_tx.input.len(), 1); + let commitment_input = &commitment_tx.input[0]; + assert_eq!(commitment_input.previous_output.txid, funding_txid); + assert_eq!(commitment_input.previous_output.vout, u32::from(funding_outpoint.index)); + + if close_by_timeout { + let htlc_tx = broadcasts + .iter() + .find(|tx| { + tx.input + .iter() + .any(|input| input.previous_output.txid == commitment_tx.compute_txid()) + }) + .expect("HTLC claim transaction not broadcast"); + check_spends!(htlc_tx, commitment_tx); + } + + let monitor_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert!(monitor_events.iter().all(|event| !matches!(event, Event::BumpTransaction(_)))); +} + +#[test] +fn test_manual_broadcast_skips_commitment_until_funding() { + do_test_manual_broadcast_skips_commitment_until_funding(true, true, true); + do_test_manual_broadcast_skips_commitment_until_funding(true, false, true); + do_test_manual_broadcast_skips_commitment_until_funding(true, false, false); + do_test_manual_broadcast_skips_commitment_until_funding(false, true, true); + do_test_manual_broadcast_skips_commitment_until_funding(false, false, true); + do_test_manual_broadcast_skips_commitment_until_funding(false, false, false); +} + fn do_test_multi_post_event_actions(do_reload: bool) { // Tests handling multiple post-Event actions at once. 
// There is specific code in ChannelManager to handle channels where multiple post-Event diff --git a/lightning/src/ln/funding.rs b/lightning/src/ln/funding.rs index db1d9169c56..ae7e36ac452 100644 --- a/lightning/src/ln/funding.rs +++ b/lightning/src/ln/funding.rs @@ -12,7 +12,8 @@ use bitcoin::{Amount, ScriptBuf, SignedAmount, TxOut}; use bitcoin::{Script, Sequence, Transaction, Weight}; -use crate::events::bump_transaction::{Utxo, EMPTY_SCRIPT_SIG_WEIGHT}; +use crate::events::bump_transaction::Utxo; +use crate::ln::chan_utils::EMPTY_SCRIPT_SIG_WEIGHT; use crate::prelude::Vec; use crate::sign::{P2TR_KEY_PATH_WITNESS_WEIGHT, P2WPKH_WITNESS_WEIGHT}; diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs index a912db02a20..43feccd53d3 100644 --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@ -27,8 +27,9 @@ use bitcoin::{ }; use crate::chain::chaininterface::fee_for_weight; -use crate::events::bump_transaction::{BASE_INPUT_WEIGHT, EMPTY_SCRIPT_SIG_WEIGHT}; -use crate::ln::chan_utils::FUNDING_TRANSACTION_WITNESS_WEIGHT; +use crate::ln::chan_utils::{ + BASE_INPUT_WEIGHT, EMPTY_SCRIPT_SIG_WEIGHT, FUNDING_TRANSACTION_WITNESS_WEIGHT, +}; use crate::ln::channel::{FundingNegotiationContext, TOTAL_BITCOIN_SUPPLY_SATOSHIS}; use crate::ln::funding::FundingTxInput; use crate::ln::msgs; diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index bae55584575..9473142cfed 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -120,8 +120,8 @@ mod reorg_tests; #[cfg(test)] #[allow(unused_mut)] mod shutdown_tests; -#[cfg(test)] -mod splicing_tests; +#[cfg(any(feature = "_test_utils", test))] +pub mod splicing_tests; #[cfg(any(test, feature = "_externalize_tests"))] #[allow(unused_mut)] pub mod update_fee_tests; diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 3cf6c6cc2ad..74f081b03ae 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -632,16 +632,15 @@ pub trait SocketDescriptor: cmp::Eq + hash::Hash + Clone { /// /// If the returned size is smaller than `data.len()`, a /// [`PeerManager::write_buffer_space_avail`] call must be made the next time more data can be - /// written. Additionally, until a `send_data` event completes fully, no further - /// [`PeerManager::read_event`] calls should be made for the same peer! Because this is to - /// prevent denial-of-service issues, you should not read or buffer any data from the socket - /// until then. + /// written. /// - /// If a [`PeerManager::read_event`] call on this descriptor had previously returned true - /// (indicating that read events should be paused to prevent DoS in the send buffer), - /// `resume_read` may be set indicating that read events on this descriptor should resume. A - /// `resume_read` of false carries no meaning, and should not cause any action. - fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize; + /// If `continue_read` is *not* set, further [`PeerManager::read_event`] calls should be + /// avoided until another call is made with it set. This allows us to pause read if there are + /// too many outgoing messages queued for a peer to avoid DoS issues where a peer fills our + /// buffer by sending us messages that need response without reading the responses. + /// + /// Note that calls may be made with an empty `data` to update the `continue_read` flag. 
+ fn send_data(&mut self, data: &[u8], continue_read: bool) -> usize; /// Disconnect the socket pointed to by this SocketDescriptor. /// /// You do *not* need to call [`PeerManager::socket_disconnected`] with this socket after this @@ -782,6 +781,9 @@ struct Peer { /// Note that these messages are *not* encrypted/MAC'd, and are only serialized. gossip_broadcast_buffer: VecDeque, awaiting_write_event: bool, + /// Set to true if the last call to [`SocketDescriptor::send_data`] for this peer had the + /// `should_read` flag unset, indicating we've told the driver to stop reading from this peer. + sent_pause_read: bool, pending_read_buffer: Vec, pending_read_buffer_pos: usize, @@ -1441,6 +1443,7 @@ where pending_outbound_buffer_first_msg_offset: 0, gossip_broadcast_buffer: VecDeque::new(), awaiting_write_event: false, + sent_pause_read: false, pending_read_buffer, pending_read_buffer_pos: 0, @@ -1501,6 +1504,7 @@ where pending_outbound_buffer_first_msg_offset: 0, gossip_broadcast_buffer: VecDeque::new(), awaiting_write_event: false, + sent_pause_read: false, pending_read_buffer, pending_read_buffer_pos: 0, @@ -1523,7 +1527,7 @@ where } } - fn peer_should_read(&self, peer: &mut Peer) -> bool { + fn should_read_from(&self, peer: &mut Peer) -> bool { peer.should_read(self.gossip_processing_backlogged.load(Ordering::Relaxed)) } @@ -1536,10 +1540,14 @@ where } fn do_attempt_write_data( - &self, descriptor: &mut Descriptor, peer: &mut Peer, force_one_write: bool, + &self, descriptor: &mut Descriptor, peer: &mut Peer, mut force_one_write: bool, ) { - let mut have_written = false; - while !peer.awaiting_write_event { + // If we detect that we should be reading from the peer but reads are currently paused, or + // vice versa, then we need to tell the socket driver to update their internal flag + // indicating whether or not reads are paused. Do this by forcing a write with the desired + // `continue_read` flag set, even if no outbound messages are currently queued. 
+ force_one_write |= self.should_read_from(peer) == peer.sent_pause_read; + while force_one_write || !peer.awaiting_write_event { if peer.should_buffer_onion_message() { if let Some((peer_node_id, _)) = peer.their_node_id { let handler = &self.message_handler.onion_message_handler; @@ -1604,23 +1612,23 @@ where self.maybe_send_extra_ping(peer); } - let should_read = self.peer_should_read(peer); + let should_read = self.should_read_from(peer); let next_buff = match peer.pending_outbound_buffer.front() { None => { - if force_one_write && !have_written { - if should_read { - let data_sent = descriptor.send_data(&[], should_read); - debug_assert_eq!(data_sent, 0, "Can't write more than no data"); - } + if force_one_write { + let data_sent = descriptor.send_data(&[], should_read); + debug_assert_eq!(data_sent, 0, "Can't write more than no data"); + peer.sent_pause_read = !should_read; } return; }, Some(buff) => buff, }; + force_one_write = false; let pending = &next_buff[peer.pending_outbound_buffer_first_msg_offset..]; let data_sent = descriptor.send_data(pending, should_read); - have_written = true; + peer.sent_pause_read = !should_read; peer.pending_outbound_buffer_first_msg_offset += data_sent; if peer.pending_outbound_buffer_first_msg_offset == next_buff.len() { peer.pending_outbound_buffer_first_msg_offset = 0; @@ -1664,7 +1672,10 @@ where Some(peer_mutex) => { let mut peer = peer_mutex.lock().unwrap(); peer.awaiting_write_event = false; - self.do_attempt_write_data(descriptor, &mut peer, false); + // We go ahead and force at least one write here, because if we don't have any + // messages to send and the net driver thought we did that's weird, so they might + // also have a confused read-paused state that we should go ahead and clear. + self.do_attempt_write_data(descriptor, &mut peer, true); }, }; Ok(()) @@ -1676,11 +1687,9 @@ where /// /// Will *not* call back into [`send_data`] on any descriptors to avoid reentrancy complexity. /// Thus, however, you should call [`process_events`] after any `read_event` to generate - /// [`send_data`] calls to handle responses. - /// - /// If `Ok(true)` is returned, further read_events should not be triggered until a - /// [`send_data`] call on this descriptor has `resume_read` set (preventing DoS issues in the - /// send buffer). + /// [`send_data`] calls to handle responses. This is also important to give [`send_data`] calls + /// a chance to pause reads if too many messages have been queued in response allowing a peer + /// to bloat our memory. /// /// In order to avoid processing too many messages at once per peer, `data` should be on the /// order of 4KiB. 
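As a concrete illustration of the `continue_read` contract documented above, the sketch below shows the minimal state a custom socket driver might keep; it mirrors the lightning-net-tokio changes earlier in this patch. This is an editorial sketch, not part of the diff: `SketchDescriptor` and `SocketState` are hypothetical names, and a real implementation would additionally need `Eq + Hash + Clone` semantics plus `disconnect_socket` to satisfy the full `SocketDescriptor` trait.

use std::sync::{Arc, Mutex};

/// Hypothetical per-connection state a driver might keep.
struct SocketState {
    /// Mirrors the last `continue_read` value passed to `send_data`.
    read_paused: bool,
    /// Bytes accepted from the PeerManager but not yet written to the socket.
    outbound: Vec<u8>,
}

#[derive(Clone)]
struct SketchDescriptor {
    state: Arc<Mutex<SocketState>>,
}

impl SketchDescriptor {
    /// The shape `send_data` takes under the new contract: always record the read-pause state,
    /// even for empty `data` calls, then report how many bytes were accepted.
    fn send_data(&mut self, data: &[u8], continue_read: bool) -> usize {
        let mut state = self.state.lock().unwrap();
        state.read_paused = !continue_read;
        // A real driver may accept fewer bytes than `data.len()`; if so it must later call
        // `PeerManager::write_buffer_space_avail` once more room frees up.
        state.outbound.extend_from_slice(data);
        data.len()
    }

    /// Driver read path: only feed bytes to `PeerManager::read_event` while reads are not
    /// paused, and call `PeerManager::process_events` after each `read_event` so responses
    /// (and any read-pause updates issued via `send_data`) are flushed.
    fn reads_allowed(&self) -> bool {
        !self.state.lock().unwrap().read_paused
    }
}

The key point the sketch illustrates is that the pause flag is updated on every `send_data` call, including empty ones, which is why `do_attempt_write_data` above forces a write whenever the desired read state disagrees with the last flag it sent.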
@@ -1689,7 +1698,7 @@ where /// [`process_events`]: PeerManager::process_events pub fn read_event( &self, peer_descriptor: &mut Descriptor, data: &[u8], - ) -> Result { + ) -> Result<(), PeerHandleError> { match self.do_read_event(peer_descriptor, data) { Ok(res) => Ok(res), Err(e) => { @@ -1718,8 +1727,7 @@ where fn do_read_event( &self, peer_descriptor: &mut Descriptor, data: &[u8], - ) -> Result { - let mut pause_read = false; + ) -> Result<(), PeerHandleError> { let peers = self.peers.read().unwrap(); let mut msgs_to_forward = Vec::new(); let mut peer_node_id = None; @@ -1994,7 +2002,6 @@ where }, } } - pause_read = !self.peer_should_read(peer); if let Some(message) = msg_to_handle { match self.handle_message(&peer_mutex, peer_lock, message) { @@ -2027,7 +2034,7 @@ where ); } - Ok(pause_read) + Ok(()) } /// Process an incoming message and return a decision (ok, lightning error, peer handling error) regarding the next action with the peer @@ -3725,7 +3732,7 @@ mod tests { } impl SocketDescriptor for FileDescriptor { - fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize { + fn send_data(&mut self, data: &[u8], _continue_read: bool) -> usize { if self.hang_writes.load(Ordering::Acquire) { 0 } else { @@ -3939,12 +3946,8 @@ mod tests { fn try_establish_connection<'a>( peer_a: &TestPeer<'a>, peer_b: &TestPeer<'a>, - ) -> ( - FileDescriptor, - FileDescriptor, - Result, - Result, - ) { + ) -> (FileDescriptor, FileDescriptor, Result<(), PeerHandleError>, Result<(), PeerHandleError>) + { let addr_a = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1000 }; let addr_b = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1001 }; @@ -3958,11 +3961,11 @@ mod tests { let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap(); peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap(); - assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false); + peer_a.read_event(&mut fd_a, &initial_data).unwrap(); peer_a.process_events(); let a_data = fd_a.outbound_data.lock().unwrap().split_off(0); - assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false); + peer_b.read_event(&mut fd_b, &a_data).unwrap(); peer_b.process_events(); let b_data = fd_b.outbound_data.lock().unwrap().split_off(0); @@ -3989,8 +3992,8 @@ mod tests { let (fd_a, fd_b, a_refused, b_refused) = try_establish_connection(peer_a, peer_b); - assert_eq!(a_refused.unwrap(), false); - assert_eq!(b_refused.unwrap(), false); + a_refused.unwrap(); + b_refused.unwrap(); assert_eq!(peer_a.peer_by_node_id(&id_b).unwrap().counterparty_node_id, id_b); assert_eq!(peer_a.peer_by_node_id(&id_b).unwrap().socket_address, Some(addr_b)); @@ -4113,11 +4116,11 @@ mod tests { let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap(); peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap(); - assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false); + peer_a.read_event(&mut fd_a, &initial_data).unwrap(); peer_a.process_events(); let a_data = fd_a.outbound_data.lock().unwrap().split_off(0); - assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false); + peer_b.read_event(&mut fd_b, &a_data).unwrap(); peer_b.process_events(); let b_data = fd_b.outbound_data.lock().unwrap().split_off(0); @@ -4144,11 +4147,11 @@ mod tests { let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap(); peer_a.new_inbound_connection(fd_a.clone(), 
Some(addr_b.clone())).unwrap(); - assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false); + peer_a.read_event(&mut fd_a, &initial_data).unwrap(); peer_a.process_events(); let a_data = fd_a.outbound_data.lock().unwrap().split_off(0); - assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false); + peer_b.read_event(&mut fd_b, &a_data).unwrap(); peer_b.process_events(); let b_data = fd_b.outbound_data.lock().unwrap().split_off(0); @@ -4220,7 +4223,7 @@ mod tests { peers[0].process_events(); let a_data = fd_a.outbound_data.lock().unwrap().split_off(0); - assert_eq!(peers[1].read_event(&mut fd_b, &a_data).unwrap(), false); + peers[1].read_event(&mut fd_b, &a_data).unwrap(); } #[test] @@ -4240,13 +4243,13 @@ mod tests { let mut dup_encryptor = PeerChannelEncryptor::new_outbound(id_a, SecretKey::from_slice(&[42; 32]).unwrap()); let initial_data = dup_encryptor.get_act_one(&peers[1].secp_ctx); - assert_eq!(peers[0].read_event(&mut fd_dup, &initial_data).unwrap(), false); + peers[0].read_event(&mut fd_dup, &initial_data).unwrap(); peers[0].process_events(); let a_data = fd_dup.outbound_data.lock().unwrap().split_off(0); let (act_three, _) = dup_encryptor.process_act_two(&a_data[..], &&cfgs[1].node_signer).unwrap(); - assert_eq!(peers[0].read_event(&mut fd_dup, &act_three).unwrap(), false); + peers[0].read_event(&mut fd_dup, &act_three).unwrap(); let not_init_msg = msgs::Ping { ponglen: 4, byteslen: 0 }; let msg_bytes = dup_encryptor.encrypt_message(¬_init_msg); @@ -4504,10 +4507,10 @@ mod tests { assert_eq!(peers_len, 1); } - assert_eq!(peers[0].read_event(&mut fd_a, &initial_data).unwrap(), false); + peers[0].read_event(&mut fd_a, &initial_data).unwrap(); peers[0].process_events(); let a_data = fd_a.outbound_data.lock().unwrap().split_off(0); - assert_eq!(peers[1].read_event(&mut fd_b, &a_data).unwrap(), false); + peers[1].read_event(&mut fd_b, &a_data).unwrap(); peers[1].process_events(); // ...but if we get a second timer tick, we should disconnect the peer @@ -4557,11 +4560,11 @@ mod tests { let act_one = peer_b.new_outbound_connection(a_id, fd_b.clone(), None).unwrap(); peer_a.new_inbound_connection(fd_a.clone(), None).unwrap(); - assert_eq!(peer_a.read_event(&mut fd_a, &act_one).unwrap(), false); + peer_a.read_event(&mut fd_a, &act_one).unwrap(); peer_a.process_events(); let act_two = fd_a.outbound_data.lock().unwrap().split_off(0); - assert_eq!(peer_b.read_event(&mut fd_b, &act_two).unwrap(), false); + peer_b.read_event(&mut fd_b, &act_two).unwrap(); peer_b.process_events(); // Calling this here triggers the race on inbound connections. 
@@ -4575,7 +4578,7 @@ mod tests { assert!(!handshake_complete); } - assert_eq!(peer_a.read_event(&mut fd_a, &act_three_with_init_b).unwrap(), false); + peer_a.read_event(&mut fd_a, &act_three_with_init_b).unwrap(); peer_a.process_events(); { @@ -4595,7 +4598,7 @@ mod tests { assert!(!handshake_complete); } - assert_eq!(peer_b.read_event(&mut fd_b, &init_a).unwrap(), false); + peer_b.read_event(&mut fd_b, &init_a).unwrap(); peer_b.process_events(); { @@ -4632,7 +4635,7 @@ mod tests { peer_a.process_events(); let msg = fd_a.outbound_data.lock().unwrap().split_off(0); assert!(!msg.is_empty()); - assert_eq!(peer_b.read_event(&mut fd_b, &msg).unwrap(), false); + peer_b.read_event(&mut fd_b, &msg).unwrap(); peer_b.process_events(); }; @@ -4675,12 +4678,12 @@ mod tests { let msg = fd_a.outbound_data.lock().unwrap().split_off(0); if !msg.is_empty() { - assert_eq!(peers[1].read_event(&mut fd_b, &msg).unwrap(), false); + peers[1].read_event(&mut fd_b, &msg).unwrap(); continue; } let msg = fd_b.outbound_data.lock().unwrap().split_off(0); if !msg.is_empty() { - assert_eq!(peers[0].read_event(&mut fd_a, &msg).unwrap(), false); + peers[0].read_event(&mut fd_a, &msg).unwrap(); continue; } break; diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index 3edd051d735..f95883e9434 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -7,6 +7,8 @@ // You may not use this file except in accordance with one or both of these // licenses. +#![cfg_attr(not(test), allow(unused_imports))] + use crate::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::chain::transaction::OutPoint; @@ -14,15 +16,18 @@ use crate::chain::ChannelMonitorUpdateStatus; use crate::events::bump_transaction::sync::WalletSourceSync; use crate::events::{ClosureReason, Event, FundingInfo, HTLCHandlingFailureType}; use crate::ln::chan_utils; -use crate::ln::channelmanager::BREAKDOWN_TIMEOUT; +use crate::ln::channel::CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY; +use crate::ln::channelmanager::{PaymentId, RecipientOnionFields, BREAKDOWN_TIMEOUT}; use crate::ln::functional_test_utils::*; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; use crate::ln::types::ChannelId; +use crate::routing::router::{PaymentParameters, RouteParameters}; use crate::util::errors::APIError; use crate::util::ser::Writeable; use crate::util::test_channel_signer::SignerOp; +use bitcoin::secp256k1::PublicKey; use bitcoin::{Amount, OutPoint as BitcoinOutPoint, ScriptBuf, Transaction, TxOut}; #[test] @@ -65,7 +70,7 @@ fn test_v1_splice_in_negative_insufficient_inputs() { } } -fn negotiate_splice_tx<'a, 'b, 'c, 'd>( +pub fn negotiate_splice_tx<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, initiator_contribution: SpliceContribution, ) -> msgs::CommitmentSigned { @@ -80,7 +85,7 @@ fn negotiate_splice_tx<'a, 'b, 'c, 'd>( ) } -fn complete_splice_handshake<'a, 'b, 'c, 'd>( +pub fn complete_splice_handshake<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, initiator_contribution: SpliceContribution, ) -> ScriptBuf { @@ -117,7 +122,7 @@ fn complete_splice_handshake<'a, 'b, 'c, 'd>( new_funding_script } -fn complete_interactive_funding_negotiation<'a, 'b, 'c, 'd>( +pub fn complete_interactive_funding_negotiation<'a, 
'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, initiator_contribution: SpliceContribution, new_funding_script: ScriptBuf, ) -> msgs::CommitmentSigned { @@ -206,25 +211,25 @@ fn complete_interactive_funding_negotiation<'a, 'b, 'c, 'd>( } } -fn sign_interactive_funding_transaction<'a, 'b, 'c, 'd>( +pub fn sign_interactive_funding_tx<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, - initial_commit_sig_for_acceptor: msgs::CommitmentSigned, -) { + initial_commit_sig_for_acceptor: msgs::CommitmentSigned, is_0conf: bool, +) -> (Transaction, Option<(msgs::SpliceLocked, PublicKey)>) { let node_id_initiator = initiator.node.get_our_node_id(); let node_id_acceptor = acceptor.node.get_our_node_id(); assert!(initiator.node.get_and_clear_pending_msg_events().is_empty()); acceptor.node.handle_commitment_signed(node_id_initiator, &initial_commit_sig_for_acceptor); - let mut msg_events = acceptor.node.get_and_clear_pending_msg_events(); + let msg_events = acceptor.node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 2, "{msg_events:?}"); - if let MessageSendEvent::UpdateHTLCs { mut updates, .. } = msg_events.remove(0) { - let commitment_signed = updates.commitment_signed.remove(0); - initiator.node.handle_commitment_signed(node_id_acceptor, &commitment_signed); + if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = &msg_events[0] { + let commitment_signed = &updates.commitment_signed[0]; + initiator.node.handle_commitment_signed(node_id_acceptor, commitment_signed); } else { panic!(); } - if let MessageSendEvent::SendTxSignatures { ref msg, .. } = msg_events.remove(0) { + if let MessageSendEvent::SendTxSignatures { ref msg, .. } = &msg_events[1] { initiator.node.handle_tx_signatures(node_id_acceptor, msg); } else { panic!(); @@ -244,15 +249,37 @@ fn sign_interactive_funding_transaction<'a, 'b, 'c, 'd>( .funding_transaction_signed(&channel_id, &counterparty_node_id, partially_signed_tx) .unwrap(); } - let tx_signatures = - get_event_msg!(initiator, MessageSendEvent::SendTxSignatures, node_id_acceptor); - acceptor.node.handle_tx_signatures(node_id_initiator, &tx_signatures); + let mut msg_events = initiator.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), if is_0conf { 2 } else { 1 }, "{msg_events:?}"); + if let MessageSendEvent::SendTxSignatures { ref msg, .. } = &msg_events[0] { + acceptor.node.handle_tx_signatures(node_id_initiator, msg); + } else { + panic!(); + } + let splice_locked = if is_0conf { + if let MessageSendEvent::SendSpliceLocked { msg, .. 
} = msg_events.remove(1) { + Some((msg, node_id_acceptor)) + } else { + panic!(); + } + } else { + None + }; check_added_monitors(&initiator, 1); check_added_monitors(&acceptor, 1); + + let tx = { + let mut initiator_txn = initiator.tx_broadcaster.txn_broadcast(); + assert_eq!(initiator_txn.len(), 1); + let acceptor_txn = acceptor.tx_broadcaster.txn_broadcast(); + assert_eq!(initiator_txn, acceptor_txn,); + initiator_txn.remove(0) + }; + (tx, splice_locked) } -fn splice_channel<'a, 'b, 'c, 'd>( +pub fn splice_channel<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, initiator_contribution: SpliceContribution, ) -> Transaction { @@ -269,15 +296,9 @@ fn splice_channel<'a, 'b, 'c, 'd>( initiator_contribution, new_funding_script, ); - sign_interactive_funding_transaction(initiator, acceptor, initial_commit_sig_for_acceptor); - - let splice_tx = { - let mut initiator_txn = initiator.tx_broadcaster.txn_broadcast(); - assert_eq!(initiator_txn.len(), 1); - let acceptor_txn = acceptor.tx_broadcaster.txn_broadcast(); - assert_eq!(initiator_txn, acceptor_txn); - initiator_txn.remove(0) - }; + let (splice_tx, splice_locked) = + sign_interactive_funding_tx(initiator, acceptor, initial_commit_sig_for_acceptor, false); + assert!(splice_locked.is_none()); expect_splice_pending_event(initiator, &node_id_acceptor); expect_splice_pending_event(acceptor, &node_id_initiator); @@ -285,37 +306,47 @@ fn splice_channel<'a, 'b, 'c, 'd>( splice_tx } -fn lock_splice_after_blocks<'a, 'b, 'c, 'd>( - node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, - num_blocks: u32, +pub fn lock_splice_after_blocks<'a, 'b, 'c, 'd>( + node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, num_blocks: u32, +) { + connect_blocks(node_a, num_blocks); + connect_blocks(node_b, num_blocks); + + let node_id_b = node_b.node.get_our_node_id(); + let splice_locked_for_node_b = + get_event_msg!(node_a, MessageSendEvent::SendSpliceLocked, node_id_b); + lock_splice(node_a, node_b, &splice_locked_for_node_b, false); +} + +pub fn lock_splice<'a, 'b, 'c, 'd>( + node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, + splice_locked_for_node_b: &msgs::SpliceLocked, is_0conf: bool, ) { let (prev_funding_outpoint, prev_funding_script) = node_a .chain_monitor .chain_monitor - .get_monitor(channel_id) + .get_monitor(splice_locked_for_node_b.channel_id) .map(|monitor| (monitor.get_funding_txo(), monitor.get_funding_script())) .unwrap(); - connect_blocks(node_a, num_blocks); - connect_blocks(node_b, num_blocks); - let node_id_a = node_a.node.get_our_node_id(); let node_id_b = node_b.node.get_our_node_id(); - let splice_locked_a = get_event_msg!(node_a, MessageSendEvent::SendSpliceLocked, node_id_b); - node_b.node.handle_splice_locked(node_id_a, &splice_locked_a); + node_b.node.handle_splice_locked(node_id_a, splice_locked_for_node_b); let mut msg_events = node_b.node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 2, "{msg_events:?}"); + assert_eq!(msg_events.len(), if is_0conf { 1 } else { 2 }, "{msg_events:?}"); if let MessageSendEvent::SendSpliceLocked { msg, .. } = msg_events.remove(0) { node_a.node.handle_splice_locked(node_id_b, &msg); } else { panic!(); } - if let MessageSendEvent::SendAnnouncementSignatures { msg, .. } = msg_events.remove(0) { - node_a.node.handle_announcement_signatures(node_id_b, &msg); - } else { - panic!(); + if !is_0conf { + if let MessageSendEvent::SendAnnouncementSignatures { msg, .. 
} = msg_events.remove(0) { + node_a.node.handle_announcement_signatures(node_id_b, &msg); + } else { + panic!(); + } } expect_channel_ready_event(&node_a, &node_id_b); @@ -323,23 +354,25 @@ fn lock_splice_after_blocks<'a, 'b, 'c, 'd>( expect_channel_ready_event(&node_b, &node_id_a); check_added_monitors(&node_b, 1); - let mut msg_events = node_a.node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 2, "{msg_events:?}"); - if let MessageSendEvent::SendAnnouncementSignatures { msg, .. } = msg_events.remove(0) { - node_b.node.handle_announcement_signatures(node_id_a, &msg); - } else { - panic!(); - } - if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = msg_events.remove(0) { - } else { - panic!(); - } + if !is_0conf { + let mut msg_events = node_a.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2, "{msg_events:?}"); + if let MessageSendEvent::SendAnnouncementSignatures { msg, .. } = msg_events.remove(0) { + node_b.node.handle_announcement_signatures(node_id_a, &msg); + } else { + panic!(); + } + if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = msg_events.remove(0) { + } else { + panic!(); + } - let mut msg_events = node_b.node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1, "{msg_events:?}"); - if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = msg_events.remove(0) { - } else { - panic!(); + let mut msg_events = node_b.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = msg_events.remove(0) { + } else { + panic!(); + } } // Remove the corresponding outputs and transactions the chain source is watching for the @@ -356,6 +389,7 @@ fn test_splice_state_reset_on_disconnect() { do_test_splice_state_reset_on_disconnect(true); } +#[cfg(test)] fn do_test_splice_state_reset_on_disconnect(reload: bool) { // Tests that we're able to forget our pending splice state after a disconnect such that we can // retry later. 
@@ -533,7 +567,7 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { mine_transaction(&nodes[0], &splice_tx); mine_transaction(&nodes[1], &splice_tx); - lock_splice_after_blocks(&nodes[0], &nodes[1], channel_id, ANTI_REORG_DELAY - 1); + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); } #[test] @@ -633,7 +667,7 @@ fn test_splice_in() { assert!(htlc_limit_msat < initial_channel_value_sat * 1000); let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); - lock_splice_after_blocks(&nodes[0], &nodes[1], channel_id, ANTI_REORG_DELAY - 1); + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; assert!(htlc_limit_msat > initial_channel_value_sat); @@ -676,13 +710,14 @@ fn test_splice_out() { assert!(htlc_limit_msat < initial_channel_value_sat / 2 * 1000); let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); - lock_splice_after_blocks(&nodes[0], &nodes[1], channel_id, ANTI_REORG_DELAY - 1); + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; assert!(htlc_limit_msat < initial_channel_value_sat / 2 * 1000); let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); } +#[cfg(test)] #[derive(PartialEq)] enum SpliceStatus { Unconfirmed, @@ -700,6 +735,7 @@ fn test_splice_commitment_broadcast() { do_test_splice_commitment_broadcast(SpliceStatus::Locked, true); } +#[cfg(test)] fn do_test_splice_commitment_broadcast(splice_status: SpliceStatus, claim_htlcs: bool) { // Tests that we're able to enforce HTLCs onchain during the different stages of a splice. let chanmon_cfgs = create_chanmon_cfgs(2); @@ -736,7 +772,7 @@ fn do_test_splice_commitment_broadcast(splice_status: SpliceStatus, claim_htlcs: mine_transaction(&nodes[1], &splice_tx); } if splice_status == SpliceStatus::Locked { - lock_splice_after_blocks(&nodes[0], &nodes[1], channel_id, ANTI_REORG_DELAY - 1); + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); } if claim_htlcs { @@ -895,6 +931,7 @@ fn test_splice_reestablish() { do_test_splice_reestablish(true, true); } +#[cfg(test)] fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { // Test that we're able to reestablish the channel successfully throughout the lifecycle of a splice. let chanmon_cfgs = create_chanmon_cfgs(2); @@ -1149,6 +1186,320 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { .remove_watched_txn_and_outputs(prev_funding_outpoint, prev_funding_script); } +#[test] +fn test_propose_splice_while_disconnected() { + do_test_propose_splice_while_disconnected(false, false); + do_test_propose_splice_while_disconnected(false, true); + do_test_propose_splice_while_disconnected(true, false); + do_test_propose_splice_while_disconnected(true, true); +} + +#[cfg(test)] +fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { + // Test that both nodes are able to propose a splice while the counterparty is disconnected, and + // that whichever node loses the quiescence tie-breaker will retry its splice after the first + // one becomes locked. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let (persister_0a, persister_0b, persister_1a, persister_1b); + let (chain_monitor_0a, chain_monitor_0b, chain_monitor_1a, chain_monitor_1b); + let mut config = test_default_channel_config(); + if use_0conf { + config.manually_accept_inbound_channels = true; + config.channel_handshake_limits.trust_own_funding_0conf = true; + } + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let (node_0a, node_0b, node_1a, node_1b); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_id_0 = nodes[0].node.get_our_node_id(); + let node_id_1 = nodes[1].node.get_our_node_id(); + + let initial_channel_value_sat = 1_000_000; + let push_msat = initial_channel_value_sat / 2 * 1000; + let channel_id = if use_0conf { + let (funding_tx, channel_id) = open_zero_conf_channel_with_value( + &nodes[0], + &nodes[1], + None, + initial_channel_value_sat, + push_msat, + ); + mine_transaction(&nodes[0], &funding_tx); + mine_transaction(&nodes[1], &funding_tx); + channel_id + } else { + let (_, _, channel_id, _) = create_announced_chan_between_nodes_with_value( + &nodes, + 0, + 1, + initial_channel_value_sat, + push_msat, + ); + channel_id + }; + + // Start with the nodes disconnected, and have each one attempt a splice. + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + + let splice_out_sat = initial_channel_value_sat / 4; + let node_0_contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(splice_out_sat), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }], + }; + nodes[0] + .node + .splice_channel( + &channel_id, + &node_id_1, + node_0_contribution.clone(), + FEERATE_FLOOR_SATS_PER_KW, + None, + ) + .unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + let node_1_contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(splice_out_sat), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }], + }; + nodes[1] + .node + .splice_channel( + &channel_id, + &node_id_0, + node_1_contribution.clone(), + FEERATE_FLOOR_SATS_PER_KW, + None, + ) + .unwrap(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + if reload { + let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); + reload_node!( + nodes[0], + nodes[0].node.encode(), + &[&encoded_monitor_0], + persister_0a, + chain_monitor_0a, + node_0a + ); + let encoded_monitor_1 = get_monitor!(nodes[1], channel_id).encode(); + reload_node!( + nodes[1], + nodes[1].node.encode(), + &[&encoded_monitor_1], + persister_1a, + chain_monitor_1a, + node_1a + ); + } + + // Reconnect the nodes. Both nodes should attempt quiescence as the initiator, but only one will + // be it via the tie-breaker. 
+ let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_channel_ready = (true, true); + if !use_0conf { + reconnect_args.send_announcement_sigs = (true, true); + } + reconnect_args.send_stfu = (true, true); + reconnect_nodes(reconnect_args); + let splice_init = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceInit, node_id_1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + let (prev_funding_outpoint, prev_funding_script) = nodes[0] + .chain_monitor + .chain_monitor + .get_monitor(channel_id) + .map(|monitor| (monitor.get_funding_txo(), monitor.get_funding_script())) + .unwrap(); + + // Negotiate the first splice to completion. + let initial_commit_sig = { + nodes[1].node.handle_splice_init(node_id_0, &splice_init); + let splice_ack = get_event_msg!(nodes[1], MessageSendEvent::SendSpliceAck, node_id_0); + nodes[0].node.handle_splice_ack(node_id_1, &splice_ack); + let new_funding_script = chan_utils::make_funding_redeemscript( + &splice_init.funding_pubkey, + &splice_ack.funding_pubkey, + ) + .to_p2wsh(); + complete_interactive_funding_negotiation( + &nodes[0], + &nodes[1], + channel_id, + node_0_contribution, + new_funding_script, + ) + }; + let (splice_tx, splice_locked) = + sign_interactive_funding_tx(&nodes[0], &nodes[1], initial_commit_sig, use_0conf); + expect_splice_pending_event(&nodes[0], &node_id_1); + expect_splice_pending_event(&nodes[1], &node_id_0); + + let splice_locked = if use_0conf { + let (splice_locked, for_node_id) = splice_locked.unwrap(); + assert_eq!(for_node_id, node_id_1); + splice_locked + } else { + assert!(splice_locked.is_none()); + + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + + // Mine enough blocks for the first splice to become locked. + connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); + connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); + + get_event_msg!(nodes[0], MessageSendEvent::SendSpliceLocked, node_id_1) + }; + nodes[1].node.handle_splice_locked(node_id_0, &splice_locked); + + // We should see the node which lost the tie-breaker attempt their splice now by first + // negotiating quiescence, but their `stfu` won't be sent until after another reconnection. + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), if use_0conf { 2 } else { 3 }, "{msg_events:?}"); + if let MessageSendEvent::SendSpliceLocked { ref msg, .. } = &msg_events[0] { + nodes[0].node.handle_splice_locked(node_id_1, msg); + if use_0conf { + // TODO(splicing): Revisit splice transaction rebroadcasts. + let txn_0 = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn_0.len(), 1); + assert_eq!(&txn_0[0], &splice_tx); + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + } + } else { + panic!("Unexpected event {:?}", &msg_events[0]); + } + if !use_0conf { + if let MessageSendEvent::SendAnnouncementSignatures { ref msg, .. } = &msg_events[1] { + nodes[0].node.handle_announcement_signatures(node_id_1, msg); + } else { + panic!("Unexpected event {:?}", &msg_events[1]); + } + } + assert!(matches!( + &msg_events[if use_0conf { 1 } else { 2 }], + MessageSendEvent::SendStfu { .. } + )); + + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), if use_0conf { 0 } else { 2 }, "{msg_events:?}"); + if !use_0conf { + if let MessageSendEvent::SendAnnouncementSignatures { ref msg, .. 
} = &msg_events[0] { + nodes[1].node.handle_announcement_signatures(node_id_0, msg); + } else { + panic!("Unexpected event {:?}", &msg_events[1]); + } + assert!(matches!(&msg_events[1], MessageSendEvent::BroadcastChannelAnnouncement { .. })); + } + + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), if use_0conf { 0 } else { 1 }, "{msg_events:?}"); + if !use_0conf { + assert!(matches!(&msg_events[0], MessageSendEvent::BroadcastChannelAnnouncement { .. })); + } + + expect_channel_ready_event(&nodes[0], &node_id_1); + check_added_monitors(&nodes[0], 1); + expect_channel_ready_event(&nodes[1], &node_id_0); + check_added_monitors(&nodes[1], 1); + + // Remove the corresponding outputs and transactions the chain source is watching for the + // old funding as it is no longer being tracked. + nodes[0] + .chain_source + .remove_watched_txn_and_outputs(prev_funding_outpoint, prev_funding_script.clone()); + nodes[1] + .chain_source + .remove_watched_txn_and_outputs(prev_funding_outpoint, prev_funding_script); + + // Reconnect the nodes. This should trigger the node which lost the tie-breaker to resend `stfu` + // for their splice attempt. + if reload { + let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); + reload_node!( + nodes[0], + nodes[0].node.encode(), + &[&encoded_monitor_0], + persister_0b, + chain_monitor_0b, + node_0b + ); + let encoded_monitor_1 = get_monitor!(nodes[1], channel_id).encode(); + reload_node!( + nodes[1], + nodes[1].node.encode(), + &[&encoded_monitor_1], + persister_1b, + chain_monitor_1b, + node_1b + ); + } else { + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + } + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + if !use_0conf { + reconnect_args.send_announcement_sigs = (true, true); + } + reconnect_args.send_stfu = (true, false); + reconnect_nodes(reconnect_args); + + // Drive the second splice to completion. + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + if let MessageSendEvent::SendStfu { ref msg, .. 
} = msg_events[0] { + nodes[1].node.handle_stfu(node_id_0, msg); + } else { + panic!("Unexpected event {:?}", &msg_events[0]); + } + + let splice_init = get_event_msg!(nodes[1], MessageSendEvent::SendSpliceInit, node_id_0); + let initial_commit_sig = { + nodes[0].node.handle_splice_init(node_id_1, &splice_init); + let splice_ack = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceAck, node_id_1); + nodes[1].node.handle_splice_ack(node_id_0, &splice_ack); + let new_funding_script = chan_utils::make_funding_redeemscript( + &splice_init.funding_pubkey, + &splice_ack.funding_pubkey, + ) + .to_p2wsh(); + complete_interactive_funding_negotiation( + &nodes[1], + &nodes[0], + channel_id, + node_1_contribution, + new_funding_script, + ) + }; + let (splice_tx, splice_locked) = + sign_interactive_funding_tx(&nodes[1], &nodes[0], initial_commit_sig, use_0conf); + expect_splice_pending_event(&nodes[0], &node_id_1); + expect_splice_pending_event(&nodes[1], &node_id_0); + + if use_0conf { + let (splice_locked, for_node_id) = splice_locked.unwrap(); + assert_eq!(for_node_id, node_id_0); + lock_splice(&nodes[1], &nodes[0], &splice_locked, true); + } else { + assert!(splice_locked.is_none()); + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + lock_splice_after_blocks(&nodes[1], &nodes[0], ANTI_REORG_DELAY - 1); + } + + // Sanity check that we can still make a test payment. + send_payment(&nodes[0], &[&nodes[1]], 1_000_000); +} + #[test] fn disconnect_on_unexpected_interactive_tx_message() { let chanmon_cfgs = create_chanmon_cfgs(2); @@ -1459,3 +1810,151 @@ fn fail_quiescent_action_on_channel_close() { check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); } + +#[cfg(test)] +fn do_test_splice_with_inflight_htlc_forward_and_resolution(expire_scid_pre_forward: bool) { + // Test that we are still able to forward and resolve HTLCs while the original SCIDs contained + // in the onion packets have now changed due to channel splices becoming locked. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let mut config = test_default_channel_config(); + config.channel_config.cltv_expiry_delta = CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY as u16 * 2; + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(config.clone()), Some(config.clone()), Some(config)], + ); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_id_0 = nodes[0].node.get_our_node_id(); + let node_id_1 = nodes[1].node.get_our_node_id(); + let node_id_2 = nodes[2].node.get_our_node_id(); + + let (_, _, channel_id_0_1, _) = create_announced_chan_between_nodes(&nodes, 0, 1); + let (chan_upd_1_2, _, channel_id_1_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2); + + let node_max_height = + nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; + connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1); + + // Send an outbound HTLC from node 0 to 2. 
+ let payment_amount = 1_000_000; + let payment_params = + PaymentParameters::from_node_id(node_id_2, CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY * 2) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + let route_params = + RouteParameters::from_payment_params_and_value(payment_params, payment_amount); + let route = get_route(&nodes[0], &route_params).unwrap(); + let (_, payment_hash, payment_secret) = + get_payment_preimage_hash(&nodes[2], Some(payment_amount), None); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); + + // Node 1 should now have a pending HTLC to forward to 2. + let update_add_0_1 = get_htlc_update_msgs(&nodes[0], &node_id_1); + nodes[1].node.handle_update_add_htlc(node_id_0, &update_add_0_1.update_add_htlcs[0]); + commitment_signed_dance!(nodes[1], nodes[0], update_add_0_1.commitment_signed, false); + assert!(nodes[1].node.needs_pending_htlc_processing()); + + // Splice both channels, lock them, and connect enough blocks to trigger the legacy SCID pruning + // logic while the HTLC is still pending. + let contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }], + }; + let splice_tx_0_1 = splice_channel(&nodes[0], &nodes[1], channel_id_0_1, contribution); + for node in &nodes { + mine_transaction(node, &splice_tx_0_1); + } + + let contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }], + }; + let splice_tx_1_2 = splice_channel(&nodes[1], &nodes[2], channel_id_1_2, contribution); + for node in &nodes { + mine_transaction(node, &splice_tx_1_2); + } + + for node in &nodes { + connect_blocks(node, ANTI_REORG_DELAY - 2); + } + let splice_locked = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceLocked, node_id_1); + lock_splice(&nodes[0], &nodes[1], &splice_locked, false); + + for node in &nodes { + connect_blocks(node, 1); + } + let splice_locked = get_event_msg!(nodes[1], MessageSendEvent::SendSpliceLocked, node_id_2); + lock_splice(&nodes[1], &nodes[2], &splice_locked, false); + + if expire_scid_pre_forward { + for node in &nodes { + connect_blocks(node, CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY); + } + + // Now attempt to forward the HTLC from node 1 to 2 which will fail because the SCID is no + // longer stored and has expired. Obviously this is somewhat of an absurd case - not + // forwarding for `CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY` blocks is kinda nuts. + let fail_type = HTLCHandlingFailureType::InvalidForward { + requested_forward_scid: chan_upd_1_2.contents.short_channel_id, + }; + expect_htlc_forwarding_fails(&nodes[1], &[fail_type]); + check_added_monitors(&nodes[1], 1); + let update_fail_1_0 = get_htlc_update_msgs(&nodes[1], &node_id_0); + nodes[0].node.handle_update_fail_htlc(node_id_1, &update_fail_1_0.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], update_fail_1_0.commitment_signed, false); + + let conditions = PaymentFailedConditions::new(); + expect_payment_failed_conditions(&nodes[0], payment_hash, false, conditions); + } else { + // Now attempt to forward the HTLC from node 1 to 2. 
+ nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 1); + let update_add_1_2 = get_htlc_update_msgs(&nodes[1], &node_id_2); + nodes[2].node.handle_update_add_htlc(node_id_1, &update_add_1_2.update_add_htlcs[0]); + commitment_signed_dance!(nodes[2], nodes[1], update_add_1_2.commitment_signed, false); + assert!(nodes[2].node.needs_pending_htlc_processing()); + + // Node 2 should see the claimable payment. Fail it back to make sure we also handle the SCID + // change on the way back. + nodes[2].node.process_pending_htlc_forwards(); + expect_payment_claimable!(&nodes[2], payment_hash, payment_secret, payment_amount); + nodes[2].node.fail_htlc_backwards(&payment_hash); + let fail_type = HTLCHandlingFailureType::Receive { payment_hash }; + expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail_type]); + check_added_monitors(&nodes[2], 1); + + let update_fail_1_2 = get_htlc_update_msgs(&nodes[2], &node_id_1); + nodes[1].node.handle_update_fail_htlc(node_id_2, &update_fail_1_2.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[1], nodes[2], update_fail_1_2.commitment_signed, false); + let fail_type = HTLCHandlingFailureType::Forward { + node_id: Some(node_id_2), + channel_id: channel_id_1_2, + }; + expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[fail_type]); + check_added_monitors(&nodes[1], 1); + + let update_fail_0_1 = get_htlc_update_msgs(&nodes[1], &node_id_0); + nodes[0].node.handle_update_fail_htlc(node_id_1, &update_fail_0_1.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], update_fail_0_1.commitment_signed, false); + + let conditions = PaymentFailedConditions::new(); + expect_payment_failed_conditions(&nodes[0], payment_hash, true, conditions); + } +} + +#[test] +fn test_splice_with_inflight_htlc_forward_and_resolution() { + do_test_splice_with_inflight_htlc_forward_and_resolution(true); + do_test_splice_with_inflight_htlc_forward_and_resolution(false); +} diff --git a/lightning/src/ln/zero_fee_commitment_tests.rs b/lightning/src/ln/zero_fee_commitment_tests.rs index 9d915785c0f..b4fd9e0be88 100644 --- a/lightning/src/ln/zero_fee_commitment_tests.rs +++ b/lightning/src/ln/zero_fee_commitment_tests.rs @@ -1,5 +1,15 @@ -use crate::ln::chan_utils::shared_anchor_script_pubkey; +use crate::events::{ClosureReason, Event}; +use crate::ln::chan_utils; +use crate::ln::chan_utils::{ + BASE_INPUT_WEIGHT, BASE_TX_SIZE, EMPTY_SCRIPT_SIG_WEIGHT, EMPTY_WITNESS_WEIGHT, + P2WSH_TXOUT_WEIGHT, SEGWIT_MARKER_FLAG_WEIGHT, TRUC_CHILD_MAX_WEIGHT, +}; use crate::ln::functional_test_utils::*; +use crate::ln::msgs::BaseMessageHandler; +use crate::prelude::*; + +use bitcoin::constants::WITNESS_SCALE_FACTOR; +use bitcoin::Amount; #[test] fn test_p2a_anchor_values_under_trims_and_rounds() { @@ -46,10 +56,10 @@ fn test_p2a_anchor_values_under_trims_and_rounds() { )* let txn = get_local_commitment_txn!(nodes[0], chan_id); assert_eq!(txn.len(), 1); - assert_eq!(txn[0].output.iter().find(|output| output.script_pubkey == shared_anchor_script_pubkey()).unwrap().value.to_sat(), $expected_p2a_value_sat); + assert_eq!(txn[0].output.iter().find(|output| output.script_pubkey == chan_utils::shared_anchor_script_pubkey()).unwrap().value.to_sat(), $expected_p2a_value_sat); let txn = get_local_commitment_txn!(nodes[1], chan_id); assert_eq!(txn.len(), 1); - assert_eq!(txn[0].output.iter().find(|output| output.script_pubkey == shared_anchor_script_pubkey()).unwrap().value.to_sat(), $expected_p2a_value_sat); + 
assert_eq!(txn[0].output.iter().find(|output| output.script_pubkey == chan_utils::shared_anchor_script_pubkey()).unwrap().value.to_sat(), $expected_p2a_value_sat); for hash in node_0_1_hashes { fail_payment(&nodes[0], &[&nodes[1]], hash); } @@ -92,3 +102,338 @@ fn test_p2a_anchor_values_under_trims_and_rounds() { p2a_value_test!([353_000], [353_001], 240); p2a_value_test!([353_001], [353_001], 240); } + +#[test] +fn test_htlc_claim_chunking() { + // Assert we split an overall HolderHTLCOutput claim into constituent + // HTLC claim transactions such that each transaction is under TRUC_MAX_WEIGHT. + // Assert we reduce the number of HTLCs in a batch transaction by 2 if the + // coin selection algorithm fails to meet the target weight. + // Assert the claim_id of the first batch transaction is the claim + // id assigned to the overall claim. + // Assert we give up bumping a HTLC transaction once the batch size is + // 0 or negative. + // + // Route a bunch of HTLCs, force close the channel, assert two HTLC transactions + // get broadcasted, confirm only one of them, assert a new one gets broadcasted + // to sweep the remaining HTLCs, confirm a block without that transaction while + // dropping all available coin selection utxos, and assert we give up creating + // another HTLC transaction when handling the third HTLC bump. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut user_cfg = test_default_channel_config(); + user_cfg.channel_handshake_config.our_htlc_minimum_msat = 1; + user_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; + user_cfg.channel_handshake_config.our_max_accepted_htlcs = 114; + user_cfg.manually_accept_inbound_channels = true; + + let configs = [Some(user_cfg.clone()), Some(user_cfg)]; + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &configs); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let coinbase_tx = provide_anchor_utxo_reserves(&nodes, 50, Amount::from_sat(500)); + + const CHAN_CAPACITY: u64 = 10_000_000; + let (_, _, chan_id, _funding_tx) = create_announced_chan_between_nodes_with_value( + &nodes, + 0, + 1, + CHAN_CAPACITY, + (CHAN_CAPACITY / 2) * 1000, + ); + + let mut node_1_preimages = Vec::new(); + const NONDUST_HTLC_AMT_MSAT: u64 = 1_000_000; + for _ in 0..75 { + let (preimage, payment_hash, _, _) = + route_payment(&nodes[0], &[&nodes[1]], NONDUST_HTLC_AMT_MSAT); + node_1_preimages.push((preimage, payment_hash)); + } + let node_0_commit_tx = get_local_commitment_txn!(nodes[0], chan_id); + assert_eq!(node_0_commit_tx.len(), 1); + assert_eq!(node_0_commit_tx[0].output.len(), 75 + 2 + 1); + let node_1_commit_tx = get_local_commitment_txn!(nodes[1], chan_id); + assert_eq!(node_1_commit_tx.len(), 1); + assert_eq!(node_1_commit_tx[0].output.len(), 75 + 2 + 1); + + for (preimage, payment_hash) in node_1_preimages { + nodes[1].node.claim_funds(preimage); + check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash, NONDUST_HTLC_AMT_MSAT); + } + nodes[0].node.get_and_clear_pending_msg_events(); + nodes[1].node.get_and_clear_pending_msg_events(); + + mine_transaction(&nodes[0], &node_1_commit_tx[0]); + mine_transaction(&nodes[1], &node_1_commit_tx[0]); + + let mut events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events.pop().unwrap() { + Event::BumpTransaction(bump_event) => { + nodes[1].bump_tx_handler.handle_event(&bump_event); + }, + _ => panic!("Unexpected 
event"), + } + + let htlc_claims = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(htlc_claims.len(), 2); + + check_spends!(htlc_claims[0], node_1_commit_tx[0], coinbase_tx); + check_spends!(htlc_claims[1], node_1_commit_tx[0], coinbase_tx); + + assert_eq!(htlc_claims[0].input.len(), 71); + assert_eq!(htlc_claims[0].output.len(), 51); + assert_eq!(htlc_claims[1].input.len(), 34); + assert_eq!(htlc_claims[1].output.len(), 24); + + check_closed_broadcast!(nodes[0], true); + check_added_monitors!(nodes[0], 1); + check_closed_event!( + nodes[0], + 1, + ClosureReason::CommitmentTxConfirmed, + [nodes[1].node.get_our_node_id()], + CHAN_CAPACITY + ); + assert!(nodes[0].node.list_channels().is_empty()); + check_closed_broadcast!(nodes[1], true); + check_added_monitors!(nodes[1], 1); + check_closed_event!( + nodes[1], + 1, + ClosureReason::CommitmentTxConfirmed, + [nodes[0].node.get_our_node_id()], + CHAN_CAPACITY + ); + assert!(nodes[1].node.list_channels().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + + mine_transaction(&nodes[1], &htlc_claims[0]); + + let mut events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events.pop().unwrap() { + Event::BumpTransaction(bump_event) => { + nodes[1].bump_tx_handler.handle_event(&bump_event); + }, + _ => panic!("Unexpected event"), + } + + let fresh_htlc_claims = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(fresh_htlc_claims.len(), 1); + check_spends!(fresh_htlc_claims[0], node_1_commit_tx[0], coinbase_tx); + // We are targeting a higher feerate here, + // so we need more utxos here compared to `htlc_claims[1]` above. + assert_eq!(fresh_htlc_claims[0].input.len(), 37); + assert_eq!(fresh_htlc_claims[0].output.len(), 25); + + let log_entries = nodes[1].logger.lines.lock().unwrap(); + let batch_tx_id_assignments: Vec<_> = log_entries + .keys() + .map(|key| &key.1) + .filter(|log_msg| log_msg.contains("Batch transaction assigned to UTXO id")) + .collect(); + assert_eq!(batch_tx_id_assignments.len(), 7); + + let mut unique_claim_ids: Vec<(&str, u8)> = Vec::new(); + for claim_id in batch_tx_id_assignments + .iter() + .map(|assignment| assignment.split_whitespace().nth(6).unwrap()) + { + if let Some((_, count)) = unique_claim_ids.iter_mut().find(|(id, _count)| &claim_id == id) { + *count += 1; + } else { + unique_claim_ids.push((claim_id, 1)); + } + } + unique_claim_ids.sort_unstable_by_key(|(_id, count)| *count); + assert_eq!(unique_claim_ids.len(), 2); + let (og_claim_id, og_claim_id_count) = unique_claim_ids.pop().unwrap(); + assert_eq!(og_claim_id_count, 6); + assert_eq!(unique_claim_ids.pop().unwrap().1, 1); + + let handling_htlc_bumps: Vec<_> = log_entries + .keys() + .map(|key| &key.1) + .filter(|log_msg| log_msg.contains("Handling HTLC bump")) + .map(|log_msg| { + log_msg + .split_whitespace() + .nth(5) + .unwrap() + .trim_matches(|c: char| c.is_ascii_punctuation()) + }) + .collect(); + assert_eq!(handling_htlc_bumps.len(), 2); + assert_eq!(handling_htlc_bumps[0], og_claim_id); + assert_eq!(handling_htlc_bumps[1], og_claim_id); + + let mut batch_sizes: Vec = batch_tx_id_assignments + .iter() + .map(|assignment| assignment.split_whitespace().nth(8).unwrap().parse().unwrap()) + .collect(); + batch_sizes.sort_unstable(); + batch_sizes.reverse(); + assert_eq!(batch_sizes.len(), 7); + assert_eq!(batch_sizes.pop().unwrap(), 24); + assert_eq!(batch_sizes.pop().unwrap(), 24); + for i 
in (51..=59).step_by(2) { + assert_eq!(batch_sizes.pop().unwrap(), i); + } + drop(log_entries); + + nodes[1].wallet_source.clear_utxos(); + nodes[1].chain_monitor.chain_monitor.rebroadcast_pending_claims(); + + let mut events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events.pop().unwrap() { + Event::BumpTransaction(bump_event) => { + nodes[1].bump_tx_handler.handle_event(&bump_event); + }, + _ => panic!("Unexpected event"), + } + + nodes[1].logger.assert_log( + "lightning::events::bump_transaction", + format!( + "Failed bumping HTLC transaction fee for commitment {}", + node_1_commit_tx[0].compute_txid() + ), + 1, + ); +} + +#[test] +fn test_anchor_tx_too_big() { + // Assert all V3 anchor tx transactions are below TRUC_CHILD_MAX_WEIGHT. + // + // Provide a bunch of small utxos, fail to bump the commitment, + // then provide a single big-value utxo, and successfully broadcast + // the commitment. + const FEERATE: u32 = 500; + let chanmon_cfgs = create_chanmon_cfgs(2); + { + let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = FEERATE; + } + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut user_cfg = test_default_channel_config(); + user_cfg.channel_handshake_config.our_htlc_minimum_msat = 1; + user_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; + user_cfg.channel_handshake_config.our_max_accepted_htlcs = 114; + user_cfg.manually_accept_inbound_channels = true; + + let configs = [Some(user_cfg.clone()), Some(user_cfg)]; + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &configs); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + + let _coinbase_tx_a = provide_anchor_utxo_reserves(&nodes, 50, Amount::from_sat(500)); + + const CHAN_CAPACITY: u64 = 10_000_000; + let (_, _, chan_id, _funding_tx) = create_announced_chan_between_nodes_with_value( + &nodes, + 0, + 1, + CHAN_CAPACITY, + (CHAN_CAPACITY / 2) * 1000, + ); + + let mut node_1_preimages = Vec::new(); + const NONDUST_HTLC_AMT_MSAT: u64 = 1_000_000; + for _ in 0..50 { + let (preimage, payment_hash, _, _) = + route_payment(&nodes[0], &[&nodes[1]], NONDUST_HTLC_AMT_MSAT); + node_1_preimages.push((preimage, payment_hash)); + } + let commitment_tx = get_local_commitment_txn!(nodes[1], chan_id).pop().unwrap(); + let commitment_txid = commitment_tx.compute_txid(); + + let message = "Channel force-closed".to_owned(); + nodes[1] + .node + .force_close_broadcasting_latest_txn(&chan_id, &node_a_id, message.clone()) + .unwrap(); + check_added_monitors(&nodes[1], 1); + check_closed_broadcast!(nodes[1], true); + + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], CHAN_CAPACITY); + + let mut events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events.pop().unwrap() { + Event::BumpTransaction(bump_event) => { + nodes[1].bump_tx_handler.handle_event(&bump_event); + }, + _ => panic!("Unexpected event"), + } + assert!(nodes[1].tx_broadcaster.txn_broadcast().is_empty()); + let max_coin_selection_weight = TRUC_CHILD_MAX_WEIGHT + - BASE_TX_SIZE * WITNESS_SCALE_FACTOR as u64 + - SEGWIT_MARKER_FLAG_WEIGHT + - BASE_INPUT_WEIGHT + - EMPTY_SCRIPT_SIG_WEIGHT + - EMPTY_WITNESS_WEIGHT + - P2WSH_TXOUT_WEIGHT; + nodes[1].logger.assert_log( + "lightning::events::bump_transaction", + 
format!( + "Insufficient funds to meet target feerate {} sat/kW while remaining under {} WU", + FEERATE, max_coin_selection_weight + ), + 4, + ); + nodes[1].logger.assert_log( + "lightning::events::bump_transaction", + format!("Failed bumping commitment transaction fee for {}", commitment_txid), + 1, + ); + + let coinbase_tx_b = provide_anchor_reserves(&nodes); + + nodes[1].chain_monitor.chain_monitor.rebroadcast_pending_claims(); + + let mut events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events.pop().unwrap() { + Event::BumpTransaction(bump_event) => { + nodes[1].bump_tx_handler.handle_event(&bump_event); + }, + _ => panic!("Unexpected event"), + } + let txns = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(txns.len(), 2); + check_spends!(txns[1], txns[0], coinbase_tx_b); + assert!(txns[1].weight().to_wu() < TRUC_CHILD_MAX_WEIGHT); + + assert_eq!(txns[0].compute_txid(), commitment_txid); + assert_eq!(txns[1].input.len(), 2); + assert_eq!(txns[1].output.len(), 1); + nodes[1].logger.assert_log( + "lightning::events::bump_transaction", + format!( + "Insufficient funds to meet target feerate {} sat/kW while remaining under {} WU", + FEERATE, max_coin_selection_weight + ), + 4, + ); + nodes[1].logger.assert_log( + "lightning::events::bump_transaction", + format!("Failed bumping commitment transaction fee for {}", txns[0].compute_txid()), + 1, + ); + nodes[1].logger.assert_log( + "lightning::events::bump_transaction", + format!( + "Broadcasting anchor transaction {} to bump channel close with txid {}", + txns[1].compute_txid(), + txns[0].compute_txid() + ), + 1, + ); +} diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 5bd24d8e2e1..5f525cea6b9 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -272,12 +272,12 @@ pub enum SpendableOutputDescriptor { /// To derive the delayed payment key which is used to sign this input, you must pass the /// holder [`InMemorySigner::delayed_payment_base_key`] (i.e., the private key which /// corresponds to the [`ChannelPublicKeys::delayed_payment_basepoint`] in - /// [`ChannelSigner::new_pubkeys`]) and the provided + /// [`ChannelSigner::pubkeys`]) and the provided /// [`DelayedPaymentOutputDescriptor::per_commitment_point`] to /// [`chan_utils::derive_private_key`]. The DelayedPaymentKey can be generated without the /// secret key using [`DelayedPaymentKey::from_basepoint`] and only the /// [`ChannelPublicKeys::delayed_payment_basepoint`] which appears in - /// [`ChannelSigner::new_pubkeys`]. + /// [`ChannelSigner::pubkeys`]. /// /// To derive the [`DelayedPaymentOutputDescriptor::revocation_pubkey`] provided here (which is /// used in the witness script generation), you must pass the counterparty @@ -292,7 +292,7 @@ pub enum SpendableOutputDescriptor { /// [`chan_utils::get_revokeable_redeemscript`]. DelayedPaymentOutput(DelayedPaymentOutputDescriptor), /// An output spendable exclusively by our payment key (i.e., the private key that corresponds - /// to the `payment_point` in [`ChannelSigner::new_pubkeys`]). The output type depends on the + /// to the `payment_point` in [`ChannelSigner::pubkeys`]). The output type depends on the /// channel type negotiated. /// /// On an anchor outputs channel, the witness in the spending input is: @@ -792,19 +792,25 @@ pub trait ChannelSigner { /// and pause future signing operations until this validation completes. 
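// Editor's illustrative sketch (not part of this patch): `test_anchor_tx_too_big` above checks
// two properties of the broadcast package: the anchor child spends the commitment, and the
// child stays under the TRUC child weight cap (BIP 431 limits a TRUC child to 1,000 vB, i.e.
// 4,000 WU). The constant below is an assumed placeholder; the crate's real constant name and
// value may differ.
use bitcoin::Transaction;

const TRUC_CHILD_MAX_WEIGHT_WU: u64 = 4_000; // assumed placeholder

/// Returns true if `child` spends `parent` and stays under the assumed TRUC child weight cap,
/// mirroring the assertions made in the test above.
fn is_valid_truc_child(parent: &Transaction, child: &Transaction) -> bool {
    let parent_txid = parent.compute_txid();
    let spends_parent =
        child.input.iter().any(|input| input.previous_output.txid == parent_txid);
    spends_parent && child.weight().to_wu() < TRUC_CHILD_MAX_WEIGHT_WU
}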
fn validate_counterparty_revocation(&self, idx: u64, secret: &SecretKey) -> Result<(), ()>; - /// Returns a *new* set of holder channel public keys and basepoints. They may be the same as a - /// previous value, but are also allowed to change arbitrarily. Signing methods must still - /// support signing for any keys which have ever been returned. This should only be called - /// either for new channels or new splices. + /// Returns the holder channel public keys and basepoints. This should only be called once + /// during channel creation and as such implementations are allowed to exhibit undefined + /// behavior if called more than once. /// - /// `splice_parent_funding_txid` can be used to compute a tweak to rotate the funding key in the - /// 2-of-2 multisig script during a splice. See [`compute_funding_key_tweak`] for an example - /// tweak and more details. + /// This method is *not* asynchronous. Instead, the value must be computed locally or in + /// advance and cached. + fn pubkeys(&self, secp_ctx: &Secp256k1) -> ChannelPublicKeys; + + /// Returns a new funding pubkey (i.e. our public key which is used in a 2-of-2 with the + /// counterparty's key to lock the funds on-chain) for a spliced channel. + /// + /// `splice_parent_funding_txid` can be used to compute a tweak with which to rotate the base + /// key (the txid will then be available later in signing operations via + /// [`ChannelTransactionParameters::splice_parent_funding_txid`]). /// /// This method is *not* asynchronous. Instead, the value must be cached locally. - fn new_pubkeys( - &self, splice_parent_funding_txid: Option, secp_ctx: &Secp256k1, - ) -> ChannelPublicKeys; + fn new_funding_pubkey( + &self, splice_parent_funding_txid: Txid, secp_ctx: &Secp256k1, + ) -> PublicKey; /// Returns an arbitrary identifier describing the set of keys which are provided back to you in /// some [`SpendableOutputDescriptor`] types. This should be sufficient to identify this @@ -1058,7 +1064,7 @@ pub trait ChangeDestinationSource { /// /// This method should return a different value each time it is called, to avoid linking /// on-chain funds controlled to the same user. - fn get_change_destination_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf>; + fn get_change_destination_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()>; } /// A synchronous helper trait that describes an on-chain wallet capable of returning a (change) destination script. @@ -1090,7 +1096,7 @@ impl ChangeDestinationSource for ChangeDestinationSourceSyncWrapper where T::Target: ChangeDestinationSourceSync, { - fn get_change_destination_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf> { + fn get_change_destination_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()> { let script = self.0.get_change_destination_script(); Box::pin(async move { script }) } @@ -1457,17 +1463,13 @@ impl ChannelSigner for InMemorySigner { Ok(()) } - fn new_pubkeys( - &self, splice_parent_funding_txid: Option, secp_ctx: &Secp256k1, - ) -> ChannelPublicKeys { + fn pubkeys(&self, secp_ctx: &Secp256k1) -> ChannelPublicKeys { // Because splices always break downgrades, we go ahead and always use the new derivation // here as its just much better.
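// Editor's illustrative sketch (not part of this patch): one way the `new_funding_pubkey` half
// of the split API above could look for a custom signer, rotating the base funding key with a
// tweak committed to the parent funding txid. `SketchFundingKey` and the hash construction are
// assumptions for illustration only; a real implementation must use LDK's own derivation (see
// the `compute_funding_key_tweak` reference in the removed docs) so both peers derive the same
// rotated key.
use bitcoin::hashes::{sha256, Hash, HashEngine};
use bitcoin::secp256k1::{All, PublicKey, Scalar, Secp256k1, SecretKey};
use bitcoin::Txid;

/// Hypothetical stand-in for a signer's base funding secret key.
struct SketchFundingKey(SecretKey);

impl SketchFundingKey {
    /// Illustrative analogue of `ChannelSigner::new_funding_pubkey`.
    fn spliced_funding_pubkey(
        &self, splice_parent_funding_txid: Txid, secp_ctx: &Secp256k1<All>,
    ) -> PublicKey {
        let base = PublicKey::from_secret_key(secp_ctx, &self.0);
        // Commit the tweak to the base key and the parent funding txid (illustrative scheme).
        let mut eng = sha256::Hash::engine();
        eng.input(&base.serialize());
        eng.input(&splice_parent_funding_txid.to_byte_array());
        let tweak = Scalar::from_be_bytes(sha256::Hash::from_engine(eng).to_byte_array())
            .expect("a random 32-byte hash is a valid scalar with overwhelming probability");
        base.add_exp_tweak(secp_ctx, &tweak).expect("tweaked key is valid")
    }
}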
- let use_v2_derivation = - self.v2_remote_key_derivation || splice_parent_funding_txid.is_some(); let payment_key = - if use_v2_derivation { &self.payment_key_v2 } else { &self.payment_key_v1 }; + if self.v2_remote_key_derivation { &self.payment_key_v2 } else { &self.payment_key_v1 }; let from_secret = |s: &SecretKey| PublicKey::from_secret_key(secp_ctx, s); - let mut pubkeys = ChannelPublicKeys { + let pubkeys = ChannelPublicKeys { funding_pubkey: from_secret(&self.funding_key.0), revocation_basepoint: RevocationBasepoint::from(from_secret(&self.revocation_base_key)), payment_point: from_secret(payment_key), @@ -1477,13 +1479,15 @@ impl ChannelSigner for InMemorySigner { htlc_basepoint: HtlcBasepoint::from(from_secret(&self.htlc_base_key)), }; - if splice_parent_funding_txid.is_some() { - pubkeys.funding_pubkey = - self.funding_key(splice_parent_funding_txid).public_key(secp_ctx); - } pubkeys } + fn new_funding_pubkey( + &self, splice_parent_funding_txid: Txid, secp_ctx: &Secp256k1, + ) -> PublicKey { + self.funding_key(Some(splice_parent_funding_txid)).public_key(secp_ctx) + } + fn channel_keys_id(&self) -> [u8; 32] { self.channel_keys_id } diff --git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs index 3edfd5211fe..7161bc77123 100644 --- a/lightning/src/util/async_poll.rs +++ b/lightning/src/util/async_poll.rs @@ -93,11 +93,11 @@ pub(crate) fn dummy_waker() -> Waker { } #[cfg(feature = "std")] -/// A type alias for a future that returns a result of type T. -pub type AsyncResult<'a, T> = Pin> + 'a + Send>>; +/// A type alias for a future that returns a result of type `T` or error `E`. +pub type AsyncResult<'a, T, E> = Pin> + 'a + Send>>; #[cfg(not(feature = "std"))] -/// A type alias for a future that returns a result of type T. -pub type AsyncResult<'a, T> = Pin> + 'a>>; +/// A type alias for a future that returns a result of type `T` or error `E`. +pub type AsyncResult<'a, T, E> = Pin> + 'a>>; /// Marker trait to optionally implement `Sync` under std. 
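// Editor's illustrative sketch (not part of this patch): the `AsyncResult` alias above now
// carries an explicit error type. Below, a synchronous source satisfies the new
// `get_change_destination_script` shape by boxing an already-resolved future. `StaticChangeDest`
// and the local `SketchAsyncResult` alias are illustrative stand-ins that mirror the std-feature
// definition above.
use core::future::Future;
use core::pin::Pin;

type SketchAsyncResult<'a, T, E> = Pin<Box<dyn Future<Output = Result<T, E>> + 'a + Send>>;

struct StaticChangeDest {
    script: bitcoin::ScriptBuf,
}

impl StaticChangeDest {
    /// Same shape as `ChangeDestinationSource::get_change_destination_script` after this patch:
    /// the value is computed synchronously and wrapped in a ready future.
    fn get_change_destination_script<'a>(&'a self) -> SketchAsyncResult<'a, bitcoin::ScriptBuf, ()> {
        let script: Result<bitcoin::ScriptBuf, ()> = Ok(self.script.clone());
        Box::pin(async move { script })
    }
}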
#[cfg(feature = "std")] diff --git a/lightning/src/util/dyn_signer.rs b/lightning/src/util/dyn_signer.rs index c519484a938..cf1cac37903 100644 --- a/lightning/src/util/dyn_signer.rs +++ b/lightning/src/util/dyn_signer.rs @@ -174,9 +174,12 @@ delegate!(DynSigner, ChannelSigner, holder_tx: &HolderCommitmentTransaction, preimages: Vec ) -> Result<(), ()>, - fn new_pubkeys(, - splice_parent_funding_txid: Option, secp_ctx: &Secp256k1 + fn pubkeys(, + secp_ctx: &Secp256k1 ) -> ChannelPublicKeys, + fn new_funding_pubkey(, + splice_parent_funding_txid: Txid, secp_ctx: &Secp256k1 + ) -> PublicKey, fn channel_keys_id(,) -> [u8; 32], fn validate_counterparty_revocation(, idx: u64, secret: &SecretKey) -> Result<(), ()> ); diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 0b4ba190740..434e16d629e 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -34,7 +34,7 @@ use crate::chain::transaction::OutPoint; use crate::ln::types::ChannelId; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider}; use crate::sync::Mutex; -use crate::util::async_poll::{dummy_waker, MaybeSend, MaybeSync}; +use crate::util::async_poll::{dummy_waker, AsyncResult, MaybeSend, MaybeSync}; use crate::util::logger::Logger; use crate::util::native_async::FutureSpawner; use crate::util::ser::{Readable, ReadableArgs, Writeable}; @@ -160,7 +160,7 @@ where { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> Pin, io::Error>> + 'static + Send>> { + ) -> AsyncResult<'static, Vec, io::Error> { let res = self.0.read(primary_namespace, secondary_namespace, key); Box::pin(async move { res }) @@ -168,7 +168,7 @@ where fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> Pin> + 'static + Send>> { + ) -> AsyncResult<'static, (), io::Error> { let res = self.0.write(primary_namespace, secondary_namespace, key, buf); Box::pin(async move { res }) @@ -176,7 +176,7 @@ where fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> Pin> + 'static + Send>> { + ) -> AsyncResult<'static, (), io::Error> { let res = self.0.remove(primary_namespace, secondary_namespace, key); Box::pin(async move { res }) @@ -184,7 +184,7 @@ where fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> Pin, io::Error>> + 'static + Send>> { + ) -> AsyncResult<'static, Vec, io::Error> { let res = self.0.list(primary_namespace, secondary_namespace); Box::pin(async move { res }) @@ -222,7 +222,7 @@ pub trait KVStore { /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> Pin, io::Error>> + 'static + Send>>; + ) -> AsyncResult<'static, Vec, io::Error>; /// Persists the given data under the given `key`. /// /// The order of multiple writes to the same key needs to be retained while persisting @@ -242,7 +242,7 @@ pub trait KVStore { /// Will create the given `primary_namespace` and `secondary_namespace` if not already present in the store. fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> Pin> + 'static + Send>>; + ) -> AsyncResult<'static, (), io::Error>; /// Removes any data that had previously been persisted under the given `key`. /// /// Returns successfully if no data will be stored for the given `primary_namespace`, @@ -250,7 +250,7 @@ pub trait KVStore { /// invokation or not. 
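// Editor's illustrative sketch (not part of this patch): a minimal in-memory store with the same
// `read`/`write` shapes as the `KVStore` trait above, returning `io::ErrorKind::NotFound` for
// unknown keys as the read contract describes. `MemStore` is hypothetical and deliberately not a
// full trait impl (it omits `remove`/`list`).
use std::collections::HashMap;
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::sync::Mutex;

struct MemStore {
    entries: Mutex<HashMap<(String, String, String), Vec<u8>>>,
}

impl MemStore {
    fn read(
        &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
    ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, io::Error>> + 'static + Send>> {
        let lookup =
            (primary_namespace.to_owned(), secondary_namespace.to_owned(), key.to_owned());
        // The lookup is performed synchronously; only the future wrapper is async.
        let res = self
            .entries
            .lock()
            .unwrap()
            .get(&lookup)
            .cloned()
            .ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "unknown key"));
        Box::pin(async move { res })
    }

    fn write(
        &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
    ) -> Pin<Box<dyn Future<Output = Result<(), io::Error>> + 'static + Send>> {
        let entry =
            (primary_namespace.to_owned(), secondary_namespace.to_owned(), key.to_owned());
        self.entries.lock().unwrap().insert(entry, buf);
        let res: Result<(), io::Error> = Ok(());
        Box::pin(async move { res })
    }
}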
fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> Pin> + 'static + Send>>; + ) -> AsyncResult<'static, (), io::Error>; /// Returns a list of keys that are stored under the given `secondary_namespace` in /// `primary_namespace`. /// @@ -258,7 +258,7 @@ pub trait KVStore { /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown. fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> Pin, io::Error>> + 'static + Send>>; + ) -> AsyncResult<'static, Vec, io::Error>; } /// Provides additional interface methods that are required for [`KVStore`]-to-[`KVStore`] diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index e88e1eb3732..d78b3e921cc 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -1688,7 +1688,14 @@ impl Readable for Duration { fn read(r: &mut R) -> Result { let secs = Readable::read(r)?; let nanos = Readable::read(r)?; - Ok(Duration::new(secs, nanos)) + // Duration::new panics if the nanosecond part in excess of a second, added to the second + // part, overflows. To ensure this won't happen, we simply reject any case where there are + // nanoseconds in excess of a second, which is invalid anyway. + if nanos >= 1_000_000_000 { + Err(DecodeError::InvalidValue) + } else { + Ok(Duration::new(secs, nanos)) + } } } diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index efeb059b436..cba92f64c0c 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -35,11 +35,10 @@ use bitcoin::{BlockHash, ScriptBuf, Transaction, Txid}; use core::future::Future; use core::ops::Deref; -use core::pin::Pin; use core::sync::atomic::{AtomicBool, Ordering}; use core::task; -use super::async_poll::dummy_waker; +use super::async_poll::{dummy_waker, AsyncResult}; /// The number of blocks we wait before we prune the tracked spendable outputs. pub const PRUNE_DELAY_BLOCKS: u32 = ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY; @@ -604,9 +603,7 @@ where sweeper_state.dirty = true; } - fn persist_state<'a>( - &self, sweeper_state: &SweeperState, - ) -> Pin> + 'a + Send>> { + fn persist_state<'a>(&self, sweeper_state: &SweeperState) -> AsyncResult<'a, (), io::Error> { let encoded = sweeper_state.encode(); self.kv_store.write( diff --git a/lightning/src/util/test_channel_signer.rs b/lightning/src/util/test_channel_signer.rs index 6b1950169e8..3bacd76a610 100644 --- a/lightning/src/util/test_channel_signer.rs +++ b/lightning/src/util/test_channel_signer.rs @@ -24,7 +24,9 @@ use crate::prelude::*; #[cfg(any(test, feature = "_test_utils"))] use crate::sync::MutexGuard; use crate::sync::{Arc, Mutex}; + use core::cmp; +use core::sync::atomic::{AtomicBool, Ordering}; use bitcoin::hashes::Hash; use bitcoin::sighash; @@ -68,13 +70,31 @@ pub const INITIAL_REVOKED_COMMITMENT_NUMBER: u64 = 1 << 48; /// /// Note that before we do so we should ensure its serialization format has backwards- and /// forwards-compatibility prefix/suffixes! -#[derive(Clone)] pub struct TestChannelSigner { pub inner: DynSigner, /// Channel state used for policy enforcement pub state: Arc>, pub disable_revocation_policy_check: bool, pub disable_all_state_policy_checks: bool, + have_fetched_pubkeys: AtomicBool, +} + +impl Clone for TestChannelSigner { + fn clone(&self) -> Self { + // Generally, a signer should only ever be cloned when a ChannelMonitor is cloned (which + // doesn't fetch the pubkeys at all). 
This isn't really a critical test, but if it + // ever does fail we should make sure the clone is happening in a sensible place. + assert!(!self.have_fetched_pubkeys.load(Ordering::Acquire)); + Self { + inner: self.inner.clone(), + state: Arc::clone(&self.state), + disable_revocation_policy_check: self.disable_revocation_policy_check, + disable_all_state_policy_checks: self.disable_all_state_policy_checks, + // In some tests we clone a `ChannelMonitor` multiple times, so we have to initialize + // with `!have_fetched_pubkeys` to ensure the above assertion passes. + have_fetched_pubkeys: AtomicBool::new(false), + } + } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -129,6 +149,7 @@ impl TestChannelSigner { state, disable_revocation_policy_check: false, disable_all_state_policy_checks: false, + have_fetched_pubkeys: AtomicBool::new(false), } } @@ -141,7 +162,13 @@ impl TestChannelSigner { inner: DynSigner, state: Arc>, disable_revocation_policy_check: bool, disable_all_state_policy_checks: bool, ) -> Self { - Self { inner, state, disable_revocation_policy_check, disable_all_state_policy_checks } + Self { + inner, + state, + disable_revocation_policy_check, + disable_all_state_policy_checks, + have_fetched_pubkeys: AtomicBool::new(false), + } } #[cfg(any(test, feature = "_test_utils"))] @@ -221,10 +248,15 @@ impl ChannelSigner for TestChannelSigner { Ok(()) } - fn new_pubkeys( - &self, splice_parent_funding_txid: Option, secp_ctx: &Secp256k1, - ) -> ChannelPublicKeys { - self.inner.new_pubkeys(splice_parent_funding_txid, secp_ctx) + fn pubkeys(&self, secp_ctx: &Secp256k1) -> ChannelPublicKeys { + assert!(!self.have_fetched_pubkeys.swap(true, Ordering::AcqRel)); + self.inner.pubkeys(secp_ctx) + } + + fn new_funding_pubkey( + &self, splice_parent_funding_txid: Txid, secp_ctx: &Secp256k1, + ) -> PublicKey { + self.inner.new_funding_pubkey(splice_parent_funding_txid, secp_ctx) } fn channel_keys_id(&self) -> [u8; 32] { diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 38456e23358..1931287ab6a 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -50,6 +50,7 @@ use crate::sign::{self, ReceiveAuthKey}; use crate::sign::{ChannelSigner, PeerStorageKey}; use crate::sync::RwLock; use crate::types::features::{ChannelFeatures, InitFeatures, NodeFeatures}; +use crate::util::async_poll::AsyncResult; use crate::util::config::UserConfig; use crate::util::dyn_signer::{ DynKeysInterface, DynKeysInterfaceTrait, DynPhantomKeysInterface, DynSigner, @@ -1011,13 +1012,13 @@ impl TestStore { impl KVStore for TestStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> Pin, io::Error>> + 'static + Send>> { + ) -> AsyncResult<'static, Vec, io::Error> { let res = self.read_internal(&primary_namespace, &secondary_namespace, &key); Box::pin(async move { res }) } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> Pin> + 'static + Send>> { + ) -> AsyncResult<'static, (), io::Error> { let path = format!("{primary_namespace}/{secondary_namespace}/{key}"); let future = Arc::new(Mutex::new((None, None))); @@ -1030,13 +1031,13 @@ impl KVStore for TestStore { } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> Pin> + 'static + Send>> { + ) -> AsyncResult<'static, (), io::Error> { let res = self.remove_internal(&primary_namespace, &secondary_namespace, &key); Box::pin(async move { res }) } fn list( &self, primary_namespace: &str, 
secondary_namespace: &str, - ) -> Pin, io::Error>> + 'static + Send>> { + ) -> AsyncResult<'static, Vec, io::Error> { let res = self.list_internal(primary_namespace, secondary_namespace); Box::pin(async move { res }) } @@ -1123,6 +1124,30 @@ impl TestBroadcaster { impl chaininterface::BroadcasterInterface for TestBroadcaster { fn broadcast_transactions(&self, txs: &[&Transaction]) { + // Assert that any batch of transactions of length greater than 1 is sorted + // topologically, and is a `child-with-parents` package as defined in + // . + // + // Implementations MUST NOT rely on this, and must re-sort the transactions + // themselves. + // + // Right now LDK only ever broadcasts packages of length 2. + assert!(txs.len() <= 2); + if txs.len() == 2 { + let parent_txid = txs[0].compute_txid(); + assert!(txs[1] + .input + .iter() + .map(|input| input.previous_output.txid) + .any(|txid| txid == parent_txid)); + let child_txid = txs[1].compute_txid(); + assert!(txs[0] + .input + .iter() + .map(|input| input.previous_output.txid) + .all(|txid| txid != child_txid)); + } + for tx in txs { let lock_time = tx.lock_time.to_consensus_u32(); assert!(lock_time < 1_500_000_000); @@ -2177,6 +2202,10 @@ impl TestWalletSource { self.utxos.lock().unwrap().retain(|utxo| utxo.outpoint != outpoint); } + pub fn clear_utxos(&self) { + self.utxos.lock().unwrap().clear(); + } + pub fn sign_tx( &self, mut tx: Transaction, ) -> Result {