From 172604752d51ffd2f38a46f1270ffceec92965cf Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 23 Sep 2025 14:36:21 -0700 Subject: [PATCH 1/6] Log broadcast of interactive funding transaction --- lightning/src/ln/channelmanager.rs | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index d45939f116e..4c96f5d96bc 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6438,7 +6438,11 @@ where splice_negotiated, }) => { if let Some(funding_tx) = funding_tx { - self.broadcast_interactive_funding(chan, &funding_tx); + self.broadcast_interactive_funding( + chan, + &funding_tx, + &self.logger, + ); } if let Some(splice_negotiated) = splice_negotiated { self.pending_events.lock().unwrap().push_back(( @@ -6501,8 +6505,14 @@ where } fn broadcast_interactive_funding( - &self, channel: &mut FundedChannel, funding_tx: &Transaction, + &self, channel: &mut FundedChannel, funding_tx: &Transaction, logger: &L, ) { + let logger = WithChannelContext::from(logger, channel.context(), None); + log_info!( + logger, + "Broadcasting signed interactive funding transaction {}", + funding_tx.compute_txid() + ); self.tx_broadcaster.broadcast_transactions(&[funding_tx]); { let mut pending_events = self.pending_events.lock().unwrap(); @@ -9571,7 +9581,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ match channel.funding_transaction_signed(txid, vec![]) { Ok(FundingTxSigned { tx_signatures: Some(tx_signatures), funding_tx, splice_negotiated }) => { if let Some(funding_tx) = funding_tx { - self.broadcast_interactive_funding(channel, &funding_tx); + self.broadcast_interactive_funding(channel, &funding_tx, &self.logger); } if let Some(splice_negotiated) = splice_negotiated { @@ -10579,11 +10589,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }); } if let Some(ref funding_tx) = funding_tx { - self.tx_broadcaster.broadcast_transactions(&[funding_tx]); - { - let mut pending_events = self.pending_events.lock().unwrap(); - emit_channel_pending_event!(pending_events, chan); - } + self.broadcast_interactive_funding(chan, funding_tx, &self.logger); } if let Some(splice_negotiated) = splice_negotiated { self.pending_events.lock().unwrap().push_back(( From 1802b6e8735bce69672d78fa8594cd04aada9728 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 23 Sep 2025 14:35:47 -0700 Subject: [PATCH 2/6] Send 0conf splice_locked upon tx_signatures exchange Splices negotiated with 0 confirmations require that we immediately lock it after exchanging `tx_signatures`. --- lightning/src/ln/channel.rs | 98 ++++++++++++++++----- lightning/src/ln/channelmanager.rs | 55 ++++++++++-- lightning/src/ln/splicing_tests.rs | 135 ++++++++++++++++++----------- 3 files changed, 209 insertions(+), 79 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 81166d3d962..75ac056fea1 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -6846,6 +6846,9 @@ pub struct FundingTxSigned { /// Information about the completed funding negotiation. pub splice_negotiated: Option, + + /// A `splice_locked` to send to the counterparty when the splice requires 0 confirmations. + pub splice_locked: Option, } /// Information about a splice funding negotiation that has been completed. 
@@ -8877,9 +8880,13 @@ where } } - fn on_tx_signatures_exchange( - &mut self, funding_tx: Transaction, - ) -> Option { + fn on_tx_signatures_exchange<'a, L: Deref>( + &mut self, funding_tx: Transaction, best_block_height: u32, + logger: &WithChannelContext<'a, L>, + ) -> (Option, Option) + where + L::Target: Logger, + { debug_assert!(!self.context.channel_state.is_monitor_update_in_progress()); debug_assert!(!self.context.channel_state.is_awaiting_remote_revoke()); @@ -8901,22 +8908,42 @@ where channel_type, }; - Some(splice_negotiated) + let splice_locked = pending_splice.check_get_splice_locked( + &self.context, + pending_splice.negotiated_candidates.len() - 1, + best_block_height, + ); + if let Some(splice_txid) = + splice_locked.as_ref().map(|splice_locked| splice_locked.splice_txid) + { + log_info!( + logger, + "Sending 0conf splice_locked txid {} to our peer for channel {}", + splice_txid, + &self.context.channel_id + ); + } + + (Some(splice_negotiated), splice_locked) } else { debug_assert!(false); - None + (None, None) } } else { self.funding.funding_transaction = Some(funding_tx); self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); - None + (None, None) } } - pub fn funding_transaction_signed( - &mut self, funding_txid_signed: Txid, witnesses: Vec, - ) -> Result { + pub fn funding_transaction_signed( + &mut self, funding_txid_signed: Txid, witnesses: Vec, best_block_height: u32, + logger: &L, + ) -> Result + where + L::Target: Logger, + { let signing_session = if let Some(signing_session) = self.context.interactive_tx_signing_session.as_mut() { if let Some(pending_splice) = self.pending_splice.as_ref() { @@ -8937,6 +8964,7 @@ where tx_signatures: None, funding_tx: None, splice_negotiated: None, + splice_locked: None, }); } @@ -8949,6 +8977,7 @@ where tx_signatures: None, funding_tx: None, splice_negotiated: None, + splice_locked: None, }); } let err = @@ -8991,19 +9020,30 @@ where .provide_holder_witnesses(tx_signatures, &self.context.secp_ctx) .map_err(|err| APIError::APIMisuseError { err })?; - let splice_negotiated = if let Some(funding_tx) = funding_tx.clone() { + let logger = WithChannelContext::from(logger, &self.context, None); + if tx_signatures.is_some() { + log_info!( + logger, + "Sending tx_signatures for interactive funding transaction {funding_txid_signed}" + ); + } + + let (splice_negotiated, splice_locked) = if let Some(funding_tx) = funding_tx.clone() { debug_assert!(tx_signatures.is_some()); - self.on_tx_signatures_exchange(funding_tx) + self.on_tx_signatures_exchange(funding_tx, best_block_height, &logger) } else { - None + (None, None) }; - Ok(FundingTxSigned { tx_signatures, funding_tx, splice_negotiated }) + Ok(FundingTxSigned { tx_signatures, funding_tx, splice_negotiated, splice_locked }) } - pub fn tx_signatures( - &mut self, msg: &msgs::TxSignatures, - ) -> Result { + pub fn tx_signatures( + &mut self, msg: &msgs::TxSignatures, best_block_height: u32, logger: &L, + ) -> Result + where + L::Target: Logger, + { let signing_session = if let Some(signing_session) = self.context.interactive_tx_signing_session.as_mut() { @@ -9049,13 +9089,25 @@ where let (holder_tx_signatures, funding_tx) = signing_session.received_tx_signatures(msg).map_err(|msg| ChannelError::Warn(msg))?; - let splice_negotiated = if let Some(funding_tx) = funding_tx.clone() { - self.on_tx_signatures_exchange(funding_tx) + let logger = WithChannelContext::from(logger, &self.context, None); + log_info!( + logger, + "Received tx_signatures for 
interactive funding transaction {}", + msg.tx_hash + ); + + let (splice_negotiated, splice_locked) = if let Some(funding_tx) = funding_tx.clone() { + self.on_tx_signatures_exchange(funding_tx, best_block_height, &logger) } else { - None + (None, None) }; - Ok(FundingTxSigned { tx_signatures: holder_tx_signatures, funding_tx, splice_negotiated }) + Ok(FundingTxSigned { + tx_signatures: holder_tx_signatures, + funding_tx, + splice_negotiated, + splice_locked, + }) } /// Queues up an outbound update fee by placing it in the holding cell. You should call @@ -11362,7 +11414,11 @@ where confirmed_funding_index, height, ) { - log_info!(logger, "Sending a splice_locked to our peer for channel {}", &self.context.channel_id); + log_info!( + logger, "Sending splice_locked txid {} to our peer for channel {}", + splice_locked.splice_txid, + &self.context.channel_id + ); let (funding_txo, monitor_update, announcement_sigs, discarded_funding) = chain_node_signer .and_then(|(chain_hash, node_signer, user_config)| { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 4c96f5d96bc..ff57e9501ae 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6431,11 +6431,18 @@ where .map(|input| input.witness) .filter(|witness| !witness.is_empty()) .collect(); - match chan.funding_transaction_signed(txid, witnesses) { + let best_block_height = self.best_block.read().unwrap().height; + match chan.funding_transaction_signed( + txid, + witnesses, + best_block_height, + &self.logger, + ) { Ok(FundingTxSigned { tx_signatures: Some(tx_signatures), funding_tx, splice_negotiated, + splice_locked, }) => { if let Some(funding_tx) = funding_tx { self.broadcast_interactive_funding( @@ -6462,6 +6469,14 @@ where msg: tx_signatures, }, ); + if let Some(splice_locked) = splice_locked { + peer_state.pending_msg_events.push( + MessageSendEvent::SendSpliceLocked { + node_id: *counterparty_node_id, + msg: splice_locked, + }, + ); + } return NotifyOption::DoPersist; }, Err(err) => { @@ -6472,9 +6487,11 @@ where tx_signatures: None, funding_tx, splice_negotiated, + splice_locked, }) => { debug_assert!(funding_tx.is_none()); debug_assert!(splice_negotiated.is_none()); + debug_assert!(splice_locked.is_none()); return NotifyOption::SkipPersistNoEvents; }, } @@ -9578,8 +9595,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } else { let txid = signing_session.unsigned_tx().compute_txid(); - match channel.funding_transaction_signed(txid, vec![]) { - Ok(FundingTxSigned { tx_signatures: Some(tx_signatures), funding_tx, splice_negotiated }) => { + let best_block_height = self.best_block.read().unwrap().height; + match channel.funding_transaction_signed(txid, vec![], best_block_height, &self.logger) { + Ok(FundingTxSigned { + tx_signatures: Some(tx_signatures), + funding_tx, + splice_negotiated, + splice_locked, + }) => { if let Some(funding_tx) = funding_tx { self.broadcast_interactive_funding(channel, &funding_tx, &self.logger); } @@ -9602,6 +9625,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ node_id: counterparty_node_id, msg: tx_signatures, }); + if let Some(splice_locked) = splice_locked { + pending_msg_events.push(MessageSendEvent::SendSpliceLocked { + node_id: counterparty_node_id, + msg: splice_locked, + }); + } } }, Ok(FundingTxSigned { tx_signatures: None, .. }) => { @@ -10580,14 +10609,30 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ hash_map::Entry::Occupied(mut chan_entry) => { match chan_entry.get_mut().as_funded_mut() { Some(chan) => { - let FundingTxSigned { tx_signatures, funding_tx, splice_negotiated } = - try_channel_entry!(self, peer_state, chan.tx_signatures(msg), chan_entry); + let best_block_height = self.best_block.read().unwrap().height; + let FundingTxSigned { + tx_signatures, + funding_tx, + splice_negotiated, + splice_locked, + } = try_channel_entry!( + self, + peer_state, + chan.tx_signatures(msg, best_block_height, &self.logger), + chan_entry + ); if let Some(tx_signatures) = tx_signatures { peer_state.pending_msg_events.push(MessageSendEvent::SendTxSignatures { node_id: *counterparty_node_id, msg: tx_signatures, }); } + if let Some(splice_locked) = splice_locked { + peer_state.pending_msg_events.push(MessageSendEvent::SendSpliceLocked { + node_id: *counterparty_node_id, + msg: splice_locked, + }); + } if let Some(ref funding_tx) = funding_tx { self.broadcast_interactive_funding(chan, funding_tx, &self.logger); } diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index 3edd051d735..deb76a74b5e 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -23,6 +23,7 @@ use crate::util::errors::APIError; use crate::util::ser::Writeable; use crate::util::test_channel_signer::SignerOp; +use bitcoin::secp256k1::PublicKey; use bitcoin::{Amount, OutPoint as BitcoinOutPoint, ScriptBuf, Transaction, TxOut}; #[test] @@ -206,25 +207,25 @@ fn complete_interactive_funding_negotiation<'a, 'b, 'c, 'd>( } } -fn sign_interactive_funding_transaction<'a, 'b, 'c, 'd>( +fn sign_interactive_funding_tx<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, - initial_commit_sig_for_acceptor: msgs::CommitmentSigned, -) { + initial_commit_sig_for_acceptor: msgs::CommitmentSigned, is_0conf: bool, +) -> (Transaction, Option<(msgs::SpliceLocked, PublicKey)>) { let node_id_initiator = initiator.node.get_our_node_id(); let node_id_acceptor = acceptor.node.get_our_node_id(); assert!(initiator.node.get_and_clear_pending_msg_events().is_empty()); acceptor.node.handle_commitment_signed(node_id_initiator, &initial_commit_sig_for_acceptor); - let mut msg_events = acceptor.node.get_and_clear_pending_msg_events(); + let msg_events = acceptor.node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 2, "{msg_events:?}"); - if let MessageSendEvent::UpdateHTLCs { mut updates, .. } = msg_events.remove(0) { - let commitment_signed = updates.commitment_signed.remove(0); - initiator.node.handle_commitment_signed(node_id_acceptor, &commitment_signed); + if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = &msg_events[0] { + let commitment_signed = &updates.commitment_signed[0]; + initiator.node.handle_commitment_signed(node_id_acceptor, commitment_signed); } else { panic!(); } - if let MessageSendEvent::SendTxSignatures { ref msg, .. } = msg_events.remove(0) { + if let MessageSendEvent::SendTxSignatures { ref msg, .. 
} = &msg_events[1] { initiator.node.handle_tx_signatures(node_id_acceptor, msg); } else { panic!(); @@ -244,12 +245,34 @@ fn sign_interactive_funding_transaction<'a, 'b, 'c, 'd>( .funding_transaction_signed(&channel_id, &counterparty_node_id, partially_signed_tx) .unwrap(); } - let tx_signatures = - get_event_msg!(initiator, MessageSendEvent::SendTxSignatures, node_id_acceptor); - acceptor.node.handle_tx_signatures(node_id_initiator, &tx_signatures); + let mut msg_events = initiator.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), if is_0conf { 2 } else { 1 }, "{msg_events:?}"); + if let MessageSendEvent::SendTxSignatures { ref msg, .. } = &msg_events[0] { + acceptor.node.handle_tx_signatures(node_id_initiator, msg); + } else { + panic!(); + } + let splice_locked = if is_0conf { + if let MessageSendEvent::SendSpliceLocked { msg, .. } = msg_events.remove(1) { + Some((msg, node_id_acceptor)) + } else { + panic!(); + } + } else { + None + }; check_added_monitors(&initiator, 1); check_added_monitors(&acceptor, 1); + + let tx = { + let mut initiator_txn = initiator.tx_broadcaster.txn_broadcast(); + assert_eq!(initiator_txn.len(), 1); + let acceptor_txn = acceptor.tx_broadcaster.txn_broadcast(); + assert_eq!(initiator_txn, acceptor_txn,); + initiator_txn.remove(0) + }; + (tx, splice_locked) } fn splice_channel<'a, 'b, 'c, 'd>( @@ -269,15 +292,9 @@ fn splice_channel<'a, 'b, 'c, 'd>( initiator_contribution, new_funding_script, ); - sign_interactive_funding_transaction(initiator, acceptor, initial_commit_sig_for_acceptor); - - let splice_tx = { - let mut initiator_txn = initiator.tx_broadcaster.txn_broadcast(); - assert_eq!(initiator_txn.len(), 1); - let acceptor_txn = acceptor.tx_broadcaster.txn_broadcast(); - assert_eq!(initiator_txn, acceptor_txn); - initiator_txn.remove(0) - }; + let (splice_tx, splice_locked) = + sign_interactive_funding_tx(initiator, acceptor, initial_commit_sig_for_acceptor, false); + assert!(splice_locked.is_none()); expect_splice_pending_event(initiator, &node_id_acceptor); expect_splice_pending_event(acceptor, &node_id_initiator); @@ -286,36 +303,46 @@ fn splice_channel<'a, 'b, 'c, 'd>( } fn lock_splice_after_blocks<'a, 'b, 'c, 'd>( - node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, - num_blocks: u32, + node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, num_blocks: u32, +) { + connect_blocks(node_a, num_blocks); + connect_blocks(node_b, num_blocks); + + let node_id_b = node_b.node.get_our_node_id(); + let splice_locked_for_node_b = + get_event_msg!(node_a, MessageSendEvent::SendSpliceLocked, node_id_b); + lock_splice(node_a, node_b, &splice_locked_for_node_b, false); +} + +fn lock_splice<'a, 'b, 'c, 'd>( + node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, + splice_locked_for_node_b: &msgs::SpliceLocked, is_0conf: bool, ) { let (prev_funding_outpoint, prev_funding_script) = node_a .chain_monitor .chain_monitor - .get_monitor(channel_id) + .get_monitor(splice_locked_for_node_b.channel_id) .map(|monitor| (monitor.get_funding_txo(), monitor.get_funding_script())) .unwrap(); - connect_blocks(node_a, num_blocks); - connect_blocks(node_b, num_blocks); - let node_id_a = node_a.node.get_our_node_id(); let node_id_b = node_b.node.get_our_node_id(); - let splice_locked_a = get_event_msg!(node_a, MessageSendEvent::SendSpliceLocked, node_id_b); - node_b.node.handle_splice_locked(node_id_a, &splice_locked_a); + node_b.node.handle_splice_locked(node_id_a, splice_locked_for_node_b); let mut msg_events = 
node_b.node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 2, "{msg_events:?}"); + assert_eq!(msg_events.len(), if is_0conf { 1 } else { 2 }, "{msg_events:?}"); if let MessageSendEvent::SendSpliceLocked { msg, .. } = msg_events.remove(0) { node_a.node.handle_splice_locked(node_id_b, &msg); } else { panic!(); } - if let MessageSendEvent::SendAnnouncementSignatures { msg, .. } = msg_events.remove(0) { - node_a.node.handle_announcement_signatures(node_id_b, &msg); - } else { - panic!(); + if !is_0conf { + if let MessageSendEvent::SendAnnouncementSignatures { msg, .. } = msg_events.remove(0) { + node_a.node.handle_announcement_signatures(node_id_b, &msg); + } else { + panic!(); + } } expect_channel_ready_event(&node_a, &node_id_b); @@ -323,23 +350,25 @@ fn lock_splice_after_blocks<'a, 'b, 'c, 'd>( expect_channel_ready_event(&node_b, &node_id_a); check_added_monitors(&node_b, 1); - let mut msg_events = node_a.node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 2, "{msg_events:?}"); - if let MessageSendEvent::SendAnnouncementSignatures { msg, .. } = msg_events.remove(0) { - node_b.node.handle_announcement_signatures(node_id_a, &msg); - } else { - panic!(); - } - if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = msg_events.remove(0) { - } else { - panic!(); - } + if !is_0conf { + let mut msg_events = node_a.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2, "{msg_events:?}"); + if let MessageSendEvent::SendAnnouncementSignatures { msg, .. } = msg_events.remove(0) { + node_b.node.handle_announcement_signatures(node_id_a, &msg); + } else { + panic!(); + } + if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = msg_events.remove(0) { + } else { + panic!(); + } - let mut msg_events = node_b.node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1, "{msg_events:?}"); - if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = msg_events.remove(0) { - } else { - panic!(); + let mut msg_events = node_b.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + if let MessageSendEvent::BroadcastChannelAnnouncement { .. 
} = msg_events.remove(0) { + } else { + panic!(); + } } // Remove the corresponding outputs and transactions the chain source is watching for the @@ -533,7 +562,7 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { mine_transaction(&nodes[0], &splice_tx); mine_transaction(&nodes[1], &splice_tx); - lock_splice_after_blocks(&nodes[0], &nodes[1], channel_id, ANTI_REORG_DELAY - 1); + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); } #[test] @@ -633,7 +662,7 @@ fn test_splice_in() { assert!(htlc_limit_msat < initial_channel_value_sat * 1000); let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); - lock_splice_after_blocks(&nodes[0], &nodes[1], channel_id, ANTI_REORG_DELAY - 1); + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; assert!(htlc_limit_msat > initial_channel_value_sat); @@ -676,7 +705,7 @@ fn test_splice_out() { assert!(htlc_limit_msat < initial_channel_value_sat / 2 * 1000); let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); - lock_splice_after_blocks(&nodes[0], &nodes[1], channel_id, ANTI_REORG_DELAY - 1); + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; assert!(htlc_limit_msat < initial_channel_value_sat / 2 * 1000); @@ -736,7 +765,7 @@ fn do_test_splice_commitment_broadcast(splice_status: SpliceStatus, claim_htlcs: mine_transaction(&nodes[1], &splice_tx); } if splice_status == SpliceStatus::Locked { - lock_splice_after_blocks(&nodes[0], &nodes[1], channel_id, ANTI_REORG_DELAY - 1); + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); } if claim_htlcs { From ab9769f4cf62be3cbc703a657a1bc7418999ce39 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 23 Sep 2025 14:37:56 -0700 Subject: [PATCH 3/6] Allow outgoing splice request while disconnected This is crucial for peers that serve liquidity for low-availability (i.e., mobile) nodes. We should allow users to queue a splice request while the peer is offline, such that it is negotiated once reconnected. Note that there currently isn't a way to time out/cancel these requests, this is planned for the near future. 
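A rough sketch of the intended flow, borrowing the functional test harness used by the splice test added at the end of this series (identifiers such as `nodes`, `channel_id`, `node_id_1`, `splice_out_sat` and `FEERATE_FLOOR_SATS_PER_KW` come from that test's context and are not defined here):

    // The peer is offline; the splice request is queued rather than rejected.
    nodes[0].node.peer_disconnected(node_id_1);
    let contribution = SpliceContribution::SpliceOut {
        outputs: vec![TxOut {
            value: Amount::from_sat(splice_out_sat),
            script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(),
        }],
    };
    nodes[0]
        .node
        .splice_channel(&channel_id, &node_id_1, contribution, FEERATE_FLOOR_SATS_PER_KW, None)
        .unwrap();
    // Nothing goes out while disconnected; quiescence (stfu) and the splice
    // negotiation only begin once the peer reconnects.
    assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());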
--- lightning/src/ln/channel.rs | 6 ++++-- lightning/src/ln/channelmanager.rs | 8 -------- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 75ac056fea1..8a8b2b0df1b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -11851,10 +11851,10 @@ where }); } - if !self.context.is_live() { + if !self.context.is_usable() { return Err(APIError::APIMisuseError { err: format!( - "Channel {} cannot be spliced, as channel is not live", + "Channel {} cannot be spliced as it is either pending open/close", self.context.channel_id() ), }); @@ -13017,6 +13017,7 @@ where || self.context.channel_state.is_awaiting_quiescence() || self.context.channel_state.is_local_stfu_sent() { + log_debug!(logger, "Channel is either pending quiescence or already quiescent"); return Ok(None); } @@ -13024,6 +13025,7 @@ where if self.context.is_live() { Ok(Some(self.send_stfu(logger)?)) } else { + log_debug!(logger, "Waiting for peer reconnection to send stfu"); Ok(None) } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ff57e9501ae..68c5e810c1d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4728,14 +4728,6 @@ where // Look for the channel match peer_state.channel_by_id.entry(*channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - if !chan_phase_entry.get().context().is_connected() { - // TODO: We should probably support this, but right now `splice_channel` refuses when - // the peer is disconnected, so we just check it here. - return Err(APIError::ChannelUnavailable { - err: "Cannot initiate splice while peer is disconnected".to_owned(), - }); - } - let locktime = locktime.unwrap_or_else(|| self.current_best_block().height); if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); From d91e14bae20097f899a93daefbadb8145266580e Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 23 Sep 2025 14:39:04 -0700 Subject: [PATCH 4/6] Attempt queued splice after existing pending splice becomes locked Since we don't yet support contributing to an incoming splice, we need to make sure we attempt our splice negotiation eventually if the counterparty was also attempting a splice at the same time but they won the quiescence tie-breaker. Since only one pending splice (without RBF) is allowed at a time, we do this after the existing splice becomes locked. --- lightning/src/ln/channel.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 8a8b2b0df1b..8f74dc24e71 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -11180,6 +11180,12 @@ where let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, block_height, logger); + if let Some(quiescent_action) = self.quiescent_action.as_ref() { + if matches!(quiescent_action, QuiescentAction::Splice(_)) { + self.context.channel_state.set_awaiting_quiescence(); + } + } + Some(SpliceFundingPromotion { funding_txo, monitor_update, From 3c4e70c102bb080e38e1c2695804123feae8ed52 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Thu, 9 Oct 2025 10:01:52 -0700 Subject: [PATCH 5/6] Capture stfu send in reconnection tests We'll use this in the next commit to test that we'll send a stfu message for a splice we intend to initiate upon reconnecting. 
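As a sketch of how tests are expected to consume this (mirroring the splice test added in the next commit; `nodes` comes from the usual functional test setup and is not defined here), `handle_chan_reestablish_msgs!` now returns the captured `stfu` as an extra tuple element and `ReconnectArgs` grows a matching flag:

    let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
    // Expect node A, but not node B, to send stfu while reestablishing.
    reconnect_args.send_stfu = (true, false);
    reconnect_nodes(reconnect_args);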
--- lightning/src/ln/async_signer_tests.rs | 18 ++++++++----- lightning/src/ln/functional_test_utils.rs | 32 ++++++++++++++++++++++- 2 files changed, 43 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index de5103aeba9..71821081094 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -596,7 +596,7 @@ fn do_test_async_raa_peer_disconnect( } // Expect the RAA - let (_, revoke_and_ack, commitment_signed, resend_order, _, _) = + let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _) = handle_chan_reestablish_msgs!(dst, src); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { assert!(revoke_and_ack.is_none()); @@ -612,14 +612,14 @@ fn do_test_async_raa_peer_disconnect( dst.node.signer_unblocked(Some((src_node_id, chan_id))); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { - let (_, revoke_and_ack, commitment_signed, resend_order, _, _) = + let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_some()); assert!(commitment_signed.is_some()); assert!(resend_order == RAACommitmentOrder::RevokeAndACKFirst); } else { // Make sure we don't double send the RAA. - let (_, revoke_and_ack, commitment_signed, _, _, _) = + let (_, revoke_and_ack, commitment_signed, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_none()); assert!(commitment_signed.is_none()); @@ -746,7 +746,8 @@ fn do_test_async_commitment_signature_peer_disconnect( } // Expect the RAA - let (_, revoke_and_ack, commitment_signed, _, _, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, revoke_and_ack, commitment_signed, _, _, _, _) = + handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_some()); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { assert!(commitment_signed.is_none()); @@ -759,11 +760,11 @@ fn do_test_async_commitment_signature_peer_disconnect( dst.node.signer_unblocked(Some((src_node_id, chan_id))); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { - let (_, _, commitment_signed, _, _, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, _, commitment_signed, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(commitment_signed.is_some()); } else { // Make sure we don't double send the CS. 
- let (_, _, commitment_signed, _, _, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, _, commitment_signed, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(commitment_signed.is_none()); } } @@ -880,6 +881,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.2.is_none()); assert!(as_resp.4.is_none()); assert!(as_resp.5.is_none()); + assert!(as_resp.6.is_none()); if monitor_update_failure { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); @@ -901,6 +903,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.2.is_none()); assert!(as_resp.4.is_none()); assert!(as_resp.5.is_none()); + assert!(as_resp.6.is_none()); nodes[0].enable_channel_signer_op(&node_b_id, &chan_id, SignerOp::SignCounterpartyCommitment); nodes[0].node.signer_unblocked(Some((node_b_id, chan_id))); @@ -923,6 +926,9 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.5.is_none()); assert!(bs_resp.5.is_none()); + assert!(as_resp.6.is_none()); + assert!(bs_resp.6.is_none()); + // Now that everything is restored, get the CS + RAA and handle them. nodes[1] .node diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 9845d6de738..3d7d69c3b76 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -4871,6 +4871,13 @@ macro_rules! handle_chan_reestablish_msgs { had_channel_update = true; } + let mut stfu = None; + if let Some(&MessageSendEvent::SendStfu { ref node_id, ref msg }) = msg_events.get(idx) { + idx += 1; + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + stfu = Some(msg.clone()); + } + let mut revoke_and_ack = None; let mut commitment_update = None; let order = if let Some(ev) = msg_events.get(idx) { @@ -4946,7 +4953,15 @@ macro_rules! 
handle_chan_reestablish_msgs { assert_eq!(msg_events.len(), idx, "{msg_events:?}"); - (channel_ready, revoke_and_ack, commitment_update, order, announcement_sigs, tx_signatures) + ( + channel_ready, + revoke_and_ack, + commitment_update, + order, + announcement_sigs, + tx_signatures, + stfu, + ) }}; } @@ -4955,6 +4970,7 @@ pub struct ReconnectArgs<'a, 'b, 'c, 'd> { pub node_b: &'a Node<'b, 'c, 'd>, pub send_channel_ready: (bool, bool), pub send_announcement_sigs: (bool, bool), + pub send_stfu: (bool, bool), pub send_interactive_tx_commit_sig: (bool, bool), pub send_interactive_tx_sigs: (bool, bool), pub expect_renegotiated_funding_locked_monitor_update: (bool, bool), @@ -4977,6 +4993,7 @@ impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> { node_b, send_channel_ready: (false, false), send_announcement_sigs: (false, false), + send_stfu: (false, false), send_interactive_tx_commit_sig: (false, false), send_interactive_tx_sigs: (false, false), expect_renegotiated_funding_locked_monitor_update: (false, false), @@ -5000,6 +5017,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_b, send_channel_ready, send_announcement_sigs, + send_stfu, send_interactive_tx_commit_sig, send_interactive_tx_sigs, expect_renegotiated_funding_locked_monitor_update, @@ -5118,6 +5136,12 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { } else { assert!(chan_msgs.4.is_none()); } + if send_stfu.0 { + let stfu = chan_msgs.6.take().unwrap(); + node_a.node.handle_stfu(node_b_id, &stfu); + } else { + assert!(chan_msgs.6.is_none()); + } if send_interactive_tx_commit_sig.0 { assert!(chan_msgs.1.is_none()); let commitment_update = chan_msgs.2.take().unwrap(); @@ -5224,6 +5248,12 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { } else { assert!(chan_msgs.4.is_none()); } + if send_stfu.1 { + let stfu = chan_msgs.6.take().unwrap(); + node_b.node.handle_stfu(node_a_id, &stfu); + } else { + assert!(chan_msgs.6.is_none()); + } if send_interactive_tx_commit_sig.1 { assert!(chan_msgs.1.is_none()); let commitment_update = chan_msgs.2.take().unwrap(); From 5452f15e76004caefd9e2f40f6db5491f1318377 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Wed, 24 Sep 2025 17:00:12 -0700 Subject: [PATCH 6/6] Test propose channel splice while disconnected --- lightning/src/ln/functional_test_utils.rs | 12 +- lightning/src/ln/splicing_tests.rs | 313 ++++++++++++++++++++++ 2 files changed, 323 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 3d7d69c3b76..d8e59dde166 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1572,6 +1572,14 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, initiator_config: Option, +) -> (bitcoin::Transaction, ChannelId) { + open_zero_conf_channel_with_value(initiator, receiver, initiator_config, 100_000, 10_001) +} + +// Receiver must have been initialized with manually_accept_inbound_channels set to true. 
+pub fn open_zero_conf_channel_with_value<'a, 'b, 'c, 'd>( + initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, + initiator_config: Option, channel_value_sat: u64, push_msat: u64, ) -> (bitcoin::Transaction, ChannelId) { let initiator_channels = initiator.node.list_usable_channels().len(); let receiver_channels = receiver.node.list_usable_channels().len(); @@ -1581,7 +1589,7 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( initiator .node - .create_channel(receiver_node_id, 100_000, 10_001, 42, None, initiator_config) + .create_channel(receiver_node_id, channel_value_sat, push_msat, 42, None, initiator_config) .unwrap(); let open_channel = get_event_msg!(initiator, MessageSendEvent::SendOpenChannel, receiver_node_id); @@ -1610,7 +1618,7 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( initiator.node.handle_accept_channel(receiver_node_id, &accept_channel); let (temporary_channel_id, tx, _) = - create_funding_transaction(&initiator, &receiver_node_id, 100_000, 42); + create_funding_transaction(&initiator, &receiver_node_id, channel_value_sat, 42); initiator .node .funding_transaction_generated(temporary_channel_id, receiver_node_id, tx.clone()) diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index deb76a74b5e..db34969074b 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -1178,6 +1178,319 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { .remove_watched_txn_and_outputs(prev_funding_outpoint, prev_funding_script); } +#[test] +fn test_propose_splice_while_disconnected() { + do_test_propose_splice_while_disconnected(false, false); + do_test_propose_splice_while_disconnected(false, true); + do_test_propose_splice_while_disconnected(true, false); + do_test_propose_splice_while_disconnected(true, true); +} + +fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { + // Test that both nodes are able to propose a splice while the counterparty is disconnected, and + // whoever doesn't go first due to the quiescence tie-breaker, will retry their splice after the + // first one becomes locked. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let (persister_0a, persister_0b, persister_1a, persister_1b); + let (chain_monitor_0a, chain_monitor_0b, chain_monitor_1a, chain_monitor_1b); + let mut config = test_default_channel_config(); + if use_0conf { + config.manually_accept_inbound_channels = true; + config.channel_handshake_limits.trust_own_funding_0conf = true; + } + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let (node_0a, node_0b, node_1a, node_1b); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_id_0 = nodes[0].node.get_our_node_id(); + let node_id_1 = nodes[1].node.get_our_node_id(); + + let initial_channel_value_sat = 1_000_000; + let push_msat = initial_channel_value_sat / 2 * 1000; + let channel_id = if use_0conf { + let (funding_tx, channel_id) = open_zero_conf_channel_with_value( + &nodes[0], + &nodes[1], + None, + initial_channel_value_sat, + push_msat, + ); + mine_transaction(&nodes[0], &funding_tx); + mine_transaction(&nodes[1], &funding_tx); + channel_id + } else { + let (_, _, channel_id, _) = create_announced_chan_between_nodes_with_value( + &nodes, + 0, + 1, + initial_channel_value_sat, + push_msat, + ); + channel_id + }; + + // Start with the nodes disconnected, and have each one attempt a splice. 
+ nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + + let splice_out_sat = initial_channel_value_sat / 4; + let node_0_contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(splice_out_sat), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }], + }; + nodes[0] + .node + .splice_channel( + &channel_id, + &node_id_1, + node_0_contribution.clone(), + FEERATE_FLOOR_SATS_PER_KW, + None, + ) + .unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + let node_1_contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(splice_out_sat), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }], + }; + nodes[1] + .node + .splice_channel( + &channel_id, + &node_id_0, + node_1_contribution.clone(), + FEERATE_FLOOR_SATS_PER_KW, + None, + ) + .unwrap(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + if reload { + let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); + reload_node!( + nodes[0], + nodes[0].node.encode(), + &[&encoded_monitor_0], + persister_0a, + chain_monitor_0a, + node_0a + ); + let encoded_monitor_1 = get_monitor!(nodes[1], channel_id).encode(); + reload_node!( + nodes[1], + nodes[1].node.encode(), + &[&encoded_monitor_1], + persister_1a, + chain_monitor_1a, + node_1a + ); + } + + // Reconnect the nodes. Both nodes should attempt quiescence as the initiator, but only one will + // be it via the tie-breaker. + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_channel_ready = (true, true); + if !use_0conf { + reconnect_args.send_announcement_sigs = (true, true); + } + reconnect_args.send_stfu = (true, true); + reconnect_nodes(reconnect_args); + let splice_init = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceInit, node_id_1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + let (prev_funding_outpoint, prev_funding_script) = nodes[0] + .chain_monitor + .chain_monitor + .get_monitor(channel_id) + .map(|monitor| (monitor.get_funding_txo(), monitor.get_funding_script())) + .unwrap(); + + // Negotiate the first splice to completion. + let initial_commit_sig = { + nodes[1].node.handle_splice_init(node_id_0, &splice_init); + let splice_ack = get_event_msg!(nodes[1], MessageSendEvent::SendSpliceAck, node_id_0); + nodes[0].node.handle_splice_ack(node_id_1, &splice_ack); + let new_funding_script = chan_utils::make_funding_redeemscript( + &splice_init.funding_pubkey, + &splice_ack.funding_pubkey, + ) + .to_p2wsh(); + complete_interactive_funding_negotiation( + &nodes[0], + &nodes[1], + channel_id, + node_0_contribution, + new_funding_script, + ) + }; + let (splice_tx, splice_locked) = + sign_interactive_funding_tx(&nodes[0], &nodes[1], initial_commit_sig, use_0conf); + expect_splice_pending_event(&nodes[0], &node_id_1); + expect_splice_pending_event(&nodes[1], &node_id_0); + + let splice_locked = if use_0conf { + let (splice_locked, for_node_id) = splice_locked.unwrap(); + assert_eq!(for_node_id, node_id_1); + splice_locked + } else { + assert!(splice_locked.is_none()); + + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + + // Mine enough blocks for the first splice to become locked. 
+ connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); + connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); + + get_event_msg!(nodes[0], MessageSendEvent::SendSpliceLocked, node_id_1) + }; + nodes[1].node.handle_splice_locked(node_id_0, &splice_locked); + + // We should see the node which lost the tie-breaker attempt their splice now by first + // negotiating quiescence, but their `stfu` won't be sent until after another reconnection. + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), if use_0conf { 2 } else { 3 }, "{msg_events:?}"); + if let MessageSendEvent::SendSpliceLocked { ref msg, .. } = &msg_events[0] { + nodes[0].node.handle_splice_locked(node_id_1, msg); + if use_0conf { + // TODO(splicing): Revisit splice transaction rebroadcasts. + let txn_0 = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn_0.len(), 1); + assert_eq!(&txn_0[0], &splice_tx); + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + } + } else { + panic!("Unexpected event {:?}", &msg_events[0]); + } + if !use_0conf { + if let MessageSendEvent::SendAnnouncementSignatures { ref msg, .. } = &msg_events[1] { + nodes[0].node.handle_announcement_signatures(node_id_1, msg); + } else { + panic!("Unexpected event {:?}", &msg_events[1]); + } + } + assert!(matches!( + &msg_events[if use_0conf { 1 } else { 2 }], + MessageSendEvent::SendStfu { .. } + )); + + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), if use_0conf { 0 } else { 2 }, "{msg_events:?}"); + if !use_0conf { + if let MessageSendEvent::SendAnnouncementSignatures { ref msg, .. } = &msg_events[0] { + nodes[1].node.handle_announcement_signatures(node_id_0, msg); + } else { + panic!("Unexpected event {:?}", &msg_events[1]); + } + assert!(matches!(&msg_events[1], MessageSendEvent::BroadcastChannelAnnouncement { .. })); + } + + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), if use_0conf { 0 } else { 1 }, "{msg_events:?}"); + if !use_0conf { + assert!(matches!(&msg_events[0], MessageSendEvent::BroadcastChannelAnnouncement { .. })); + } + + expect_channel_ready_event(&nodes[0], &node_id_1); + check_added_monitors(&nodes[0], 1); + expect_channel_ready_event(&nodes[1], &node_id_0); + check_added_monitors(&nodes[1], 1); + + // Remove the corresponding outputs and transactions the chain source is watching for the + // old funding as it is no longer being tracked. + nodes[0] + .chain_source + .remove_watched_txn_and_outputs(prev_funding_outpoint, prev_funding_script.clone()); + nodes[1] + .chain_source + .remove_watched_txn_and_outputs(prev_funding_outpoint, prev_funding_script); + + // Reconnect the nodes. This should trigger the node which lost the tie-breaker to resend `stfu` + // for their splice attempt. 
+ if reload { + let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); + reload_node!( + nodes[0], + nodes[0].node.encode(), + &[&encoded_monitor_0], + persister_0b, + chain_monitor_0b, + node_0b + ); + let encoded_monitor_1 = get_monitor!(nodes[1], channel_id).encode(); + reload_node!( + nodes[1], + nodes[1].node.encode(), + &[&encoded_monitor_1], + persister_1b, + chain_monitor_1b, + node_1b + ); + } else { + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + } + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + if !use_0conf { + reconnect_args.send_announcement_sigs = (true, true); + } + reconnect_args.send_stfu = (true, false); + reconnect_nodes(reconnect_args); + + // Drive the second splice to completion. + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + if let MessageSendEvent::SendStfu { ref msg, .. } = msg_events[0] { + nodes[1].node.handle_stfu(node_id_0, msg); + } else { + panic!("Unexpected event {:?}", &msg_events[0]); + } + + let splice_init = get_event_msg!(nodes[1], MessageSendEvent::SendSpliceInit, node_id_0); + let initial_commit_sig = { + nodes[0].node.handle_splice_init(node_id_1, &splice_init); + let splice_ack = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceAck, node_id_1); + nodes[1].node.handle_splice_ack(node_id_0, &splice_ack); + let new_funding_script = chan_utils::make_funding_redeemscript( + &splice_init.funding_pubkey, + &splice_ack.funding_pubkey, + ) + .to_p2wsh(); + complete_interactive_funding_negotiation( + &nodes[1], + &nodes[0], + channel_id, + node_1_contribution, + new_funding_script, + ) + }; + let (splice_tx, splice_locked) = + sign_interactive_funding_tx(&nodes[1], &nodes[0], initial_commit_sig, use_0conf); + expect_splice_pending_event(&nodes[0], &node_id_1); + expect_splice_pending_event(&nodes[1], &node_id_0); + + if use_0conf { + let (splice_locked, for_node_id) = splice_locked.unwrap(); + assert_eq!(for_node_id, node_id_0); + lock_splice(&nodes[1], &nodes[0], &splice_locked, true); + } else { + assert!(splice_locked.is_none()); + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + lock_splice_after_blocks(&nodes[1], &nodes[0], ANTI_REORG_DELAY - 1); + } + + // Sanity check that we can still make a test payment. + send_payment(&nodes[0], &[&nodes[1]], 1_000_000); +} + #[test] fn disconnect_on_unexpected_interactive_tx_message() { let chanmon_cfgs = create_chanmon_cfgs(2);