From c9d03d44238bbaa8913c5dc9856562c86acc29ad Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Oct 2025 10:00:57 +0100 Subject: [PATCH] store channel in monitor --- lightning/Cargo.toml | 1 + lightning/src/chain/channelmonitor.rs | 71 +++- lightning/src/ln/chanmon_update_fail_tests.rs | 8 + lightning/src/ln/channel.rs | 24 +- lightning/src/ln/channel_open_tests.rs | 2 + lightning/src/ln/channelmanager.rs | 310 +++++++++--------- lightning/src/ln/functional_tests.rs | 1 + lightning/src/ln/monitor_tests.rs | 6 + lightning/src/ln/offers_tests.rs | 1 + lightning/src/ln/onion_route_tests.rs | 1 + lightning/src/ln/payment_tests.rs | 7 + lightning/src/ln/priv_short_conf_tests.rs | 1 + lightning/src/ln/quiescence_tests.rs | 1 + lightning/src/ln/reload_tests.rs | 12 +- lightning/src/ln/reorg_tests.rs | 3 + lightning/src/ln/splicing_tests.rs | 3 + 16 files changed, 280 insertions(+), 172 deletions(-) diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index 47ae12f9e1e..1d586a842f9 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -22,6 +22,7 @@ _externalize_tests = ["inventory", "_test_utils"] # Allow signing of local transactions that may have been revoked or will be revoked, for functional testing (e.g. justice tx handling). # This is unsafe to use in production because it may result in the counterparty publishing taking our funds. unsafe_revoked_tx_signing = [] +safe_channels = [] std = [] diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 1d035b68650..552aed55146 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -111,6 +111,9 @@ pub struct ChannelMonitorUpdate { /// Will be `None` for `ChannelMonitorUpdate`s constructed on LDK versions prior to 0.0.121 and /// always `Some` otherwise. pub channel_id: Option, + + /// The encoded channel data associated with this ChannelMonitor, if any. + pub encoded_channel: Option>, } impl ChannelMonitorUpdate { @@ -156,6 +159,13 @@ impl Writeable for ChannelMonitorUpdate { for update_step in self.updates.iter() { update_step.write(w)?; } + #[cfg(feature = "safe_channels")] + write_tlv_fields!(w, { + // 1 was previously used to store `counterparty_node_id` + (3, self.channel_id, option), + (5, self.encoded_channel, option) + }); + #[cfg(not(feature = "safe_channels"))] write_tlv_fields!(w, { // 1 was previously used to store `counterparty_node_id` (3, self.channel_id, option), @@ -176,11 +186,13 @@ impl Readable for ChannelMonitorUpdate { } } let mut channel_id = None; + let mut encoded_channel = None; read_tlv_fields!(r, { // 1 was previously used to store `counterparty_node_id` (3, channel_id, option), + (5, encoded_channel, option) }); - Ok(Self { update_id, updates, channel_id }) + Ok(Self { update_id, updates, channel_id, encoded_channel }) } } @@ -1402,6 +1414,8 @@ pub(crate) struct ChannelMonitorImpl { /// make deciding whether to do so simple, here we track whether this monitor was last written /// prior to 0.1. 
written_by_0_1_or_later: bool, + + encoded_channel: Option>, } // Returns a `&FundingScope` for the one we are currently observing/handling commitment transactions @@ -1733,6 +1747,32 @@ pub(crate) fn write_chanmon_internal( _ => channel_monitor.pending_monitor_events.clone(), }; + #[cfg(feature = "safe_channels")] + write_tlv_fields!(writer, { + (1, channel_monitor.funding_spend_confirmed, option), + (3, channel_monitor.htlcs_resolved_on_chain, required_vec), + (5, pending_monitor_events, required_vec), + (7, channel_monitor.funding_spend_seen, required), + (9, channel_monitor.counterparty_node_id, required), + (11, channel_monitor.confirmed_commitment_tx_counterparty_output, option), + (13, channel_monitor.spendable_txids_confirmed, required_vec), + (15, channel_monitor.counterparty_fulfilled_htlcs, required), + (17, channel_monitor.initial_counterparty_commitment_info, option), + (19, channel_monitor.channel_id, required), + (21, channel_monitor.balances_empty_height, option), + (23, channel_monitor.holder_pays_commitment_tx_fee, option), + (25, channel_monitor.payment_preimages, required), + (27, channel_monitor.first_negotiated_funding_txo, required), + (29, channel_monitor.initial_counterparty_commitment_tx, option), + (31, channel_monitor.funding.channel_parameters, required), + (32, channel_monitor.pending_funding, optional_vec), + (33, channel_monitor.htlcs_resolved_to_user, required), + (34, channel_monitor.alternative_funding_confirmed, option), + (35, channel_monitor.is_manual_broadcast, required), + (37, channel_monitor.funding_seen_onchain, required), + (39, channel_monitor.encoded_channel, option), + }); + #[cfg(not(feature = "safe_channels"))] write_tlv_fields!(writer, { (1, channel_monitor.funding_spend_confirmed, option), (3, channel_monitor.htlcs_resolved_on_chain, required_vec), @@ -1994,6 +2034,7 @@ impl ChannelMonitor { alternative_funding_confirmed: None, written_by_0_1_or_later: true, + encoded_channel: None, }) } @@ -2114,6 +2155,16 @@ impl ChannelMonitor { inner.update_monitor(updates, broadcaster, fee_estimator, &logger) } + /// Gets the encoded channel data, if any, associated with this ChannelMonitor. + pub fn get_encoded_channel(&self) -> Option> { + self.inner.lock().unwrap().encoded_channel.clone() + } + + /// Updates the encoded channel data associated with this ChannelMonitor. + pub fn update_encoded_channel(&self, encoded: Vec) { + self.inner.lock().unwrap().encoded_channel = Some(encoded); + } + /// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this /// ChannelMonitor. /// @@ -4405,9 +4456,18 @@ impl ChannelMonitorImpl { } } - if ret.is_ok() && self.no_further_updates_allowed() && is_pre_close_update { - log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); - Err(()) + if ret.is_ok() { + if self.no_further_updates_allowed() && is_pre_close_update { + log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); + Err(()) + } else { + // Assume that if the updates contains no encoded channel, that the channel remained unchanged. We + // therefore do not update the monitor. 
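+            // Updates built after the channel itself is gone (for example preimage claims or
+            // `ReleasePaymentComplete` steps replayed against an already-closed channel) carry
+            // `encoded_channel: None`, so the last copy stored while the channel was live is
+            // kept as-is.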
+ if let Some(encoded_channel) = updates.encoded_channel.as_ref() { + self.encoded_channel = Some(encoded_channel.clone()); + } + Ok(()) + } } else { ret } } @@ -6645,6 +6705,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let mut alternative_funding_confirmed = None; let mut is_manual_broadcast = RequiredWrapper(None); let mut funding_seen_onchain = RequiredWrapper(None); + let mut encoded_channel = None; read_tlv_fields!(reader, { (1, funding_spend_confirmed, option), (3, htlcs_resolved_on_chain, optional_vec), @@ -6667,6 +6728,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP (34, alternative_funding_confirmed, option), (35, is_manual_broadcast, (default_value, false)), (37, funding_seen_onchain, (default_value, true)), + (39, encoded_channel, option), }); // Note that `payment_preimages_with_info` was added (and is always written) in LDK 0.1, so // we can use it to determine if this monitor was last written by LDK 0.1 or later. @@ -6844,6 +6906,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP alternative_funding_confirmed, written_by_0_1_or_later, + encoded_channel, }); if counterparty_node_id.is_none() { diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 1a9af4f2071..d0fbcca2493 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -2839,6 +2839,8 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); } + +#[cfg(not(feature = "safe_channels"))] #[test] fn channel_holding_cell_serialize() { do_channel_holding_cell_serialize(true, true); @@ -3320,6 +3322,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { assert!(nodes[0].node.list_channels().is_empty()); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_outbound_reload_without_init_mon() { do_test_outbound_reload_without_init_mon(true); @@ -3428,6 +3431,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo assert!(nodes[1].node.list_channels().is_empty()); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_inbound_reload_without_init_mon() { do_test_inbound_reload_without_init_mon(true, true); @@ -3767,6 +3771,7 @@ fn do_test_inverted_mon_completion_order( expect_payment_sent(&nodes[0], payment_preimage, None, true, true); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_inverted_mon_completion_order() { do_test_inverted_mon_completion_order(true, true); @@ -3969,6 +3974,7 @@ fn do_test_durable_preimages_on_closed_channel( } } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_durable_preimages_on_closed_channel() { do_test_durable_preimages_on_closed_channel(true, true, true); @@ -4093,6 +4099,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { send_payment(&nodes[1], &[&nodes[2]], 100_000); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_reload_mon_update_completion_actions() { do_test_reload_mon_update_completion_actions(true); @@ -4459,6 +4466,7 @@ fn do_test_partial_claim_mon_update_compl_actions(reload_a: bool, reload_b: bool assert!(!get_monitor!(nodes[3], chan_4_id).get_stored_preimages().contains_key(&payment_hash)); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_partial_claim_mon_update_compl_actions() { 
do_test_partial_claim_mon_update_compl_actions(true, true); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 63d042b4cfa..1c6e2546e91 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2153,7 +2153,7 @@ where // Not having a signing session implies they've already sent `splice_locked`, // which must always come after the initial commitment signed is sent. .unwrap_or(true); - let res = if has_negotiated_pending_splice && !session_received_commitment_signed { + let res: Result<(Option::Target as SignerProvider>::EcdsaSigner>>, Option), ChannelError> = if has_negotiated_pending_splice && !session_received_commitment_signed { funded_channel .splice_initial_commitment_signed(msg, fee_estimator, logger) .map(|monitor_update_opt| (None, monitor_update_opt)) @@ -6045,6 +6045,7 @@ where should_broadcast: broadcast, }], channel_id: Some(self.channel_id()), + encoded_channel: None, }; Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), update)) } else { @@ -7276,6 +7277,7 @@ where payment_info, }], channel_id: Some(self.context.channel_id()), + encoded_channel: None, }; if !self.context.channel_state.can_generate_new_commitment() { @@ -7417,6 +7419,7 @@ where Vec::new(), Vec::new(), ); + monitor_update.encoded_channel = Some(self.encode()); UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat } }, UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {}, @@ -7892,7 +7895,7 @@ where &self.context.channel_id(), pending_splice_funding.get_funding_txo().unwrap().txid); self.context.latest_monitor_update_id += 1; - let monitor_update = ChannelMonitorUpdate { + let mut monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, updates: vec![ChannelMonitorUpdateStep::RenegotiatedFunding { channel_parameters: pending_splice_funding.channel_transaction_parameters.clone(), @@ -7900,6 +7903,7 @@ where counterparty_commitment_tx, }], channel_id: Some(self.context.channel_id()), + encoded_channel: None, }; self.context @@ -7909,6 +7913,7 @@ where .received_commitment_signed(); self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + monitor_update.encoded_channel = Some(self.encode()); Ok(self.push_ret_blockable_mon_update(monitor_update)) } @@ -8165,6 +8170,7 @@ where update_id: self.context.latest_monitor_update_id, updates: vec![update], channel_id: Some(self.context.channel_id()), + encoded_channel: None, }; self.context.expecting_peer_commitment_signed = false; @@ -8217,6 +8223,7 @@ where Vec::new(), Vec::new(), ); + monitor_update.encoded_channel = Some(self.encode()); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -8270,6 +8277,7 @@ where update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet! updates: Vec::new(), channel_id: Some(self.context.channel_id()), + encoded_channel: None, }; let mut htlc_updates = Vec::new(); @@ -8346,7 +8354,7 @@ where // `ChannelMonitorUpdate` to the user, making this one redundant, however // there's no harm in including the extra `ChannelMonitorUpdateStep` here. // We do not bother to track and include `payment_info` here, however. 
- let fulfill = self.get_update_fulfill_htlc( + let fulfill: UpdateFulfillFetch = self.get_update_fulfill_htlc( htlc_id, *payment_preimage, None, @@ -8360,6 +8368,8 @@ where unreachable!() }; update_fulfill_count += 1; + + additional_monitor_update.encoded_channel = Some(self.encode()); monitor_update.updates.append(&mut additional_monitor_update.updates); None }, @@ -8418,6 +8428,8 @@ where update_add_count, update_fulfill_count, update_fail_count); self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); + + monitor_update.encoded_channel = Some(self.encode()); (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) } else { (None, Vec::new()) @@ -8534,6 +8546,7 @@ where secret: msg.per_commitment_secret, }], channel_id: Some(self.context.channel_id()), + encoded_channel: None, }; // Update state now that we've passed all the can-fail calls... @@ -8759,6 +8772,7 @@ where }; macro_rules! return_with_htlcs_to_fail { ($htlcs_to_fail: expr) => { + monitor_update.encoded_channel = Some(self.encode()); if !release_monitor { self.context .blocked_monitor_updates @@ -10384,6 +10398,7 @@ where scriptpubkey: self.get_closing_scriptpubkey(), }], channel_id: Some(self.context.channel_id()), + encoded_channel: Some(self.encode()), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); self.push_ret_blockable_mon_update(monitor_update) @@ -11153,6 +11168,7 @@ where funding_txid: funding_txo.txid, }], channel_id: Some(self.context.channel_id()), + encoded_channel: Some(self.encode()), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); let monitor_update = self.push_ret_blockable_mon_update(monitor_update); @@ -12712,6 +12728,7 @@ where update_id: self.context.latest_monitor_update_id, updates: vec![update], channel_id: Some(self.context.channel_id()), + encoded_channel: Some(self.encode()), }; self.context.channel_state.set_awaiting_remote_revoke(); monitor_update @@ -12958,6 +12975,7 @@ where scriptpubkey: self.get_closing_scriptpubkey(), }], channel_id: Some(self.context.channel_id()), + encoded_channel: Some(self.encode()), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); self.push_ret_blockable_mon_update(monitor_update) diff --git a/lightning/src/ln/channel_open_tests.rs b/lightning/src/ln/channel_open_tests.rs index 3fd546aaff7..fd0483601fc 100644 --- a/lightning/src/ln/channel_open_tests.rs +++ b/lightning/src/ln/channel_open_tests.rs @@ -2095,6 +2095,7 @@ pub fn test_batch_channel_open() { ))); } +#[cfg(not(feature = "safe_channels"))] #[xtest(feature = "_externalize_tests")] pub fn test_close_in_funding_batch() { // This test ensures that if one of the channels @@ -2183,6 +2184,7 @@ pub fn test_close_in_funding_batch() { assert!(nodes[0].node.list_channels().is_empty()); } +#[cfg(not(feature = "safe_channels"))] #[xtest(feature = "_externalize_tests")] pub fn test_batch_funding_close_after_funding_signed() { let chanmon_cfgs = create_chanmon_cfgs(3); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 0b4223a6a81..3c5cf5cb834 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9100,6 +9100,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ payment_info, }], channel_id: Some(prev_hop.channel_id), + encoded_channel: None, }; // We don't have any idea if this is a duplicate claim without interrogating the @@ -10316,6 +10317,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fail_chan!("Already had channel with the new channel_id"); }, hash_map::Entry::Vacant(e) => { + monitor.update_encoded_channel(chan.encode()); let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { // There's no problem signing a counterparty's funding transaction if our monitor @@ -10480,6 +10482,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ match chan .funding_signed(&msg, best_block, &self.signer_provider, &self.logger) .and_then(|(funded_chan, monitor)| { + monitor.update_encoded_channel(funded_chan.encode()); self.chain_monitor .watch_channel(funded_chan.context.channel_id(), monitor) .map_err(|()| { @@ -11191,6 +11194,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(chan) = chan.as_funded_mut() { if let Some(monitor) = monitor_opt { + monitor.update_encoded_channel(chan.encode()); let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { handle_initial_monitor!(self, persist_state, peer_state_lock, peer_state, @@ -13621,6 +13625,7 @@ where updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id, }], + encoded_channel: None, }; let during_startup = @@ -16760,6 +16765,19 @@ where let mut failed_htlcs = Vec::new(); let channel_count: u64 = Readable::read(reader)?; + + // Discard channel manager versions of channels. 
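+        // With `safe_channels`, the authoritative copy of each channel lives in its
+        // `ChannelMonitor` (see `get_encoded_channel`), so the channels the `ChannelManager`
+        // serialized itself are read here only to advance the reader and then dropped.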
+ for _ in 0..channel_count { + _ = FundedChannel::read( + reader, + ( + &args.entropy_source, + &args.signer_provider, + &provided_channel_type_features(&args.config), + ), + )?; + } + let mut channel_id_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128)); let mut per_peer_state = hash_map_with_capacity(cmp::min( channel_count as usize, @@ -16768,181 +16786,97 @@ where let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = VecDeque::new(); let mut close_background_events = Vec::new(); - for _ in 0..channel_count { + for (_, monitor) in args.channel_monitors.iter() { + let encoded_channel = monitor.get_encoded_channel().unwrap(); + let encoded_channel_reader = &mut &encoded_channel[..]; let mut channel: FundedChannel = FundedChannel::read( - reader, + encoded_channel_reader, ( &args.entropy_source, &args.signer_provider, &provided_channel_type_features(&args.config), ), )?; + let logger = WithChannelContext::from(&args.logger, &channel.context, None); let channel_id = channel.context.channel_id(); channel_id_set.insert(channel_id); - if let Some(ref mut monitor) = args.channel_monitors.get_mut(&channel_id) { + if channel.get_cur_holder_commitment_transaction_number() + > monitor.get_cur_holder_commitment_number() + || channel.get_revoked_counterparty_commitment_transaction_number() + > monitor.get_min_seen_secret() + || channel.get_cur_counterparty_commitment_transaction_number() + > monitor.get_cur_counterparty_commitment_number() + || channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() + { + // But if the channel is behind of the monitor, close the channel: + log_error!( + logger, + "A ChannelManager is stale compared to the current ChannelMonitor!" + ); + log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); + if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() { + log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", + &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); + } if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() - || channel.get_revoked_counterparty_commitment_transaction_number() - > monitor.get_min_seen_secret() - || channel.get_cur_counterparty_commitment_transaction_number() - > monitor.get_cur_counterparty_commitment_number() - || channel.context.get_latest_monitor_update_id() - < monitor.get_latest_update_id() { - // But if the channel is behind of the monitor, close the channel: - log_error!( - logger, - "A ChannelManager is stale compared to the current ChannelMonitor!" 
- ); - log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); - if channel.context.get_latest_monitor_update_id() - < monitor.get_latest_update_id() - { - log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", - &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); - } - if channel.get_cur_holder_commitment_transaction_number() - > monitor.get_cur_holder_commitment_number() - { - log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.", + log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.", &channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number()); - } - if channel.get_revoked_counterparty_commitment_transaction_number() - > monitor.get_min_seen_secret() - { - log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.", + } + if channel.get_revoked_counterparty_commitment_transaction_number() + > monitor.get_min_seen_secret() + { + log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.", &channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number()); - } - if channel.get_cur_counterparty_commitment_transaction_number() - > monitor.get_cur_counterparty_commitment_number() - { - log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.", + } + if channel.get_cur_counterparty_commitment_transaction_number() + > monitor.get_cur_counterparty_commitment_number() + { + log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.", &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number()); - } - let shutdown_result = - channel.force_shutdown(ClosureReason::OutdatedChannelManager); - if shutdown_result.unbroadcasted_batch_funding_txid.is_some() { - return Err(DecodeError::InvalidValue); - } - if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = - shutdown_result.monitor_update - { - // Our channel information is out of sync with the `ChannelMonitor`, so - // force the update to use the `ChannelMonitor`'s update_id for the close - // update. 
- let latest_update_id = monitor.get_latest_update_id().saturating_add(1); - update.update_id = latest_update_id; - per_peer_state - .entry(counterparty_node_id) - .or_insert_with(|| Mutex::new(empty_peer_state())) - .lock() - .unwrap() - .closed_channel_monitor_update_ids - .entry(channel_id) - .and_modify(|v| *v = cmp::max(latest_update_id, *v)) - .or_insert(latest_update_id); - - close_background_events.push( - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, - funding_txo, - channel_id, - update, - }, - ); - } - for (source, hash, cp_id, chan_id) in shutdown_result.dropped_outbound_htlcs { - let reason = LocalHTLCFailureReason::ChannelClosed; - failed_htlcs.push((source, hash, cp_id, chan_id, reason, None)); - } - channel_closures.push_back(( - events::Event::ChannelClosed { - channel_id: channel.context.channel_id(), - user_channel_id: channel.context.get_user_id(), - reason: ClosureReason::OutdatedChannelManager, - counterparty_node_id: Some(channel.context.get_counterparty_node_id()), - channel_capacity_sats: Some(channel.funding.get_value_satoshis()), - channel_funding_txo: channel.funding.get_funding_txo(), - last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()), - }, - None, - )); - for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() { - let mut found_htlc = false; - for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() { - if *channel_htlc_source == monitor_htlc_source { - found_htlc = true; - break; - } - } - if !found_htlc { - // If we have some HTLCs in the channel which are not present in the newer - // ChannelMonitor, they have been removed and should be failed back to - // ensure we don't forget them entirely. Note that if the missing HTLC(s) - // were actually claimed we'd have generated and ensured the previous-hop - // claim update ChannelMonitor updates were persisted prior to persising - // the ChannelMonitor update for the forward leg, so attempting to fail the - // backwards leg of the HTLC will simply be rejected. 
- let logger = WithChannelContext::from( - &args.logger, - &channel.context, - Some(*payment_hash), - ); - log_info!(logger, - "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager", - &channel.context.channel_id(), &payment_hash); - failed_htlcs.push(( - channel_htlc_source.clone(), - *payment_hash, - channel.context.get_counterparty_node_id(), - channel.context.channel_id(), - LocalHTLCFailureReason::ChannelClosed, - None, - )); - } - } - } else { - channel.on_startup_drop_completed_blocked_mon_updates_through( - &logger, - monitor.get_latest_update_id(), - ); - log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates", - &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(), - monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending()); - if let Some(short_channel_id) = channel.funding.get_short_channel_id() { - short_to_chan_info.insert( - short_channel_id, - ( - channel.context.get_counterparty_node_id(), - channel.context.channel_id(), - ), - ); - } - - for short_channel_id in channel.context.historical_scids() { - let cp_id = channel.context.get_counterparty_node_id(); - let chan_id = channel.context.channel_id(); - short_to_chan_info.insert(*short_channel_id, (cp_id, chan_id)); - } - + } + let shutdown_result = channel.force_shutdown(ClosureReason::OutdatedChannelManager); + if shutdown_result.unbroadcasted_batch_funding_txid.is_some() { + return Err(DecodeError::InvalidValue); + } + if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = + shutdown_result.monitor_update + { + // Our channel information is out of sync with the `ChannelMonitor`, so + // force the update to use the `ChannelMonitor`'s update_id for the close + // update. + let latest_update_id = monitor.get_latest_update_id().saturating_add(1); + update.update_id = latest_update_id; per_peer_state - .entry(channel.context.get_counterparty_node_id()) + .entry(counterparty_node_id) .or_insert_with(|| Mutex::new(empty_peer_state())) - .get_mut() + .lock() .unwrap() - .channel_by_id - .insert(channel.context.channel_id(), Channel::from(channel)); + .closed_channel_monitor_update_ids + .entry(channel_id) + .and_modify(|v| *v = cmp::max(latest_update_id, *v)) + .or_insert(latest_update_id); + + close_background_events.push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id, + funding_txo, + channel_id, + update, + }, + ); + } + for (source, hash, cp_id, chan_id) in shutdown_result.dropped_outbound_htlcs { + let reason = LocalHTLCFailureReason::ChannelClosed; + failed_htlcs.push((source, hash, cp_id, chan_id, reason, None)); } - } else if channel.is_awaiting_initial_mon_persist() { - // If we were persisted and shut down while the initial ChannelMonitor persistence - // was in-progress, we never broadcasted the funding transaction and can still - // safely discard the channel. 
channel_closures.push_back(( events::Event::ChannelClosed { channel_id: channel.context.channel_id(), user_channel_id: channel.context.get_user_id(), - reason: ClosureReason::DisconnectedPeer, + reason: ClosureReason::OutdatedChannelManager, counterparty_node_id: Some(channel.context.get_counterparty_node_id()), channel_capacity_sats: Some(channel.funding.get_value_satoshis()), channel_funding_txo: channel.funding.get_funding_txo(), @@ -16950,20 +16884,68 @@ where }, None, )); + for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() { + let mut found_htlc = false; + for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() { + if *channel_htlc_source == monitor_htlc_source { + found_htlc = true; + break; + } + } + if !found_htlc { + // If we have some HTLCs in the channel which are not present in the newer + // ChannelMonitor, they have been removed and should be failed back to + // ensure we don't forget them entirely. Note that if the missing HTLC(s) + // were actually claimed we'd have generated and ensured the previous-hop + // claim update ChannelMonitor updates were persisted prior to persising + // the ChannelMonitor update for the forward leg, so attempting to fail the + // backwards leg of the HTLC will simply be rejected. + let logger = WithChannelContext::from( + &args.logger, + &channel.context, + Some(*payment_hash), + ); + log_info!(logger, + "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager", + &channel.context.channel_id(), &payment_hash); + failed_htlcs.push(( + channel_htlc_source.clone(), + *payment_hash, + channel.context.get_counterparty_node_id(), + channel.context.channel_id(), + LocalHTLCFailureReason::ChannelClosed, + None, + )); + } + } } else { - log_error!( - logger, - "Missing ChannelMonitor for channel {} needed by ChannelManager.", - &channel.context.channel_id() - ); - log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); - log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); - log_error!( - logger, - " Without the ChannelMonitor we cannot continue without risking funds." 
+ channel.on_startup_drop_completed_blocked_mon_updates_through( + &logger, + monitor.get_latest_update_id(), ); - log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); - return Err(DecodeError::InvalidValue); + log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates", + &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(), + monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending()); + if let Some(short_channel_id) = channel.funding.get_short_channel_id() { + short_to_chan_info.insert( + short_channel_id, + (channel.context.get_counterparty_node_id(), channel.context.channel_id()), + ); + } + + for short_channel_id in channel.context.historical_scids() { + let cp_id = channel.context.get_counterparty_node_id(); + let chan_id = channel.context.channel_id(); + short_to_chan_info.insert(*short_channel_id, (cp_id, chan_id)); + } + + per_peer_state + .entry(channel.context.get_counterparty_node_id()) + .or_insert_with(|| Mutex::new(empty_peer_state())) + .get_mut() + .unwrap() + .channel_by_id + .insert(channel.context.channel_id(), Channel::from(channel)); } } @@ -17014,6 +16996,7 @@ where should_broadcast: true, }], channel_id: Some(monitor.channel_id()), + encoded_channel: None, }; let funding_txo = monitor.get_funding_txo(); let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { @@ -17645,6 +17628,7 @@ where updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id, }], + encoded_channel: None, }, }); } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 876324737a6..f556107a6ee 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -9843,6 +9843,7 @@ fn do_test_multi_post_event_actions(do_reload: bool) { check_added_monitors(&nodes[0], 3); } +#[cfg(not(feature = "safe_channels"))] #[xtest(feature = "_externalize_tests")] pub fn test_multi_post_event_actions() { do_test_multi_post_event_actions(true); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 5a287585680..fed15dd98f3 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -2388,6 +2388,7 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool } } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_restored_packages_retry() { do_test_restored_packages_retry(false); @@ -3023,6 +3024,7 @@ fn do_test_anchors_aggregated_revoked_htlc_tx(p2a_anchor: bool) { assert_eq!(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[]).len(), 6); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_anchors_aggregated_revoked_htlc_tx() { do_test_anchors_aggregated_revoked_htlc_tx(false); @@ -3116,6 +3118,7 @@ fn do_test_anchors_monitor_fixes_counterparty_payment_script_on_reload(confirm_c } } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_anchors_monitor_fixes_counterparty_payment_script_on_reload() { do_test_anchors_monitor_fixes_counterparty_payment_script_on_reload(false); @@ -3343,6 +3346,7 @@ fn test_update_replay_panics() { monitor.update_monitor(&updates[3], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap(); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_claim_event_never_handled() { // When a payment is claimed, the `ChannelMonitorUpdate` containing the payment 
preimage goes @@ -3570,6 +3574,7 @@ fn do_test_lost_preimage_monitor_events(on_counterparty_tx: bool, p2a_anchor: bo expect_payment_sent(&nodes[0], preimage_a, None, true, true); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_lost_preimage_monitor_events() { do_test_lost_preimage_monitor_events(true, false); @@ -3815,6 +3820,7 @@ fn do_test_lost_timeout_monitor_events(confirm_tx: CommitmentType, dust_htlcs: b expect_payment_failed!(nodes[0], hash_a, false); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_lost_timeout_monitor_events() { do_test_lost_timeout_monitor_events(CommitmentType::RevokedCounterparty, false, false); diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 3a6965c6646..6c3f6b17058 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -2430,6 +2430,7 @@ fn rejects_keysend_to_non_static_invoice_path() { expect_payment_failed_conditions(&nodes[0], payment_hash, true, PaymentFailedConditions::new()); } +#[cfg(not(feature = "safe_channels"))] #[test] fn no_double_pay_with_stale_channelmanager() { // This tests the following bug: diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index f4cfb9eda00..ae5eadd5416 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -1795,6 +1795,7 @@ fn do_test_onion_failure_stale_channel_update(announce_for_forwarding: bool) { assert_eq!(config, config_after_restart); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_onion_failure_stale_channel_update() { do_test_onion_failure_stale_channel_update(false); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 9eb85173a83..16175693ad3 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -997,6 +997,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { expect_payment_sent!(nodes[0], payment_preimage, Some(new_route.paths[0].hops[0].fee_msat)); } +#[cfg(not(feature = "safe_channels"))] #[test] fn retry_with_no_persist() { do_retry_with_no_persist(true); @@ -1225,6 +1226,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_completed_payment_not_retryable_on_reload() { do_test_completed_payment_not_retryable_on_reload(true); @@ -1402,6 +1404,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload( assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_dup_htlc_onchain_doesnt_fail_on_reload() { do_test_dup_htlc_onchain_doesnt_fail_on_reload(true, true, true, true); @@ -1415,6 +1418,7 @@ fn test_dup_htlc_onchain_doesnt_fail_on_reload() { do_test_dup_htlc_onchain_doesnt_fail_on_reload(false, false, false, false); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_fulfill_restart_failure() { // When we receive an update_fulfill_htlc message, we immediately consider the HTLC fully @@ -4194,12 +4198,14 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: assert!(events.is_empty()); } +#[cfg(not(feature = "safe_channels"))] #[test] fn no_missing_sent_on_midpoint_reload() { do_no_missing_sent_on_reload(false, true); do_no_missing_sent_on_reload(true, true); } +#[cfg(not(feature = "safe_channels"))] #[test] fn no_missing_sent_on_reload() { do_no_missing_sent_on_reload(false, false); @@ -4908,6 
+4914,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { } } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_payment_metadata_consistency() { do_test_payment_metadata_consistency(true, true); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index f19ca89097e..b5a2c1fda3d 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -30,6 +30,7 @@ use crate::prelude::*; use crate::ln::functional_test_utils::*; +#[cfg(not(feature = "safe_channels"))] #[test] fn test_priv_forwarding_rejection() { // If we have a private channel with outbound liquidity, and diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs index 7a5d35b12a5..3155bad7804 100644 --- a/lightning/src/ln/quiescence_tests.rs +++ b/lightning/src/ln/quiescence_tests.rs @@ -701,6 +701,7 @@ fn test_quiescence_during_disconnection() { do_test_quiescence_during_disconnection(true, true); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_quiescence_termination_on_disconnect() { do_test_quiescence_termination_on_disconnect(false); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 8c9e552fa04..c95d1abda61 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -190,6 +190,7 @@ fn test_funding_peer_disconnect() { reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_no_txn_manager_serialize_deserialize() { let chanmon_cfgs = create_chanmon_cfgs(2); @@ -235,6 +236,7 @@ fn test_no_txn_manager_serialize_deserialize() { send_payment(&nodes[0], &[&nodes[1]], 1000000); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_manager_serialize_deserialize_events() { // This test makes sure the events field in ChannelManager survives de/serialization @@ -357,6 +359,7 @@ fn test_simple_manager_serialize_deserialize() { claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_manager_serialize_deserialize_inconsistent_monitor() { // Test deserializing a ChannelManager with an out-of-date ChannelMonitor @@ -705,7 +708,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, } #[test] -#[cfg(feature = "std")] +#[cfg(all(feature = "std", not(feature = "safe_channels")))] fn test_data_loss_protect() { do_test_data_loss_protect(true, false, true); do_test_data_loss_protect(true, true, false); @@ -911,6 +914,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest } } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_partial_claim_before_restart() { do_test_partial_claim_before_restart(false, false); @@ -1081,6 +1085,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht } } +#[cfg(not(feature = "safe_channels"))] #[test] fn forwarded_payment_no_manager_persistence() { do_forwarded_payment_no_manager_persistence(true, true, false); @@ -1088,6 +1093,7 @@ fn forwarded_payment_no_manager_persistence() { do_forwarded_payment_no_manager_persistence(false, false, false); } +#[cfg(not(feature = "safe_channels"))] #[test] fn intercepted_payment_no_manager_persistence() { do_forwarded_payment_no_manager_persistence(true, true, true); @@ -1095,6 +1101,7 @@ fn intercepted_payment_no_manager_persistence() { do_forwarded_payment_no_manager_persistence(false, false, true); } +#[cfg(not(feature = 
"safe_channels"))] #[test] fn removed_payment_no_manager_persistence() { // If an HTLC is failed to us on a channel, and the ChannelMonitor persistence completes, but @@ -1173,6 +1180,7 @@ fn removed_payment_no_manager_persistence() { expect_payment_failed!(nodes[0], payment_hash, false); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_reload_partial_funding_batch() { let chanmon_cfgs = create_chanmon_cfgs(3); @@ -1236,6 +1244,7 @@ fn test_reload_partial_funding_batch() { assert!(nodes[0].node.list_channels().is_empty()); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_htlc_localremoved_persistence() { // Tests that if we fail an htlc back (update_fail_htlc message) and then restart the node, the node will resend the @@ -1419,4 +1428,3 @@ fn test_peer_storage() { let res = std::panic::catch_unwind(|| drop(nodes)); assert!(res.is_err()); } - diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index ede6acfb639..3f0128f9245 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -410,6 +410,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ send_payment(&nodes[0], &[&nodes[1]], 8000000); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_unconf_chan() { do_test_unconf_chan(true, true, false, ConnectStyle::BestBlockFirstSkippingBlocks); @@ -423,6 +424,7 @@ fn test_unconf_chan() { do_test_unconf_chan(false, false, false, ConnectStyle::BestBlockFirstReorgsOnlyTip); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_unconf_chan_via_listen() { do_test_unconf_chan(true, true, false, ConnectStyle::FullBlockViaListen); @@ -431,6 +433,7 @@ fn test_unconf_chan_via_listen() { do_test_unconf_chan(false, false, false, ConnectStyle::FullBlockViaListen); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_unconf_chan_via_funding_unconfirmed() { do_test_unconf_chan(true, true, true, ConnectStyle::BestBlockFirstSkippingBlocks); diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index f95883e9434..44ec2d961de 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -383,6 +383,7 @@ pub fn lock_splice<'a, 'b, 'c, 'd>( node_b.chain_source.remove_watched_txn_and_outputs(prev_funding_outpoint, prev_funding_script); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_splice_state_reset_on_disconnect() { do_test_splice_state_reset_on_disconnect(false); @@ -923,6 +924,7 @@ fn do_test_splice_commitment_broadcast(splice_status: SpliceStatus, claim_htlcs: } } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_splice_reestablish() { do_test_splice_reestablish(false, false); @@ -1186,6 +1188,7 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { .remove_watched_txn_and_outputs(prev_funding_outpoint, prev_funding_script); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_propose_splice_while_disconnected() { do_test_propose_splice_while_disconnected(false, false);