From b1fc759f0a6ae8dfb59f8711d978403838022127 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 7 Oct 2025 22:54:03 +0000 Subject: [PATCH] Allow stale `ChannelMonitor`s if we are sure they don't have funds In 601bf4bd8fdc37d55e571b1b3b9011a1c107afd9 we started refusing to load `ChannelMonitor`s which were created prior to LDK 0.0.110 and which were last updated prior to LDK 0.0.119. While this is likely fine for nearly all of our users, there's some risk that some (like me) have ancient un-archived `ChannelMonitor`s. We do need to start auto-archiving `ChannelMonitor`s but for now we need some way for such users to at least not fail on startup. Sadly, ancient `ChannelMonitor`s often don't contain some of the data we need to accurately calculate `Balance`s, but in cases where they do, and where there are no claimable `Balance`s, we can be confident we don't need the `ChannelMonitor`s. Thus, here, we adapt the `ChannelMonitor` `Readable` implementation used in the persistence wrappers to detect this case and simply skip loading such `ChannelMonitor`s. It's not clear if this is sufficient, but it's at least better than the current state of affairs after 601bf4bd8fdc37d55e571b1b3b9011a1c107afd9. --- lightning/src/chain/channelmonitor.rs | 57 ++++++++++++++++++------ lightning/src/util/persist.rs | 63 ++++++++++++++++++++------- 2 files changed, 92 insertions(+), 28 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 5f2bb248cd1..68256a61c25 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1059,11 +1059,21 @@ impl Readable for IrrevocablyResolvedHTLC { /// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date /// information and are actively monitoring the chain. /// -/// Note that the deserializer is only implemented for (BlockHash, ChannelMonitor), which -/// tells you the last block hash which was block_connect()ed. 
You MUST rescan any blocks along -/// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the -/// returned block hash and the the current chain and then reconnecting blocks to get to the -/// best chain) upon deserializing the object! +/// Like the [`ChannelManager`], deserialization is implemented for `(BlockHash, ChannelMonitor)`, +/// providing you with the last block hash which was connected before shutting down. You must begin +/// syncing the chain from that point, disconnecting and connecting blocks as required to get to +/// the best chain on startup. Note that all [`ChannelMonitor`]s passed to a [`ChainMonitor`] must +/// be synced as of the same block, so syncing must happen prior to [`ChainMonitor`] +/// initialization. +/// +/// For those loading potentially-ancient [`ChannelMonitor`]s, deserialization is also implemented +/// for `Option<(BlockHash, ChannelMonitor)>`. LDK can no longer deserialize a [`ChannelMonitor`] +/// that was first created in LDK prior to 0.0.110 and last updated prior to LDK 0.0.119. In such +/// cases, the `Option<(..)>` deserialization option may return `Ok(None)` rather than failing to +/// deserialize, allowing you to differentiate between the two cases. 
+/// +/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager +/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor pub struct ChannelMonitor { pub(crate) inner: Mutex>, } @@ -6251,6 +6261,18 @@ const MAX_ALLOC_SIZE: usize = 64 * 1024; impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)> for (BlockHash, ChannelMonitor) +{ + fn read(reader: &mut R, args: (&'a ES, &'b SP)) -> Result { + match >::read(reader, args) { + Ok(Some(res)) => Ok(res), + Ok(None) => Err(DecodeError::UnknownRequiredFeature), + Err(e) => Err(e), + } + } +} + +impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)> + for Option<(BlockHash, ChannelMonitor)> { #[rustfmt::skip] fn read(reader: &mut R, args: (&'a ES, &'b SP)) -> Result { @@ -6528,11 +6550,6 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP } let channel_id = channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint)); - if counterparty_node_id.is_none() { - panic!("Found monitor for channel {} with no updates since v0.0.118.\ - These monitors are no longer supported.\ - To continue, run a v0.1 release, send/route a payment over the channel or close it.", channel_id); - } let (current_holder_commitment_tx, current_holder_htlc_data) = { let holder_commitment_tx = onchain_tx_handler.current_holder_commitment_tx(); @@ -6586,7 +6603,8 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP (None, None) }; - Ok((best_block.block_hash, ChannelMonitor::from_impl(ChannelMonitorImpl { + let dummy_node_id = PublicKey::from_slice(&[2; 33]).unwrap(); + let monitor = ChannelMonitor::from_impl(ChannelMonitorImpl { funding: FundingScope { channel_parameters, @@ -6646,7 +6664,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP spendable_txids_confirmed: spendable_txids_confirmed.unwrap(), best_block, - counterparty_node_id: counterparty_node_id.unwrap(), + 
counterparty_node_id: counterparty_node_id.unwrap_or(dummy_node_id), initial_counterparty_commitment_info, initial_counterparty_commitment_tx, balances_empty_height, @@ -6658,7 +6676,20 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP alternative_funding_confirmed, written_by_0_1_or_later, - }))) + }); + + if counterparty_node_id.is_none() { + if (holder_tx_signed || lockdown_from_offchain) && monitor.get_claimable_balances().is_empty() { + // If the monitor is no longer readable, but it is a candidate for archiving, + // return Ok(None) to allow it to be skipped and not loaded. + return Ok(None); + } else { + panic!("Found monitor for channel {channel_id} with no updates since v0.0.118. \ + These monitors are no longer supported. \ + To continue, run a v0.1 release, send/route a payment over the channel or close it."); + } + } + Ok(Some((best_block.block_hash, monitor))) } } diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index ce7c8311813..0b4ba190740 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -372,7 +372,7 @@ where CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, )? 
{ - match <(BlockHash, ChannelMonitor<::EcdsaSigner>)>::read( + match ::EcdsaSigner>)>>::read( &mut io::Cursor::new(kv_store.read( CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, @@ -380,7 +380,7 @@ where )?), (&*entropy_source, &*signer_provider), ) { - Ok((block_hash, channel_monitor)) => { + Ok(Some((block_hash, channel_monitor))) => { let monitor_name = MonitorName::from_str(&stored_key)?; if channel_monitor.persistence_key() != monitor_name { return Err(io::Error::new( @@ -391,6 +391,7 @@ where res.push((block_hash, channel_monitor)); }, + Ok(None) => {}, Err(_) => { return Err(io::Error::new( io::ErrorKind::InvalidData, @@ -783,9 +784,12 @@ where let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; let monitor_list = self.0.kv_store.list(primary, secondary).await?; let mut res = Vec::with_capacity(monitor_list.len()); - // TODO: Parallelize this loop for monitor_key in monitor_list { - res.push(self.read_channel_monitor_with_updates(monitor_key.as_str()).await?) + let result = + self.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await?; + if let Some(read_res) = result { + res.push(read_res); + } } Ok(res) } @@ -923,8 +927,29 @@ where &self, monitor_key: &str, ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> { + match self.maybe_read_channel_monitor_with_updates(monitor_key).await? { + Some(res) => Ok(res), + None => Err(io::Error::new( + io::ErrorKind::InvalidData, + "ChannelMonitor was stale, with no updates since LDK 0.0.118. \ + It cannot be read by modern versions of LDK, though also does not contain any funds left to sweep. 
\ + You should manually delete it instead", + )), + } + } + + async fn maybe_read_channel_monitor_with_updates( + &self, monitor_key: &str, + ) -> Result< + Option<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, + io::Error, + > { let monitor_name = MonitorName::from_str(monitor_key)?; - let (block_hash, monitor) = self.read_monitor(&monitor_name, monitor_key).await?; + let read_res = self.maybe_read_monitor(&monitor_name, monitor_key).await?; + let (block_hash, monitor) = match read_res { + Some(res) => res, + None => return Ok(None), + }; let mut current_update_id = monitor.get_latest_update_id(); // TODO: Parallelize this loop by speculatively reading a batch of updates loop { @@ -955,14 +980,16 @@ where io::Error::new(io::ErrorKind::Other, "Monitor update failed") })?; } - Ok((block_hash, monitor)) + Ok(Some((block_hash, monitor))) } /// Read a channel monitor. - async fn read_monitor( + async fn maybe_read_monitor( &self, monitor_name: &MonitorName, monitor_key: &str, - ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> - { + ) -> Result< + Option<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, + io::Error, + > { let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; let monitor_bytes = self.kv_store.read(primary, secondary, monitor_key).await?; @@ -971,11 +998,12 @@ where if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) { monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64); } - match <(BlockHash, ChannelMonitor<::EcdsaSigner>)>::read( + match ::EcdsaSigner>)>>::read( &mut monitor_cursor, (&*self.entropy_source, &*self.signer_provider), ) { - Ok((blockhash, channel_monitor)) => { + Ok(None) => Ok(None), + Ok(Some((blockhash, channel_monitor))) => { if channel_monitor.persistence_key() != *monitor_name { log_error!( self.logger, @@ -987,7 +1015,7 @@ where "ChannelMonitor was stored under the wrong key", )) } else 
{ - Ok((blockhash, channel_monitor)) + Ok(Some((blockhash, channel_monitor))) } }, Err(e) => { @@ -1027,9 +1055,14 @@ where let monitor_keys = self.kv_store.list(primary, secondary).await?; for monitor_key in monitor_keys { let monitor_name = MonitorName::from_str(&monitor_key)?; - let (_, current_monitor) = self.read_monitor(&monitor_name, &monitor_key).await?; - let latest_update_id = current_monitor.get_latest_update_id(); - self.cleanup_stale_updates_for_monitor_to(&monitor_key, latest_update_id).await?; + let maybe_monitor = self.maybe_read_monitor(&monitor_name, &monitor_key).await?; + if let Some((_, current_monitor)) = maybe_monitor { + let latest_update_id = current_monitor.get_latest_update_id(); + self.cleanup_stale_updates_for_monitor_to(&monitor_key, latest_update_id).await?; + } else { + // TODO: Also clean up super stale monitors (created pre-0.0.110 and last updated + // pre-0.0.119). + } } Ok(()) }