Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
57 changes: 44 additions & 13 deletions lightning/src/chain/channelmonitor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1059,11 +1059,21 @@ impl Readable for IrrevocablyResolvedHTLC {
/// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date
/// information and are actively monitoring the chain.
///
/// Note that the deserializer is only implemented for (BlockHash, ChannelMonitor), which
/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
/// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the
/// returned block hash and the current chain and then reconnecting blocks to get to the
/// best chain) upon deserializing the object!
/// Like the [`ChannelManager`], deserialization is implemented for `(BlockHash, ChannelMonitor)`,
/// providing you with the last block hash which was connected before shutting down. You must begin
/// syncing the chain from that point, disconnecting and connecting blocks as required to get to
/// the best chain on startup. Note that all [`ChannelMonitor`]s passed to a [`ChainMonitor`] must
/// be synced as of the same block, so syncing must happen prior to [`ChainMonitor`]
/// initialization.
///
/// For those loading potentially-ancient [`ChannelMonitor`]s, deserialization is also implemented
/// for `Option<(BlockHash, ChannelMonitor)>`. LDK can no longer deserialize a [`ChannelMonitor`]
/// that was first created in LDK prior to 0.0.110 and last updated prior to LDK 0.0.119. In such
/// cases, the `Option<(..)>` deserialization may return `Ok(None)` rather than failing to
/// deserialize, allowing you to differentiate between the two cases.
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
pub struct ChannelMonitor<Signer: EcdsaChannelSigner> {
pub(crate) inner: Mutex<ChannelMonitorImpl<Signer>>,
}
Expand Down Expand Up @@ -6251,6 +6261,18 @@ const MAX_ALLOC_SIZE: usize = 64 * 1024;

impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)>
for (BlockHash, ChannelMonitor<SP::EcdsaSigner>)
{
fn read<R: io::Read>(reader: &mut R, args: (&'a ES, &'b SP)) -> Result<Self, DecodeError> {
match <Option<Self>>::read(reader, args) {
Ok(Some(res)) => Ok(res),
Ok(None) => Err(DecodeError::UnknownRequiredFeature),
Err(e) => Err(e),
}
}
}

impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)>
for Option<(BlockHash, ChannelMonitor<SP::EcdsaSigner>)>
{
#[rustfmt::skip]
fn read<R: io::Read>(reader: &mut R, args: (&'a ES, &'b SP)) -> Result<Self, DecodeError> {
Expand Down Expand Up @@ -6528,11 +6550,6 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
}

let channel_id = channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint));
if counterparty_node_id.is_none() {
panic!("Found monitor for channel {} with no updates since v0.0.118.\
These monitors are no longer supported.\
To continue, run a v0.1 release, send/route a payment over the channel or close it.", channel_id);
}

let (current_holder_commitment_tx, current_holder_htlc_data) = {
let holder_commitment_tx = onchain_tx_handler.current_holder_commitment_tx();
Expand Down Expand Up @@ -6586,7 +6603,8 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
(None, None)
};

Ok((best_block.block_hash, ChannelMonitor::from_impl(ChannelMonitorImpl {
let dummy_node_id = PublicKey::from_slice(&[2; 33]).unwrap();
let monitor = ChannelMonitor::from_impl(ChannelMonitorImpl {
funding: FundingScope {
channel_parameters,

Expand Down Expand Up @@ -6646,7 +6664,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
spendable_txids_confirmed: spendable_txids_confirmed.unwrap(),

best_block,
counterparty_node_id: counterparty_node_id.unwrap(),
counterparty_node_id: counterparty_node_id.unwrap_or(dummy_node_id),
initial_counterparty_commitment_info,
initial_counterparty_commitment_tx,
balances_empty_height,
Expand All @@ -6658,7 +6676,20 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
alternative_funding_confirmed,

written_by_0_1_or_later,
})))
});

if counterparty_node_id.is_none() {
if (holder_tx_signed || lockdown_from_offchain) && monitor.get_claimable_balances().is_empty() {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do we actually need to bother checking for (holder_tx_signed || lockdown_from_offchain)?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Probably not but it seemed like a nice additional check to include cause I'm not at all confident in the balance set for really old monitors.

// If the monitor is no longer readable, but it is a candidate for archiving,
// return Ok(None) to allow it to be skipped and not loaded.
return Ok(None);
} else {
panic!("Found monitor for channel {channel_id} with no updates since v0.0.118. \
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This mentions 0.0.118 but the commit mentions 0.0.116

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Oh, that is from 601bf4b. I'll fix but looks like it was actually in 0.0.119

These monitors are no longer supported. \
To continue, run a v0.1 release, send/route a payment over the channel or close it.");
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hm, so what if the monitor is already closed and you still have balances to claim? Is there any way you can realistically start up? I guess this would be pretty unlikely, unless you have an outbound payment you still need to time out on chain.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No, you can't without manually deleting the monitor from your store. It really sucks and I'm not really happy with the state of things but it seems a bit late to walk this back :/

}
}
Ok(Some((best_block.block_hash, monitor)))
}
}

Expand Down
63 changes: 48 additions & 15 deletions lightning/src/util/persist.rs
Original file line number Diff line number Diff line change
Expand Up @@ -372,15 +372,15 @@ where
CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
)? {
match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
match <Option<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>>::read(
&mut io::Cursor::new(kv_store.read(
CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
&stored_key,
)?),
(&*entropy_source, &*signer_provider),
) {
Ok((block_hash, channel_monitor)) => {
Ok(Some((block_hash, channel_monitor))) => {
let monitor_name = MonitorName::from_str(&stored_key)?;
if channel_monitor.persistence_key() != monitor_name {
return Err(io::Error::new(
Expand All @@ -391,6 +391,7 @@ where

res.push((block_hash, channel_monitor));
},
Ok(None) => {},
Err(_) => {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
Expand Down Expand Up @@ -783,9 +784,12 @@ where
let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE;
let monitor_list = self.0.kv_store.list(primary, secondary).await?;
let mut res = Vec::with_capacity(monitor_list.len());
// TODO: Parallelize this loop
for monitor_key in monitor_list {
res.push(self.read_channel_monitor_with_updates(monitor_key.as_str()).await?)
let result =
self.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await?;
if let Some(read_res) = result {
res.push(read_res);
}
}
Ok(res)
}
Expand Down Expand Up @@ -923,8 +927,29 @@ where
&self, monitor_key: &str,
) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error>
{
match self.maybe_read_channel_monitor_with_updates(monitor_key).await? {
Some(res) => Ok(res),
None => Err(io::Error::new(
io::ErrorKind::InvalidData,
"ChannelMonitor was stale, with no updates since LDK 0.0.118. \
It cannot be read by modern versions of LDK, though also does not contain any funds left to sweep. \
You should manually delete it instead",
Comment on lines +934 to +936
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Might be worth including the monitor_key given this is called indirectly from read_all_channel_monitors_with_updates.

)),
}
}

async fn maybe_read_channel_monitor_with_updates(
&self, monitor_key: &str,
) -> Result<
Option<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>,
io::Error,
> {
let monitor_name = MonitorName::from_str(monitor_key)?;
let (block_hash, monitor) = self.read_monitor(&monitor_name, monitor_key).await?;
let read_res = self.maybe_read_monitor(&monitor_name, monitor_key).await?;
let (block_hash, monitor) = match read_res {
Some(res) => res,
None => return Ok(None),
};
let mut current_update_id = monitor.get_latest_update_id();
// TODO: Parallelize this loop by speculatively reading a batch of updates
loop {
Expand Down Expand Up @@ -955,14 +980,16 @@ where
io::Error::new(io::ErrorKind::Other, "Monitor update failed")
})?;
}
Ok((block_hash, monitor))
Ok(Some((block_hash, monitor)))
}

/// Read a channel monitor.
async fn read_monitor(
async fn maybe_read_monitor(
&self, monitor_name: &MonitorName, monitor_key: &str,
) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error>
{
) -> Result<
Option<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>,
io::Error,
> {
let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE;
let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE;
let monitor_bytes = self.kv_store.read(primary, secondary, monitor_key).await?;
Expand All @@ -971,11 +998,12 @@ where
if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
}
match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
match <Option<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>>::read(
&mut monitor_cursor,
(&*self.entropy_source, &*self.signer_provider),
) {
Ok((blockhash, channel_monitor)) => {
Ok(None) => Ok(None),
Ok(Some((blockhash, channel_monitor))) => {
if channel_monitor.persistence_key() != *monitor_name {
log_error!(
self.logger,
Expand All @@ -987,7 +1015,7 @@ where
"ChannelMonitor was stored under the wrong key",
))
} else {
Ok((blockhash, channel_monitor))
Ok(Some((blockhash, channel_monitor)))
}
},
Err(e) => {
Expand Down Expand Up @@ -1027,9 +1055,14 @@ where
let monitor_keys = self.kv_store.list(primary, secondary).await?;
for monitor_key in monitor_keys {
let monitor_name = MonitorName::from_str(&monitor_key)?;
let (_, current_monitor) = self.read_monitor(&monitor_name, &monitor_key).await?;
let latest_update_id = current_monitor.get_latest_update_id();
self.cleanup_stale_updates_for_monitor_to(&monitor_key, latest_update_id).await?;
let maybe_monitor = self.maybe_read_monitor(&monitor_name, &monitor_key).await?;
if let Some((_, current_monitor)) = maybe_monitor {
let latest_update_id = current_monitor.get_latest_update_id();
self.cleanup_stale_updates_for_monitor_to(&monitor_key, latest_update_id).await?;
} else {
// TODO: Also clean up super stale monitors (created pre-0.0.110 and last updated
// pre-0.0.119).
}
}
Ok(())
}
Expand Down