From a1d2a957ed01a2aa8c7fe172c8713230418438b2 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Fri, 24 Oct 2025 13:00:49 +0000
Subject: [PATCH 1/4] Be a bit more verbose in the `_actions_deferred` mon upd handler

In 99df17e3654c37006aa2b630a4f8341669be845f we moved the "call style"
argument out of `handle_new_monitor_update`, making each an individual
macro instead. For the `REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER`
call style, we renamed the macro
`handle_new_monitor_update_actions_deferred`.

That name doesn't really capture the requirements of that
incredibly-awkward macro - namely that the actions need to be processed
by the caller (rather than just being "deferred" and handled in some
automated way later) and that the macro really should only be used when
the callsite needs the peer state locks to remain locked, rather than
being able to drop them to handle post-update actions.

Here we rename it to the (mouthful)
`handle_new_monitor_update_locked_actions_handled_by_caller`. Luckily
it's only used in two places.
---
 lightning/src/ln/channelmanager.rs | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 22efceef69a..73d98b205b6 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -3288,8 +3288,9 @@ macro_rules! locked_close_channel {
 	}};
 	($self: ident, $peer_state: expr, $funded_chan: expr, $shutdown_res_mut: expr, FUNDED) => {{
 		if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() {
-			handle_new_monitor_update_actions_deferred!($self, funding_txo, update, $peer_state,
-				$funded_chan.context);
+			handle_new_monitor_update_locked_actions_handled_by_caller!(
+				$self, funding_txo, update, $peer_state, $funded_chan.context
+			);
 		}
 		// If there's a possibility that we need to generate further monitor updates for this
 		// channel, we need to store the last update_id of it. However, we don't want to insert
@@ -3741,7 +3742,7 @@ macro_rules! handle_post_close_monitor_update {
 /// drop the aforementioned peer state locks at a given callsite. In this situation, use this macro
 /// to apply the monitor update immediately and handle the monitor update completion actions at a
 /// later time.
-macro_rules! handle_new_monitor_update_actions_deferred {
+macro_rules! handle_new_monitor_update_locked_actions_handled_by_caller {
 	(
 		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr
 	) => {{
@@ -14360,7 +14361,7 @@ where
 			insert_short_channel_id!(short_to_chan_info, funded_channel);
 
 			if let Some(monitor_update) = monitor_update_opt {
-				handle_new_monitor_update_actions_deferred!(
+				handle_new_monitor_update_locked_actions_handled_by_caller!(
 					self,
 					funding_txo,
 					monitor_update,

From e1e39557344fb1ffff369b5f57e50187fcbd4dfc Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Fri, 24 Oct 2025 13:18:28 +0000
Subject: [PATCH 2/4] Marginally simplify `handle_new_monitor_update_internal`

It turns out the arguments to track the pushed update index and map
weren't necessary, so they are removed here.
---
 lightning/src/ln/channelmanager.rs | 58 +++++++++++-------------------
 1 file changed, 21 insertions(+), 37 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 73d98b205b6..fa1df90fa9c 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -3703,8 +3703,6 @@ macro_rules!
handle_post_close_monitor_update { ) => {{ let logger = WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None); - let in_flight_updates; - let idx; handle_new_monitor_update_internal!( $self, $funding_txo, @@ -3713,21 +3711,16 @@ macro_rules! handle_post_close_monitor_update { logger, $channel_id, $counterparty_node_id, - in_flight_updates, - idx, { - let _ = in_flight_updates.remove(idx); - if in_flight_updates.is_empty() { - let update_actions = $peer_state - .monitor_update_blocked_actions - .remove(&$channel_id) - .unwrap_or(Vec::new()); + let update_actions = $peer_state + .monitor_update_blocked_actions + .remove(&$channel_id) + .unwrap_or(Vec::new()); - mem::drop($peer_state_lock); - mem::drop($per_peer_state_lock); + mem::drop($peer_state_lock); + mem::drop($per_peer_state_lock); - $self.handle_monitor_update_completion_actions(update_actions); - } + $self.handle_monitor_update_completion_actions(update_actions); } ) }}; @@ -3749,8 +3742,6 @@ macro_rules! handle_new_monitor_update_locked_actions_handled_by_caller { let logger = WithChannelContext::from(&$self.logger, &$chan_context, None); let chan_id = $chan_context.channel_id(); let counterparty_node_id = $chan_context.get_counterparty_node_id(); - let in_flight_updates; - let idx; handle_new_monitor_update_internal!( $self, $funding_txo, @@ -3759,11 +3750,7 @@ macro_rules! handle_new_monitor_update_locked_actions_handled_by_caller { logger, chan_id, counterparty_node_id, - in_flight_updates, - idx, - { - let _ = in_flight_updates.remove(idx); - } + {} ) }}; } @@ -3771,10 +3758,9 @@ macro_rules! handle_new_monitor_update_locked_actions_handled_by_caller { macro_rules! handle_new_monitor_update_internal { ( $self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr, - $chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident, - $completed: expr + $chan_id: expr, $counterparty_node_id: expr, $all_completed: expr ) => {{ - $in_flight_updates = &mut $peer_state + let in_flight_updates = &mut $peer_state .in_flight_monitor_updates .entry($chan_id) .or_insert_with(|| ($funding_txo, Vec::new())) @@ -3782,17 +3768,20 @@ macro_rules! handle_new_monitor_update_internal { // During startup, we push monitor updates as background events through to here in // order to replay updates that were in-flight when we shut down. Thus, we have to // filter for uniqueness here. - $update_idx = - $in_flight_updates.iter().position(|upd| upd == &$update).unwrap_or_else(|| { - $in_flight_updates.push($update); - $in_flight_updates.len() - 1 + let update_idx = + in_flight_updates.iter().position(|upd| upd == &$update).unwrap_or_else(|| { + in_flight_updates.push($update); + in_flight_updates.len() - 1 }); if $self.background_events_processed_since_startup.load(Ordering::Acquire) { let update_res = - $self.chain_monitor.update_channel($chan_id, &$in_flight_updates[$update_idx]); + $self.chain_monitor.update_channel($chan_id, &in_flight_updates[update_idx]); let update_completed = handle_monitor_update_res($self, update_res, $chan_id, $logger); if update_completed { - $completed; + let _ = in_flight_updates.remove(update_idx); + if in_flight_updates.is_empty() { + $all_completed; + } } update_completed } else { @@ -3803,7 +3792,7 @@ macro_rules! 
handle_new_monitor_update_internal { counterparty_node_id: $counterparty_node_id, funding_txo: $funding_txo, channel_id: $chan_id, - update: $in_flight_updates[$update_idx].clone(), + update: in_flight_updates[update_idx].clone(), }; // We want to track the in-flight update both in `in_flight_monitor_updates` and in // `pending_background_events` to avoid a race condition during @@ -3827,8 +3816,6 @@ macro_rules! handle_new_monitor_update { let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); let chan_id = $chan.context.channel_id(); let counterparty_node_id = $chan.context.get_counterparty_node_id(); - let in_flight_updates; - let idx; handle_new_monitor_update_internal!( $self, $funding_txo, @@ -3837,11 +3824,8 @@ macro_rules! handle_new_monitor_update { logger, chan_id, counterparty_node_id, - in_flight_updates, - idx, { - let _ = in_flight_updates.remove(idx); - if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 { + if $chan.blocked_monitor_updates_pending() == 0 { handle_monitor_update_completion!( $self, $peer_state_lock, From f3478a01532981b3cf555bd1b938fea540d087d6 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 24 Oct 2025 13:59:18 +0000 Subject: [PATCH 3/4] Move `handle_new_monitor_update_internal` above macros that use it --- lightning/src/ln/channelmanager.rs | 106 ++++++++++++++--------------- 1 file changed, 53 insertions(+), 53 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index fa1df90fa9c..67d3c3c36cc 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3696,6 +3696,59 @@ macro_rules! handle_initial_monitor { }; } +macro_rules! handle_new_monitor_update_internal { + ( + $self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr, + $chan_id: expr, $counterparty_node_id: expr, $all_completed: expr + ) => {{ + let in_flight_updates = &mut $peer_state + .in_flight_monitor_updates + .entry($chan_id) + .or_insert_with(|| ($funding_txo, Vec::new())) + .1; + // During startup, we push monitor updates as background events through to here in + // order to replay updates that were in-flight when we shut down. Thus, we have to + // filter for uniqueness here. + let update_idx = + in_flight_updates.iter().position(|upd| upd == &$update).unwrap_or_else(|| { + in_flight_updates.push($update); + in_flight_updates.len() - 1 + }); + if $self.background_events_processed_since_startup.load(Ordering::Acquire) { + let update_res = + $self.chain_monitor.update_channel($chan_id, &in_flight_updates[update_idx]); + let update_completed = handle_monitor_update_res($self, update_res, $chan_id, $logger); + if update_completed { + let _ = in_flight_updates.remove(update_idx); + if in_flight_updates.is_empty() { + $all_completed; + } + } + update_completed + } else { + // We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we + // fail to persist it. This is a fairly safe assumption, however, since anything we do + // during the startup sequence should be replayed exactly if we immediately crash. 
+ let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id: $counterparty_node_id, + funding_txo: $funding_txo, + channel_id: $chan_id, + update: in_flight_updates[update_idx].clone(), + }; + // We want to track the in-flight update both in `in_flight_monitor_updates` and in + // `pending_background_events` to avoid a race condition during + // `pending_background_events` processing where we complete one + // `ChannelMonitorUpdate` (but there are more pending as background events) but we + // conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to + // run post-completion actions. + // We could work around that with some effort, but its simpler to just track updates + // twice. + $self.pending_background_events.lock().unwrap().push(event); + false + } + }}; +} + macro_rules! handle_post_close_monitor_update { ( $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, @@ -3755,59 +3808,6 @@ macro_rules! handle_new_monitor_update_locked_actions_handled_by_caller { }}; } -macro_rules! handle_new_monitor_update_internal { - ( - $self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr, - $chan_id: expr, $counterparty_node_id: expr, $all_completed: expr - ) => {{ - let in_flight_updates = &mut $peer_state - .in_flight_monitor_updates - .entry($chan_id) - .or_insert_with(|| ($funding_txo, Vec::new())) - .1; - // During startup, we push monitor updates as background events through to here in - // order to replay updates that were in-flight when we shut down. Thus, we have to - // filter for uniqueness here. - let update_idx = - in_flight_updates.iter().position(|upd| upd == &$update).unwrap_or_else(|| { - in_flight_updates.push($update); - in_flight_updates.len() - 1 - }); - if $self.background_events_processed_since_startup.load(Ordering::Acquire) { - let update_res = - $self.chain_monitor.update_channel($chan_id, &in_flight_updates[update_idx]); - let update_completed = handle_monitor_update_res($self, update_res, $chan_id, $logger); - if update_completed { - let _ = in_flight_updates.remove(update_idx); - if in_flight_updates.is_empty() { - $all_completed; - } - } - update_completed - } else { - // We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we - // fail to persist it. This is a fairly safe assumption, however, since anything we do - // during the startup sequence should be replayed exactly if we immediately crash. - let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id: $counterparty_node_id, - funding_txo: $funding_txo, - channel_id: $chan_id, - update: in_flight_updates[update_idx].clone(), - }; - // We want to track the in-flight update both in `in_flight_monitor_updates` and in - // `pending_background_events` to avoid a race condition during - // `pending_background_events` processing where we complete one - // `ChannelMonitorUpdate` (but there are more pending as background events) but we - // conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to - // run post-completion actions. - // We could work around that with some effort, but its simpler to just track updates - // twice. - $self.pending_background_events.lock().unwrap().push(event); - false - } - }}; -} - macro_rules! 
handle_new_monitor_update {
 	(
 		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,

From faf72385c523b1009d5ab63cd06ea6bec1231682 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Fri, 24 Oct 2025 14:14:17 +0000
Subject: [PATCH 4/4] Convert `handle_new_monitor_update_internal` to a function

Due to lifetime limitations, our `ChannelMonitorUpdate` handling logic
mostly lives in macros. The bulk of the code, though, can easily be
moved into an fn, which we do here, reducing the expanded size of the
`lightning` crate from 256,061 lines to 253,536 lines.
---
 lightning/src/ln/channelmanager.rs | 185 ++++++++++++++---------------
 1 file changed, 87 insertions(+), 98 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 67d3c3c36cc..f4fff82dc23 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -3696,57 +3696,54 @@ macro_rules! handle_initial_monitor {
 	};
 }
 
-macro_rules! handle_new_monitor_update_internal {
-	(
-		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
-		$chan_id: expr, $counterparty_node_id: expr, $all_completed: expr
-	) => {{
-		let in_flight_updates = &mut $peer_state
-			.in_flight_monitor_updates
-			.entry($chan_id)
-			.or_insert_with(|| ($funding_txo, Vec::new()))
-			.1;
-		// During startup, we push monitor updates as background events through to here in
-		// order to replay updates that were in-flight when we shut down. Thus, we have to
-		// filter for uniqueness here.
-		let update_idx =
-			in_flight_updates.iter().position(|upd| upd == &$update).unwrap_or_else(|| {
-				in_flight_updates.push($update);
-				in_flight_updates.len() - 1
-			});
-		if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
-			let update_res =
-				$self.chain_monitor.update_channel($chan_id, &in_flight_updates[update_idx]);
-			let update_completed = handle_monitor_update_res($self, update_res, $chan_id, $logger);
-			if update_completed {
-				let _ = in_flight_updates.remove(update_idx);
-				if in_flight_updates.is_empty() {
-					$all_completed;
-				}
-			}
-			update_completed
-		} else {
-			// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
-			// fail to persist it. This is a fairly safe assumption, however, since anything we do
-			// during the startup sequence should be replayed exactly if we immediately crash.
-			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-				counterparty_node_id: $counterparty_node_id,
-				funding_txo: $funding_txo,
-				channel_id: $chan_id,
-				update: in_flight_updates[update_idx].clone(),
-			};
-			// We want to track the in-flight update both in `in_flight_monitor_updates` and in
-			// `pending_background_events` to avoid a race condition during
-			// `pending_background_events` processing where we complete one
-			// `ChannelMonitorUpdate` (but there are more pending as background events) but we
-			// conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
-			// run post-completion actions.
-			// We could work around that with some effort, but its simpler to just track updates
-			// twice.
-			$self.pending_background_events.lock().unwrap().push(event);
-			false
-		}
-	}};
+fn handle_new_monitor_update_internal<CM: AChannelManager, LG: Logger>(
+	cm: &CM,
+	in_flight_monitor_updates: &mut BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>,
+	channel_id: ChannelId, funding_txo: OutPoint, counterparty_node_id: PublicKey,
+	new_update: ChannelMonitorUpdate, logger: LG,
+) -> (bool, bool) {
+	let in_flight_updates = &mut in_flight_monitor_updates
+		.entry(channel_id)
+		.or_insert_with(|| (funding_txo, Vec::new()))
+		.1;
+	// During startup, we push monitor updates as background events through to here in
+	// order to replay updates that were in-flight when we shut down. Thus, we have to
+	// filter for uniqueness here.
+	let update_idx =
+		in_flight_updates.iter().position(|upd| upd == &new_update).unwrap_or_else(|| {
+			in_flight_updates.push(new_update);
+			in_flight_updates.len() - 1
+		});
+
+	if cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire) {
+		let update_res =
+			cm.get_cm().chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]);
+		let update_completed = handle_monitor_update_res(cm, update_res, channel_id, logger);
+		if update_completed {
+			let _ = in_flight_updates.remove(update_idx);
+		}
+		(update_completed, update_completed && in_flight_updates.is_empty())
+	} else {
+		// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
+		// fail to persist it. This is a fairly safe assumption, however, since anything we do
+		// during the startup sequence should be replayed exactly if we immediately crash.
+		let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+			counterparty_node_id,
+			funding_txo,
+			channel_id,
+			update: in_flight_updates[update_idx].clone(),
+		};
+		// We want to track the in-flight update both in `in_flight_monitor_updates` and in
+		// `pending_background_events` to avoid a race condition during
+		// `pending_background_events` processing where we complete one
+		// `ChannelMonitorUpdate` (but there are more pending as background events) but we
+		// conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
+		// run post-completion actions.
+		// We could work around that with some effort, but its simpler to just track updates
+		// twice.
+		cm.get_cm().pending_background_events.lock().unwrap().push(event);
+		(false, false)
+	}
 }
 
 macro_rules! handle_post_close_monitor_update {
@@ -3754,28 +3751,27 @@ macro_rules!
handle_post_close_monitor_update { $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr ) => {{ - let logger = - WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None); - handle_new_monitor_update_internal!( + let (update_completed, all_updates_complete) = handle_new_monitor_update_internal( $self, - $funding_txo, - $update, - $peer_state, - logger, + &mut $peer_state.in_flight_monitor_updates, $channel_id, + $funding_txo, $counterparty_node_id, - { - let update_actions = $peer_state - .monitor_update_blocked_actions - .remove(&$channel_id) - .unwrap_or(Vec::new()); + $update, + WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None), + ); + if all_updates_complete { + let update_actions = $peer_state + .monitor_update_blocked_actions + .remove(&$channel_id) + .unwrap_or(Vec::new()); - mem::drop($peer_state_lock); - mem::drop($per_peer_state_lock); + mem::drop($peer_state_lock); + mem::drop($per_peer_state_lock); - $self.handle_monitor_update_completion_actions(update_actions); - } - ) + $self.handle_monitor_update_completion_actions(update_actions); + } + update_completed }}; } @@ -3792,19 +3788,16 @@ macro_rules! handle_new_monitor_update_locked_actions_handled_by_caller { ( $self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr ) => {{ - let logger = WithChannelContext::from(&$self.logger, &$chan_context, None); - let chan_id = $chan_context.channel_id(); - let counterparty_node_id = $chan_context.get_counterparty_node_id(); - handle_new_monitor_update_internal!( + let (update_completed, _all_updates_complete) = handle_new_monitor_update_internal( $self, + &mut $peer_state.in_flight_monitor_updates, + $chan_context.channel_id(), $funding_txo, + $chan_context.get_counterparty_node_id(), $update, - $peer_state, - logger, - chan_id, - counterparty_node_id, - {} - ) + WithChannelContext::from(&$self.logger, &$chan_context, None), + ); + update_completed }}; } @@ -3813,29 +3806,25 @@ macro_rules! handle_new_monitor_update { $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr ) => {{ - let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); - let chan_id = $chan.context.channel_id(); - let counterparty_node_id = $chan.context.get_counterparty_node_id(); - handle_new_monitor_update_internal!( + let (update_completed, all_updates_complete) = handle_new_monitor_update_internal( $self, + &mut $peer_state.in_flight_monitor_updates, + $chan.context.channel_id(), $funding_txo, + $chan.context.get_counterparty_node_id(), $update, - $peer_state, - logger, - chan_id, - counterparty_node_id, - { - if $chan.blocked_monitor_updates_pending() == 0 { - handle_monitor_update_completion!( - $self, - $peer_state_lock, - $peer_state, - $per_peer_state_lock, - $chan - ); - } - } - ) + WithChannelContext::from(&$self.logger, &$chan.context, None), + ); + if all_updates_complete && $chan.blocked_monitor_updates_pending() == 0 { + handle_monitor_update_completion!( + $self, + $peer_state_lock, + $peer_state, + $per_peer_state_lock, + $chan + ); + } + update_completed }}; }
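
As a rough guide to the shape PATCH 4/4 leaves the code in, the sketch below shows the
same pattern outside the diff context: the shared in-flight bookkeeping lives in one
plain generic function that returns an `(update_completed, all_updates_complete)` pair,
and each caller decides what to do once everything has completed. The types and the
`persist` closure here are simplified stand-ins (not the real `ChannelMonitorUpdate`,
`AChannelManager`, or chain-monitor APIs), so treat it as an illustration of the
refactor rather than code from the patch.

// Illustrative sketch only -- simplified stand-ins, not the rust-lightning types.
use std::collections::BTreeMap;

type ChannelId = u64; // stand-in for the real ChannelId newtype
type MonitorUpdate = u32; // stand-in for ChannelMonitorUpdate

/// Track `update` as in-flight for `channel_id`, attempt to persist it, and report
/// `(update_completed, all_updates_complete)` so the caller can run its own
/// completion actions, mirroring the pair returned by the new fn in the patch.
fn apply_update<F: Fn(&MonitorUpdate) -> bool>(
	in_flight: &mut BTreeMap<ChannelId, Vec<MonitorUpdate>>, channel_id: ChannelId,
	update: MonitorUpdate, persist: F,
) -> (bool, bool) {
	let updates = in_flight.entry(channel_id).or_insert_with(Vec::new);
	// De-duplicate, as the real code must when replaying updates at startup.
	let idx = updates.iter().position(|upd| *upd == update).unwrap_or_else(|| {
		updates.push(update);
		updates.len() - 1
	});
	let completed = persist(&updates[idx]);
	if completed {
		let _ = updates.remove(idx);
	}
	(completed, completed && updates.is_empty())
}

fn main() {
	let mut in_flight = BTreeMap::new();
	// A caller in the style of the rewritten `handle_new_monitor_update!` macro: it
	// only runs completion actions once every in-flight update for the channel is done.
	let (update_completed, all_updates_complete) = apply_update(&mut in_flight, 1, 42, |_| true);
	assert!(update_completed);
	if all_updates_complete {
		println!("run monitor update completion actions for channel 1");
	}
}

Keeping the bulk of the logic in an `fn` means it is compiled once rather than expanded
at every macro callsite, which is where the roughly 2,500-line reduction in the expanded
crate size cited in the commit message comes from.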