From f59557f827b3ce154ee1b52d615bfda577194b93 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 7 Oct 2025 15:52:39 +0000
Subject: [PATCH] Correctly track reloaded update_id in `chanmon_consistency`
 fuzzer

In the `chanmon_consistency` fuzzer, when reloading a node, we take a
pending monitor update (or the latest persisted one) and put it in
`persisted_monitor` as it is implicitly the latest persisted monitor on
restart. However, we failed to update `persisted_monitor_id`.

As a result, we may restart and write the loaded monitor to
`persisted_monitor` (e.g. at ID 2) but have a later
`persisted_monitor_id` (e.g. ID 3). Then, when we complete the monitor
update for the `persisted_monitor_id` (here, 3) we will think that it's
not a new update and neglect to update
`persisted_monitor`/`persisted_monitor_id`. As a result, later updates
(e.g. ID 4) will fail as we're trying to apply them to the original
persisted monitor (at ID 2).

The fix is simply to ensure `persisted_monitor_id` is always updated in
lock-step with `persisted_monitor` on reload.
---
 fuzz/src/chanmon_consistency.rs | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 40a840eb164..8d3145ff071 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -171,7 +171,11 @@ impl Writer for VecWriter {
 /// Note that such "being persisted" `ChannelMonitor`s are stored in `ChannelManager` and will
 /// simply be replayed on startup.
 struct LatestMonitorState {
-	/// The latest monitor id which we told LDK we've persisted
+	/// The latest monitor id which we told LDK we've persisted.
+	///
+	/// Note that there may still be earlier pending monitor updates in [`Self::pending_monitors`]
+	/// which we haven't yet completed. We're allowed to reload with those as well, at least until
+	/// they're completed.
 	persisted_monitor_id: u64,
 	/// The latest serialized `ChannelMonitor` that we told LDK we persisted.
 	persisted_monitor: Vec<u8>,
@@ -726,18 +730,18 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 		let mut monitors = new_hash_map();
 		let mut old_monitors = old_monitors.latest_monitors.lock().unwrap();
 		for (channel_id, mut prev_state) in old_monitors.drain() {
-			let serialized_mon = if use_old_mons % 3 == 0 {
+			let (mon_id, serialized_mon) = if use_old_mons % 3 == 0 {
 				// Reload with the oldest `ChannelMonitor` (the one that we already told
 				// `ChannelManager` we finished persisting).
-				prev_state.persisted_monitor
+				(prev_state.persisted_monitor_id, prev_state.persisted_monitor)
 			} else if use_old_mons % 3 == 1 {
 				// Reload with the second-oldest `ChannelMonitor`
-				let old_mon = prev_state.persisted_monitor;
-				prev_state.pending_monitors.drain(..).next().map(|(_, v)| v).unwrap_or(old_mon)
+				let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor);
+				prev_state.pending_monitors.drain(..).next().unwrap_or(old_mon)
 			} else {
 				// Reload with the newest `ChannelMonitor`
-				let old_mon = prev_state.persisted_monitor;
-				prev_state.pending_monitors.pop().map(|(_, v)| v).unwrap_or(old_mon)
+				let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor);
+				prev_state.pending_monitors.pop().unwrap_or(old_mon)
 			};
 			// Use a different value of `use_old_mons` if we have another monitor (only for node B)
 			// by shifting `use_old_mons` one in base-3.
@@ -750,6 +754,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 			monitors.insert(channel_id, mon.1);
 			// Update the latest `ChannelMonitor` state to match what we just told LDK.
 			prev_state.persisted_monitor = serialized_mon;
+			prev_state.persisted_monitor_id = mon_id;
 			// Wipe any `ChannelMonitor`s which we never told LDK we finished persisting,
 			// considering them discarded. LDK should replay these for us as they're stored in
 			// the `ChannelManager`.
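
For illustration only, a minimal sketch of the desync the commit message
describes. The types here are hypothetical simplifications, not the fuzzer's
real state handling, and the "only accept newer updates" check in
`complete_update` is modeled from the commit-message description rather than
copied from the fuzzer.

    // Hypothetical, simplified per-channel state; the real fuzzer also tracks
    // the list of pending monitor updates.
    struct LatestMonitorState {
        persisted_monitor_id: u64,
        persisted_monitor: Vec<u8>,
    }

    impl LatestMonitorState {
        // On reload we overwrite the persisted monitor with whichever copy we
        // restarted from. Passing `update_id_too = false` models the old bug.
        fn reload(&mut self, mon_id: u64, serialized_mon: Vec<u8>, update_id_too: bool) {
            self.persisted_monitor = serialized_mon;
            if update_id_too {
                // The fix: keep the id in lock-step with the serialized monitor.
                self.persisted_monitor_id = mon_id;
            }
        }

        // A completed update only replaces the persisted state if it looks
        // newer than what we believe we already persisted.
        fn complete_update(&mut self, mon_id: u64, serialized_mon: Vec<u8>) {
            if mon_id > self.persisted_monitor_id {
                self.persisted_monitor_id = mon_id;
                self.persisted_monitor = serialized_mon;
            }
        }
    }

    fn main() {
        // We told LDK that update 3 was persisted, but an earlier update (ID 2)
        // was still pending, and on restart we reload from that pending copy.
        let mut state =
            LatestMonitorState { persisted_monitor_id: 3, persisted_monitor: vec![3] };

        state.reload(2, vec![2], false); // old behaviour: the id stays at 3
        // When the replayed update 3 completes, 3 > 3 is false, so the persisted
        // monitor silently stays at the ID-2 copy; a later update (e.g. ID 4)
        // would then be applied against the wrong base monitor.
        state.complete_update(3, vec![3]);
        assert_eq!(state.persisted_monitor, vec![2]);

        // With the fix, the id follows the reloaded monitor, so update 3 is
        // correctly treated as new again.
        state.reload(2, vec![2], true);
        state.complete_update(3, vec![3]);
        assert_eq!(state.persisted_monitor, vec![3]);
    }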