From eb5492f5cfb7dbfb6dcefbbf4250ae54f38ec38f Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 20 Nov 2025 00:44:22 +0000
Subject: [PATCH] Handle mon update completion actions even when update(s) are
 blocked

If we complete a `ChannelMonitorUpdate` persistence but there are blocked
`ChannelMonitorUpdate`s in the channel, we'll skip all the
post-monitor-update logic entirely. While it's correct that we can't resume
the channel (as it expected the monitor updates it generated to complete,
even if they ended up blocked), the post-update actions are a
`channelmanager.rs` concept - they cannot be tied to blocked updates because
`channelmanager.rs` doesn't even see blocked updates.

This can lead to a channel getting stuck waiting on itself. In a production
environment, an LDK user saw a case where:
 (a) an MPP payment was received over several channels, let's call them
     A + B.
 (b) channel B got into `AwaitingRAA` due to unrelated operations,
 (c) the MPP payment was claimed, with async monitor updating,
 (d) the `revoke_and_ack` we were waiting on was delivered, but the
     resulting `ChannelMonitorUpdate` was blocked due to the pending claim
     having inserted an RAA-blocking action,
 (e) the preimage `ChannelMonitorUpdate` generated for channel B completed
     persistence, which did nothing due to the blocked
     `ChannelMonitorUpdate`.
 (f) the `Event::PaymentClaimed` event was handled but it, too, failed to
     unblock the channel.

Instead, here, we simply process post-update actions when an update
completes, even if there are pending blocked updates. We do not fully
unblock the channel, of course.
---
 lightning/src/ln/chanmon_update_fail_tests.rs | 117 ++++++++++++-
 lightning/src/ln/channelmanager.rs            | 154 ++++++++++--------
 lightning/src/ln/functional_test_utils.rs     | 135 ++++++++-------
 3 files changed, 271 insertions(+), 135 deletions(-)

diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 891451075e4..e79e8becc66 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -19,12 +19,13 @@ use crate::chain::transaction::OutPoint;
 use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentPurpose};
 use crate::ln::channel::AnnouncementSigsState;
-use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields};
+use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields, Retry};
 use crate::ln::msgs;
 use crate::ln::msgs::{
 	BaseMessageHandler, ChannelMessageHandler, MessageSendEvent, RoutingMessageHandler,
 };
 use crate::ln::types::ChannelId;
+use crate::routing::router::{PaymentParameters, RouteParameters};
 use crate::sign::NodeSigner;
 use crate::util::native_async::FutureQueue;
 use crate::util::persist::{
@@ -3535,7 +3536,7 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode
 	assert!(a.is_none());
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &raa);
-	check_added_monitors(&nodes[1], 0);
+	check_added_monitors(&nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 }
 
@@ -3554,8 +3555,8 @@
 		panic!();
 	}
 
-	// The event processing should release the last RAA updates on both channels.
-	check_added_monitors(&nodes[1], 2);
+	// The event processing should release the last RAA update.
+	check_added_monitors(&nodes[1], 1);
 
 	// When we fetch the next update the message getter will generate the next update for nodes[2],
 	// generating a further monitor update.
@@ -5055,3 +5056,111 @@ fn native_async_persist() {
 		panic!();
 	}
 }
+
+#[test]
+fn test_mpp_claim_to_holding_cell() {
+	// Previously, if an MPP payment was claimed while one channel was AwaitingRAA (causing the
+	// HTLC claim to go into the holding cell), and the RAA came in before the async monitor
+	// update with the preimage completed, the channel could hang waiting on itself.
+	// This tests that behavior.
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+	let node_d_id = nodes[3].node.get_our_node_id();
+
+	// First open channels in a diamond and deliver the MPP payment.
+	let chan_1_scid = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
+	let chan_2_scid = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
+	let (chan_3_update, _, chan_3_id, ..) = create_announced_chan_between_nodes(&nodes, 1, 3);
+	let chan_3_scid = chan_3_update.contents.short_channel_id;
+	let (chan_4_update, _, chan_4_id, ..) = create_announced_chan_between_nodes(&nodes, 2, 3);
+	let chan_4_scid = chan_4_update.contents.short_channel_id;
+
+	let (mut route, paymnt_hash_1, preimage_1, payment_secret) =
+		get_route_and_payment_hash!(&nodes[0], nodes[3], 500_000);
+	let path = route.paths[0].clone();
+	route.paths.push(path);
+	route.paths[0].hops[0].pubkey = node_b_id;
+	route.paths[0].hops[0].short_channel_id = chan_1_scid;
+	route.paths[0].hops[1].short_channel_id = chan_3_scid;
+	route.paths[0].hops[1].fee_msat = 250_000;
+	route.paths[1].hops[0].pubkey = node_c_id;
+	route.paths[1].hops[0].short_channel_id = chan_2_scid;
+	route.paths[1].hops[1].short_channel_id = chan_4_scid;
+	route.paths[1].hops[1].fee_msat = 250_000;
+	let paths = &[&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]];
+	send_along_route_with_secret(&nodes[0], route, paths, 500_000, paymnt_hash_1, payment_secret);
+
+	// Put the C <-> D channel into AwaitingRAA.
+	let (preimage_2, paymnt_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[3]);
+	let onion = RecipientOnionFields::secret_only(payment_secret_2);
+	let id = PaymentId([42; 32]);
+	let pay_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV);
+	let route_params = RouteParameters::from_payment_params_and_value(pay_params, 400_000);
+	nodes[2].node.send_payment(paymnt_hash_2, onion, id, route_params, Retry::Attempts(0)).unwrap();
+	check_added_monitors(&nodes[2], 1);
+
+	let mut payment_event = SendEvent::from_node(&nodes[2]);
+	nodes[3].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]);
+	nodes[3].node.handle_commitment_signed_batch_test(node_c_id, &payment_event.commitment_msg);
+	check_added_monitors(&nodes[3], 1);
+
+	let (raa, cs) = get_revoke_commit_msgs(&nodes[3], &node_c_id);
+	nodes[2].node.handle_revoke_and_ack(node_d_id, &raa);
+	check_added_monitors(&nodes[2], 1);
+
+	nodes[2].node.handle_commitment_signed_batch_test(node_d_id, &cs);
+	check_added_monitors(&nodes[2], 1);
+
+	let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_d_id);
+
+	// Now claim the payment, completing both channel monitor updates asynchronously.
+	// In the current code, the C <-> D channel happens to be the `durable_preimage_channel`,
+	// improving coverage somewhat, though that isn't strictly critical to the test.
+	chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	nodes[3].node.claim_funds(preimage_1);
+	check_added_monitors(&nodes[3], 2);
+
+	// Complete the B <-> D monitor update, freeing the first fulfill.
+	let (latest_id, _) = get_latest_mon_update_id(&nodes[3], chan_3_id);
+	nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(chan_3_id, latest_id).unwrap();
+	let mut b_claim = get_htlc_update_msgs(&nodes[3], &node_b_id);
+
+	// When we deliver the pre-claim RAA, node D will shove the monitor update into the blocked
+	// state since we have a pending MPP payment which is blocking RAA monitor updates.
+	nodes[3].node.handle_revoke_and_ack(node_c_id, &cs_raa);
+	check_added_monitors(&nodes[3], 0);
+
+	// Finally, complete the C <-> D monitor update. Previously, this completion failed to be
+	// processed due to the existence of the blocked RAA update above.
+	let (latest_id, _) = get_latest_mon_update_id(&nodes[3], chan_4_id);
+	nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(chan_4_id, latest_id).unwrap();
+	// Once we process monitor events (in this case by checking for the `PaymentClaimed` event),
+	// the RAA monitor update blocked above will be released.
+	expect_payment_claimed!(nodes[3], paymnt_hash_1, 500_000);
+	check_added_monitors(&nodes[3], 1);
+
+	// After the RAA monitor update completes, the C <-> D channel will be able to generate its
+	// fulfill updates as well.
+	let mut c_claim = get_htlc_update_msgs(&nodes[3], &node_c_id);
+	check_added_monitors(&nodes[3], 1);
+
+	// Finally, clear all the pending payments.
+	let path = [&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]];
+	let mut args = ClaimAlongRouteArgs::new(&nodes[0], &path[..], preimage_1);
+	let b_claim_msgs = (b_claim.update_fulfill_htlcs.pop().unwrap(), b_claim.commitment_signed);
+	let c_claim_msgs = (c_claim.update_fulfill_htlcs.pop().unwrap(), c_claim.commitment_signed);
+	let claims = vec![(b_claim_msgs, node_b_id), (c_claim_msgs, node_c_id)];
+	pass_claimed_payment_along_route_from_ev(250_000, claims, args);
+
+	expect_payment_sent(&nodes[0], preimage_1, None, true, true);
+
+	expect_and_process_pending_htlcs(&nodes[3], false);
+	expect_payment_claimable!(nodes[3], paymnt_hash_2, payment_secret_2, 400_000);
+	claim_payment(&nodes[2], &[&nodes[3]], preimage_2);
+}
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 0a13d2312b2..cd063bbe33e 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1296,6 +1296,10 @@ impl MaybeReadable for EventUnblockedChannel {
 }
 
 #[derive(Debug)]
+/// Note that these run after all *non-blocked* [`ChannelMonitorUpdate`]s have been persisted.
+/// Thus, they're primarily useful for (and currently only used for) claims, where the
+/// [`ChannelMonitorUpdate`] we care about is a preimage update, which bypasses the monitor update
+/// blocking logic entirely and can never be blocked.
 pub(crate) enum MonitorUpdateCompletionAction {
 	/// Indicates that a payment ultimately destined for us was claimed and we should emit an
 	/// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
@@ -1580,6 +1584,11 @@ where
 	/// same `temporary_channel_id` (or final `channel_id` in the case of 0conf channels or prior
 	/// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
 	/// duplicates do not occur, so such channels should fail without a monitor update completing.
+	///
+	/// Note that these run after all *non-blocked* [`ChannelMonitorUpdate`]s have been persisted.
+	/// Thus, they're primarily useful for (and currently only used for) claims, where the
+	/// [`ChannelMonitorUpdate`] we care about is a preimage update, which bypasses the monitor
+	/// update blocking logic entirely and can never be blocked.
 	monitor_update_blocked_actions: BTreeMap<ChannelId, Vec<MonitorUpdateCompletionAction>>,
 	/// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have
 	/// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update
@@ -3353,78 +3362,90 @@ macro_rules! handle_monitor_update_completion {
 		let chan_id = $chan.context.channel_id();
 		let outbound_alias = $chan.context().outbound_scid_alias();
 		let cp_node_id = $chan.context.get_counterparty_node_id();
+
 		#[cfg(debug_assertions)]
 		{
 			let in_flight_updates = $peer_state.in_flight_monitor_updates.get(&chan_id);
 			assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true));
-			assert_eq!($chan.blocked_monitor_updates_pending(), 0);
+			assert!($chan.is_awaiting_monitor_update());
 		}
+
 		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
-		let updates = $chan.monitor_updating_restored(
-			&&logger,
-			&$self.node_signer,
-			$self.chain_hash,
-			&*$self.config.read().unwrap(),
-			$self.best_block.read().unwrap().height,
-			|htlc_id| {
-				$self.path_for_release_held_htlc(htlc_id, outbound_alias, &chan_id, &cp_node_id)
-			},
-		);
-		let channel_update = if updates.channel_ready.is_some()
-			&& $chan.context.is_usable()
-			&& $peer_state.is_connected
-		{
-			// We only send a channel_update in the case where we are just now sending a
-			// channel_ready and the channel is in a usable state. We may re-send a
-			// channel_update later through the announcement_signatures process for public
-			// channels, but there's no reason not to just inform our counterparty of our fees
-			// now.
-			if let Ok((msg, _, _)) = $self.get_channel_update_for_unicast($chan) {
-				Some(MessageSendEvent::SendChannelUpdate { node_id: cp_node_id, msg })
-			} else {
-				None
-			}
-		} else {
-			None
-		};
 		let update_actions =
 			$peer_state.monitor_update_blocked_actions.remove(&chan_id).unwrap_or(Vec::new());
 
-		let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
-			&mut $peer_state.pending_msg_events,
-			$chan,
-			updates.raa,
-			updates.commitment_update,
-			updates.commitment_order,
-			updates.accepted_htlcs,
-			updates.pending_update_adds,
-			updates.funding_broadcastable,
-			updates.channel_ready,
-			updates.announcement_sigs,
-			updates.tx_signatures,
-			None,
-			updates.channel_ready_order,
-		);
-		if let Some(upd) = channel_update {
-			$peer_state.pending_msg_events.push(upd);
-		}
-
-		let unbroadcasted_batch_funding_txid =
-			$chan.context.unbroadcasted_batch_funding_txid(&$chan.funding);
-		core::mem::drop($peer_state_lock);
-		core::mem::drop($per_peer_state_lock);
-
-		$self.post_monitor_update_unlock(
-			chan_id,
-			cp_node_id,
-			unbroadcasted_batch_funding_txid,
-			update_actions,
-			htlc_forwards,
-			decode_update_add_htlcs,
-			updates.finalized_claimed_htlcs,
-			updates.failed_htlcs,
-		);
+		if $chan.blocked_monitor_updates_pending() != 0 {
+			mem::drop($peer_state_lock);
+			mem::drop($per_peer_state_lock);
+
+			log_debug!(logger, "Channel has blocked monitor updates, completing update actions but leaving channel blocked");
+			$self.handle_monitor_update_completion_actions(update_actions);
+		} else {
+			log_debug!(logger, "Channel is open and awaiting update, resuming it");
+			let updates = $chan.monitor_updating_restored(
+				&&logger,
+				&$self.node_signer,
+				$self.chain_hash,
+				&*$self.config.read().unwrap(),
+				$self.best_block.read().unwrap().height,
+				|htlc_id| {
+					$self.path_for_release_held_htlc(htlc_id, outbound_alias, &chan_id, &cp_node_id)
+				},
+			);
+			let channel_update = if updates.channel_ready.is_some()
+				&& $chan.context.is_usable()
+				&& $peer_state.is_connected
+			{
+				// We only send a channel_update in the case where we are just now sending a
+				// channel_ready and the channel is in a usable state. We may re-send a
+				// channel_update later through the announcement_signatures process for public
+				// channels, but there's no reason not to just inform our counterparty of our fees
+				// now.
+				if let Ok((msg, _, _)) = $self.get_channel_update_for_unicast($chan) {
+					Some(MessageSendEvent::SendChannelUpdate { node_id: cp_node_id, msg })
+				} else {
+					None
+				}
+			} else {
+				None
+			};
+
+			let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
+				&mut $peer_state.pending_msg_events,
+				$chan,
+				updates.raa,
+				updates.commitment_update,
+				updates.commitment_order,
+				updates.accepted_htlcs,
+				updates.pending_update_adds,
+				updates.funding_broadcastable,
+				updates.channel_ready,
+				updates.announcement_sigs,
+				updates.tx_signatures,
+				None,
+				updates.channel_ready_order,
+			);
+			if let Some(upd) = channel_update {
+				$peer_state.pending_msg_events.push(upd);
+			}
+
+			let unbroadcasted_batch_funding_txid =
+				$chan.context.unbroadcasted_batch_funding_txid(&$chan.funding);
+			core::mem::drop($peer_state_lock);
+			core::mem::drop($per_peer_state_lock);
+
+			$self.post_monitor_update_unlock(
+				chan_id,
+				cp_node_id,
+				unbroadcasted_batch_funding_txid,
+				update_actions,
+				htlc_forwards,
+				decode_update_add_htlcs,
+				updates.finalized_claimed_htlcs,
+				updates.failed_htlcs,
+			);
+		}
 	}};
 }
 
@@ -3595,7 +3616,7 @@ macro_rules! handle_new_monitor_update {
 		$update,
 		WithChannelContext::from(&$self.logger, &$chan.context, None),
 	);
-	if all_updates_complete && $chan.blocked_monitor_updates_pending() == 0 {
+	if all_updates_complete {
 		handle_monitor_update_completion!(
 			$self,
 			$peer_state_lock,
@@ -9813,12 +9834,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			.and_then(Channel::as_funded_mut)
 		{
 			if chan.is_awaiting_monitor_update() {
-				if chan.blocked_monitor_updates_pending() == 0 {
-					log_trace!(logger, "Channel is open and awaiting update, resuming it");
-					handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
-				} else {
-					log_trace!(logger, "Channel is open and awaiting update, leaving it blocked due to a blocked monitor update");
-				}
+				handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
 			} else {
 				log_trace!(logger, "Channel is open but not awaiting update");
 			}
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index e31630a4926..d0cef55afa4 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -1099,7 +1099,7 @@ pub fn get_htlc_update_msgs(node: &Node, recipient: &PublicKey) -> msgs::Commitm
 			assert_eq!(node_id, recipient);
 			(*updates).clone()
 		},
-		_ => panic!("Unexpected event"),
+		_ => panic!("Unexpected event {events:?}"),
 	}
 }
 
@@ -2970,9 +2970,9 @@ pub fn expect_payment_sent<CM: AChannelManager, H: NodeHolder<CM = CM>>(
 		bitcoin::hashes::sha256::Hash::hash(&expected_payment_preimage.0).to_byte_array(),
 	);
 	if expect_per_path_claims {
-		assert!(events.len() > 1);
+		assert!(events.len() > 1, "{events:?}");
 	} else {
-		assert_eq!(events.len(), 1);
+		assert_eq!(events.len(), 1, "{events:?}");
 	}
 	if expect_post_ev_mon_update {
 		check_added_monitors(node, 1);
 	}
@@ -3807,19 +3807,38 @@ impl<'a, 'b, 'c, 'd> ClaimAlongRouteArgs<'a, 'b, 'c, 'd> {
 	}
 }
 
+macro_rules! single_fulfill_commit_from_ev {
+	($ev: expr) => {
+		match $ev {
+			&MessageSendEvent::UpdateHTLCs {
+				ref node_id,
+				ref channel_id,
+				updates:
+					msgs::CommitmentUpdate {
+						ref update_add_htlcs,
+						ref update_fulfill_htlcs,
+						ref update_fail_htlcs,
+						ref update_fail_malformed_htlcs,
+						ref update_fee,
+						ref commitment_signed,
+					},
+			} => {
+				assert!(update_add_htlcs.is_empty());
+				assert_eq!(update_fulfill_htlcs.len(), 1);
+				assert!(update_fail_htlcs.is_empty());
+				assert!(update_fail_malformed_htlcs.is_empty());
+				assert!(update_fee.is_none());
+				assert!(commitment_signed.iter().all(|cs| cs.channel_id == *channel_id));
+				((update_fulfill_htlcs[0].clone(), commitment_signed.clone()), node_id.clone())
+			},
+			_ => panic!("Unexpected event"),
+		}
+	};
+}
+
 pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 {
-	let ClaimAlongRouteArgs {
-		origin_node,
-		expected_paths,
-		expected_extra_fees,
-		expected_min_htlc_overpay,
-		skip_last,
-		payment_preimage: our_payment_preimage,
-		allow_1_msat_fee_overpay,
-		custom_tlvs,
-	} = args;
-	let claim_event = expected_paths[0].last().unwrap().node.get_and_clear_pending_events();
-	assert_eq!(claim_event.len(), 1);
+	let claim_event = args.expected_paths[0].last().unwrap().node.get_and_clear_pending_events();
+	assert_eq!(claim_event.len(), 1, "{claim_event:?}");
 	#[allow(unused)]
 	let mut fwd_amt_msat = 0;
 	match claim_event[0] {
@@ -3834,11 +3853,11 @@ pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 {
 			ref onion_fields,
 			..
 		} => {
-			assert_eq!(preimage, our_payment_preimage);
-			assert_eq!(htlcs.len(), expected_paths.len()); // One per path.
+			assert_eq!(preimage, args.payment_preimage);
+			assert_eq!(htlcs.len(), args.expected_paths.len()); // One per path.
 			assert_eq!(htlcs.iter().map(|h| h.value_msat).sum::<u64>(), amount_msat);
-			assert_eq!(onion_fields.as_ref().unwrap().custom_tlvs, custom_tlvs);
-			check_claimed_htlcs_match_route(origin_node, expected_paths, htlcs);
+			assert_eq!(onion_fields.as_ref().unwrap().custom_tlvs, args.custom_tlvs);
+			check_claimed_htlcs_match_route(args.origin_node, args.expected_paths, htlcs);
 			fwd_amt_msat = amount_msat;
 		},
 		Event::PaymentClaimed {
@@ -3852,59 +3871,29 @@ pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 {
 			ref onion_fields,
 			..
 		} => {
-			assert_eq!(&payment_hash.0, &Sha256::hash(&our_payment_preimage.0)[..]);
-			assert_eq!(htlcs.len(), expected_paths.len()); // One per path.
+			assert_eq!(&payment_hash.0, &Sha256::hash(&args.payment_preimage.0)[..]);
+			assert_eq!(htlcs.len(), args.expected_paths.len()); // One per path.
 			assert_eq!(htlcs.iter().map(|h| h.value_msat).sum::<u64>(), amount_msat);
-			assert_eq!(onion_fields.as_ref().unwrap().custom_tlvs, custom_tlvs);
-			check_claimed_htlcs_match_route(origin_node, expected_paths, htlcs);
+			assert_eq!(onion_fields.as_ref().unwrap().custom_tlvs, args.custom_tlvs);
+			check_claimed_htlcs_match_route(args.origin_node, args.expected_paths, htlcs);
 			fwd_amt_msat = amount_msat;
 		},
 		_ => panic!(),
 	}
-	check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len());
-
-	let mut expected_total_fee_msat = 0;
+	check_added_monitors(args.expected_paths[0].last().unwrap(), args.expected_paths.len());
 
-	macro_rules! msgs_from_ev {
-		($ev: expr) => {
-			match $ev {
-				&MessageSendEvent::UpdateHTLCs {
-					ref node_id,
-					ref channel_id,
-					updates:
-						msgs::CommitmentUpdate {
-							ref update_add_htlcs,
-							ref update_fulfill_htlcs,
-							ref update_fail_htlcs,
-							ref update_fail_malformed_htlcs,
-							ref update_fee,
-							ref commitment_signed,
-						},
-				} => {
-					assert!(update_add_htlcs.is_empty());
-					assert_eq!(update_fulfill_htlcs.len(), 1);
-					assert!(update_fail_htlcs.is_empty());
-					assert!(update_fail_malformed_htlcs.is_empty());
-					assert!(update_fee.is_none());
-					assert!(commitment_signed.iter().all(|cs| cs.channel_id == *channel_id));
-					((update_fulfill_htlcs[0].clone(), commitment_signed.clone()), node_id.clone())
-				},
-				_ => panic!("Unexpected event"),
-			}
-		};
-	}
 	let mut per_path_msgs: Vec<(
 		(msgs::UpdateFulfillHTLC, Vec<msgs::CommitmentSigned>),
 		PublicKey,
-	)> = Vec::with_capacity(expected_paths.len());
-	let mut events = expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events();
-	assert_eq!(events.len(), expected_paths.len());
+	)> = Vec::with_capacity(args.expected_paths.len());
+	let mut events = args.expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), args.expected_paths.len());
 	if events.len() == 1 {
-		per_path_msgs.push(msgs_from_ev!(&events[0]));
+		per_path_msgs.push(single_fulfill_commit_from_ev!(&events[0]));
 	} else {
-		for expected_path in expected_paths.iter() {
+		for expected_path in args.expected_paths.iter() {
 			// For MPP payments, we want the fulfill message from the payee to the penultimate hop in the
 			// path.
 			let penultimate_hop_node_id = expected_path
@@ -3913,12 +3902,34 @@ pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 {
 				.iter()
 				.rev()
 				.skip(1)
 				.next()
 				.map(|n| n.node.get_our_node_id())
-				.unwrap_or(origin_node.node.get_our_node_id());
+				.unwrap_or(args.origin_node.node.get_our_node_id());
 			let ev = remove_first_msg_event_to_node(&penultimate_hop_node_id, &mut events);
-			per_path_msgs.push(msgs_from_ev!(&ev));
+			per_path_msgs.push(single_fulfill_commit_from_ev!(&ev));
 		}
 	}
+	pass_claimed_payment_along_route_from_ev(fwd_amt_msat, per_path_msgs, args)
+}
+
+pub fn pass_claimed_payment_along_route_from_ev(
+	each_htlc_claim_amt_msat: u64,
+	mut per_path_msgs: Vec<((msgs::UpdateFulfillHTLC, Vec<msgs::CommitmentSigned>), PublicKey)>,
+	args: ClaimAlongRouteArgs,
+) -> u64 {
+	let ClaimAlongRouteArgs {
+		origin_node,
+		expected_paths,
+		expected_extra_fees,
+		expected_min_htlc_overpay,
+		skip_last,
+		payment_preimage: our_payment_preimage,
+		allow_1_msat_fee_overpay,
+		..
+	} = args;
+
+	let mut fwd_amt_msat = each_htlc_claim_amt_msat;
+	let mut expected_total_fee_msat = 0;
+
 	for (i, (expected_route, (path_msgs, next_hop))) in
 		expected_paths.iter().zip(per_path_msgs.drain(..)).enumerate()
 	{
@@ -4000,7 +4011,7 @@ pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 {
 			let new_next_msgs = if $new_msgs {
 				let events = $node.node.get_and_clear_pending_msg_events();
 				assert_eq!(events.len(), 1);
-				let (res, nexthop) = msgs_from_ev!(&events[0]);
+				let (res, nexthop) = single_fulfill_commit_from_ev!(&events[0]);
 				expected_next_node = nexthop;
 				Some(res)
 			} else {