diff --git a/lightning/src/blinded_path/message.rs b/lightning/src/blinded_path/message.rs
index e291c83b66c..ed55ca5dc9b 100644
--- a/lightning/src/blinded_path/message.rs
+++ b/lightning/src/blinded_path/message.rs
@@ -587,6 +587,20 @@ pub enum AsyncPaymentsContext {
 		/// An identifier for the HTLC that should be released by us as the sender's always-online
 		/// channel counterparty to the often-offline recipient.
 		intercept_id: InterceptId,
+		/// The short channel id alias corresponding to the to-be-released inbound HTLC, to help locate
+		/// the HTLC internally if the [`ReleaseHeldHtlc`] races our node decoding the held HTLC's
+		/// onion.
+		///
+		/// We use the outbound scid alias because it is stable even if the channel splices, unlike
+		/// regular short channel ids.
+		///
+		/// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
+		prev_outbound_scid_alias: u64,
+		/// The id of the to-be-released HTLC, to help locate the HTLC internally if the
+		/// [`ReleaseHeldHtlc`] races our node decoding the held HTLC's onion.
+		///
+		/// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
+		htlc_id: u64,
 	},
 }
 
@@ -645,6 +659,8 @@ impl_writeable_tlv_based_enum!(AsyncPaymentsContext,
 	},
 	(6, ReleaseHeldHtlc) => {
 		(0, intercept_id, required),
+		(2, prev_outbound_scid_alias, required),
+		(4, htlc_id, required),
 	},
 );
 
diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs
index ccef4480efc..2ca42286345 100644
--- a/lightning/src/ln/async_payments_tests.rs
+++ b/lightning/src/ln/async_payments_tests.rs
@@ -3349,3 +3349,70 @@ fn fail_held_htlcs_when_cfg_unset() {
 		PaymentFailureReason::RetriesExhausted,
 	);
 }
+
+#[test]
+fn release_htlc_races_htlc_onion_decode() {
+	// Test that an async sender's LSP will release held HTLCs even if it receives the
+	// release_held_htlc message before it has a chance to process the held HTLC's onion. This was
+	// previously broken.
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+
+	let (sender_cfg, recipient_cfg) = (often_offline_node_cfg(), often_offline_node_cfg());
+	let mut sender_lsp_cfg = test_default_channel_config();
+	sender_lsp_cfg.enable_htlc_hold = true;
+	let mut invoice_server_cfg = test_default_channel_config();
+	invoice_server_cfg.accept_forwards_to_priv_channels = true;
+
+	let node_chanmgrs = create_node_chanmgrs(
+		4,
+		&node_cfgs,
+		&[Some(sender_cfg), Some(sender_lsp_cfg), Some(invoice_server_cfg), Some(recipient_cfg)],
+	);
+	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+	create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
+	create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
+	unify_blockheight_across_nodes(&nodes);
+	let sender = &nodes[0];
+	let sender_lsp = &nodes[1];
+	let invoice_server = &nodes[2];
+	let recipient = &nodes[3];
+
+	let amt_msat = 5000;
+	let (static_invoice, peer_id, static_invoice_om) =
+		build_async_offer_and_init_payment(amt_msat, &nodes);
+	let payment_hash =
+		lock_in_htlc_for_static_invoice(&static_invoice_om, peer_id, sender, sender_lsp);
+
+	// The LSP has not transitioned the HTLC to the intercepts map internally because
+	// process_pending_htlc_forwards has not been called.
+	let (peer_id, held_htlc_om) =
+		extract_held_htlc_available_oms(sender, &[sender_lsp, invoice_server, recipient])
+			.pop()
+			.unwrap();
+	recipient.onion_messenger.handle_onion_message(peer_id, &held_htlc_om);
+
+	// Extract the release_htlc_om and ensure the sender's LSP will release the HTLC on the next call
+	// to process_pending_htlc_forwards, even though the HTLC was not yet officially intercepted when
+	// the release message arrived.
+	let (peer_id, release_htlc_om) =
+		extract_release_htlc_oms(recipient, &[sender, sender_lsp, invoice_server]).pop().unwrap();
+	sender_lsp.onion_messenger.handle_onion_message(peer_id, &release_htlc_om);
+
+	sender_lsp.node.process_pending_htlc_forwards();
+	let mut events = sender_lsp.node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let ev = remove_first_msg_event_to_node(&invoice_server.node.get_our_node_id(), &mut events);
+	check_added_monitors!(sender_lsp, 1);
+
+	let path: &[&Node] = &[invoice_server, recipient];
+	let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev);
+	let claimable_ev = do_pass_along_path(args).unwrap();
+
+	let route: &[&[&Node]] = &[&[sender_lsp, invoice_server, recipient]];
+	let keysend_preimage = extract_payment_preimage(&claimable_ev);
+	let (res, _) =
+		claim_payment_along_route(ClaimAlongRouteArgs::new(sender, route, keysend_preimage));
+	assert_eq!(res, Some(PaidBolt12Invoice::StaticInvoice(static_invoice)));
+}
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 0bdca77b366..c5b6cc56d97 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -452,7 +452,7 @@ pub(super) struct PendingAddHTLCInfo {
 	// HTLCs.
 	//
 	// Note that this may be an outbound SCID alias for the associated channel.
-	prev_short_channel_id: u64,
+	prev_outbound_scid_alias: u64,
 	prev_htlc_id: u64,
 	prev_counterparty_node_id: PublicKey,
 	prev_channel_id: ChannelId,
@@ -467,7 +467,7 @@ impl PendingAddHTLCInfo {
 			_ => None,
 		};
 		HTLCPreviousHopData {
-			short_channel_id: self.prev_short_channel_id,
+			prev_outbound_scid_alias: self.prev_outbound_scid_alias,
 			user_channel_id: Some(self.prev_user_channel_id),
 			outpoint: self.prev_funding_outpoint,
 			channel_id: self.prev_channel_id,
@@ -735,14 +735,14 @@ impl Default for OptionalOfferPaymentParams {
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
 /// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
 pub(crate) enum SentHTLCId {
-	PreviousHopData { short_channel_id: u64, htlc_id: u64 },
+	PreviousHopData { prev_outbound_scid_alias: u64, htlc_id: u64 },
 	OutboundRoute { session_priv: [u8; SECRET_KEY_SIZE] },
 }
 impl SentHTLCId {
 	pub(crate) fn from_source(source: &HTLCSource) -> Self {
 		match source {
 			HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData {
-				short_channel_id: hop_data.short_channel_id,
+				prev_outbound_scid_alias: hop_data.prev_outbound_scid_alias,
 				htlc_id: hop_data.htlc_id,
 			},
 			HTLCSource::OutboundRoute { session_priv, .. } => {
@@ -753,7 +753,7 @@ impl SentHTLCId {
 }
 impl_writeable_tlv_based_enum!(SentHTLCId,
 	(0, PreviousHopData) => {
-		(0, short_channel_id, required),
+		(0, prev_outbound_scid_alias, required),
 		(2, htlc_id, required),
 	},
 	(2, OutboundRoute) => {
@@ -761,7 +761,7 @@ impl_writeable_tlv_based_enum!(SentHTLCId,
 	},
 );
 
-// (src_channel_id, src_counterparty_node_id, src_funding_outpoint, src_chan_id, src_user_chan_id)
+// (src_outbound_scid_alias, src_counterparty_node_id, src_funding_outpoint, src_chan_id, src_user_chan_id)
 type PerSourcePendingForward =
 	(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>);
 
@@ -792,8 +792,7 @@ mod fuzzy_channelmanager {
 	/// Tracks the inbound corresponding to an outbound HTLC
 	#[derive(Clone, Debug, Hash, PartialEq, Eq)]
 	pub struct HTLCPreviousHopData {
-		// Note that this may be an outbound SCID alias for the associated channel.
-		pub short_channel_id: u64,
+		pub prev_outbound_scid_alias: u64,
 		pub user_channel_id: Option<u128>,
 		pub htlc_id: u64,
 		pub incoming_packet_shared_secret: [u8; 32],
@@ -2718,11 +2717,8 @@ pub struct ChannelManager<
 	/// See `ChannelManager` struct-level documentation for lock order requirements.
 	pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
 
-	/// SCID/SCID Alias -> pending `update_add_htlc`s to decode.
-	///
-	/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
-	/// though in practice we probably won't be receiving HTLCs for a channel both via the alias
-	/// and via the classic SCID.
+	/// Outbound SCID Alias -> pending `update_add_htlc`s to decode.
+	/// We use the scid alias because regular scids may change if a splice occurs.
 	///
 	/// Note that no consistency guarantees are made about the existence of a channel with the
 	/// `short_channel_id` here, nor the `channel_id` in `UpdateAddHTLC`!
@@ -3513,6 +3509,7 @@ macro_rules! emit_initial_channel_ready_event {
 macro_rules! handle_monitor_update_completion {
 	($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
 		let channel_id = $chan.context.channel_id();
+		let outbound_scid_alias = $chan.context.outbound_scid_alias();
 		let counterparty_node_id = $chan.context.get_counterparty_node_id();
 		#[cfg(debug_assertions)]
 		{
@@ -3525,7 +3522,7 @@ macro_rules! handle_monitor_update_completion {
 		let mut updates = $chan.monitor_updating_restored(&&logger,
 			&$self.node_signer, $self.chain_hash, &*$self.config.read().unwrap(),
 			$self.best_block.read().unwrap().height,
-			|htlc_id| $self.path_for_release_held_htlc(htlc_id, &channel_id, &counterparty_node_id));
+			|htlc_id| $self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &channel_id, &counterparty_node_id));
 		let channel_update = if updates.channel_ready.is_some()
 			&& $chan.context.is_usable()
 			&& $peer_state.is_connected
@@ -5627,11 +5624,17 @@ where
 	/// [`HeldHtlcAvailable`] onion message, so the recipient's [`ReleaseHeldHtlc`] response will be
 	/// received by our node.
 	fn path_for_release_held_htlc(
-		&self, htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
+		&self, htlc_id: u64, prev_outbound_scid_alias: u64, channel_id: &ChannelId,
+		counterparty_node_id: &PublicKey,
 	) -> BlindedMessagePath {
 		let intercept_id =
 			InterceptId::from_htlc_id_and_chan_id(htlc_id, channel_id, counterparty_node_id);
-		self.flow.path_for_release_held_htlc(intercept_id, &*self.entropy_source)
+		self.flow.path_for_release_held_htlc(
+			intercept_id,
+			prev_outbound_scid_alias,
+			htlc_id,
+			&*self.entropy_source,
+		)
 	}
 
 	/// Signals that no further attempts for the given payment should occur. Useful if you have a
@@ -6441,7 +6444,7 @@ where
 	) -> Result<(), APIError> {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
-		let next_hop_scid = {
+		let outbound_scid_alias = {
 			let peer_state_lock = self.per_peer_state.read().unwrap();
 			let peer_state_mutex =
 				peer_state_lock.get(&next_node_id).ok_or_else(|| APIError::ChannelUnavailable {
@@ -6461,10 +6464,7 @@ where
 					),
 				});
 			}
-			funded_chan
-				.funding
-				.get_short_channel_id()
-				.unwrap_or(funded_chan.context.outbound_scid_alias())
+			funded_chan.context.outbound_scid_alias()
 		} else {
 			return Err(APIError::ChannelUnavailable {
 				err: format!(
@@ -6512,7 +6512,7 @@ where
 					blinded,
 					incoming_cltv_expiry,
 					hold_htlc,
-					short_channel_id: next_hop_scid,
+					short_channel_id: outbound_scid_alias,
 				}
 			},
 			_ => unreachable!(), // Only `PendingHTLCRouting::Forward`s are intercepted
@@ -6527,7 +6527,7 @@ where
 		};
 
 		let mut per_source_pending_forward = [(
-			payment.prev_short_channel_id,
+			payment.prev_outbound_scid_alias,
 			payment.prev_counterparty_node_id,
 			payment.prev_funding_outpoint,
 			payment.prev_channel_id,
@@ -6588,11 +6588,12 @@ where
 			}
 		};
 
-		'outer_loop: for (incoming_scid, update_add_htlcs) in decode_update_add_htlcs {
+		'outer_loop: for (incoming_scid_alias, update_add_htlcs) in decode_update_add_htlcs {
 			// If any decoded update_add_htlcs were processed, we need to persist.
 			should_persist = true;
-			let incoming_channel_details_opt =
-				self.do_funded_channel_callback(incoming_scid, |chan: &mut FundedChannel<SP>| {
+			let incoming_channel_details_opt = self.do_funded_channel_callback(
+				incoming_scid_alias,
+				|chan: &mut FundedChannel<SP>| {
 					let counterparty_node_id = chan.context.get_counterparty_node_id();
 					let channel_id = chan.context.channel_id();
 					let funding_txo = chan.funding.get_funding_txo().unwrap();
@@ -6605,7 +6606,8 @@ where
 						user_channel_id,
 						accept_underpaying_htlcs,
 					)
-				});
+				},
+			);
 			let (
 				incoming_counterparty_node_id,
 				incoming_channel_id,
@@ -6674,7 +6676,7 @@ where
 
 				// Process the HTLC on the incoming channel.
 				match self.do_funded_channel_callback(
-					incoming_scid,
+					incoming_scid_alias,
 					|chan: &mut FundedChannel<SP>| {
 						let logger = WithChannelContext::from(
 							&self.logger,
@@ -6747,7 +6749,7 @@ where
 			// Process all of the forwards and failures for the channel in which the HTLCs were
 			// proposed to as a batch.
 			let pending_forwards = (
-				incoming_scid,
+				incoming_scid_alias,
 				incoming_counterparty_node_id,
 				incoming_funding_txo,
 				incoming_channel_id,
@@ -6769,7 +6771,12 @@ where
 							}
 						},
 					};
-					self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_default().push(failure);
+					self.forward_htlcs
+						.lock()
+						.unwrap()
+						.entry(incoming_scid_alias)
+						.or_default()
+						.push(failure);
 					self.pending_events.lock().unwrap().push_back((
 						events::Event::HTLCHandlingFailed {
 							prev_channel_id: incoming_channel_id,
@@ -6906,7 +6913,7 @@ where
 				match forward_info {
 					HTLCForwardInfo::AddHTLC(payment) => {
 						let PendingAddHTLCInfo {
-							prev_short_channel_id,
+							prev_outbound_scid_alias,
 							prev_htlc_id,
 							prev_channel_id,
 							prev_funding_outpoint,
@@ -7021,7 +7028,7 @@ where
 							);
 							match create_res {
 								Ok(info) => phantom_receives.push((
-									prev_short_channel_id,
+									prev_outbound_scid_alias,
 									prev_counterparty_node_id,
 									prev_funding_outpoint,
 									prev_channel_id,
@@ -7118,7 +7125,7 @@ where
 					HTLCForwardInfo::AddHTLC(ref payment) => {
 						let htlc_source =
 							HTLCSource::PreviousHopData(payment.htlc_previous_hop_data());
 						let PendingAddHTLCInfo {
-							prev_short_channel_id,
+							prev_outbound_scid_alias,
 							forward_info:
 								PendingHTLCInfo {
 									payment_hash,
@@ -7212,7 +7219,7 @@ where
 								"alternate"
 							};
 							log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}",
-								prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id);
+								prev_outbound_scid_alias, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id);
 							if let Err((reason, msg)) = optimal_channel.queue_add_htlc(
 								*outgoing_amt_msat,
 								*payment_hash,
@@ -7461,9 +7468,10 @@ where
 						let counterparty_node_id = $htlc.prev_hop.counterparty_node_id;
 						let incoming_packet_shared_secret = $htlc.prev_hop.incoming_packet_shared_secret;
+						let prev_outbound_scid_alias = $htlc.prev_hop.prev_outbound_scid_alias;
 						failed_forwards.push((
 							HTLCSource::PreviousHopData(HTLCPreviousHopData {
-								short_channel_id: $htlc.prev_hop.short_channel_id,
+								prev_outbound_scid_alias,
 								user_channel_id: $htlc.prev_hop.user_channel_id,
 								counterparty_node_id,
 								channel_id: prev_channel_id,
@@ -8268,7 +8276,7 @@ where
 				}
 			},
 			HTLCSource::PreviousHopData(HTLCPreviousHopData {
-				ref short_channel_id,
+				ref prev_outbound_scid_alias,
 				ref htlc_id,
 				ref incoming_packet_shared_secret,
 				ref phantom_shared_secret,
@@ -8311,7 +8319,7 @@ where
 		};
 
 		let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
-		match forward_htlcs.entry(*short_channel_id) {
+		match forward_htlcs.entry(*prev_outbound_scid_alias) {
 			hash_map::Entry::Occupied(mut entry) => {
 				entry.get_mut().push(failure);
 			},
@@ -8574,7 +8582,7 @@ where
 	) {
 		let counterparty_node_id = prev_hop.counterparty_node_id.or_else(|| {
 			let short_to_chan_info = self.short_to_chan_info.read().unwrap();
-			short_to_chan_info.get(&prev_hop.short_channel_id).map(|(cp_id, _)| *cp_id)
+			short_to_chan_info.get(&prev_hop.prev_outbound_scid_alias).map(|(cp_id, _)| *cp_id)
 		});
 		let counterparty_node_id = if let Some(node_id) = counterparty_node_id {
 			node_id
@@ -9225,19 +9233,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		);
 
 		let counterparty_node_id = channel.context.get_counterparty_node_id();
-		let short_channel_id = channel.funding.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias());
+		let outbound_scid_alias = channel.context.outbound_scid_alias();
 		let mut htlc_forwards = None;
 		if !pending_forwards.is_empty() {
 			htlc_forwards = Some((
-				short_channel_id, channel.context.get_counterparty_node_id(),
+				outbound_scid_alias, channel.context.get_counterparty_node_id(),
 				channel.funding.get_funding_txo().unwrap(), channel.context.channel_id(),
 				channel.context.get_user_id(), pending_forwards
 			));
 		}
 		let mut decode_update_add_htlcs = None;
 		if !pending_update_adds.is_empty() {
-			decode_update_add_htlcs = Some((short_channel_id, pending_update_adds));
+			decode_update_add_htlcs = Some((outbound_scid_alias, pending_update_adds));
 		}
 
 		if channel.context.is_connected() {
@@ -10846,8 +10854,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 
 	fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
 		let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
-		let scid = update_add_htlcs.0;
-		match decode_update_add_htlcs.entry(scid) {
+		let src_outbound_scid_alias = update_add_htlcs.0;
+		match decode_update_add_htlcs.entry(src_outbound_scid_alias) {
 			hash_map::Entry::Occupied(mut e) => {
 				e.get_mut().append(&mut update_add_htlcs.1);
 			},
@@ -10860,7 +10868,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 	#[inline]
 	fn forward_htlcs(&self, per_source_pending_forwards: &mut [PerSourcePendingForward]) {
 		for &mut (
-			prev_short_channel_id,
+			prev_outbound_scid_alias,
 			prev_counterparty_node_id,
 			prev_funding_outpoint,
 			prev_channel_id,
@@ -10889,7 +10897,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 						Some(payment_hash),
 					);
 					let pending_add = PendingAddHTLCInfo {
-						prev_short_channel_id,
+						prev_outbound_scid_alias,
 						prev_counterparty_node_id,
 						prev_funding_outpoint,
 						prev_channel_id,
@@ -11301,6 +11309,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				// disconnect, so Channel's reestablish will never hand us any holding cell
 				// freed HTLCs to fail backwards. If in the future we no longer drop pending
 				// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
+				let outbound_scid_alias = chan.context.outbound_scid_alias();
 				let res = chan.channel_reestablish(
 					msg,
 					&&logger,
@@ -11308,7 +11317,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 					self.chain_hash,
 					&self.config.read().unwrap(),
 					&*self.best_block.read().unwrap(),
-					|htlc_id| self.path_for_release_held_htlc(htlc_id, &msg.channel_id, counterparty_node_id)
+					|htlc_id| self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &msg.channel_id, counterparty_node_id)
 				);
 				let responses = try_channel_entry!(self, peer_state, res, chan_entry);
 				let mut channel_update = None;
@@ -11785,11 +11794,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		// Returns whether we should remove this channel as it's just been closed.
 		let unblock_chan = |chan: &mut Channel<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| -> Option<ShutdownResult> {
 			let channel_id = chan.context().channel_id();
+			let outbound_scid_alias = chan.context().outbound_scid_alias();
 			let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
 			let node_id = chan.context().get_counterparty_node_id();
 			if let Some(msgs) = chan.signer_maybe_unblocked(
 				self.chain_hash, &&logger,
-				|htlc_id| self.path_for_release_held_htlc(htlc_id, &channel_id, &node_id)
+				|htlc_id| self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &channel_id, &node_id)
 			) {
 				if chan.context().is_connected() {
 					if let Some(msg) = msgs.open_channel {
@@ -15028,7 +15038,34 @@ where
 						);
 					}
 				},
-				AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id } => {
+				AsyncPaymentsContext::ReleaseHeldHtlc {
+					intercept_id,
+					prev_outbound_scid_alias,
+					htlc_id,
+				} => {
+					// It's possible the release_held_htlc message raced ahead of us transitioning the pending
+					// update_add to `Self::pending_intercepted_htlcs`. If that's the case, update the pending
+					// update_add to indicate that the HTLC should be released immediately.
+					//
+					// Check for the HTLC here before checking `pending_intercepted_htlcs` to avoid a different
+					// race where the HTLC gets transitioned to `pending_intercepted_htlcs` after we drop that
+					// map's lock but before acquiring the `decode_update_add_htlcs` lock.
+					let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
+					if let Some(htlcs) = decode_update_add_htlcs.get_mut(&prev_outbound_scid_alias) {
+						for update_add in htlcs.iter_mut() {
+							if update_add.htlc_id == htlc_id {
+								log_trace!(
+									self.logger,
+									"Marking held htlc with intercept_id {} as ready to release",
+									intercept_id
+								);
+								update_add.hold_htlc.take();
+								return;
+							}
+						}
+					}
+					core::mem::drop(decode_update_add_htlcs);
+
 					let mut htlc = {
 						let mut pending_intercept_htlcs =
 							self.pending_intercepted_htlcs.lock().unwrap();
@@ -15064,7 +15101,7 @@ where
 					log_trace!(logger, "Releasing held htlc with intercept_id {}", intercept_id);
 
 					let mut per_source_pending_forward = [(
-						htlc.prev_short_channel_id,
+						htlc.prev_outbound_scid_alias,
 						htlc.prev_counterparty_node_id,
 						htlc.prev_funding_outpoint,
 						htlc.prev_channel_id,
@@ -15406,7 +15443,7 @@ impl_writeable_tlv_based_enum!(BlindedFailure,
 );
 
 impl_writeable_tlv_based!(HTLCPreviousHopData, {
-	(0, short_channel_id, required),
+	(0, prev_outbound_scid_alias, required),
 	(1, phantom_shared_secret, option),
 	(2, outpoint, required),
 	(3, blinded_failure, option),
@@ -15578,7 +15615,7 @@ impl Writeable for HTLCSource {
 impl_writeable_tlv_based!(PendingAddHTLCInfo, {
 	(0, forward_info, required),
 	(1, prev_user_channel_id, (default_value, 0)),
-	(2, prev_short_channel_id, required),
+	(2, prev_outbound_scid_alias, required),
 	(4, prev_htlc_id, required),
 	(6, prev_funding_outpoint, required),
 	// Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
@@ -17001,9 +17038,9 @@ where
 							// still have an entry for this HTLC in `forward_htlcs` or
 							// `pending_intercepted_htlcs`, we were apparently not persisted after
 							// the monitor was updated when forwarding the payment.
-							decode_update_add_htlcs.retain(|scid, update_add_htlcs| {
+							decode_update_add_htlcs.retain(|src_outb_alias, update_add_htlcs| {
 								update_add_htlcs.retain(|update_add_htlc| {
-									let matches = *scid == prev_hop_data.short_channel_id &&
+									let matches = *src_outb_alias == prev_hop_data.prev_outbound_scid_alias &&
 										update_add_htlc.htlc_id == prev_hop_data.htlc_id;
 									if matches {
 										log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}",
@@ -17198,7 +17235,7 @@ where
 							// to replay this claim to get the preimage into the inbound
 							// edge monitor but the channel is closed (and thus we'll
 							// immediately panic if we call claim_funds_from_hop).
-							if short_to_chan_info.get(&prev_hop.short_channel_id).is_none() {
+							if short_to_chan_info.get(&prev_hop.prev_outbound_scid_alias).is_none() {
 								log_error!(args.logger,
 									"We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124.\
 									All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1",
@@ -17324,7 +17361,7 @@ where
 						if htlc.prev_hop.counterparty_node_id.is_some() {
 							continue;
 						}
-						if short_to_chan_info.get(&htlc.prev_hop.short_channel_id).is_some() {
+						if short_to_chan_info.get(&htlc.prev_hop.prev_outbound_scid_alias).is_some() {
 							log_error!(args.logger,
 								"We do not have the required information to claim a pending payment with payment hash {} reliably.\
 								As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\
diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs
index a6484f0076e..6629b03f3d4 100644
--- a/lightning/src/offers/flow.rs
+++ b/lightning/src/offers/flow.rs
@@ -1227,14 +1227,17 @@ where
 	///
 	/// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
 	pub fn path_for_release_held_htlc<ES: Deref>(
-		&self, intercept_id: InterceptId, entropy: ES,
+		&self, intercept_id: InterceptId, prev_outbound_scid_alias: u64, htlc_id: u64, entropy: ES,
 	) -> BlindedMessagePath
 	where
 		ES::Target: EntropySource,
 	{
 		// In the future, we should support multi-hop paths here.
-		let context =
-			MessageContext::AsyncPayments(AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id });
+		let context = MessageContext::AsyncPayments(AsyncPaymentsContext::ReleaseHeldHtlc {
+			intercept_id,
+			prev_outbound_scid_alias,
+			htlc_id,
+		});
 		let num_dummy_hops = PADDED_PATH_LENGTH.saturating_sub(1);
 		BlindedMessagePath::new_with_dummy_hops(
 			&[],