117 changes: 113 additions & 4 deletions lightning/src/ln/chanmon_update_fail_tests.rs
@@ -19,12 +19,13 @@ use crate::chain::transaction::OutPoint;
use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentPurpose};
use crate::ln::channel::AnnouncementSigsState;
use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields};
use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields, Retry};
use crate::ln::msgs;
use crate::ln::msgs::{
BaseMessageHandler, ChannelMessageHandler, MessageSendEvent, RoutingMessageHandler,
};
use crate::ln::types::ChannelId;
use crate::routing::router::{PaymentParameters, RouteParameters};
use crate::sign::NodeSigner;
use crate::util::native_async::FutureQueue;
use crate::util::persist::{
@@ -3535,7 +3536,7 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode
assert!(a.is_none());

nodes[1].node.handle_revoke_and_ack(node_a_id, &raa);
check_added_monitors(&nodes[1], 0);
check_added_monitors(&nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
}

@@ -3554,8 +3555,8 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode
panic!();
}

// The event processing should release the last RAA updates on both channels.
check_added_monitors(&nodes[1], 2);
// The event processing should release the last RAA update.
check_added_monitors(&nodes[1], 1);

// When we fetch the next update the message getter will generate the next update for nodes[2],
// generating a further monitor update.
Expand Down Expand Up @@ -5055,3 +5056,111 @@ fn native_async_persist() {
panic!();
}
}

#[test]
fn test_mpp_claim_to_holding_cell() {
// Previously, if an MPP payment was claimed while one channel was AwaitingRAA (causing the
// HTLC claim to go into the holding cell), and the RAA came in before the async monitor
// update with the preimage completed, the channel could hang waiting on itself.
// This test ensures that no longer happens.
let chanmon_cfgs = create_chanmon_cfgs(4);
let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

let node_b_id = nodes[1].node.get_our_node_id();
let node_c_id = nodes[2].node.get_our_node_id();
let node_d_id = nodes[3].node.get_our_node_id();

// First open channels in a diamond and deliver the MPP payment.
let chan_1_scid = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
let chan_2_scid = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
let (chan_3_update, _, chan_3_id, ..) = create_announced_chan_between_nodes(&nodes, 1, 3);
let chan_3_scid = chan_3_update.contents.short_channel_id;
let (chan_4_update, _, chan_4_id, ..) = create_announced_chan_between_nodes(&nodes, 2, 3);
let chan_4_scid = chan_4_update.contents.short_channel_id;

let (mut route, paymnt_hash_1, preimage_1, payment_secret) =
get_route_and_payment_hash!(&nodes[0], nodes[3], 500_000);
let path = route.paths[0].clone();
route.paths.push(path);
route.paths[0].hops[0].pubkey = node_b_id;
route.paths[0].hops[0].short_channel_id = chan_1_scid;
route.paths[0].hops[1].short_channel_id = chan_3_scid;
route.paths[0].hops[1].fee_msat = 250_000;
route.paths[1].hops[0].pubkey = node_c_id;
route.paths[1].hops[0].short_channel_id = chan_2_scid;
route.paths[1].hops[1].short_channel_id = chan_4_scid;
route.paths[1].hops[1].fee_msat = 250_000;
let paths = &[&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]];
send_along_route_with_secret(&nodes[0], route, paths, 500_000, paymnt_hash_1, payment_secret);

// Put the C <-> D channel into AwaitingRAA
let (preimage_2, paymnt_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[3]);
let onion = RecipientOnionFields::secret_only(payment_secret_2);
let id = PaymentId([42; 32]);
let pay_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV);
let route_params = RouteParameters::from_payment_params_and_value(pay_params, 400_000);
nodes[2].node.send_payment(paymnt_hash_2, onion, id, route_params, Retry::Attempts(0)).unwrap();
check_added_monitors(&nodes[2], 1);

let mut payment_event = SendEvent::from_node(&nodes[2]);
nodes[3].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]);
nodes[3].node.handle_commitment_signed_batch_test(node_c_id, &payment_event.commitment_msg);
check_added_monitors(&nodes[3], 1);

let (raa, cs) = get_revoke_commit_msgs(&nodes[3], &node_c_id);
nodes[2].node.handle_revoke_and_ack(node_d_id, &raa);
check_added_monitors(&nodes[2], 1);

nodes[2].node.handle_commitment_signed_batch_test(node_d_id, &cs);
check_added_monitors(&nodes[2], 1);

let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_d_id);

// Now claim the payment, with both channel monitor updates completing asynchronously.
// In the current code, the C <-> D channel happens to be the `durable_preimage_channel`,
// improving coverage somewhat, but this isn't strictly critical to the test.
chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[3].node.claim_funds(preimage_1);
check_added_monitors(&nodes[3], 2);

// Complete the B <-> D monitor update, freeing the first fulfill.
let (latest_id, _) = get_latest_mon_update_id(&nodes[3], chan_3_id);
nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(chan_3_id, latest_id).unwrap();
let mut b_claim = get_htlc_update_msgs(&nodes[3], &node_b_id);

// When we deliver the pre-claim RAA, node D will shove the monitor update into the blocked
// state since we have a pending MPP payment which is blocking RAA monitor updates.
nodes[3].node.handle_revoke_and_ack(node_c_id, &cs_raa);
check_added_monitors(&nodes[3], 0);

// Finally, complete the C <-> D monitor update. Previously, this unlock failed to be processed
// due to the existence of the blocked RAA update above.
let (latest_id, _) = get_latest_mon_update_id(&nodes[3], chan_4_id);
nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(chan_4_id, latest_id).unwrap();
// Once we process monitor events (in this case by checking for the `PaymentClaimed` event), the
// RAA monitor update blocked above will be released.
expect_payment_claimed!(nodes[3], paymnt_hash_1, 500_000);
check_added_monitors(&nodes[3], 1);

// After the RAA monitor update completes, the C <-> D channel will be able to generate its
// fulfill updates as well.
let mut c_claim = get_htlc_update_msgs(&nodes[3], &node_c_id);
check_added_monitors(&nodes[3], 1);

// Finally, clear all the pending payments.
let path = [&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]];
let mut args = ClaimAlongRouteArgs::new(&nodes[0], &path[..], preimage_1);
let b_claim_msgs = (b_claim.update_fulfill_htlcs.pop().unwrap(), b_claim.commitment_signed);
let c_claim_msgs = (c_claim.update_fulfill_htlcs.pop().unwrap(), c_claim.commitment_signed);
let claims = vec![(b_claim_msgs, node_b_id), (c_claim_msgs, node_c_id)];
pass_claimed_payment_along_route_from_ev(250_000, claims, args);

expect_payment_sent(&nodes[0], preimage_1, None, true, true);

expect_and_process_pending_htlcs(&nodes[3], false);
expect_payment_claimable!(nodes[3], paymnt_hash_2, payment_secret_2, 400_000);
claim_payment(&nodes[2], &[&nodes[3]], preimage_2);
}
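For context on the holding-cell behavior the new test exercises: while a channel is still awaiting its counterparty's `revoke_and_ack`, a claim cannot be turned into an `update_fulfill_htlc` right away and is queued instead. The following is a minimal, hypothetical sketch of that decision with simplified types, not LDK's actual `Channel` API.

```rust
// Hypothetical, simplified sketch of the holding-cell queueing decision;
// not LDK's real `Channel` API.
#[derive(Debug, PartialEq)]
enum HoldingCellHtlcUpdate {
	ClaimHtlc { htlc_id: u64, payment_preimage: [u8; 32] },
}

struct SketchChannel {
	// True while we are still waiting on the counterparty's revoke_and_ack.
	awaiting_raa: bool,
	holding_cell: Vec<HoldingCellHtlcUpdate>,
}

impl SketchChannel {
	// Returns `Some(update)` if the fulfill can go out immediately, or queues it
	// in the holding cell and returns `None` while the channel is AwaitingRAA.
	fn claim_htlc(&mut self, htlc_id: u64, payment_preimage: [u8; 32]) -> Option<HoldingCellHtlcUpdate> {
		let update = HoldingCellHtlcUpdate::ClaimHtlc { htlc_id, payment_preimage };
		if self.awaiting_raa {
			self.holding_cell.push(update);
			None
		} else {
			Some(update)
		}
	}

	// When the counterparty's revoke_and_ack arrives, everything queued in the
	// holding cell can be sent in a fresh commitment update.
	fn on_revoke_and_ack(&mut self) -> Vec<HoldingCellHtlcUpdate> {
		self.awaiting_raa = false;
		std::mem::take(&mut self.holding_cell)
	}
}

fn main() {
	let mut chan = SketchChannel { awaiting_raa: true, holding_cell: Vec::new() };
	// The claim lands in the holding cell because the channel is AwaitingRAA...
	assert!(chan.claim_htlc(0, [1; 32]).is_none());
	// ...and is only released once the RAA is processed.
	assert_eq!(chan.on_revoke_and_ack().len(), 1);
}
```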
154 changes: 85 additions & 69 deletions lightning/src/ln/channelmanager.rs
@@ -1296,6 +1296,10 @@ impl MaybeReadable for EventUnblockedChannel {
}

#[derive(Debug)]
/// Note that these run after all *non-blocked* [`ChannelMonitorUpdate`]s have been persisted.
/// Thus, they're primarily useful for (and currently only used for) claims, where the
/// [`ChannelMonitorUpdate`] we care about is a preimage update, which bypasses the monitor update
/// blocking logic entirely and can never be blocked.
pub(crate) enum MonitorUpdateCompletionAction {
/// Indicates that a payment ultimately destined for us was claimed and we should emit an
/// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
Expand Down Expand Up @@ -1580,6 +1584,11 @@ where
/// same `temporary_channel_id` (or final `channel_id` in the case of 0conf channels or prior
/// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
/// duplicates do not occur, so such channels should fail without a monitor update completing.
///
/// Note that these run after all *non-blocked* [`ChannelMonitorUpdate`]s have been persisted.
/// Thus, they're primarily useful for (and currently only used for) claims, where the
/// [`ChannelMonitorUpdate`] we care about is a preimage update, which bypasses the monitor
/// update blocking logic entirely and can never be blocked.
monitor_update_blocked_actions: BTreeMap<ChannelId, Vec<MonitorUpdateCompletionAction>>,
/// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have
/// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update
Expand Down Expand Up @@ -3353,78 +3362,90 @@ macro_rules! handle_monitor_update_completion {
let chan_id = $chan.context.channel_id();
let outbound_alias = $chan.context().outbound_scid_alias();
let cp_node_id = $chan.context.get_counterparty_node_id();

#[cfg(debug_assertions)]
{
let in_flight_updates = $peer_state.in_flight_monitor_updates.get(&chan_id);
assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true));
assert_eq!($chan.blocked_monitor_updates_pending(), 0);
assert!($chan.is_awaiting_monitor_update());
}

let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
let updates = $chan.monitor_updating_restored(
&&logger,
&$self.node_signer,
$self.chain_hash,
&*$self.config.read().unwrap(),
$self.best_block.read().unwrap().height,
|htlc_id| {
$self.path_for_release_held_htlc(htlc_id, outbound_alias, &chan_id, &cp_node_id)
},
);
let channel_update = if updates.channel_ready.is_some()
&& $chan.context.is_usable()
&& $peer_state.is_connected
{
// We only send a channel_update in the case where we are just now sending a
// channel_ready and the channel is in a usable state. We may re-send a
// channel_update later through the announcement_signatures process for public
// channels, but there's no reason not to just inform our counterparty of our fees
// now.
if let Ok((msg, _, _)) = $self.get_channel_update_for_unicast($chan) {
Some(MessageSendEvent::SendChannelUpdate { node_id: cp_node_id, msg })
} else {
None
}
} else {
None
};

let update_actions =
$peer_state.monitor_update_blocked_actions.remove(&chan_id).unwrap_or(Vec::new());

let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
&mut $peer_state.pending_msg_events,
$chan,
updates.raa,
updates.commitment_update,
updates.commitment_order,
updates.accepted_htlcs,
updates.pending_update_adds,
updates.funding_broadcastable,
updates.channel_ready,
updates.announcement_sigs,
updates.tx_signatures,
None,
updates.channel_ready_order,
);
if let Some(upd) = channel_update {
$peer_state.pending_msg_events.push(upd);
}

let unbroadcasted_batch_funding_txid =
$chan.context.unbroadcasted_batch_funding_txid(&$chan.funding);
core::mem::drop($peer_state_lock);
core::mem::drop($per_peer_state_lock);

$self.post_monitor_update_unlock(
chan_id,
cp_node_id,
unbroadcasted_batch_funding_txid,
update_actions,
htlc_forwards,
decode_update_add_htlcs,
updates.finalized_claimed_htlcs,
updates.failed_htlcs,
);
if $chan.blocked_monitor_updates_pending() != 0 {
mem::drop($peer_state_lock);
mem::drop($per_peer_state_lock);

log_debug!(logger, "Channel has blocked monitor updates, completing update actions but leaving channel blocked");
$self.handle_monitor_update_completion_actions(update_actions);
} else {
log_debug!(logger, "Channel is open and awaiting update, resuming it");
let updates = $chan.monitor_updating_restored(
&&logger,
&$self.node_signer,
$self.chain_hash,
&*$self.config.read().unwrap(),
$self.best_block.read().unwrap().height,
|htlc_id| {
$self.path_for_release_held_htlc(htlc_id, outbound_alias, &chan_id, &cp_node_id)
},
);
let channel_update = if updates.channel_ready.is_some()
&& $chan.context.is_usable()
&& $peer_state.is_connected
{
// We only send a channel_update in the case where we are just now sending a
// channel_ready and the channel is in a usable state. We may re-send a
// channel_update later through the announcement_signatures process for public
// channels, but there's no reason not to just inform our counterparty of our fees
// now.
if let Ok((msg, _, _)) = $self.get_channel_update_for_unicast($chan) {
Some(MessageSendEvent::SendChannelUpdate { node_id: cp_node_id, msg })
} else {
None
}
} else {
None
};

let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
&mut $peer_state.pending_msg_events,
$chan,
updates.raa,
updates.commitment_update,
updates.commitment_order,
updates.accepted_htlcs,
updates.pending_update_adds,
updates.funding_broadcastable,
updates.channel_ready,
updates.announcement_sigs,
updates.tx_signatures,
None,
updates.channel_ready_order,
);
if let Some(upd) = channel_update {
$peer_state.pending_msg_events.push(upd);
}

let unbroadcasted_batch_funding_txid =
$chan.context.unbroadcasted_batch_funding_txid(&$chan.funding);
core::mem::drop($peer_state_lock);
core::mem::drop($per_peer_state_lock);

$self.post_monitor_update_unlock(
chan_id,
cp_node_id,
unbroadcasted_batch_funding_txid,
update_actions,
htlc_forwards,
decode_update_add_htlcs,
updates.finalized_claimed_htlcs,
updates.failed_htlcs,
);
}
}};
}
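To illustrate the doc comments added above: completion actions accumulate per channel and are drained only once that channel's in-flight monitor updates have persisted, which is what the `update_actions` removal in the macro does. A rough sketch with hypothetical, simplified types (not LDK's actual `ChannelManager` internals):

```rust
// Hypothetical, simplified sketch of the per-channel completion-action queue;
// not LDK's actual ChannelManager types.
use std::collections::BTreeMap;

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct ChannelId([u8; 32]);

enum CompletionAction {
	// E.g. emit `Event::PaymentClaimed` once the preimage update is durable.
	EmitPaymentClaimed { amount_msat: u64 },
}

#[derive(Default)]
struct PeerState {
	monitor_update_blocked_actions: BTreeMap<ChannelId, Vec<CompletionAction>>,
}

impl PeerState {
	// Queued while the relevant (preimage) monitor update is still in flight.
	fn queue_action(&mut self, chan_id: ChannelId, action: CompletionAction) {
		self.monitor_update_blocked_actions.entry(chan_id).or_default().push(action);
	}

	// Drained once all non-blocked monitor updates for the channel have persisted,
	// mirroring the `update_actions` removal in `handle_monitor_update_completion!`.
	fn take_actions(&mut self, chan_id: &ChannelId) -> Vec<CompletionAction> {
		self.monitor_update_blocked_actions.remove(chan_id).unwrap_or_default()
	}
}

fn main() {
	let mut peer = PeerState::default();
	let chan = ChannelId([0; 32]);
	peer.queue_action(chan, CompletionAction::EmitPaymentClaimed { amount_msat: 500_000 });
	// The monitor update completes: the queued action is released exactly once.
	assert_eq!(peer.take_actions(&chan).len(), 1);
	assert!(peer.take_actions(&chan).is_empty());
}
```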

@@ -3595,7 +3616,7 @@ macro_rules! handle_new_monitor_update {
$update,
WithChannelContext::from(&$self.logger, &$chan.context, None),
);
if all_updates_complete && $chan.blocked_monitor_updates_pending() == 0 {
if all_updates_complete {
handle_monitor_update_completion!(
$self,
$peer_state_lock,
@@ -9813,12 +9834,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
.and_then(Channel::as_funded_mut)
{
if chan.is_awaiting_monitor_update() {
if chan.blocked_monitor_updates_pending() == 0 {
log_trace!(logger, "Channel is open and awaiting update, resuming it");
handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
} else {
log_trace!(logger, "Channel is open and awaiting update, leaving it blocked due to a blocked monitor update");
}
handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
} else {
log_trace!(logger, "Channel is open but not awaiting update");
}
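The net effect of the hunks above is that the `blocked_monitor_updates_pending()` check now lives inside `handle_monitor_update_completion!` itself, so this call site can invoke the macro unconditionally whenever the channel is awaiting a monitor update. A condensed, hypothetical sketch of that decision follows; the types and helpers are stand-ins, not LDK's real APIs.

```rust
// Condensed, hypothetical sketch of the control flow now inside
// `handle_monitor_update_completion!`; the types and helpers are stand-ins,
// not LDK's real APIs.
struct Channel {
	blocked_monitor_updates: usize,
}

impl Channel {
	fn blocked_monitor_updates_pending(&self) -> usize {
		self.blocked_monitor_updates
	}

	// Stand-in for `monitor_updating_restored`: build the RAA/commitment
	// messages, HTLC forwards, etc. needed to resume the channel.
	fn monitor_updating_restored(&mut self) -> &'static str {
		"resumption messages"
	}
}

fn run_completion_actions(actions: Vec<&'static str>) {
	for action in actions {
		println!("running completion action: {action}");
	}
}

fn on_monitor_update_complete(chan: &mut Channel, update_actions: Vec<&'static str>) {
	if chan.blocked_monitor_updates_pending() != 0 {
		// Another update still blocks this channel: run only the queued
		// completion actions (e.g. emitting PaymentClaimed) and leave the
		// channel itself paused.
		run_completion_actions(update_actions);
	} else {
		// Nothing blocking: fully resume the channel, then run the actions.
		let resumption = chan.monitor_updating_restored();
		println!("resuming channel with {resumption}");
		run_completion_actions(update_actions);
	}
}

fn main() {
	let mut blocked = Channel { blocked_monitor_updates: 1 };
	on_monitor_update_complete(&mut blocked, vec!["emit PaymentClaimed"]);

	let mut unblocked = Channel { blocked_monitor_updates: 0 };
	on_monitor_update_complete(&mut unblocked, vec!["emit PaymentClaimed"]);
}
```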