diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh
index 100158ac71d..42d15e524b8 100755
--- a/ci/ci-tests.sh
+++ b/ci/ci-tests.sh
@@ -8,6 +8,12 @@ RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }')
 # which we do here.
 # Further crates which appear only as dev-dependencies are pinned further down.
 function PIN_RELEASE_DEPS {
+  # Starting with version 2.0.107, the `syn` crate has an MSRV of rustc 1.68
+  [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p syn --precise "2.0.106" --verbose
+
+  # Starting with version 1.0.42, the `quote` crate has an MSRV of rustc 1.68
+  [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p quote --precise "1.0.41" --verbose
+
   # Starting with version 1.39.0, the `tokio` crate has an MSRV of rustc 1.70.0
   [ "$RUSTC_MINOR_VERSION" -lt 70 ] && cargo update -p tokio --precise "1.38.1" --verbose
 
@@ -53,8 +59,11 @@ done
 
 echo -e "\n\nTesting upgrade from prior versions of LDK"
 pushd lightning-tests
+[ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p syn --precise "2.0.106" --verbose
+[ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p quote --precise "1.0.41" --verbose
 [ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p regex --precise "1.9.6" --verbose
 cargo test
+[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
 popd
 
 echo -e "\n\nChecking and testing Block Sync Clients with features"
@@ -111,6 +120,8 @@ cargo test -p lightning-invoice --verbose --color always --no-default-features -
 echo -e "\n\nTesting no_std build on a downstream no-std crate"
 # check no-std compatibility across dependencies
 pushd no-std-check
+[ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p syn --precise "2.0.106" --verbose
+[ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p quote --precise "1.0.41" --verbose
 cargo check --verbose --color always
 [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
 popd
@@ -124,6 +135,8 @@ popd
 
 if [ -f "$(which arm-none-eabi-gcc)" ]; then
   pushd no-std-check
+  [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p syn --precise "2.0.106" --verbose
+  [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p quote --precise "1.0.41" --verbose
   cargo build --target=thumbv7m-none-eabi
   [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
   popd
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 6b2808eceda..ef645e71d63 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -19,11 +19,12 @@ use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
 use crate::chain::transaction::OutPoint;
 use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
-use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields};
+use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields, Retry};
 use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
 use crate::ln::msgs;
 use crate::ln::types::ChannelId;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
+use crate::routing::router::{PaymentParameters, RouteParameters};
 use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::ser::{ReadableArgs, Writeable};
 use crate::util::test_utils::TestBroadcaster;
@@ -3030,7 +3031,7 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode
 	assert!(a.is_none());
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &raa);
-	check_added_monitors(&nodes[1], 0);
+	check_added_monitors(&nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 }
 
@@ -3040,8 +3041,8 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode
 	if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
 	if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); }
 
-	// The event processing should release the last RAA updates on both channels.
-	check_added_monitors(&nodes[1], 2);
+	// The event processing should release the last RAA update.
+	check_added_monitors(&nodes[1], 1);
 
 	// When we fetch the next update the message getter will generate the next update for nodes[2],
 	// generating a further monitor update.
@@ -4271,3 +4272,121 @@ fn test_single_channel_multiple_mpp() {
 	nodes[7].node.handle_revoke_and_ack(node_8_id, &raa);
 	check_added_monitors(&nodes[7], 1);
 }
+
+#[test]
+fn test_mpp_claim_to_holding_cell() {
+	// Previously, if an MPP payment was claimed while one channel was AwaitingRAA (causing the
+	// HTLC claim to go into the holding cell), and the RAA came in before the async monitor
+	// update with the preimage completed, the channel could hang waiting on itself.
+	// This tests that behavior.
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+	let node_d_id = nodes[3].node.get_our_node_id();
+
+	// First open channels in a diamond and deliver the MPP payment.
+	let chan_1_scid = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
+	let chan_2_scid = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
+	let (chan_3_update, _, chan_3_id, ..) = create_announced_chan_between_nodes(&nodes, 1, 3);
+	let chan_3_scid = chan_3_update.contents.short_channel_id;
+	let (chan_4_update, _, chan_4_id, ..) = create_announced_chan_between_nodes(&nodes, 2, 3);
+	let chan_4_scid = chan_4_update.contents.short_channel_id;
+
+	let (mut route, payment_hash, preimage_1, payment_secret) =
+		get_route_and_payment_hash!(&nodes[0], nodes[3], 500_000);
+	let path = route.paths[0].clone();
+	route.paths.push(path);
+	route.paths[0].hops[0].pubkey = node_b_id;
+	route.paths[0].hops[0].short_channel_id = chan_1_scid;
+	route.paths[0].hops[1].short_channel_id = chan_3_scid;
+	route.paths[0].hops[1].fee_msat = 250_000;
+	route.paths[1].hops[0].pubkey = node_c_id;
+	route.paths[1].hops[0].short_channel_id = chan_2_scid;
+	route.paths[1].hops[1].short_channel_id = chan_4_scid;
+	route.paths[1].hops[1].fee_msat = 250_000;
+	let paths = &[&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]];
+	send_along_route_with_secret(&nodes[0], route, paths, 500_000, payment_hash, payment_secret);
+
+	// Put the C <-> D channel into AwaitingRaa
+	let (preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[3]);
+	let onion = RecipientOnionFields::secret_only(payment_secret_2);
+	let id = PaymentId([42; 32]);
+	let pay_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV);
+	let route_params = RouteParameters::from_payment_params_and_value(pay_params, 400_000);
+	nodes[2].node.send_payment(payment_hash_2, onion, id, route_params, Retry::Attempts(0)).unwrap();
+	check_added_monitors(&nodes[2], 1);
+
+	let mut payment_event = SendEvent::from_node(&nodes[2]);
+	nodes[3].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]);
+	nodes[3].node.handle_commitment_signed(node_c_id, &payment_event.commitment_msg);
+	check_added_monitors(&nodes[3], 1);
+
+	let (raa, cs) = get_revoke_commit_msgs(&nodes[3], &node_c_id);
+	nodes[2].node.handle_revoke_and_ack(node_d_id, &raa);
+	check_added_monitors(&nodes[2], 1);
+
+	nodes[2].node.handle_commitment_signed(node_d_id, &cs);
+	check_added_monitors(&nodes[2], 1);
+
+	let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_d_id);
+
+	// Now claim the payment, completing both channel monitor updates async
+	// In the current code, the C <-> D channel happens to be the `durable_preimage_channel`,
+	// improving coverage somewhat but it isn't strictly critical to the test.
+	chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	nodes[3].node.claim_funds(preimage_1);
+	check_added_monitors(&nodes[3], 2);
+
+	// Complete the B <-> D monitor update, freeing the first fulfill.
+	let (outpoint, latest_id, _) = nodes[3].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_3_id).unwrap().clone();
+	nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, latest_id).unwrap();
+
+	let mut b_claim = get_htlc_update_msgs(&nodes[3], &node_b_id);
+
+	// When we deliver the pre-claim RAA, node D will shove the monitor update into the blocked
+	// state since we have a pending MPP payment which is blocking RAA monitor updates.
+	nodes[3].node.handle_revoke_and_ack(node_c_id, &cs_raa);
+	check_added_monitors(&nodes[3], 0);
+
+	// Finally, complete the C <-> D monitor update. Previously, this unlock failed to be processed
+	// due to the existence of the blocked RAA update above.
+	let (outpoint, latest_id, _) = nodes[3].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_4_id).unwrap().clone();
+	nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, latest_id).unwrap();
+	// Once we process monitor events (in this case by checking for the `PaymentClaimed` event), the
+	// RAA monitor update blocked above will be released.
+	let events = nodes[3].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 2);
+	if let Event::PaymentClaimed { .. } = &events[0] {
+	} else {
+		panic!("Unexpected event: {events:?}");
+	}
+	if let Event::PendingHTLCsForwardable { .. } = &events[1] {
+	} else {
+		panic!("Unexpected event: {events:?}");
+	}
+	check_added_monitors(&nodes[3], 1);
+
+	// After the RAA monitor update completes, the C <-> D channel will be able to generate its
+	// fulfill updates as well.
+	let mut c_claim = get_htlc_update_msgs(&nodes[3], &node_c_id);
+	check_added_monitors(&nodes[3], 1);
+
+	// Finally, clear all the pending payments.
+	let path = [&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]];
+	let mut args = ClaimAlongRouteArgs::new(&nodes[0], &path[..], preimage_1);
+	let b_claim_msgs = (b_claim.update_fulfill_htlcs.pop().unwrap(), b_claim.commitment_signed);
+	let c_claim_msgs = (c_claim.update_fulfill_htlcs.pop().unwrap(), c_claim.commitment_signed);
+	let claims = vec![(b_claim_msgs, node_b_id), (c_claim_msgs, node_c_id)];
+	pass_claimed_payment_along_route_from_ev(250_000, claims, args);
+
+	expect_payment_sent(&nodes[0], preimage_1, None, true, true);
+
+	nodes[3].node.process_pending_htlc_forwards();
+	expect_payment_claimable!(nodes[3], payment_hash_2, payment_secret_2, 400_000);
+	claim_payment(&nodes[2], &[&nodes[3]], preimage_2);
+}
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 7924511d799..eaca6e03beb 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1116,6 +1116,10 @@ impl MaybeReadable for EventUnblockedChannel {
 }
 
 #[derive(Debug)]
+/// Note that these run after all *non-blocked* [`ChannelMonitorUpdate`]s have been persisted.
+/// Thus, they're primarily useful for (and currently only used for) claims, where the
+/// [`ChannelMonitorUpdate`] we care about is a preimage update, which bypasses the monitor update
+/// blocking logic entirely and can never be blocked.
 pub(crate) enum MonitorUpdateCompletionAction {
 	/// Indicates that a payment ultimately destined for us was claimed and we should emit an
 	/// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
@@ -1396,6 +1400,11 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
 	/// same `temporary_channel_id` (or final `channel_id` in the case of 0conf channels or prior
 	/// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
 	/// duplicates do not occur, so such channels should fail without a monitor update completing.
+	///
+	/// Note that these run after all *non-blocked* [`ChannelMonitorUpdate`]s have been persisted.
+	/// Thus, they're primarily useful for (and currently only used for) claims, where the
+	/// [`ChannelMonitorUpdate`] we care about is a preimage update, which bypasses the monitor
+	/// update blocking logic entirely and can never be blocked.
 	monitor_update_blocked_actions: BTreeMap<ChannelId, Vec<MonitorUpdateCompletionAction>>,
 	/// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have
 	/// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update
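The two doc comments added above describe the invariant this patch relies on: completion actions for a channel only run once every *non-blocked* `ChannelMonitorUpdate` for it has been persisted, which is sufficient for claims because preimage updates are never subject to the RAA-blocking logic. As a rough illustration only (hypothetical stub types, not LDK's actual implementation), the invariant can be sketched like this:

use std::collections::BTreeMap;

// Hypothetical stand-in for MonitorUpdateCompletionAction.
enum CompletionAction {
    EmitPaymentClaimed { payment_id: u64 },
}

struct ChannelMonitorState {
    in_flight_updates: usize, // non-blocked updates still being persisted
    blocked_actions: Vec<CompletionAction>,
}

// Called when one (non-blocked) monitor update finishes persisting. The queued
// actions run only once no such updates remain in flight; blocked RAA updates are
// deliberately ignored because preimage updates are never blocked.
fn monitor_update_persisted(
    channels: &mut BTreeMap<u64, ChannelMonitorState>, chan_id: u64,
) -> Vec<CompletionAction> {
    let chan = channels.get_mut(&chan_id).expect("unknown channel");
    chan.in_flight_updates -= 1;
    if chan.in_flight_updates == 0 {
        std::mem::take(&mut chan.blocked_actions)
    } else {
        Vec::new()
    }
}

fn main() {
    let mut channels = BTreeMap::new();
    channels.insert(1, ChannelMonitorState {
        in_flight_updates: 2,
        blocked_actions: vec![CompletionAction::EmitPaymentClaimed { payment_id: 42 }],
    });
    // First persistence completes: one update still in flight, so nothing runs yet.
    assert!(monitor_update_persisted(&mut channels, 1).is_empty());
    // Second persistence completes: the queued action is released.
    assert_eq!(monitor_update_persisted(&mut channels, 1).len(), 1);
}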
@@ -3229,97 +3238,108 @@ macro_rules! emit_channel_ready_event {
 macro_rules! handle_monitor_update_completion {
 	($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
 		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
-		let mut updates = $chan.monitor_updating_restored(&&logger,
-			&$self.node_signer, $self.chain_hash, &$self.default_configuration,
-			$self.best_block.read().unwrap().height);
-		let counterparty_node_id = $chan.context.get_counterparty_node_id();
-		let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
-			// We only send a channel_update in the case where we are just now sending a
-			// channel_ready and the channel is in a usable state. We may re-send a
-			// channel_update later through the announcement_signatures process for public
-			// channels, but there's no reason not to just inform our counterparty of our fees
-			// now.
-			if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
-				Some(events::MessageSendEvent::SendChannelUpdate {
-					node_id: counterparty_node_id,
-					msg,
-				})
-			} else { None }
-		} else { None };
 		let update_actions = $peer_state.monitor_update_blocked_actions
 			.remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
-		let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
-			&mut $peer_state.pending_msg_events, $chan, updates.raa,
-			updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds,
-			updates.funding_broadcastable, updates.channel_ready,
-			updates.announcement_sigs, updates.tx_signatures);
-		if let Some(upd) = channel_update {
-			$peer_state.pending_msg_events.push(upd);
-		}
-
-		let channel_id = $chan.context.channel_id();
-		let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid();
-		core::mem::drop($peer_state_lock);
-		core::mem::drop($per_peer_state_lock);
-
-		// If the channel belongs to a batch funding transaction, the progress of the batch
-		// should be updated as we have received funding_signed and persisted the monitor.
-		if let Some(txid) = unbroadcasted_batch_funding_txid {
-			let mut funding_batch_states = $self.funding_batch_states.lock().unwrap();
-			let mut batch_completed = false;
-			if let Some(batch_state) = funding_batch_states.get_mut(&txid) {
-				let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| (
-					*chan_id == channel_id &&
-					*pubkey == counterparty_node_id
-				));
-				if let Some(channel_state) = channel_state {
-					channel_state.2 = true;
+		if $chan.blocked_monitor_updates_pending() != 0 {
+			mem::drop($peer_state_lock);
+			mem::drop($per_peer_state_lock);
+
+			log_debug!(logger, "Channel has blocked monitor updates, completing update actions but leaving channel blocked");
+			$self.handle_monitor_update_completion_actions(update_actions);
+		} else {
+			log_debug!(logger, "Channel is open and awaiting update, resuming it");
+
+			let mut updates = $chan.monitor_updating_restored(&&logger,
+				&$self.node_signer, $self.chain_hash, &$self.default_configuration,
+				$self.best_block.read().unwrap().height);
+			let counterparty_node_id = $chan.context.get_counterparty_node_id();
+			let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
+				// We only send a channel_update in the case where we are just now sending a
+				// channel_ready and the channel is in a usable state. We may re-send a
+				// channel_update later through the announcement_signatures process for public
+				// channels, but there's no reason not to just inform our counterparty of our fees
+				// now.
+				if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
+					Some(events::MessageSendEvent::SendChannelUpdate {
+						node_id: counterparty_node_id,
+						msg,
+					})
+				} else { None }
+			} else { None };
+
+			let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
+				&mut $peer_state.pending_msg_events, $chan, updates.raa,
+				updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds,
+				updates.funding_broadcastable, updates.channel_ready,
+				updates.announcement_sigs, updates.tx_signatures);
+			if let Some(upd) = channel_update {
+				$peer_state.pending_msg_events.push(upd);
+			}
+
+			let channel_id = $chan.context.channel_id();
+			let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid();
+			core::mem::drop($peer_state_lock);
+			core::mem::drop($per_peer_state_lock);
+
+			// If the channel belongs to a batch funding transaction, the progress of the batch
+			// should be updated as we have received funding_signed and persisted the monitor.
+			if let Some(txid) = unbroadcasted_batch_funding_txid {
+				let mut funding_batch_states = $self.funding_batch_states.lock().unwrap();
+				let mut batch_completed = false;
+				if let Some(batch_state) = funding_batch_states.get_mut(&txid) {
+					let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| (
+						*chan_id == channel_id &&
+						*pubkey == counterparty_node_id
+					));
+					if let Some(channel_state) = channel_state {
+						channel_state.2 = true;
+					} else {
+						debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update");
+					}
+					batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
+				} else {
+					debug_assert!(false, "Missing batch state for channel which completed initial monitor update");
+				}
-				} else {
-					debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update");
-				}
-				batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
-			} else {
-				debug_assert!(false, "Missing batch state for channel which completed initial monitor update");
-			}
-
-			// When all channels in a batched funding transaction have become ready, it is not necessary
-			// to track the progress of the batch anymore and the state of the channels can be updated.
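The restructured macro above now performs the blocked-updates check itself: when `blocked_monitor_updates_pending()` is non-zero it only runs the queued completion actions and leaves the channel paused; otherwise it performs the full channel resumption. As a simplified, hypothetical sketch of that control flow (stand-in types, not the real macro):

// Stub types for illustration only -- not LDK's actual channel or action types.
struct Channel {
    blocked_monitor_updates: usize,
    paused: bool,
}

struct Action(&'static str);

fn run_completion_actions(actions: Vec<Action>) {
    for a in actions {
        println!("running completion action: {}", a.0);
    }
}

fn on_monitor_update_completed(chan: &mut Channel, update_actions: Vec<Action>) {
    if chan.blocked_monitor_updates > 0 {
        // Blocked updates remain: run the queued actions (e.g. emitting
        // PaymentClaimed) but leave the channel itself paused.
        run_completion_actions(update_actions);
    } else {
        // No blocked updates: fully resume the channel, then run the actions.
        chan.paused = false;
        run_completion_actions(update_actions);
    }
}

fn main() {
    let mut chan = Channel { blocked_monitor_updates: 1, paused: true };
    on_monitor_update_completed(&mut chan, vec![Action("emit PaymentClaimed")]);
    assert!(chan.paused);

    chan.blocked_monitor_updates = 0;
    on_monitor_update_completed(&mut chan, vec![Action("emit PaymentForwarded")]);
    assert!(!chan.paused);
}

Callers previously had to check `blocked_monitor_updates_pending() == 0` before invoking the macro; with the check folded in, they can call it unconditionally, as the later hunks in this file show.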
+				// When all channels in a batched funding transaction have become ready, it is not necessary
+				// to track the progress of the batch anymore and the state of the channels can be updated.
+				if batch_completed {
+					let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten();
+					let per_peer_state = $self.per_peer_state.read().unwrap();
+					let mut batch_funding_tx = None;
+					for (channel_id, counterparty_node_id, _) in removed_batch_state {
+						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+							let mut peer_state = peer_state_mutex.lock().unwrap();
+							if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
+								batch_funding_tx = batch_funding_tx.or_else(|| chan.context.unbroadcasted_funding());
+								chan.set_batch_ready();
+								let mut pending_events = $self.pending_events.lock().unwrap();
+								emit_channel_pending_event!(pending_events, chan);
+							}
 						}
 					}
-				}
-				if let Some(tx) = batch_funding_tx {
-					log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.compute_txid());
-					$self.tx_broadcaster.broadcast_transactions(&[&tx]);
+					if let Some(tx) = batch_funding_tx {
+						log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.compute_txid());
+						$self.tx_broadcaster.broadcast_transactions(&[&tx]);
+					}
 				}
 			}
-		}
 
-		$self.handle_monitor_update_completion_actions(update_actions);
+			$self.handle_monitor_update_completion_actions(update_actions);
 
-		if let Some(forwards) = htlc_forwards {
-			$self.forward_htlcs(&mut [forwards][..]);
-		}
-		if let Some(decode) = decode_update_add_htlcs {
-			$self.push_decode_update_add_htlcs(decode);
-		}
-		$self.finalize_claims(updates.finalized_claimed_htlcs);
-		for failure in updates.failed_htlcs.drain(..) {
-			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
-			$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None);
+			if let Some(forwards) = htlc_forwards {
+				$self.forward_htlcs(&mut [forwards][..]);
+			}
+			if let Some(decode) = decode_update_add_htlcs {
+				$self.push_decode_update_add_htlcs(decode);
+			}
+			$self.finalize_claims(updates.finalized_claimed_htlcs);
+			for failure in updates.failed_htlcs.drain(..) {
+				let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
+				$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None);
+			}
 		}
 	} }
 }
@@ -3439,7 +3459,7 @@ macro_rules! handle_new_monitor_update {
 			counterparty_node_id, in_flight_updates, idx, _internal_outer, {
 				let _ = in_flight_updates.remove(idx);
-				if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
+				if in_flight_updates.is_empty() {
 					handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
 				}
 			})
@@ -6397,9 +6417,7 @@ where
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
-			if chan.blocked_monitor_updates_pending() == 0 {
-				handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
-			}
+			handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
 		} else {
 			let update_actions = peer_state.monitor_update_blocked_actions
 				.remove(&channel_id).unwrap_or(Vec::new());
@@ -7805,12 +7823,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
 				if chan.is_awaiting_monitor_update() {
-					if chan.blocked_monitor_updates_pending() == 0 {
-						log_trace!(logger, "Channel is open and awaiting update, resuming it");
-						handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
-					} else {
-						log_trace!(logger, "Channel is open and awaiting update, leaving it blocked due to a blocked monitor update");
-					}
+					handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
 				} else {
 					log_trace!(logger, "Channel is open but not awaiting update");
 				}
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 273dd12e4b2..e06940a21d1 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -902,7 +902,7 @@ pub fn get_htlc_update_msgs(node: &Node, recipient: &PublicKey) -> msgs::Commitm
 			assert_eq!(node_id, recipient);
 			(*updates).clone()
 		},
-		_ => panic!("Unexpected event"),
+		_ => panic!("Unexpected event {events:?}"),
 	}
 }
 
@@ -2344,9 +2344,9 @@ pub fn expect_payment_sent<CM: AChannelManager, H: NodeHolder<CM=CM>>(
 	let expected_payment_hash = PaymentHash(
 		bitcoin::hashes::sha256::Hash::hash(&expected_payment_preimage.0).to_byte_array());
 	if expect_per_path_claims {
-		assert!(events.len() > 1);
+		assert!(events.len() > 1, "{events:?}");
 	} else {
-		assert_eq!(events.len(), 1);
+		assert_eq!(events.len(), 1, "{events:?}");
 	}
 	if expect_post_ev_mon_update {
 		check_added_monitors(node, 1);
 	}
@@ -2963,13 +2963,36 @@ impl<'a, 'b, 'c, 'd> ClaimAlongRouteArgs<'a, 'b, 'c, 'd> {
 	}
 }
 
+macro_rules! single_fulfill_commit_from_ev {
+	($ev: expr) => {
+		match $ev {
+			&MessageSendEvent::UpdateHTLCs {
+				ref node_id,
+				updates:
+					msgs::CommitmentUpdate {
+						ref update_add_htlcs,
+						ref update_fulfill_htlcs,
+						ref update_fail_htlcs,
+						ref update_fail_malformed_htlcs,
+						ref update_fee,
+						ref commitment_signed,
+					},
+			} => {
+				assert!(update_add_htlcs.is_empty());
+				assert_eq!(update_fulfill_htlcs.len(), 1);
+				assert!(update_fail_htlcs.is_empty());
+				assert!(update_fail_malformed_htlcs.is_empty());
+				assert!(update_fee.is_none());
+				((update_fulfill_htlcs[0].clone(), commitment_signed.clone()), node_id.clone())
+			},
+			_ => panic!("Unexpected event"),
+		}
+	};
+}
+
 pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 {
-	let ClaimAlongRouteArgs {
-		origin_node, expected_paths, expected_extra_fees, expected_min_htlc_overpay, skip_last,
-		payment_preimage: our_payment_preimage, allow_1_msat_fee_overpay, custom_tlvs,
-	} = args;
-	let claim_event = expected_paths[0].last().unwrap().node.get_and_clear_pending_events();
-	assert_eq!(claim_event.len(), 1);
+	let claim_event = args.expected_paths[0].last().unwrap().node.get_and_clear_pending_events();
+	assert_eq!(claim_event.len(), 1, "{claim_event:?}");
 	#[allow(unused)]
 	let mut fwd_amt_msat = 0;
 	match claim_event[0] {
@@ -2983,11 +3006,11 @@ pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 {
 		ref onion_fields,
 		..
 	} => {
-		assert_eq!(preimage, our_payment_preimage);
-		assert_eq!(htlcs.len(), expected_paths.len()); // One per path.
+		assert_eq!(preimage, args.payment_preimage);
+		assert_eq!(htlcs.len(), args.expected_paths.len()); // One per path.
 		assert_eq!(htlcs.iter().map(|h| h.value_msat).sum::<u64>(), amount_msat);
-		assert_eq!(onion_fields.as_ref().unwrap().custom_tlvs, custom_tlvs);
-		check_claimed_htlcs_match_route(origin_node, expected_paths, htlcs);
+		assert_eq!(onion_fields.as_ref().unwrap().custom_tlvs, args.custom_tlvs);
+		check_claimed_htlcs_match_route(args.origin_node, args.expected_paths, htlcs);
 		fwd_amt_msat = amount_msat;
 	},
 	Event::PaymentClaimed {
@@ -3000,54 +3023,64 @@ pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 {
 		ref onion_fields,
 		..
 	} => {
-		assert_eq!(&payment_hash.0, &Sha256::hash(&our_payment_preimage.0)[..]);
-		assert_eq!(htlcs.len(), expected_paths.len()); // One per path.
+		assert_eq!(&payment_hash.0, &Sha256::hash(&args.payment_preimage.0)[..]);
+		assert_eq!(htlcs.len(), args.expected_paths.len()); // One per path.
 		assert_eq!(htlcs.iter().map(|h| h.value_msat).sum::<u64>(), amount_msat);
-		assert_eq!(onion_fields.as_ref().unwrap().custom_tlvs, custom_tlvs);
-		check_claimed_htlcs_match_route(origin_node, expected_paths, htlcs);
+		assert_eq!(onion_fields.as_ref().unwrap().custom_tlvs, args.custom_tlvs);
+		check_claimed_htlcs_match_route(args.origin_node, args.expected_paths, htlcs);
 		fwd_amt_msat = amount_msat;
 	}
 	_ => panic!(),
 	}
-	check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len());
+	check_added_monitors(args.expected_paths[0].last().unwrap(), args.expected_paths.len());
 
-	let mut expected_total_fee_msat = 0;
-
-	macro_rules! msgs_from_ev {
-		($ev: expr) => {
-			match $ev {
-				&MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
-					assert!(update_add_htlcs.is_empty());
-					assert_eq!(update_fulfill_htlcs.len(), 1);
-					assert!(update_fail_htlcs.is_empty());
-					assert!(update_fail_malformed_htlcs.is_empty());
-					assert!(update_fee.is_none());
-					((update_fulfill_htlcs[0].clone(), commitment_signed.clone()), node_id.clone())
-				},
-				_ => panic!("Unexpected event"),
-			}
-		}
-	}
-	let mut per_path_msgs: Vec<((msgs::UpdateFulfillHTLC, msgs::CommitmentSigned), PublicKey)> = Vec::with_capacity(expected_paths.len());
-	let mut events = expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events();
-	assert_eq!(events.len(), expected_paths.len());
+	let mut per_path_msgs: Vec<(
+		(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned),
+		PublicKey,
+	)> = Vec::with_capacity(args.expected_paths.len());
+	let mut events = args.expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), args.expected_paths.len());
 
 	if events.len() == 1 {
-		per_path_msgs.push(msgs_from_ev!(&events[0]));
+		per_path_msgs.push(single_fulfill_commit_from_ev!(&events[0]));
 	} else {
-		for expected_path in expected_paths.iter() {
+		for expected_path in args.expected_paths.iter() {
 			// For MPP payments, we want the fulfill message from the payee to the penultimate hop in the
 			// path.
 			let penultimate_hop_node_id = expected_path.iter().rev().skip(1).next()
 				.map(|n| n.node.get_our_node_id())
-				.unwrap_or(origin_node.node.get_our_node_id());
+				.unwrap_or(args.origin_node.node.get_our_node_id());
 			let ev = remove_first_msg_event_to_node(&penultimate_hop_node_id, &mut events);
-			per_path_msgs.push(msgs_from_ev!(&ev));
+			per_path_msgs.push(single_fulfill_commit_from_ev!(&ev));
 		}
 	}
 
-	for (i, (expected_route, (path_msgs, next_hop))) in expected_paths.iter().zip(per_path_msgs.drain(..)).enumerate() {
+	pass_claimed_payment_along_route_from_ev(fwd_amt_msat, per_path_msgs, args)
+}
+
+pub fn pass_claimed_payment_along_route_from_ev(
+	each_htlc_claim_amt_msat: u64,
+	mut per_path_msgs: Vec<((msgs::UpdateFulfillHTLC, msgs::CommitmentSigned), PublicKey)>,
+	args: ClaimAlongRouteArgs,
+) -> u64 {
+	let ClaimAlongRouteArgs {
+		origin_node,
+		expected_paths,
+		expected_extra_fees,
+		expected_min_htlc_overpay,
+		skip_last,
+		payment_preimage: our_payment_preimage,
+		allow_1_msat_fee_overpay,
+		..
+	} = args;
+
+	let mut fwd_amt_msat = each_htlc_claim_amt_msat;
+	let mut expected_total_fee_msat = 0;
+
+	for (i, (expected_route, (path_msgs, next_hop))) in
+		expected_paths.iter().zip(per_path_msgs.drain(..)).enumerate()
+	{
 		let mut next_msgs = Some(path_msgs);
 		let mut expected_next_node = next_hop;
@@ -3098,7 +3131,7 @@ pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 {
 				let new_next_msgs = if $new_msgs {
 					let events = $node.node.get_and_clear_pending_msg_events();
 					assert_eq!(events.len(), 1);
-					let (res, nexthop) = msgs_from_ev!(&events[0]);
+					let (res, nexthop) = single_fulfill_commit_from_ev!(&events[0]);
 					expected_next_node = nexthop;
 					Some(res)
 				} else {
diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs
index 281789067ea..3ee80c17413 100644
--- a/lightning/src/util/ser.rs
+++ b/lightning/src/util/ser.rs
@@ -1566,7 +1566,14 @@ impl Readable for Duration {
 	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
 		let secs = Readable::read(r)?;
 		let nanos = Readable::read(r)?;
-		Ok(Duration::new(secs, nanos))
+		// Duration::new panics if the nanoseconds, once converted into whole seconds and added to
+		// the seconds part, overflow. To ensure this won't happen, we simply reject any case where
+		// there are nanoseconds in excess of a second, which is invalid anyway.
+		if nanos >= 1_000_000_000 {
+			Err(DecodeError::InvalidValue)
+		} else {
+			Ok(Duration::new(secs, nanos))
+		}
 	}
 }
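For context on the `Duration` change above: `Duration::new` panics when the carry from a nanosecond argument of one second or more overflows the 64-bit seconds counter, so a serialized value of `u64::MAX` seconds plus out-of-range nanoseconds could previously crash deserialization. A standalone sketch of the same guard (the helper name here is ours, not LDK's):

use std::time::Duration;

// Mirror of the patched logic: refuse nanosecond values that would require a
// carry into the seconds field, since Duration::new(u64::MAX, 1_000_000_000)
// would panic on the overflow.
fn duration_from_parts(secs: u64, nanos: u32) -> Result<Duration, ()> {
    if nanos >= 1_000_000_000 {
        Err(())
    } else {
        Ok(Duration::new(secs, nanos))
    }
}

fn main() {
    // In-range nanoseconds are fine even with the maximum seconds value.
    assert!(duration_from_parts(u64::MAX, 999_999_999).is_ok());
    // Nanoseconds of a full second or more are rejected instead of panicking.
    assert!(duration_from_parts(7, 1_500_000_000).is_err());
}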