From 50e1d15c23ebc174dd999ae51238171ec36f6e8c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 12 May 2025 00:37:05 +0000 Subject: [PATCH 01/21] Do not fail to load `ChannelManager` when we see claiming payments When we begin claiming a payment, we move the tracking of it from `claimable_payments` to `claiming_payments`. This ensures we only ever have one payment which is in the process of being claimed with a given payment hash at a time and lets us keep track of when all parts have been claimed with their `ChannelMonitor`s. However, on startup, we check that failing to move a payment from `claimable_payments` to `claiming_payments` implies that it is not present in `claiming_payments`. This is fine if the payment doesn't exist, but if the payment has already started being claimed, this will fail and we'll refuse to deserialize the `ChannelManager` (with a `debug_assert` failure in debug mode). Here we resolve this by checking if a payment is already being claimed before we attempt to initiate claiming and skip the failing check in that case. --- lightning/src/ln/channelmanager.rs | 16 ++++++++++++++++ lightning/src/ln/reload_tests.rs | 23 ++++++++++++++++------- 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 33ad59a4139..12362878524 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -14237,6 +14237,22 @@ where if payment_claim.mpp_parts.is_empty() { return Err(DecodeError::InvalidValue); } + { + let payments = channel_manager.claimable_payments.lock().unwrap(); + if !payments.claimable_payments.contains_key(&payment_hash) { + if let Some(payment) = payments.pending_claiming_payments.get(&payment_hash) { + if payment.payment_id == payment_claim.claiming_payment.payment_id { + // If this payment already exists and was marked as + // being-claimed then the serialized state must contain all + // of the pending `ChannelMonitorUpdate`s required to get + // the preimage on disk in all MPP parts. Thus we can skip + // the replay below. + continue; + } + } + } + } + let mut channels_without_preimage = payment_claim.mpp_parts.iter() .map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.funding_txo, htlc_info.channel_id)) .collect::>(); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index eaeb3e7bac4..16904d85758 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -781,7 +781,7 @@ fn test_forwardable_regen() { claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2); } -fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { +fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_restart: bool) { // Test what happens if a node receives an MPP payment, claims it, but crashes before // persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only // updating one of the two channels' ChannelMonitors. As a result, on startup, we'll (a) still @@ -797,11 +797,11 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { // definitely claimed. 
let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); - let persister; - let new_chain_monitor; + let (persist_d_1, persist_d_2); + let (chain_d_1, chain_d_2); let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); - let nodes_3_deserialized; + let (node_d_1, node_d_2); let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); @@ -876,7 +876,14 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { } // Now restart nodes[3]. - reload_node!(nodes[3], original_manager, &[&updated_monitor.0, &original_monitor.0], persister, new_chain_monitor, nodes_3_deserialized); + reload_node!(nodes[3], original_manager.clone(), &[&updated_monitor.0, &original_monitor.0], persist_d_1, chain_d_1, node_d_1); + + if double_restart { + // Previously, we had a bug where we'd fail to reload if we re-persist the `ChannelManager` + // without updating any `ChannelMonitor`s as we'd fail to double-initiate the claim replay. + // We test that here ensuring that we can reload again. + reload_node!(nodes[3], node_d_1.encode(), &[&updated_monitor.0, &original_monitor.0], persist_d_2, chain_d_2, node_d_2); + } // Until the startup background events are processed (in `get_and_clear_pending_events`, // below), the preimage is not copied to the non-persisted monitor... @@ -971,8 +978,10 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { #[test] fn test_partial_claim_before_restart() { - do_test_partial_claim_before_restart(false); - do_test_partial_claim_before_restart(true); + do_test_partial_claim_before_restart(false, false); + do_test_partial_claim_before_restart(false, true); + do_test_partial_claim_before_restart(true, false); + do_test_partial_claim_before_restart(true, true); } fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_htlc: bool, use_intercept: bool) { From 63f5d7733ef186bb89f96500d8d6a7e0948f0b91 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 2 Feb 2025 16:28:51 +0000 Subject: [PATCH 02/21] Optionally disable all state-based policy checks in test signer The signer we use in tests tracks the state of the channel and refuses to sign when the channel attempts an invalid state transition. In the next commit, however, we'll add an upgrade test which will fail these checks as the state won't get copied from previous versions of LDK to this version. Thus, here, we add the ability to disable all state-based checks in the signer. 
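As a rough sketch of the gating pattern the diff below introduces (struct and field names are from the diff; the signature is simplified), each policy assertion is wrapped in the new flag while the tracked state is still updated, so enforcement stays coherent if it is later re-enabled:

    fn validate_holder_commitment(&self, idx: u64) -> Result<(), ()> {
        let mut state = self.state.lock().unwrap();
        if !self.disable_all_state_policy_checks {
            // The policy assertion only runs when enforcement is enabled...
            assert!(idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1);
        }
        // ...but the state transition is always recorded.
        state.last_holder_commitment = idx;
        Ok(())
    }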
--- fuzz/src/chanmon_consistency.rs | 4 +- fuzz/src/full_stack.rs | 3 +- lightning/src/util/test_channel_signer.rs | 88 ++++++++++++++--------- lightning/src/util/test_utils.rs | 14 ++-- 4 files changed, 68 insertions(+), 41 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 73e4f88f1f3..1f4e7c24152 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -382,7 +382,7 @@ impl SignerProvider for KeyProvider { channel_keys_id, ); let revoked_commitment = self.make_enforcement_state_cell(keys.commitment_seed); - TestChannelSigner::new_with_revoked(keys, revoked_commitment, false) + TestChannelSigner::new_with_revoked(keys, revoked_commitment, false, false) } fn read_chan_signer(&self, buffer: &[u8]) -> Result { @@ -391,7 +391,7 @@ impl SignerProvider for KeyProvider { let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?; let state = self.make_enforcement_state_cell(inner.commitment_seed); - Ok(TestChannelSigner::new_with_revoked(inner, state, false)) + Ok(TestChannelSigner::new_with_revoked(inner, state, false, false)) } fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result { diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index c1f2dd11b1e..143f69f160e 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -522,6 +522,7 @@ impl SignerProvider for KeyProvider { }, state, false, + false, ) } @@ -529,7 +530,7 @@ impl SignerProvider for KeyProvider { let inner: InMemorySigner = ReadableArgs::read(&mut data, self)?; let state = Arc::new(Mutex::new(EnforcementState::new())); - Ok(TestChannelSigner::new_with_revoked(inner, state, false)) + Ok(TestChannelSigner::new_with_revoked(inner, state, false, false)) } fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result { diff --git a/lightning/src/util/test_channel_signer.rs b/lightning/src/util/test_channel_signer.rs index f3ef4dc1557..2e1289b2eb0 100644 --- a/lightning/src/util/test_channel_signer.rs +++ b/lightning/src/util/test_channel_signer.rs @@ -71,6 +71,7 @@ pub struct TestChannelSigner { /// Channel state used for policy enforcement pub state: Arc>, pub disable_revocation_policy_check: bool, + pub disable_all_state_policy_checks: bool, } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -124,6 +125,7 @@ impl TestChannelSigner { inner, state, disable_revocation_policy_check: false, + disable_all_state_policy_checks: false, } } @@ -132,12 +134,11 @@ impl TestChannelSigner { /// Since there are multiple copies of this struct for each channel, some coordination is needed /// so that all copies are aware of enforcement state. A pointer to this state is provided /// here, usually by an implementation of KeysInterface. 
- pub fn new_with_revoked(inner: InMemorySigner, state: Arc>, disable_revocation_policy_check: bool) -> Self { - Self { - inner, - state, - disable_revocation_policy_check, - } + pub fn new_with_revoked( + inner: InMemorySigner, state: Arc>, + disable_revocation_policy_check: bool, disable_all_state_policy_checks: bool, + ) -> Self { + Self { inner, state, disable_revocation_policy_check, disable_all_state_policy_checks } } pub fn channel_type_features(&self) -> &ChannelTypeFeatures { self.inner.channel_type_features().unwrap() } @@ -177,19 +178,26 @@ impl ChannelSigner for TestChannelSigner { if !self.is_signer_available(SignerOp::ReleaseCommitmentSecret) { return Err(()); } - { - let mut state = self.state.lock().unwrap(); + let mut state = self.state.lock().unwrap(); + if !self.disable_all_state_policy_checks { assert!(idx == state.last_holder_revoked_commitment || idx == state.last_holder_revoked_commitment - 1, "can only revoke the current or next unrevoked commitment - trying {}, last revoked {}", idx, state.last_holder_revoked_commitment); assert!(idx > state.last_holder_commitment, "cannot revoke the last holder commitment - attempted to revoke {} last commitment {}", idx, state.last_holder_commitment); - state.last_holder_revoked_commitment = idx; } + state.last_holder_revoked_commitment = idx; self.inner.release_commitment_secret(idx) } fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction, _outbound_htlc_preimages: Vec) -> Result<(), ()> { let mut state = self.state.lock().unwrap(); let idx = holder_tx.commitment_number(); - assert!(idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1, "expecting to validate the current or next holder commitment - trying {}, current {}", idx, state.last_holder_commitment); + if !self.disable_all_state_policy_checks { + assert!( + idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1, + "expecting to validate the current or next holder commitment - trying {}, current {}", + idx, + state.last_holder_commitment + ); + } state.last_holder_commitment = idx; Ok(()) } @@ -200,7 +208,9 @@ impl ChannelSigner for TestChannelSigner { return Err(()); } let mut state = self.state.lock().unwrap(); - assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment); + if !self.disable_all_state_policy_checks { + assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment); + } state.last_counterparty_revoked_commitment = idx; Ok(()) } @@ -218,22 +228,28 @@ impl EcdsaChannelSigner for TestChannelSigner { fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction, inbound_htlc_preimages: Vec, outbound_htlc_preimages: Vec, secp_ctx: &Secp256k1) -> Result<(Signature, Vec), ()> { self.verify_counterparty_commitment_tx(commitment_tx, secp_ctx); - { - #[cfg(test)] - if !self.is_signer_available(SignerOp::SignCounterpartyCommitment) { - return Err(()); - } - let mut state = self.state.lock().unwrap(); - let actual_commitment_number = commitment_tx.commitment_number(); - let last_commitment_number = state.last_counterparty_commitment; + #[cfg(test)] + if 
!self.is_signer_available(SignerOp::SignCounterpartyCommitment) { + return Err(()); + } + let mut state = self.state.lock().unwrap(); + let actual_commitment_number = commitment_tx.commitment_number(); + let last_commitment_number = state.last_counterparty_commitment; + if !self.disable_all_state_policy_checks { // These commitment numbers are backwards counting. We expect either the same as the previously encountered, // or the next one. assert!(last_commitment_number == actual_commitment_number || last_commitment_number - 1 == actual_commitment_number, "{} doesn't come after {}", actual_commitment_number, last_commitment_number); // Ensure that the counterparty doesn't get more than two broadcastable commitments - // the last and the one we are trying to sign - assert!(actual_commitment_number >= state.last_counterparty_revoked_commitment - 2, "cannot sign a commitment if second to last wasn't revoked - signing {} revoked {}", actual_commitment_number, state.last_counterparty_revoked_commitment); - state.last_counterparty_commitment = cmp::min(last_commitment_number, actual_commitment_number) + assert!( + actual_commitment_number >= state.last_counterparty_revoked_commitment - 2, + "cannot sign a commitment if second to last wasn't revoked - signing {} revoked {}", + actual_commitment_number, + state.last_counterparty_revoked_commitment + ); } + state.last_counterparty_commitment = + cmp::min(last_commitment_number, actual_commitment_number); Ok(self.inner.sign_counterparty_commitment(commitment_tx, inbound_htlc_preimages, outbound_htlc_preimages, secp_ctx).unwrap()) } @@ -244,12 +260,14 @@ impl EcdsaChannelSigner for TestChannelSigner { return Err(()); } let trusted_tx = self.verify_holder_commitment_tx(commitment_tx, secp_ctx); - let state = self.state.lock().unwrap(); - let commitment_number = trusted_tx.commitment_number(); - if state.last_holder_revoked_commitment - 1 != commitment_number && state.last_holder_revoked_commitment - 2 != commitment_number { - if !self.disable_revocation_policy_check { - panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}", - state.last_holder_revoked_commitment, commitment_number, self.inner.commitment_seed[0]) + if !self.disable_all_state_policy_checks { + let state = self.state.lock().unwrap(); + let commitment_number = trusted_tx.commitment_number(); + if state.last_holder_revoked_commitment - 1 != commitment_number && state.last_holder_revoked_commitment - 2 != commitment_number { + if !self.disable_revocation_policy_check { + panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}", + state.last_holder_revoked_commitment, commitment_number, self.inner.commitment_seed[0]) + } } } Ok(self.inner.sign_holder_commitment(commitment_tx, secp_ctx).unwrap()) @@ -284,13 +302,15 @@ impl EcdsaChannelSigner for TestChannelSigner { if !self.is_signer_available(SignerOp::SignHolderHtlcTransaction) { return Err(()); } - let state = self.state.lock().unwrap(); - if state.last_holder_revoked_commitment - 1 != htlc_descriptor.per_commitment_number && - state.last_holder_revoked_commitment - 2 != htlc_descriptor.per_commitment_number - { - if !self.disable_revocation_policy_check { - panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}", - state.last_holder_revoked_commitment, htlc_descriptor.per_commitment_number, self.inner.commitment_seed[0]) + if !self.disable_all_state_policy_checks { + let state = self.state.lock().unwrap(); + 
if state.last_holder_revoked_commitment - 1 != htlc_descriptor.per_commitment_number && + state.last_holder_revoked_commitment - 2 != htlc_descriptor.per_commitment_number + { + if !self.disable_revocation_policy_check { + panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}", + state.last_holder_revoked_commitment, htlc_descriptor.per_commitment_number, self.inner.commitment_seed[0]) + } } } assert_eq!(htlc_tx.input[input], htlc_descriptor.unsigned_tx_input()); diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 5bd5acaf176..317b46a02ef 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -327,7 +327,8 @@ impl SignerProvider for OnlyReadsKeysInterface { Ok(TestChannelSigner::new_with_revoked( inner, state, - false + false, + false, )) } @@ -1263,7 +1264,8 @@ pub struct TestKeysInterface { pub backing: sign::PhantomKeysManager, pub override_random_bytes: Mutex>, pub disable_revocation_policy_check: bool, - enforcement_states: Mutex>>>, + pub disable_all_state_policy_checks: bool, + enforcement_states: Mutex>>>, expectations: Mutex>>, pub unavailable_signers_ops: Mutex>>, pub next_signer_disabled_ops: Mutex>, @@ -1319,7 +1321,9 @@ impl SignerProvider for TestKeysInterface { fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> TestChannelSigner { let keys = self.backing.derive_channel_signer(channel_value_satoshis, channel_keys_id); let state = self.make_enforcement_state_cell(keys.commitment_seed); - let signer = TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check); + let rev_checks = self.disable_revocation_policy_check; + let state_checks = self.disable_all_state_policy_checks; + let signer = TestChannelSigner::new_with_revoked(keys, state, rev_checks, state_checks); #[cfg(test)] if let Some(ops) = self.unavailable_signers_ops.lock().unwrap().get(&channel_keys_id) { for &op in ops { @@ -1342,7 +1346,8 @@ impl SignerProvider for TestKeysInterface { Ok(TestChannelSigner::new_with_revoked( inner, state, - self.disable_revocation_policy_check + self.disable_revocation_policy_check, + self.disable_all_state_policy_checks, )) } @@ -1366,6 +1371,7 @@ impl TestKeysInterface { backing: sign::PhantomKeysManager::new(seed, now.as_secs(), now.subsec_nanos(), seed), override_random_bytes: Mutex::new(None), disable_revocation_policy_check: false, + disable_all_state_policy_checks: false, enforcement_states: Mutex::new(new_hash_map()), expectations: Mutex::new(None), unavailable_signers_ops: Mutex::new(new_hash_map()), From 87bb72d94b8689240a6c2690bbe56747950418dc Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 2 Feb 2025 16:27:25 +0000 Subject: [PATCH 03/21] Add a simple test of upgrading from LDK 0.1 One major hole in our test coverage historically has been tests covering upgrades or downgrades across LDK versions. Luckily, these aren't particularly hard to write as cargo lets us depend on previous versions of the `lightning` crate directly, which we can use in tests. Here we add a simple initial test of upgrading from LDK 0.1 while there's a pending payment to be claimed. 
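The mechanism enabling this is Cargo's package renaming, visible in the new `lightning-tests/Cargo.toml` below: the published 0.1.1 release is pulled in under the alias `lightning_0_1` alongside the workspace `lightning` crate, so a single test file can serialize state with the old release and reload it with the current code:

    // Both versions coexist under distinct crate names (per the manifest below):
    use lightning_0_1::util::ser::Writeable as _; // 0.1 release: build state and serialize it
    use lightning::ln::functional_test_utils::*; // current code: reload the serialized state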
--- Cargo.toml | 1 + ci/ci-tests.sh | 6 ++ lightning-tests/Cargo.toml | 21 ++++++ lightning-tests/src/lib.rs | 5 ++ .../src/upgrade_downgrade_tests.rs | 65 +++++++++++++++++++ lightning/src/ln/functional_test_utils.rs | 2 +- lightning/src/onion_message/messenger.rs | 4 +- 7 files changed, 101 insertions(+), 3 deletions(-) create mode 100644 lightning-tests/Cargo.toml create mode 100644 lightning-tests/src/lib.rs create mode 100644 lightning-tests/src/upgrade_downgrade_tests.rs diff --git a/Cargo.toml b/Cargo.toml index dc3eb92c7e2..2f73851d114 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ members = [ exclude = [ "lightning-transaction-sync", + "lightning-tests", "no-std-check", "msrv-no-dev-deps-check", "bench", diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index 1d11a5fd624..3be6afde89a 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -57,6 +57,12 @@ for DIR in "${WORKSPACE_MEMBERS[@]}"; do cargo doc -p "$DIR" --document-private-items done +echo -e "\n\nTesting upgrade from prior versions of LDK" +pushd lightning-tests +[ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p regex --precise "1.9.6" --verbose +cargo test +popd + echo -e "\n\nChecking and testing Block Sync Clients with features" cargo test -p lightning-block-sync --verbose --color always --features rest-client diff --git a/lightning-tests/Cargo.toml b/lightning-tests/Cargo.toml new file mode 100644 index 00000000000..75c68e03f5a --- /dev/null +++ b/lightning-tests/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "lightning-tests" +version = "0.0.1" +authors = ["Matt Corallo"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/lightningdevkit/rust-lightning/" +description = "Tests for LDK crates" +edition = "2021" + +[features] + +[dependencies] +lightning-types = { path = "../lightning-types", features = ["_test_utils"] } +lightning-invoice = { path = "../lightning-invoice", default-features = false } +lightning-macros = { path = "../lightning-macros" } +lightning = { path = "../lightning", features = ["_test_utils"] } +lightning_0_1 = { package = "lightning", version = "0.1.1", features = ["_test_utils"] } + +bitcoin = { version = "0.32.2", default-features = false } + +[dev-dependencies] diff --git a/lightning-tests/src/lib.rs b/lightning-tests/src/lib.rs new file mode 100644 index 00000000000..c028193d692 --- /dev/null +++ b/lightning-tests/src/lib.rs @@ -0,0 +1,5 @@ +#[cfg_attr(test, macro_use)] +extern crate lightning; + +#[cfg(all(test, not(taproot)))] +pub mod upgrade_downgrade_tests; diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs new file mode 100644 index 00000000000..0a989553752 --- /dev/null +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -0,0 +1,65 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +//! Tests which test upgrading from previous versions of LDK or downgrading to previous versions of +//! LDK. 
+ +use lightning_0_1::get_monitor as get_monitor_0_1; +use lightning_0_1::ln::functional_test_utils as lightning_0_1_utils; +use lightning_0_1::util::ser::Writeable; + +use lightning::ln::functional_test_utils::*; + +use lightning_types::payment::PaymentPreimage; + +#[test] +fn simple_upgrade() { + // Tests a simple case of upgrading from LDK 0.1 with a pending payment + let (node_a_ser, node_b_ser, mon_a_ser, mon_b_ser, preimage); + { + let chanmon_cfgs = lightning_0_1_utils::create_chanmon_cfgs(2); + let node_cfgs = lightning_0_1_utils::create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = lightning_0_1_utils::create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = lightning_0_1_utils::create_network(2, &node_cfgs, &node_chanmgrs); + + let chan_id = lightning_0_1_utils::create_announced_chan_between_nodes(&nodes, 0, 1).2; + + let payment_preimage = + lightning_0_1_utils::route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + preimage = PaymentPreimage(payment_preimage.0 .0); + + node_a_ser = nodes[0].node.encode(); + node_b_ser = nodes[1].node.encode(); + mon_a_ser = get_monitor_0_1!(nodes[0], chan_id).encode(); + mon_b_ser = get_monitor_0_1!(nodes[1], chan_id).encode(); + } + + // Create a dummy node to reload over with the 0.1 state + + let mut chanmon_cfgs = create_chanmon_cfgs(2); + + // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let (persister_a, persister_b, chain_mon_a, chain_mon_b); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let (node_a, node_b); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let config = test_default_channel_config(); + let a_mons = &[&mon_a_ser[..]]; + reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + reload_node!(nodes[1], config, &node_b_ser, &[&mon_b_ser], persister_b, chain_mon_b, node_b); + + reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + + claim_payment(&nodes[0], &[&nodes[1]], preimage); +} diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index be77547b79c..04295073861 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1196,7 +1196,7 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User node_deserialized } -#[cfg(test)] +#[macro_export] macro_rules! 
reload_node { ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { let chanman_encoded = $chanman_encoded; diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index f076e6a9da4..c326cfca804 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -1172,8 +1172,8 @@ where } } - #[cfg(test)] - pub(crate) fn set_offers_handler(&mut self, offers_handler: OMH) { + #[cfg(any(test, feature = "_test_utils"))] + pub fn set_offers_handler(&mut self, offers_handler: OMH) { self.offers_handler = offers_handler; } From bc9ffe44332c1636c11ea3c0f65aeafbfd40ccb7 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 21 May 2025 15:42:16 +0000 Subject: [PATCH 04/21] Read `ChannelManager` even if we have no-peer post-update actions In 93b4479e472e6767af5df90fecdcdfb79074e260 we fixed an issue which could cause a `ChannelMonitorUpdate` to get marked as blocked on itself, leading to an eventual force-closure. One potential side-effect of that issue, however, is that any further `ChannelMonitorUpdate`s to the same channel while it is blocked will not have any post-update actions processed (as there is a pending blocked `ChannelMonitorUpdate` sitting in the channel). This can leave a dangling `MonitorUpdateCompletionAction` sitting around even after the channel is closed. In 0.1, because `ChannelMonitorUpdate`s to closed channels were finally fully tracked, we started enforcing that any post-update completion action we had on startup corresponded to a peer entry, while at the same time no longer creating peer entries just because we had serialized one in the data we were loading (only creating them if we had channel(s) or a `ChannelMonitor`). This can cause some `ChannelManager` to no longer deserialize on 0.1 as we might have a left-over dangling `MonitorUpdateCompletionAction` and will no longer always have a peer entry just because of it. Here we fix this issue by specifically checking for dangling `MonitorUpdateCompletionAction::PaymentClaim` entries and dropping them if there is no corresponding channel or peer state entry. We only check for `PaymentClaimed` actions rather than allowing for any dangling actions as 93b4479e472e6767af5df90fecdcdfb79074e260 was only triggerable with MPP claims, so dangling `MonitorUpdateCompletionAction`s for forwarded payments should be exceedingly rare. This also adds an upgrade test to test a slightly convoluted version of this scenario. 
Trivial conflicts addressed in: * lightning/src/ln/channelmanager.rs --- lightning-tests/Cargo.toml | 1 + .../src/upgrade_downgrade_tests.rs | 152 +++++++++++++++++- lightning/src/ln/channelmanager.rs | 34 +++- 3 files changed, 183 insertions(+), 4 deletions(-) diff --git a/lightning-tests/Cargo.toml b/lightning-tests/Cargo.toml index 75c68e03f5a..23c81fae4a3 100644 --- a/lightning-tests/Cargo.toml +++ b/lightning-tests/Cargo.toml @@ -15,6 +15,7 @@ lightning-invoice = { path = "../lightning-invoice", default-features = false } lightning-macros = { path = "../lightning-macros" } lightning = { path = "../lightning", features = ["_test_utils"] } lightning_0_1 = { package = "lightning", version = "0.1.1", features = ["_test_utils"] } +lightning_0_0_125 = { package = "lightning", version = "0.0.125", features = ["_test_utils"] } bitcoin = { version = "0.32.2", default-features = false } diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs index 0a989553752..2b57cd23a9a 100644 --- a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -12,7 +12,21 @@ use lightning_0_1::get_monitor as get_monitor_0_1; use lightning_0_1::ln::functional_test_utils as lightning_0_1_utils; -use lightning_0_1::util::ser::Writeable; +use lightning_0_1::util::ser::Writeable as _; + +use lightning_0_0_125::chain::ChannelMonitorUpdateStatus as ChannelMonitorUpdateStatus_0_0_125; +use lightning_0_0_125::check_added_monitors as check_added_monitors_0_0_125; +use lightning_0_0_125::events::ClosureReason as ClosureReason_0_0_125; +use lightning_0_0_125::expect_payment_claimed as expect_payment_claimed_0_0_125; +use lightning_0_0_125::get_htlc_update_msgs as get_htlc_update_msgs_0_0_125; +use lightning_0_0_125::get_monitor as get_monitor_0_0_125; +use lightning_0_0_125::get_revoke_commit_msgs as get_revoke_commit_msgs_0_0_125; +use lightning_0_0_125::ln::channelmanager::PaymentId as PaymentId_0_0_125; +use lightning_0_0_125::ln::channelmanager::RecipientOnionFields as RecipientOnionFields_0_0_125; +use lightning_0_0_125::ln::functional_test_utils as lightning_0_0_125_utils; +use lightning_0_0_125::ln::msgs::ChannelMessageHandler as _; +use lightning_0_0_125::routing::router as router_0_0_125; +use lightning_0_0_125::util::ser::Writeable as _; use lightning::ln::functional_test_utils::*; @@ -63,3 +77,139 @@ fn simple_upgrade() { claim_payment(&nodes[0], &[&nodes[1]], preimage); } + +#[test] +fn test_125_dangling_post_update_actions() { + // Tests a failure of upgrading from 0.0.125 to 0.1 when there's a dangling + // `MonitorUpdateCompletionAction` due to the bug fixed in + // 93b4479e472e6767af5df90fecdcdfb79074e260. + let (node_d_ser, mon_ser); + { + // First, we get RAA-source monitor updates held by using async persistence (note that this + // issue was first identified as a consequence of the bug fixed in + // 93b4479e472e6767af5df90fecdcdfb79074e260 but in order to replicate that bug we need a + // complicated multi-threaded race that is not deterministic, thus we "cheat" here by using + // async persistence). We do this by simply claiming an MPP payment and not completing the + // second channel's `ChannelMonitorUpdate`, blocking RAA `ChannelMonitorUpdate`s from the + // first (which is ultimately a very similar bug to the one fixed in 93b4479e472e6767af5df). 
+ // + // Then, we claim a second payment on the channel, which ultimately doesn't have its + // `ChannelMonitorUpdate` completion handled due to the presence of the blocked + // `ChannelMonitorUpdate`. The claim also generates a post-update completion action, but + // the `ChannelMonitorUpdate` isn't queued due to the RAA-update block. + let chanmon_cfgs = lightning_0_0_125_utils::create_chanmon_cfgs(4); + let node_cfgs = lightning_0_0_125_utils::create_node_cfgs(4, &chanmon_cfgs); + let node_chanmgrs = + lightning_0_0_125_utils::create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let nodes = lightning_0_0_125_utils::create_network(4, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + + lightning_0_0_125_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 100_000, 0, + ); + lightning_0_0_125_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 2, 100_000, 0, + ); + let chan_id_1_3 = lightning_0_0_125_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 3, 100_000, 0, + ) + .2; + let chan_id_2_3 = lightning_0_0_125_utils::create_announced_chan_between_nodes_with_value( + &nodes, 2, 3, 100_000, 0, + ) + .2; + + let (preimage, hash, secret) = + lightning_0_0_125_utils::get_payment_preimage_hash(&nodes[3], Some(15_000_000), None); + + let pay_params = router_0_0_125::PaymentParameters::from_node_id( + node_d_id, + lightning_0_0_125_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[3].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_0_125::RouteParameters::from_payment_params_and_value(pay_params, 15_000_000); + let route = lightning_0_0_125_utils::get_route(&nodes[0], &route_params).unwrap(); + + let onion = RecipientOnionFields_0_0_125::secret_only(secret); + let id = PaymentId_0_0_125(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + check_added_monitors_0_0_125!(nodes[0], 2); + let paths = &[&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]]]; + lightning_0_0_125_utils::pass_along_route(&nodes[0], paths, 15_000_000, hash, secret); + + let preimage_2 = lightning_0_0_125_utils::route_payment(&nodes[1], &[&nodes[3]], 100_000).0; + + chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus_0_0_125::InProgress); + chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus_0_0_125::InProgress); + nodes[3].node.claim_funds(preimage); + check_added_monitors_0_0_125!(nodes[3], 2); + + let (outpoint, update_id, _) = { + let latest_monitors = nodes[3].chain_monitor.latest_monitor_update_id.lock().unwrap(); + latest_monitors.get(&chan_id_1_3).unwrap().clone() + }; + nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, update_id).unwrap(); + expect_payment_claimed_0_0_125!(nodes[3], hash, 15_000_000); + + let ds_fulfill = get_htlc_update_msgs_0_0_125!(nodes[3], node_b_id); + // Due to an unrelated test bug in 0.0.125, we have to leave the `ChannelMonitorUpdate` for + // the previous node un-completed or we will panic when dropping the `Node`. 
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus_0_0_125::InProgress); + nodes[1].node.handle_update_fulfill_htlc(&node_d_id, &ds_fulfill.update_fulfill_htlcs[0]); + check_added_monitors_0_0_125!(nodes[1], 1); + + nodes[1].node.handle_commitment_signed(&node_d_id, &ds_fulfill.commitment_signed); + check_added_monitors_0_0_125!(nodes[1], 1); + + // The `ChannelMonitorUpdate` generated by the RAA from node B to node D will be blocked. + let (bs_raa, _) = get_revoke_commit_msgs_0_0_125!(nodes[1], node_d_id); + nodes[3].node.handle_revoke_and_ack(&node_b_id, &bs_raa); + check_added_monitors_0_0_125!(nodes[3], 0); + + // Now that there is a blocked update in the B <-> D channel, we can claim the second + // payment across it, which, while it will generate a `ChannelMonitorUpdate`, will not + // complete its post-update actions. + nodes[3].node.claim_funds(preimage_2); + check_added_monitors_0_0_125!(nodes[3], 1); + + // Finally, we set up the failure by force-closing the channel in question, ensuring that + // 0.1 will not create a per-peer state for node B. + let err = "Force Closing Channel".to_owned(); + nodes[3].node.force_close_without_broadcasting_txn(&chan_id_1_3, &node_b_id, err).unwrap(); + let reason = + ClosureReason_0_0_125::HolderForceClosed { broadcasted_latest_txn: Some(false) }; + let peers = &[node_b_id]; + lightning_0_0_125_utils::check_closed_event(&nodes[3], 1, reason, false, peers, 100_000); + lightning_0_0_125_utils::check_closed_broadcast(&nodes[3], 1, true); + check_added_monitors_0_0_125!(nodes[3], 1); + + node_d_ser = nodes[3].node.encode(); + mon_ser = get_monitor_0_0_125!(nodes[3], chan_id_2_3).encode(); + } + + // Create a dummy node to reload over with the 0.0.125 state + + let mut chanmon_cfgs = create_chanmon_cfgs(4); + + // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[3].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let (persister, chain_mon); + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let node; + let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + + // Finally, reload the node in the latest LDK. This previously failed. 
+ let config = test_default_channel_config(); + reload_node!(nodes[3], config, &node_d_ser, &[&mon_ser], persister, chain_mon, node); +} diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 12362878524..b973432056a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -13582,9 +13582,17 @@ where $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr ) => { { let mut max_in_flight_update_id = 0; + let starting_len = $chan_in_flight_upds.len(); $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id()); + if $chan_in_flight_upds.len() < starting_len { + log_debug!( + $logger, + "{} ChannelMonitorUpdates completed after ChannelManager was last serialized", + starting_len - $chan_in_flight_upds.len() + ); + } for update in $chan_in_flight_upds.iter() { - log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}", + log_debug!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}", update.update_id, $channel_info_log, &$monitor.channel_id()); max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id); pending_background_events.push( @@ -14148,11 +14156,31 @@ where debug_assert!(false, "Non-event-generating channel freeing should not appear in our queue"); } } + // Note that we may have a post-update action for a channel that has no pending + // `ChannelMonitorUpdate`s, but unlike the no-peer-state case, it may simply be + // because we had a `ChannelMonitorUpdate` complete after the last time this + // `ChannelManager` was serialized. In that case, we'll run the post-update + // actions as soon as we get going. } peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions; } else { - log_error!(WithContext::from(&args.logger, Some(node_id), None, None), "Got blocked actions without a per-peer-state for {}", node_id); - return Err(DecodeError::InvalidValue); + for actions in monitor_update_blocked_actions.values() { + for action in actions.iter() { + if matches!(action, MonitorUpdateCompletionAction::PaymentClaimed { .. }) { + // If there is no state for this channel but we have pending + // post-update actions, it's possible that one was left over from pre-0.1 + // payment claims where MPP claims led to a channel blocked on itself + // and later `ChannelMonitorUpdate`s didn't get their post-update + // actions run. + // This should only have happened for `PaymentClaimed` post-update actions, + // which we ignore here. + } else { + let logger = WithContext::from(&args.logger, Some(node_id), None, None); + log_error!(logger, "Got blocked actions {:?} without a per-peer-state for {}", monitor_update_blocked_actions, node_id); + return Err(DecodeError::InvalidValue); + } + } + } } } From edcd3761ae4edb0b1e5ce568667c07a3da98c776 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Fri, 23 May 2025 17:54:55 +0000 Subject: [PATCH 05/21] Do not dip into the funder's reserve to cover the two anchors At all times, the funder's balance should cover the commitment transaction fee, any non-zero-value anchors, and the fundee-selected channel reserve. Prior to this commit, we would allow the funder to dip into its reserve to pay for the two 330 sat anchors. LDK sets reserves to at least 1000 sat, so two 330 sat anchors would never overdraw this reserve. We now prevent any such dips, and ensure that the funder can pay for the complete sum of the transaction fee, the anchors, and the reserve. 
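To make the new accounting concrete, a minimal sketch of the tightened affordability check (the constant matches the diff below; the helper and its parameters are illustrative, not the actual channel code):

    const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

    // The funder's balance must cover the commitment tx fee, both anchors (on
    // anchor channels), and the reserve selected by the other side.
    fn funder_can_afford(balance_msat: u64, fee_sat: u64, reserve_sat: u64, anchors: bool) -> bool {
        let total_anchor_sats = if anchors { 2 * ANCHOR_OUTPUT_VALUE_SATOSHI } else { 0 };
        balance_msat >= (fee_sat + total_anchor_sats + reserve_sat) * 1000
    }
    // E.g. with a 1000 sat reserve on an anchor channel, the funder now needs
    // 660 sat more headroom than the pre-patch check required.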
Substantial conflicts resulted in the `channel.rs` parts of this patch being rewritten. The `functional_tests.rs` changes also conflicted but were re-applied to the proper file. --- lightning/src/ln/channel.rs | 14 ++++++-- lightning/src/ln/functional_tests.rs | 51 ++++++++++++++++++++++------ 2 files changed, 52 insertions(+), 13 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index c15a4bee643..96492ef97f2 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -5039,7 +5039,12 @@ impl Channel where if update_fee { debug_assert!(!self.context.is_outbound()); let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000; - if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat { + let total_anchor_sats = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { + ANCHOR_OUTPUT_VALUE_SATOSHI * 2 + } else { + 0 + }; + if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + total_anchor_sats * 1000 + counterparty_reserve_we_require_msat { return Err(ChannelError::close("Funding remote cannot afford proposed new fee".to_owned())); } } @@ -5772,7 +5777,12 @@ impl Channel where let commitment_stats = self.context.build_commitment_transaction(self.holder_commitment_point.transaction_number(), &keys, true, true, logger); let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + htlc_stats.on_holder_tx_outbound_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000; let holder_balance_msat = commitment_stats.local_balance_msat - htlc_stats.outbound_holding_cell_msat; - if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { + let total_anchor_sats = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { + ANCHOR_OUTPUT_VALUE_SATOSHI * 2 + } else { + 0 + }; + if holder_balance_msat < buffer_fee_msat + total_anchor_sats * 1000 + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { //TODO: auto-close after a number of failures? 
log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw); return None; diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index bdb1621771f..fc8bf38519d 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -24,7 +24,7 @@ use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentSecret, PaymentHash}; use crate::ln::channel::{CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase}; use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; -use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError}; +use crate::ln::channel::{ANCHOR_OUTPUT_VALUE_SATOSHI, DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError}; use crate::ln::{chan_utils, onion_utils}; use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment}; use crate::routing::gossip::{NetworkGraph, NetworkUpdate}; @@ -673,28 +673,49 @@ fn test_update_fee_vanilla() { check_added_monitors!(nodes[1], 1); } -#[test] -fn test_update_fee_that_funder_cannot_afford() { +pub fn do_test_update_fee_that_funder_cannot_afford(channel_type_features: ChannelTypeFeatures) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + + let mut default_config = test_default_channel_config(); + if channel_type_features == ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() { + default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + // this setting is also needed to create an anchor channel + default_config.manually_accept_inbound_channels = true; + } + + let node_chanmgrs = create_node_chanmgrs( + 2, + &node_cfgs, + &[Some(default_config.clone()), Some(default_config.clone())], + ); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_value = 5000; let push_sats = 700; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000); let channel_id = chan.2; let secp_ctx = Secp256k1::new(); - let default_config = UserConfig::default(); let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config); - let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + let (anchor_outputs_value_sats, outputs_num_no_htlcs) = + if channel_type_features.supports_anchors_zero_fee_htlc_tx() { + (ANCHOR_OUTPUT_VALUE_SATOSHI * 2, 4) + } else { + (0, 2) + }; // Calculate the maximum feerate that A can afford. Note that we don't send an update_fee // CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs before actually running out of local balance, so we // calculate two different feerates here - the expected local limit as well as the expected // remote limit. 
- let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32; - let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32; + let feerate = + ((channel_value - bs_channel_reserve_sats - push_sats - anchor_outputs_value_sats) * 1000 + / (commitment_tx_base_weight(&channel_type_features) + + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32; + let non_buffer_feerate = + ((channel_value - bs_channel_reserve_sats - push_sats - anchor_outputs_value_sats) * 1000 + / commitment_tx_base_weight(&channel_type_features)) as u32; { let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); *feerate_lock = feerate; @@ -711,8 +732,8 @@ fn test_update_fee_that_funder_cannot_afford() { { let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone(); - //We made sure neither party's funds are below the dust limit and there are no HTLCs here - assert_eq!(commitment_tx.output.len(), 2); + // We made sure neither party's funds are below the dust limit and there are no HTLCs here + assert_eq!(commitment_tx.output.len(), outputs_num_no_htlcs); let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000; let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value.to_sat()); actual_fee = channel_value - actual_fee; @@ -771,7 +792,7 @@ fn test_update_fee_that_funder_cannot_afford() { let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( INITIAL_COMMITMENT_NUMBER - 1, push_sats, - channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000, + channel_value - push_sats - anchor_outputs_value_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000, local_funding, remote_funding, commit_tx_keys.clone(), non_buffer_feerate + 4, @@ -808,6 +829,14 @@ fn test_update_fee_that_funder_cannot_afford() { [nodes[0].node.get_our_node_id()], channel_value); } +#[test] +pub fn test_update_fee_that_funder_cannot_afford() { + do_test_update_fee_that_funder_cannot_afford(ChannelTypeFeatures::only_static_remote_key()); + do_test_update_fee_that_funder_cannot_afford( + ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), + ); +} + #[test] fn test_update_fee_with_fundee_update_add_htlc() { let chanmon_cfgs = create_chanmon_cfgs(2); From 96e0f34bb4b3c61ab958bc554937077727c6300b Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 23 May 2025 20:26:56 +0000 Subject: [PATCH 06/21] Bump the `lightning` crate version to 0.1.4 --- lightning/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index aaf6f60023c..d0c7336653a 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning" -version = "0.1.3" +version = "0.1.4" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" From 1fcaca29e20c5d17ea4e8cf231a39c295dfbb8de Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 23 May 2025 20:27:05 +0000 Subject: [PATCH 07/21] Add release notes for LDK 0.1.4 --- CHANGELOG.md | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
80652dcb743..7c68aba4c31 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,28 @@ +# 0.1.4 - May 23, 2025 - "Careful Validation of Bogus States" + +## Bug Fixes + * In cases where using synchronous persistence with higher latency than the + latency to communicate with peers caused issues fixed in 0.1.2, + `ChannelManager`s may have been left in a state which LDK 0.1.2 and later + would refuse to deserialize. This has been fixed and nodes which experienced + this issue prior to 0.1.2 should now deserialize fine (#3790). + * In some cases, when using synchronous persistence with higher latency than + the latency to communicate with peers, when receiving an MPP payment with + multiple parts received over the same channel, a channel could hang and not + make progress, eventually leading to a force-closure due to timed-out HTLCs. + This has now been fixed (#3680). + +## Security +0.1.4 fixes a funds-theft vulnerability in exceedingly rare cases. + * If an LDK-based node funds an anchor channel to a malicious peer, and that + peer sets the channel reserve on the LDK-based node to zero, the LDK-node + could overdraw its total balance upon increasing the feerate of the + commitment transaction. If the malicious peer forwards HTLCs through the + LDK-based node, this could leave the LDK-based node with no valid commitment + transaction to broadcast to claim its part of the forwarded HTLC. The + counterparty would have to forfeit their reserve value (#3796). + + # 0.1.3 - Apr 30, 2025 - "Routing Unicode in 2025" ## Bug Fixes From 502b9b5aff65670b6c058330eef14b15d52ceed9 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 27 Mar 2025 19:45:24 +0000 Subject: [PATCH 08/21] Only run aggressive `test_node_counter_consistency` in tests `test_node_counter_consistency` can make gossip operations *really* slow. This makes it a pretty bad idea in a general node just running in debug mode. It also makes our `lightning-rapid-gossip-sync` real-world test painfully slow. Thus, here, we make `test_node_counter_consistency` only actually run in the `lightning`-crate tests, rather than always with `debug_assertions`. --- lightning/src/routing/gossip.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index fc097d5f915..7eb82722134 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -1779,7 +1779,7 @@ where } fn test_node_counter_consistency(&self) { - #[cfg(debug_assertions)] + #[cfg(test)] { let channels = self.channels.read().unwrap(); let nodes = self.nodes.read().unwrap(); From b1e9921c5750e8c69e2c8ffe33a6d6727631931c Mon Sep 17 00:00:00 2001 From: Fuyin Date: Fri, 13 Jun 2025 21:11:05 +0800 Subject: [PATCH 09/21] Fix `update_id` gap during `force_shutdown` When a channel is force-closed, there might be blocked monitor updates not yet applied. But `latest_monitor_update_id` has been incremented and assigned to these updates. This results in a panic when trying to apply the `ChannelForceClosed` update. Use the unblocked update id instead. Resolves: #3857 Conflicts resolved in: * lightning/src/ln/channel.rs due to `rustfmt`-induced changes. 
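The core of the fix is the helper added in the diff below: the `ChannelForceClosed` update must be numbered after the last update id actually released, not after ids already handed out to still-blocked updates:

    pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
        if self.blocked_monitor_updates.is_empty() {
            return self.get_latest_monitor_update_id();
        }
        // Blocked updates hold consecutive ids from here upward, so the last
        // released id sits just below the first blocked one.
        self.blocked_monitor_updates[0].update.update_id - 1
    }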
--- lightning/src/ln/channel.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 96492ef97f2..cbfb26daa49 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2452,6 +2452,13 @@ impl ChannelContext where SP::Target: SignerProvider { self.latest_monitor_update_id } + pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 { + if self.blocked_monitor_updates.is_empty() { + return self.get_latest_monitor_update_id(); + } + self.blocked_monitor_updates[0].update.update_id - 1 + } + pub fn should_announce(&self) -> bool { self.config.announce_for_forwarding } @@ -3890,7 +3897,7 @@ impl ChannelContext where SP::Target: SignerProvider { // monitor update to the user, even if we return one). // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more. if !self.channel_state.is_pre_funded_state() { - self.latest_monitor_update_id += 1; + self.latest_monitor_update_id = self.get_latest_unblocked_monitor_update_id() + 1; Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate { update_id: self.latest_monitor_update_id, counterparty_node_id: Some(self.counterparty_node_id), @@ -7128,8 +7135,7 @@ impl Channel where /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight. pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 { - if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); } - self.context.blocked_monitor_updates[0].update.update_id - 1 + self.context.get_latest_unblocked_monitor_update_id() } /// Returns the next blocked monitor update, if one exists, and a bool which indicates a From d2f3d1f8db3381cdff276a9dea984cb1ff5ae472 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 25 Jun 2025 02:38:33 +0000 Subject: [PATCH 10/21] Skip storing an explicit `node_id` in `RouteGraphNode` `RouteGraphNode` is the main heap entry in our Dijkstra's next-best heap. Thus, because it's rather constantly being sorted, we care a good bit about its size, as fitting more of them on a cache line can provide some additional speed. In 43d250dadcdad54836eacd8b447bb36d5c8e6cb5, we switched from tracking nodes during pathfinding by their `NodeId` to a "counter" which allows us to avoid `HashMap` lookups for much of the pathfinding process. Because the `dist` lookup is now quite cheap (it's just a `Vec`), there's no reason to track `NodeId`s in the heap entries. Instead, we simply fetch the `NodeId` of the node via the `dist` map by examining its `candidate`'s pointer to its source `NodeId`. This allows us to remove the `NodeId` from `RouteGraphNode`, moving it from 64 to 32 bytes. This allows us to expand the `score` field size in a coming commit without expanding `RouteGraphNode`'s size. While we were doing the `dist` lookup in `add_entries_to_cheapest_to_target_node` anyway, the `NodeId` lookup via the `candidate` may not be free. Still, avoiding expanding `RouteGraphNode` above 128 bytes in a few commits is a nice win. 
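The size arithmetic, as a rough sketch (a `NodeId` is a 33-byte serialized public key; the remaining field sizes follow the struct in the diff below):

    // With the NodeId:  33 + 4 (counter) + 8 (score) + 8 (value) + 4 (cltv) + 1 (len)
    //                   = 58 bytes, padded out to the forced 64-byte alignment.
    // Without it:        4 + 8 + 8 + 4 + 1 = 25 bytes, fitting the new 32-byte repr:
    const _GRAPH_NODE_32: () = assert!(core::mem::size_of::<RouteGraphNode>() == 32);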
--- lightning/src/routing/router.rs | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index c09a014dc62..681e285dd7c 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1161,9 +1161,8 @@ impl_writeable_tlv_based!(RouteHintHop, { }); #[derive(Eq, PartialEq)] -#[repr(align(64))] // Force the size to 64 bytes +#[repr(align(32))] // Force the size to 32 bytes struct RouteGraphNode { - node_id: NodeId, node_counter: u32, score: u64, // The maximum value a yet-to-be-constructed payment path might flow through this node. @@ -1193,9 +1192,8 @@ impl cmp::PartialOrd for RouteGraphNode { } // While RouteGraphNode can be laid out with fewer bytes, performance appears to be improved -// substantially when it is laid out at exactly 64 bytes. -const _GRAPH_NODE_SMALL: usize = 64 - core::mem::size_of::(); -const _GRAPH_NODE_FIXED_SIZE: usize = core::mem::size_of::() - 64; +// substantially when it is laid out at exactly 32 bytes. +const _GRAPH_NODE_32: () = assert!(core::mem::size_of::() == 32); /// A [`CandidateRouteHop::FirstHop`] entry. #[derive(Clone, Debug)] @@ -2747,7 +2745,6 @@ where L::Target: Logger { } let new_graph_node = RouteGraphNode { - node_id: src_node_id, node_counter: src_node_counter, score: cmp::max(total_fee_msat, path_htlc_minimum_msat).saturating_add(path_penalty_msat), total_cltv_delta: hop_total_cltv_delta, @@ -2824,7 +2821,7 @@ where L::Target: Logger { // meaning how much will be paid in fees after this node (to the best of our knowledge). // This data can later be helpful to optimize routing (pay lower fees). macro_rules! add_entries_to_cheapest_to_target_node { - ( $node: expr, $node_counter: expr, $node_id: expr, $next_hops_value_contribution: expr, + ( $node_counter: expr, $node_id: expr, $next_hops_value_contribution: expr, $next_hops_cltv_delta: expr, $next_hops_path_length: expr ) => { let fee_to_target_msat; let next_hops_path_htlc_minimum_msat; @@ -2880,7 +2877,7 @@ where L::Target: Logger { } } - if let Some(node) = $node { + if let Some(node) = network_nodes.get(&$node_id) { let features = if let Some(node_info) = node.announcement_info.as_ref() { &node_info.features() } else { @@ -3007,7 +3004,7 @@ where L::Target: Logger { entry.value_contribution_msat = path_value_msat; } add_entries_to_cheapest_to_target_node!( - network_nodes.get(&payee), payee_node_counter, payee, path_value_msat, 0, 0 + payee_node_counter, payee, path_value_msat, 0, 0 ); } @@ -3082,11 +3079,11 @@ where L::Target: Logger { // Both these cases (and other cases except reaching recommended_value_msat) mean that // paths_collection will be stopped because found_new_path==false. // This is not necessarily a routing failure. - 'path_construction: while let Some(RouteGraphNode { node_id, node_counter, total_cltv_delta, mut value_contribution_msat, path_length_to_node, .. }) = targets.pop() { + 'path_construction: while let Some(RouteGraphNode { node_counter, total_cltv_delta, mut value_contribution_msat, path_length_to_node, .. }) = targets.pop() { // Since we're going payee-to-payer, hitting our node as a target means we should stop // traversing the graph and arrange the path out of what we found. 
- if node_id == our_node_id {
+ if node_counter == payer_node_counter {
 let mut new_entry = dist[payer_node_counter as usize].take().unwrap();
 let mut ordered_hops: Vec<(PathBuildingHop, NodeFeatures)> = vec!((new_entry.clone(), default_node_features.clone()));
@@ -3209,13 +3206,20 @@ where L::Target: Logger {
 // If we found a path back to the payee, we shouldn't try to process it again. This is
 // the equivalent of the `elem.was_processed` check in
 // add_entries_to_cheapest_to_target_node!() (see comment there for more info).
- if node_id == maybe_dummy_payee_node_id { continue 'path_construction; }
+ if node_counter == payee_node_counter { continue 'path_construction; }
+
+ let node_id = if let Some(entry) = &dist[node_counter as usize] {
+ entry.candidate.source()
+ } else {
+ debug_assert!(false, "Best nodes in the heap should have entries in dist");
+ continue 'path_construction;
+ };

 // Otherwise, since the current target node is not us,
 // keep "unrolling" the payment graph from payee to payer by
 // finding a way to reach the current target from the payer side.
 add_entries_to_cheapest_to_target_node!(
- network_nodes.get(&node_id), node_counter, node_id,
+ node_counter, node_id,
 value_contribution_msat, total_cltv_delta, path_length_to_node
 );

From 6577c4a1f7a8abf3da45ead96247114b193d24c9 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 25 Jun 2025 02:49:17 +0000
Subject: [PATCH 11/21] Reduce `total_cltv_delta` size in `RouteGraphNode`

We track the total CLTV from the recipient to the current hop in `RouteGraphNode` so that we can limit its total during pathfinding. While it's great to use a `u32` for that to match existing CLTV types, allowing a total CLTV limit of 64K blocks (455 days) is somewhat absurd, so here we swap the `total_cltv_delta` to a `u16`.

This keeps `RouteGraphNode` at 32 bytes in a coming commit as we expand `score`.

--- lightning/src/routing/router.rs | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs
index 681e285dd7c..e6025fa4802 100644
--- a/lightning/src/routing/router.rs
+++ b/lightning/src/routing/router.rs
@@ -1171,7 +1171,7 @@ struct RouteGraphNode {
 // - how much value channels following this node (up to the destination) can contribute,
 // considering their capacity and fees
 value_contribution_msat: u64,
- total_cltv_delta: u32,
+ total_cltv_delta: u16,
 /// The number of hops walked up to this node.
 path_length_to_node: u8,
 }

@@ -2440,6 +2440,16 @@ where L::Target: Logger {
 // drop the requirement by setting this to 0.
 let mut channel_saturation_pow_half = payment_params.max_channel_saturation_power_of_half;

+ // In order to already account for some of the privacy enhancing random CLTV
+ // expiry delta offset we add on top later, we subtract a rough estimate
+ // (2*MEDIAN_HOP_CLTV_EXPIRY_DELTA) here.
+ let max_total_cltv_expiry_delta: u16 =
+ (payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta)
+ .checked_sub(2*MEDIAN_HOP_CLTV_EXPIRY_DELTA)
+ .unwrap_or(payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta)
+ .try_into()
+ .unwrap_or(u16::MAX);
+
 // Keep track of how much liquidity has been used in selected channels or blinded paths. Used to
 // determine if the channel can be used by additional MPP paths or to inform path finding
 // decisions.
It is aware of direction *only* to ensure that the correct htlc_maximum_msat value
@@ -2529,15 +2539,9 @@ where L::Target: Logger {
 let exceeds_max_path_length = path_length_to_node > max_path_length;

 // Do not consider candidates that exceed the maximum total cltv expiry limit.
- // In order to already account for some of the privacy enhancing random CLTV
- // expiry delta offset we add on top later, we subtract a rough estimate
- // (2*MEDIAN_HOP_CLTV_EXPIRY_DELTA) here.
- let max_total_cltv_expiry_delta = (payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta)
- .checked_sub(2*MEDIAN_HOP_CLTV_EXPIRY_DELTA)
- .unwrap_or(payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta);
 let hop_total_cltv_delta = ($next_hops_cltv_delta as u32)
 .saturating_add(cltv_expiry_delta);
- let exceeds_cltv_delta_limit = hop_total_cltv_delta > max_total_cltv_expiry_delta;
+ let exceeds_cltv_delta_limit = hop_total_cltv_delta > max_total_cltv_expiry_delta as u32;
 let value_contribution_msat = cmp::min(available_value_contribution_msat, $next_hops_value_contribution);

 // Includes paying fees for the use of the following channels.
@@ -2742,12 +2746,13 @@ where L::Target: Logger {
 #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
 {
 assert!(!old_entry.best_path_from_hop_selected);
+ assert!(hop_total_cltv_delta <= u16::MAX as u32);
 }

 let new_graph_node = RouteGraphNode {
 node_counter: src_node_counter,
 score: cmp::max(total_fee_msat, path_htlc_minimum_msat).saturating_add(path_penalty_msat),
- total_cltv_delta: hop_total_cltv_delta,
+ total_cltv_delta: hop_total_cltv_delta as u16,
 value_contribution_msat,
 path_length_to_node,
 };

From e7c1f8fb754eb7e1d87104319c4b5842fcb9d280 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 25 Jun 2025 11:53:51 +0000
Subject: [PATCH 12/21] Use `cost / path amt limit` as the pathfinding score, not `cost`

While walking nodes in our Dijkstra's pathfinding, we may find a channel which is amount-limited to less than the amount we're currently trying to send. This is fine, and when we encounter such nodes we simply limit the amount we'd send in this path if we pick the channel.

When we encounter such a path, we keep summing the cost across hops as we go, keeping whatever scores we assigned to channels between the amount-limited one and the recipient, but using the new limited amount for any channels we look at later as we walk towards the sender. This leads to somewhat inconsistent scores, especially as a large portion of our scorer's penalties, and a portion of network fees, are proportional to the amount. Thus, we end up with a somewhat higher score than we "should" for this path as later hops use a high proportional cost. We accepted this as a simple way to bias against small-value paths and many MPP parts.

Sadly, in practice it appears our bias is not strong enough, as several users have reported that we often attempt far too many MPP parts. In practice, if we encounter a channel with a small limit early in the Dijkstra's pass (towards the end of the path), we may prefer it over many other paths as we start assigning very low costs early on before we've accumulated much cost from larger channels.

Here, we swap the `cost` Dijkstra's score for `cost / path amount`. This should bias much more strongly against many MPP parts by preferring larger paths proportionally to their amount. This somewhat better aligns with our goal - if we have to pick multiple paths, we should be searching for paths that optimize fee-per-sat-sent, not strictly the fee paid.
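As a rough sketch of the new score (illustrative only; the hunks below contain the real logic), the cost is shifted into the upper 64 bits of a `u128` before dividing by the amount, so the integer division loses essentially no precision and no floating point is needed:

    // Sketch of the fixed-point cost-per-msat score (names are illustrative).
    fn cost_per_msat(fee_cost_msat: u64, value_msat: u64) -> u128 {
        if value_msat == 0 || fee_cost_msat == u64::MAX {
            u128::MAX
        } else {
            // Shift the cost into the top half of a u128; since value_msat fits
            // in a u64, the quotient keeps ~64 bits of fractional precision.
            ((fee_cost_msat as u128) << 64) / value_msat as u128
        }
    }

    fn main() {
        // A single 2-sat-fee path carrying 3M msat scores better than a
        // 1-sat-fee path which can only carry 1M msat.
        assert!(cost_per_msat(2_000, 3_000_000) < cost_per_msat(1_000, 1_000_000));
        println!("larger path wins on fee-per-msat-sent");
    }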
However, it might bias us against smaller paths somewhat more strongly than we want - because we're still using the fees/scores calculated with the sought amount for hops processed already, but are now dividing by a smaller sent amount when walking further hops, we will bias "incorrectly" (and fairly strongly) against smaller parts.

Still, because of the complaints about pathfinding performance due to too many MPP paths, it seems like a worthwhile tradeoff, as ultimately MPP splitting is always the domain of heuristics anyway.

--- lightning/src/routing/router.rs | 260 ++++++++++++++++++++++++++-- lightning/src/routing/test_utils.rs | 2 +- 2 files changed, 246 insertions(+), 16 deletions(-)

diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs
index e6025fa4802..33ad0f936ba 100644
--- a/lightning/src/routing/router.rs
+++ b/lightning/src/routing/router.rs
@@ -1164,7 +1164,7 @@ impl_writeable_tlv_based!(RouteHintHop, {
 #[repr(align(32))] // Force the size to 32 bytes
 struct RouteGraphNode {
 node_counter: u32,
- score: u64,
+ score: u128,
 // The maximum value a yet-to-be-constructed payment path might flow through this node.
 // This value is upper-bounded by us by:
 // - how much is needed for a path being constructed
@@ -1877,6 +1877,22 @@ impl<'a> PaymentPath<'a> {
 return result;
 }

+ /// Gets the cost (fees plus scorer penalty in msats) of the path divided by the value we
+ /// can/will send over the path. This is also the heap score during our Dijkstra's walk.
+ fn get_cost_per_msat(&self) -> u128 {
+ let fee_cost = self.get_cost_msat();
+ let value_msat = self.get_value_msat();
+ debug_assert!(value_msat > 0, "Paths should always send more than 0 msat");
+ if fee_cost == u64::MAX || value_msat == 0 {
+ u64::MAX.into()
+ } else {
+ // In order to avoid integer division precision loss, we simply shift the costs up to
+ // the top half of a u128 and divide by the value (which is, at max, just under a u64).
+ ((fee_cost as u128) << 64) / value_msat as u128
+ }
+ }
+
+ /// Gets the fees plus scorer penalty in msats of the path.
 fn get_cost_msat(&self) -> u64 {
 self.get_total_fee_paid_msat().saturating_add(self.get_path_penalty_msat())
 }
@@ -2531,8 +2547,6 @@ where L::Target: Logger {
 *used_liquidity_msat
 });

- // Verify the liquidity offered by this channel complies to the minimal contribution.
- let contributes_sufficient_value = available_value_contribution_msat >= minimal_value_contribution_msat;
 // Do not consider candidate hops that would exceed the maximum path length.
 let path_length_to_node = $next_hops_path_length
 + if $candidate.blinded_hint_idx().is_some() { 0 } else { 1 };
 let exceeds_max_path_length = path_length_to_node > max_path_length;
@@ -2544,6 +2558,8 @@ where L::Target: Logger {
 let exceeds_cltv_delta_limit = hop_total_cltv_delta > max_total_cltv_expiry_delta as u32;
 let value_contribution_msat = cmp::min(available_value_contribution_msat, $next_hops_value_contribution);
+ // Verify the liquidity offered by this channel complies to the minimal contribution.
+ let contributes_sufficient_value = value_contribution_msat >= minimal_value_contribution_msat;
 // Includes paying fees for the use of the following channels.
let amount_to_transfer_over_msat: u64 = match value_contribution_msat.checked_add($next_hops_fee_msat) { Some(result) => result, @@ -2693,7 +2709,7 @@ where L::Target: Logger { // Ignore hops if augmenting the current path to them would put us over `max_total_routing_fee_msat` if total_fee_msat > max_total_routing_fee_msat { if should_log_candidate { - log_trace!(logger, "Ignoring {} due to exceeding max total routing fee limit.", LoggedCandidateHop(&$candidate)); + log_trace!(logger, "Ignoring {} with fee {total_fee_msat} due to exceeding max total routing fee limit {max_total_routing_fee_msat}.", LoggedCandidateHop(&$candidate)); if let Some(_) = first_hop_details { log_trace!(logger, @@ -2734,15 +2750,31 @@ where L::Target: Logger { // but it may require additional tracking - we don't want to double-count // the fees included in $next_hops_path_htlc_minimum_msat, but also // can't use something that may decrease on future hops. - let old_cost = cmp::max(old_entry.total_fee_msat, old_entry.path_htlc_minimum_msat) + let old_fee_cost = cmp::max(old_entry.total_fee_msat, old_entry.path_htlc_minimum_msat) .saturating_add(old_entry.path_penalty_msat); - let new_cost = cmp::max(total_fee_msat, path_htlc_minimum_msat) + let new_fee_cost = cmp::max(total_fee_msat, path_htlc_minimum_msat) .saturating_add(path_penalty_msat); - let should_replace = - new_cost < old_cost - || (new_cost == old_cost && old_entry.value_contribution_msat < value_contribution_msat); + // The actual score we use for our heap is the cost divided by how + // much we are thinking of sending over this channel. This avoids + // prioritizing channels that have a very low fee because we aren't + // sending very much over them. + // In order to avoid integer division precision loss, we simply + // shift the costs up to the top half of a u128 and divide by the + // value (which is, at max, just under a u64). + let old_cost = if old_fee_cost != u64::MAX && old_entry.value_contribution_msat != 0 { + ((old_fee_cost as u128) << 64) / old_entry.value_contribution_msat as u128 + } else { + u128::MAX + }; + let new_cost = if new_fee_cost != u64::MAX { + // value_contribution_msat is always >= 1, checked above via + // `contributes_sufficient_value`. + ((new_fee_cost as u128) << 64) / value_contribution_msat as u128 + } else { + u128::MAX + }; - if !old_entry.was_processed && should_replace { + if !old_entry.was_processed && new_cost < old_cost { #[cfg(all(not(ldk_bench), any(test, fuzzing)))] { assert!(!old_entry.best_path_from_hop_selected); @@ -2751,7 +2783,7 @@ where L::Target: Logger { let new_graph_node = RouteGraphNode { node_counter: src_node_counter, - score: cmp::max(total_fee_msat, path_htlc_minimum_msat).saturating_add(path_penalty_msat), + score: new_cost, total_cltv_delta: hop_total_cltv_delta as u16, value_contribution_msat, path_length_to_node, @@ -3299,10 +3331,7 @@ where L::Target: Logger { // First, sort by the cost-per-value of the path, dropping the paths that cost the most for // the value they contribute towards the payment amount. // We sort in descending order as we will remove from the front in `retain`, next. - selected_route.sort_unstable_by(|a, b| - (((b.get_cost_msat() as u128) << 64) / (b.get_value_msat() as u128)) - .cmp(&(((a.get_cost_msat() as u128) << 64) / (a.get_value_msat() as u128))) - ); + selected_route.sort_unstable_by(|a, b| b.get_cost_per_msat().cmp(&a.get_cost_per_msat())); // We should make sure that at least 1 path left. 
let mut paths_left = selected_route.len();
@@ -8654,6 +8683,207 @@ mod tests {
 assert_eq!(route.paths[0].hops[0].short_channel_id, 44);
 }
+
+ #[test]
+ fn prefers_paths_by_cost_amt_ratio() {
+ // Previously, we preferred paths during MPP selection based on their absolute cost, rather
+ // than the cost-per-amount-transferred. This could result in selecting many MPP paths with
+ // relatively low value contribution, rather than one large path which is ultimately
+ // cheaper. While this is a tradeoff (and not universally better), in practice the old
+ // behavior was problematic, so we shifted to a proportional cost.
+ //
+ // Here we check that the proportional cost is being used in a somewhat absurd setup where
+ // we have one good path and several cheaper, but smaller paths.
+ let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
+ let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
+ let scorer = ln_test_utils::TestScorer::new();
+ let random_seed_bytes = [42; 32];
+
+ // Enable channel 1
+ let update_1 = UnsignedChannelUpdate {
+ chain_hash: ChainHash::using_genesis_block(Network::Testnet),
+ short_channel_id: 1,
+ timestamp: 2,
+ message_flags: 1, // Only must_be_one
+ channel_flags: 0,
+ cltv_expiry_delta: (1 << 4) | 1,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: 10_000_000,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new(),
+ };
+ update_channel(&gossip_sync, &secp_ctx, &our_privkey, update_1);
+
+ // Set the fee on channel 3 to 1 sat, max HTLC to 1M msat
+ let update_3 = UnsignedChannelUpdate {
+ chain_hash: ChainHash::using_genesis_block(Network::Testnet),
+ short_channel_id: 3,
+ timestamp: 2,
+ message_flags: 1, // Only must_be_one
+ channel_flags: 0,
+ cltv_expiry_delta: (3 << 4) | 1,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: 1_000_000,
+ fee_base_msat: 1_000,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new(),
+ };
+ update_channel(&gossip_sync, &secp_ctx, &privkeys[0], update_3);
+
+ // Set the fee on channel 13 to 1 sat, max HTLC to 1M msat
+ let update_13 = UnsignedChannelUpdate {
+ chain_hash: ChainHash::using_genesis_block(Network::Testnet),
+ short_channel_id: 13,
+ timestamp: 2,
+ message_flags: 1, // Only must_be_one
+ channel_flags: 0,
+ cltv_expiry_delta: (13 << 4) | 1,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: 1_000_000,
+ fee_base_msat: 1_000,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new(),
+ };
+ update_channel(&gossip_sync, &secp_ctx, &privkeys[7], update_13);
+
+ // Set the fee on channel 4 to 1 sat, max HTLC to 1M msat
+ let update_4 = UnsignedChannelUpdate {
+ chain_hash: ChainHash::using_genesis_block(Network::Testnet),
+ short_channel_id: 4,
+ timestamp: 2,
+ message_flags: 1, // Only must_be_one
+ channel_flags: 0,
+ cltv_expiry_delta: (4 << 4) | 1,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: 1_000_000,
+ fee_base_msat: 1_000,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new(),
+ };
+ update_channel(&gossip_sync, &secp_ctx, &privkeys[1], update_4);
+
+ // The router will attempt to gather 3x the requested amount, and if it finds the new path
+ // through channel 16, added below, it'll always prefer that, even prior to the changes
+ // which introduced this test.
+ // Instead, we add 6 additional channels so that the pathfinder always just gathers useless
+ // paths first.
+ for i in 0..6 {
+ // Create one of several cheap channels from node 7 to node 2 with a 1 sat fee and a
+ // small max-HTLC, giving the pathfinder several small paths to gather first.
+ let chan_features = ChannelFeatures::from_le_bytes(vec![]);
+ add_channel(&gossip_sync, &secp_ctx, &privkeys[7], &privkeys[2], chan_features, i + 42);
+
+ // Set the fee on this channel to 1 sat, max HTLC to 1M msat
+ let update_a = UnsignedChannelUpdate {
+ chain_hash: ChainHash::using_genesis_block(Network::Testnet),
+ short_channel_id: i + 42,
+ timestamp: 2,
+ message_flags: 1, // Only must_be_one
+ channel_flags: 0,
+ cltv_expiry_delta: (42 << 4) | 1,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: 1_000_000,
+ fee_base_msat: 1_000,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new(),
+ };
+ update_channel(&gossip_sync, &secp_ctx, &privkeys[7], update_a);
+
+ // Enable the channel by providing an update in both directions
+ let update_b = UnsignedChannelUpdate {
+ chain_hash: ChainHash::using_genesis_block(Network::Testnet),
+ short_channel_id: i + 42,
+ timestamp: 2,
+ message_flags: 1, // Only must_be_one
+ channel_flags: 1,
+ cltv_expiry_delta: (42 << 4) | 1,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: 10_000_000,
+ fee_base_msat: u32::MAX,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new(),
+ };
+ update_channel(&gossip_sync, &secp_ctx, &privkeys[2], update_b);
+ }
+
+ // Ensure that we can build a route for 3M msat across the three paths to node 2.
+ let config = UserConfig::default();
+ let mut payment_params = PaymentParameters::from_node_id(nodes[2], 42)
+ .with_bolt11_features(channelmanager::provided_bolt11_invoice_features(&config))
+ .unwrap();
+ payment_params.max_channel_saturation_power_of_half = 0;
+ let route_params =
+ RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
+ let route = get_route(
+ &our_id,
+ &route_params,
+ &network_graph.read_only(),
+ None,
+ Arc::clone(&logger),
+ &scorer,
+ &Default::default(),
+ &random_seed_bytes,
+ )
+ .unwrap();
+ assert_eq!(route.paths.len(), 3);
+ for path in route.paths {
+ assert_eq!(path.hops.len(), 2);
+ }
+
+ // Finally, create a single channel with fee of 2 sat from node 1 to node 2 which allows
+ // for a larger payment.
+ let features_16 = ChannelFeatures::from_le_bytes(id_to_feature_flags(16));
+ add_channel(&gossip_sync, &secp_ctx, &privkeys[1], &privkeys[2], features_16, 16);
+
+ // Set the fee on channel 16 to 2 sats, max HTLC to 3M msat
+ let update_16_a = UnsignedChannelUpdate {
+ chain_hash: ChainHash::using_genesis_block(Network::Testnet),
+ short_channel_id: 16,
+ timestamp: 2,
+ message_flags: 1, // Only must_be_one
+ channel_flags: 0,
+ cltv_expiry_delta: (16 << 4) | 1,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: 3_000_000,
+ fee_base_msat: 2_000,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new(),
+ };
+ update_channel(&gossip_sync, &secp_ctx, &privkeys[1], update_16_a);
+
+ // Enable channel 16 by providing an update in both directions
+ let update_16_b = UnsignedChannelUpdate {
+ chain_hash: ChainHash::using_genesis_block(Network::Testnet),
+ short_channel_id: 16,
+ timestamp: 2,
+ message_flags: 1, // Only must_be_one
+ channel_flags: 1,
+ cltv_expiry_delta: (16 << 4) | 1,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: 10_000_000,
+ fee_base_msat: u32::MAX,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new(),
+ };
+ update_channel(&gossip_sync, &secp_ctx, &privkeys[2], update_16_b);
+
+ // Ensure that we now build a route for 3M msat across just the new path
+ let route = get_route(
+ &our_id,
+ &route_params,
+ &network_graph.read_only(),
+ None,
+ Arc::clone(&logger),
+ &scorer,
+ &Default::default(),
+ &random_seed_bytes,
+ )
+ .unwrap();
+ assert_eq!(route.paths.len(), 1);
+ assert_eq!(route.paths[0].hops.len(), 2);
+ assert_eq!(route.paths[0].hops[1].short_channel_id, 16);
+ }
 }

 #[cfg(any(test, ldk_bench))]
diff --git a/lightning/src/routing/test_utils.rs b/lightning/src/routing/test_utils.rs
index 258652b575d..380f4dbe223 100644
--- a/lightning/src/routing/test_utils.rs
+++ b/lightning/src/routing/test_utils.rs
@@ -110,7 +110,7 @@ pub(crate) fn update_channel(
 match gossip_sync.handle_channel_update(Some(node_pubkey), &valid_channel_update) {
 Ok(res) => assert!(res),
- Err(_) => panic!()
+ Err(e) => panic!("{e:?}")
 };
 }

From 2fdd07ec07ff2b596144ce4d9745a10c3dc5fd7b Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 2 Jul 2025 22:51:28 +0000
Subject: [PATCH 13/21] Only mark all mon updates complete if there are no blocked updates

In `handle_new_monitor_update!`, we correctly check that the channel doesn't have any blocked monitor updates pending before calling `handle_monitor_update_completion!` (which calls `Channel::monitor_updating_restored`, which in turn assumes that all generated `ChannelMonitorUpdate`s, including blocked ones, have completed).

We, however, did not do the same check at several other places where we called `handle_monitor_update_completion!`. Specifically, after a monitor update completed during reload (processed via a `BackgroundEvent`) or when a monitor update completed async, we didn't check if there were any blocked monitor updates before completing.

Here we add the missing check, as well as an assertion in `Channel::monitor_updating_restored`.

Conflicts resolved in:
 * lightning/src/ln/chanmon_update_fail_tests.rs due to `rustfmt`-induced changes as well as other test cleanups.
* lightning/src/ln/channelmanager.rs due to upstream Channel object refactoring
 * lightning/src/ln/quiescence_tests.rs which were dropped as they were fixing a test which only exists upstream

--- lightning/src/ln/chanmon_update_fail_tests.rs | 78 ++++++++++++++++--- lightning/src/ln/channel.rs | 1 + lightning/src/ln/channelmanager.rs | 12 ++- 3 files changed, 77 insertions(+), 14 deletions(-)

diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index ad1e6c26b98..657e089d293 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -2931,16 +2931,28 @@ fn test_inbound_reload_without_init_mon() {
 do_test_inbound_reload_without_init_mon(false, false);
 }

-#[test]
-fn test_blocked_chan_preimage_release() {
+#[derive(PartialEq, Eq)]
+enum BlockedUpdateComplMode {
+ Async,
+ AtReload,
+ Sync,
+}
+
+fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode) {
 // Test that even if a channel's `ChannelMonitorUpdate` flow is blocked waiting on an event to
 // be handled, HTLC preimage `ChannelMonitorUpdate`s will still go out.
 let chanmon_cfgs = create_chanmon_cfgs(3);
 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let persister;
+ let new_chain_mon;
 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let nodes_1_reload;
 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

- create_announced_chan_between_nodes(&nodes, 0, 1);
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+
+ let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
 let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;

 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5_000_000);
@@ -2968,25 +2980,62 @@ fn test_blocked_chan_preimage_release() {
 expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000);

 let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ if completion_mode != BlockedUpdateComplMode::Sync {
+ // We used to incorrectly handle monitor update completion in cases where we completed a
+ // monitor update async or after reload. We test both based on the `completion_mode`.
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ }
 nodes[1].node.handle_update_fulfill_htlc(nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]);
 check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update
 assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2));
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ if completion_mode == BlockedUpdateComplMode::AtReload {
+ let node_ser = nodes[1].node.encode();
+ let chan_mon_0 = get_monitor!(nodes[1], chan_id_1).encode();
+ let chan_mon_1 = get_monitor!(nodes[1], chan_id_2).encode();
+
+ let mons = &[&chan_mon_0[..], &chan_mon_1[..]];
+ reload_node!(nodes[1], &node_ser, mons, persister, new_chain_mon, nodes_1_reload);
+
+ nodes[0].node.peer_disconnected(node_b_id);
+ nodes[2].node.peer_disconnected(node_b_id);
+
+ let mut a_b_reconnect = ReconnectArgs::new(&nodes[0], &nodes[1]);
+ a_b_reconnect.pending_htlc_claims.1 = 1;
+ // Note that we will expect no final RAA monitor update in
+ // `commitment_signed_dance_through_cp_raa` during the reconnect, matching the below case.
+ reconnect_nodes(a_b_reconnect); + reconnect_nodes(ReconnectArgs::new(&nodes[2], &nodes[1])); + } else if completion_mode == BlockedUpdateComplMode::Async { + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_2).unwrap().clone(); + nodes[1] + .chain_monitor + .chain_monitor + .channel_monitor_updated(outpoint, latest_update) + .unwrap(); + } // Finish the CS dance between nodes[0] and nodes[1]. Note that until the event handling, the // update_fulfill_htlc + CS is held, even though the preimage is already on disk for the // channel. - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed); - check_added_monitors(&nodes[1], 1); - let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); - assert!(a.is_none()); + // Note that when completing as a side effect of a reload we completed the CS dance in + // `reconnect_nodes` above. + if completion_mode != BlockedUpdateComplMode::AtReload { + nodes[1].node.handle_commitment_signed( + node_a_id, + &as_htlc_fulfill_updates.commitment_signed, + ); + check_added_monitors(&nodes[1], 1); + let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); + assert!(a.is_none()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa); - check_added_monitors(&nodes[1], 0); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); + check_added_monitors(&nodes[1], 0); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + } let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 3); + assert_eq!(events.len(), 3, "{events:?}"); if let Event::PaymentSent { .. } = events[0] {} else { panic!(); } if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); } if let Event::PaymentForwarded { .. 
} = events[1] {} else { panic!(); } @@ -3004,6 +3053,13 @@ fn test_blocked_chan_preimage_release() { expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true); } +#[test] +fn test_blocked_chan_preimage_release() { + do_test_blocked_chan_preimage_release(BlockedUpdateComplMode::AtReload); + do_test_blocked_chan_preimage_release(BlockedUpdateComplMode::Sync); + do_test_blocked_chan_preimage_release(BlockedUpdateComplMode::Async); +} + fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_commitment_dance: bool) { // When we forward a payment and receive `update_fulfill_htlc`+`commitment_signed` messages // from the downstream channel, we immediately claim the HTLC on the upstream channel, before diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index cbfb26daa49..d23363229f4 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -5941,6 +5941,7 @@ impl Channel where { assert!(self.context.channel_state.is_monitor_update_in_progress()); self.context.channel_state.clear_monitor_update_in_progress(); + assert_eq!(self.blocked_monitor_updates_pending(), 0); // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to // (re-)broadcast the funding transaction as we may have declined to broadcast it when we diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b973432056a..a9e14f17f99 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6347,7 +6347,9 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) { - handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + if chan.blocked_monitor_updates_pending() == 0 { + handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + } } else { let update_actions = peer_state.monitor_update_blocked_actions .remove(&channel_id).unwrap_or(Vec::new()); @@ -7625,8 +7627,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) { if chan.is_awaiting_monitor_update() { - log_trace!(logger, "Channel is open and awaiting update, resuming it"); - handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + if chan.blocked_monitor_updates_pending() == 0 { + log_trace!(logger, "Channel is open and awaiting update, resuming it"); + handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + } else { + log_trace!(logger, "Channel is open and awaiting update, leaving it blocked due to a blocked monitor update"); + } } else { log_trace!(logger, "Channel is open but not awaiting update"); } From 445550525a1e1309437f6af590dd92fcee299854 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 10 Jul 2025 00:41:37 +0000 Subject: [PATCH 14/21] Add a test utility to provide nodes with anchor reserves In a number of tests we require available UTXOs to do HTLC anchor claims by bringing our own fees. We previously wrote that out in each test, which is somewhat verbose, so here we simply add a test utility that gives each node a full BTC in a single UTXO. 
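For illustration, the per-test boilerplate collapses to a single call (a fragment, assuming the usual `functional_test_utils` harness is in scope, as in the hunks below):

    // Before: each anchors test hand-built a coinbase transaction paying its
    // nodes' change scripts. After: one helper call does it for every node.
    let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
    let coinbase_tx = provide_anchor_reserves(&nodes);
    // Each node now has a confirmed 1-BTC output (from `coinbase_tx`) paying
    // its change script, usable for bring-your-own-fees anchor/HTLC claims.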
Trivial conflicts resolved in: * lightning/src/ln/monitor_tests.rs --- lightning/src/ln/functional_test_utils.rs | 22 ++++ lightning/src/ln/monitor_tests.rs | 148 ++-------------------- 2 files changed, 36 insertions(+), 134 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 04295073861..420978ad5fc 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -335,6 +335,28 @@ fn do_connect_block_without_consistency_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b } } +pub fn provide_anchor_reserves<'a, 'b, 'c>(nodes: &[Node<'a, 'b, 'c>]) -> Transaction { + let mut output = Vec::with_capacity(nodes.len()); + for node in nodes { + output.push(TxOut { + value: Amount::ONE_BTC, + script_pubkey: node.wallet_source.get_change_script().unwrap(), + }); + } + let tx = Transaction { + version: TxVersion::TWO, + lock_time: LockTime::ZERO, + input: vec![TxIn { ..Default::default() }], + output, + }; + let height = nodes[0].best_block_info().1 + 1; + let block = create_dummy_block(nodes[0].best_block_hash(), height, vec![tx.clone()]); + for node in nodes { + do_connect_block_with_consistency_checks(node, block.clone(), false); + } + tx +} + pub fn disconnect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, count: u32) { call_claimable_balances(node); eprintln!("Disconnecting {} blocks using Block Connection Style: {:?}", count, *node.connect_style.borrow()); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 92b19790be5..ad2b0599598 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -13,7 +13,7 @@ use crate::sign::{ecdsa::EcdsaChannelSigner, OutputSpender, SpendableOutputDescr use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS,LATENCY_GRACE_PERIOD_BLOCKS, COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE, Balance, BalanceSource, ChannelMonitorUpdateStep}; use crate::chain::transaction::OutPoint; use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight}; -use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource}; +use crate::events::bump_transaction::BumpTransactionEvent; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; use crate::ln::channel; use crate::ln::types::ChannelId; @@ -462,25 +462,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![ - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; - if anchors { - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - nodes[1].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 }, coinbase_tx.output[1].value); - } + let coinbase_tx = provide_anchor_reserves(&nodes); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000); @@ -865,25 +847,7 @@ fn 
do_test_balances_on_local_commitment_htlcs(anchors: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![ - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; - if anchors { - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - nodes[1].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 }, coinbase_tx.output[1].value); - } + let coinbase_tx = provide_anchor_reserves(&nodes); // Create a single channel with two pending HTLCs from nodes[0] to nodes[1], one which nodes[1] // knows the preimage for, one which it does not. @@ -1650,25 +1614,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![ - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; - if anchors { - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - nodes[1].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 }, coinbase_tx.output[1].value); - } + let coinbase_tx = provide_anchor_reserves(&nodes); // Create some initial channels let (_, _, chan_id, funding_tx) = @@ -1951,16 +1897,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); + let coinbase_tx = provide_anchor_reserves(&nodes); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000); @@ -2241,25 +2178,7 @@ fn do_test_claimable_balance_correct_while_payment_pending(outbound_payment: boo let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(user_config), Some(user_config), Some(user_config)]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![ - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::ONE_BTC, - script_pubkey: 
nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; - if anchors { - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - nodes[1].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 }, coinbase_tx.output[1].value); - } + provide_anchor_reserves(&nodes); // Create a channel from A -> B let (_, _, chan_ab_id, funding_tx_ab) = @@ -2406,6 +2325,8 @@ fn do_test_monitor_rebroadcast_pending_claims(anchors: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let coinbase_tx = provide_anchor_reserves(&nodes); + let (_, _, _, chan_id, funding_tx) = create_chan_between_nodes_with_value( &nodes[0], &nodes[1], 1_000_000, 500_000_000 ); @@ -2424,17 +2345,6 @@ fn do_test_monitor_rebroadcast_pending_claims(anchors: bool) { false, [nodes[1].node.get_our_node_id()], 1000000); check_added_monitors(&nodes[0], 1); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![TxOut { // UTXO to attach fees to `htlc_tx` on anchors - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - // Set up a helper closure we'll use throughout our test. We should only expect retries without // bumps if fees have not increased after a block has been connected (assuming the height timer // re-evaluates at every block) or after `ChainMonitor::rebroadcast_pending_claims` is called. @@ -2538,6 +2448,8 @@ fn do_test_yield_anchors_events(have_htlcs: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config), Some(anchors_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let coinbase_tx = provide_anchor_reserves(&nodes); + let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value( &nodes, 0, 1, 1_000_000, 500_000_000 ); @@ -2613,16 +2525,6 @@ fn do_test_yield_anchors_events(have_htlcs: bool) { assert_eq!(holder_events.len(), 1); let (commitment_tx, anchor_tx) = match holder_events.pop().unwrap() { Event::BumpTransaction(event) => { - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![TxOut { // UTXO to attach fees to `anchor_tx` - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); nodes[0].bump_tx_handler.handle_event(&event); let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); assert_eq!(txn.len(), 2); @@ -2738,6 +2640,8 @@ fn test_anchors_aggregated_revoked_htlc_tx() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let coinbase_tx = provide_anchor_reserves(&nodes); + let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000); let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000); @@ -2796,18 +2700,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() { assert_eq!(events.len(), 2); let mut revoked_commitment_txs = Vec::with_capacity(events.len()); let mut anchor_txs = 
Vec::with_capacity(events.len());
- for (idx, event) in events.into_iter().enumerate() {
- let utxo_value = Amount::ONE_BTC * (idx + 1) as u64;
- let coinbase_tx = Transaction {
- version: Version::TWO,
- lock_time: LockTime::ZERO,
- input: vec![TxIn { ..Default::default() }],
- output: vec![TxOut { // UTXO to attach fees to `anchor_tx`
- value: utxo_value,
- script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(),
- }],
- };
- nodes[1].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, utxo_value);
+ for event in events {
 match event {
 Event::BumpTransaction(event) => nodes[1].bump_tx_handler.handle_event(&event),
 _ => panic!("Unexpected event"),
@@ -3125,20 +3018,7 @@ fn do_test_monitor_claims_with_random_signatures(anchors: bool, confirm_counterp
 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]);
 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

- let coinbase_tx = Transaction {
- version: Version::TWO,
- lock_time: LockTime::ZERO,
- input: vec![TxIn { ..Default::default() }],
- output: vec![
- TxOut {
- value: Amount::ONE_BTC,
- script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(),
- },
- ],
- };
- if anchors {
- nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value);
- }
+ let coinbase_tx = provide_anchor_reserves(&nodes);

 // Open a channel and route a payment. We'll let it timeout to claim it.
 let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);

From 56a9bf5a086991a1f4a6526f184630f3bf7263fd Mon Sep 17 00:00:00 2001
From: Fuyin
Date: Fri, 11 Jul 2025 01:13:32 +0800
Subject: [PATCH 15/21] Prune locktimed packages when inputs are spent

We have to prune locktimed packages when their inputs are spent, otherwise the notification of the watched outputs might be missed. This can lead to locktimed packages with spent inputs being added back to the pending claim requests in the future, and they are never cleaned up until node restart.

Resolves: #3859

Conflicts resolved in:
 * lightning/src/ln/functional_tests.rs due to upstream changes to the removed code
 * lightning/src/ln/monitor_tests.rs due to trivial upstream changes

--- lightning/src/chain/onchaintx.rs | 21 ++++++++++++++ lightning/src/ln/functional_tests.rs | 42 ++++++++++------------------ lightning/src/ln/monitor_tests.rs | 15 +++------- 3 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs
index 2a43b006920..9253975de42 100644
--- a/lightning/src/chain/onchaintx.rs
+++ b/lightning/src/chain/onchaintx.rs
@@ -278,6 +278,9 @@ pub struct OnchainTxHandler {
 #[cfg(not(test))]
 claimable_outpoints: HashMap<BitcoinOutPoint, (ClaimId, u32)>,

+ #[cfg(any(test, feature = "_test_utils"))]
+ pub(crate) locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,
+ #[cfg(not(any(test, feature = "_test_utils")))]
 locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,

 onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,
@@ -969,6 +972,17 @@ impl OnchainTxHandler {
 panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
 }
 }
+
+ // Also remove/split any locktimed packages whose inputs have been spent by this transaction.
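+ // If a package contains the spent outpoint, `split_package` carves that input
+ // out (returning it as its own package) so it can be recorded as claimed
+ // material below, while any remaining inputs stay queued on their locktime.
+ // Emptied packages, and emptied locktime buckets, are dropped entirely.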
+ self.locktimed_packages.retain(|_locktime, packages|{ + packages.retain_mut(|package| { + if let Some(p) = package.split_package(&inp.previous_output) { + claimed_outputs_material.push(p); + } + !package.outpoints().is_empty() + }); + !packages.is_empty() + }); } for package in claimed_outputs_material.drain(..) { let entry = OnchainEventEntry { @@ -1104,6 +1118,13 @@ impl OnchainTxHandler { //- resurect outpoint back in its claimable set and regenerate tx match entry.event { OnchainEvent::ContentiousOutpoint { package } => { + // We pass 0 to `package_locktime` to get the actual required locktime. + let package_locktime = package.package_locktime(0); + if package_locktime >= height { + self.locktimed_packages.entry(package_locktime).or_default().push(package); + continue; + } + if let Some(pending_claim) = self.claimable_outpoints.get(package.outpoints()[0]) { if let Some(request) = self.pending_claim_requests.get_mut(&pending_claim.0) { assert!(request.merge_package(package, height).is_ok()); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index fc8bf38519d..2535a756c9c 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -3329,33 +3329,9 @@ fn test_htlc_on_chain_success() { _ => panic!("Unexpected event"), } - macro_rules! check_tx_local_broadcast { - ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { { - let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap(); - // HTLC timeout claims for non-anchor channels are only aggregated when claimed from the - // remote commitment transaction. - if $htlc_offered { - assert_eq!(node_txn.len(), 2); - for tx in node_txn.iter() { - check_spends!(tx, $commitment_tx); - assert_ne!(tx.lock_time, LockTime::ZERO); - assert_eq!(tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); - assert!(tx.output[0].script_pubkey.is_p2wsh()); // revokeable output - } - assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); - } else { - assert_eq!(node_txn.len(), 1); - check_spends!(node_txn[0], $commitment_tx); - assert_ne!(node_txn[0].lock_time, LockTime::ZERO); - assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); - assert!(node_txn[0].output[0].script_pubkey.is_p2wpkh()); // direct payment - assert_ne!(node_txn[0].input[0].previous_output, node_txn[0].input[1].previous_output); - } - node_txn.clear(); - } } - } - // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success. - check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]); + // nodes[1] does not broadcast its own timeout-claim of the output as nodes[2] just claimed it + // via success. + assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty()); // Broadcast legit commitment tx from A on B's chain // Broadcast preimage tx by B on offered output from A commitment tx on A's chain @@ -3416,7 +3392,17 @@ fn test_htlc_on_chain_success() { _ => panic!("Unexpected event"), } } - check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0]); + // HTLC timeout claims for non-anchor channels are only aggregated when claimed from the + // remote commitment transaction. 
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(node_txn.len(), 2); + for tx in node_txn.iter() { + check_spends!(tx, node_a_commitment_tx[0]); + assert_ne!(tx.lock_time, LockTime::ZERO); + assert_eq!(tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert!(tx.output[0].script_pubkey.is_p2wsh()); // revokeable output + } + assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); } fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index ad2b0599598..d105d69edd2 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -711,8 +711,9 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { test_spendable_output(&nodes[0], &remote_txn[0], false); assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - // After broadcasting the HTLC claim transaction, node A will still consider the HTLC - // possibly-claimable up to ANTI_REORG_DELAY, at which point it will drop it. + // After confirming the HTLC claim transaction, node A will no longer attempt to claim said + // HTLC, unless the transaction is reorged. However, we'll still report a + // `MaybeTimeoutClaimableHTLC` balance for it until we reach `ANTI_REORG_DELAY` confirmations. mine_transaction(&nodes[0], &b_broadcast_txn[0]); if prev_commitment_tx { expect_payment_path_successful!(nodes[0]); @@ -728,18 +729,10 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { // When the HTLC timeout output is spendable in the next block, A should broadcast it connect_blocks(&nodes[0], htlc_cltv_timeout - nodes[0].best_block_info().1); let a_broadcast_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - // Aggregated claim transaction. assert_eq!(a_broadcast_txn.len(), 1); check_spends!(a_broadcast_txn[0], remote_txn[0]); - assert_eq!(a_broadcast_txn[0].input.len(), 2); - assert_ne!(a_broadcast_txn[0].input[0].previous_output.vout, a_broadcast_txn[0].input[1].previous_output.vout); - // a_broadcast_txn [0] and [1] should spend the HTLC outputs of the commitment tx - assert!(a_broadcast_txn[0].input.iter().any(|input| remote_txn[0].output[input.previous_output.vout as usize].value.to_sat() == 3_000)); + assert_eq!(a_broadcast_txn[0].input.len(), 1); assert!(a_broadcast_txn[0].input.iter().any(|input| remote_txn[0].output[input.previous_output.vout as usize].value.to_sat() == 4_000)); - - // Confirm node B's claim for node A to remove that claim from the aggregated claim transaction. - mine_transaction(&nodes[0], &b_broadcast_txn[0]); - let a_broadcast_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); let a_htlc_timeout_tx = a_broadcast_txn.into_iter().last().unwrap(); // Once the HTLC-Timeout transaction confirms, A will no longer consider the HTLC From 41c2b510699badff62590d74aeeacca50a01712b Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 10 Jul 2025 17:03:12 +0000 Subject: [PATCH 16/21] Track outpoint creation height in `PackageSolvingData` When we have an outpoint to claim which is lock-timed and the locktime is reached, we add it to `OnchainTxHandler::claimable_outpoints` to indicate the outpoint is now being claimed. However, `claimable_outpoints` is supposed to track when the outpoint first appeared on chain so that we can remove the claim if the outpoint is reorged out. 
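To make the intended semantics concrete before describing the bug, here is a toy model of that tracking (hypothetical names and types, not LDK's actual structures):

    use std::collections::HashMap;

    // Toy model: each outpoint being claimed maps to the height at which the
    // output we are spending confirmed on chain.
    struct ClaimTracker {
        claimable_outpoints: HashMap<&'static str, u32>,
    }

    impl ClaimTracker {
        // On a reorg back to `new_tip`, only claims whose spent output was
        // created in a now-disconnected block should be forgotten; claims on
        // still-confirmed outputs must survive, even if the claim is recent.
        fn blocks_disconnected(&mut self, new_tip: u32) {
            self.claimable_outpoints.retain(|_, conf_height| *conf_height <= new_tip);
        }
    }

    fn main() {
        let mut tracker = ClaimTracker { claimable_outpoints: HashMap::new() };
        tracker.claimable_outpoints.insert("htlc_outpoint", 100); // confirmed at height 100
        // The bug analogue: recording the current height (say 150) instead of
        // 100 would wrongly drop this claim on a reorg of blocks 141..=150.
        tracker.blocks_disconnected(140);
        assert!(tracker.claimable_outpoints.contains_key("htlc_outpoint"));
        println!("claim survives a reorg that did not touch its output");
    }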
Sadly, in the handling for lock-timed packages, we incorrectly stored the current height in `claimable_outpoints`, causing such claims to be removed in case of a reorg right after they were generated, even if the output we intend to claim isn't removed at all. Here we start tracking when the outpoint we're spending was created in `PackageSolvingData`'s constituent types. While we could have tracked this information in `PackageTemplate`, it would preclude later merging packages that are spending outpoints included in different blocks, which we don't necessarily want to do. Conflicts resolved in: * lightning/src/chain/channelmonitor.rs, * lightning/src/chain/onchaintx.rs, and * lightning/src/chain/package.rs due to upstream changes to package struct fields. --- lightning/src/chain/channelmonitor.rs | 47 ++++++++------- lightning/src/chain/onchaintx.rs | 1 + lightning/src/chain/package.rs | 84 ++++++++++++++++++--------- 3 files changed, 83 insertions(+), 49 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 4c195b20a78..9522d06c638 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -3008,23 +3008,26 @@ impl ChannelMonitorImpl { (payment_preimage.clone(), payment_info.clone().into_iter().collect()) }); - let confirmed_spend_txid = self.funding_spend_confirmed.or_else(|| { - self.onchain_events_awaiting_threshold_conf.iter().find_map(|event| match event.event { - OnchainEvent::FundingSpendConfirmation { .. } => Some(event.txid), - _ => None, - }) - }); - let confirmed_spend_txid = if let Some(txid) = confirmed_spend_txid { - txid - } else { - return; - }; + let confirmed_spend_info = self.funding_spend_confirmed + .map(|txid| (txid, None)) + .or_else(|| { + self.onchain_events_awaiting_threshold_conf.iter().find_map(|event| match event.event { + OnchainEvent::FundingSpendConfirmation { .. } => Some((event.txid, Some(event.height))), + _ => None, + }) + }); + let (confirmed_spend_txid, confirmed_spend_height) = + if let Some((txid, height)) = confirmed_spend_info { + (txid, height) + } else { + return; + }; // If the channel is force closed, try to claim the output from this preimage. // First check if a counterparty commitment transaction has been broadcasted: macro_rules! 
claim_htlcs { ($commitment_number: expr, $txid: expr, $htlcs: expr) => { - let (htlc_claim_reqs, _) = self.get_counterparty_output_claim_info($commitment_number, $txid, None, $htlcs); + let (htlc_claim_reqs, _) = self.get_counterparty_output_claim_info($commitment_number, $txid, None, $htlcs, confirmed_spend_height); let conf_target = self.closure_conf_target(); self.onchain_tx_handler.update_claims_view_from_requests(htlc_claim_reqs, self.best_block.height, self.best_block.height, broadcaster, conf_target, fee_estimator, logger); } @@ -3542,7 +3545,7 @@ impl ChannelMonitorImpl { // First, process non-htlc outputs (to_holder & to_counterparty) for (idx, outp) in tx.output.iter().enumerate() { if outp.script_pubkey == revokeable_p2wsh { - let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, outp.value, self.counterparty_commitment_params.on_counterparty_tx_csv, self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx()); + let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, outp.value, self.counterparty_commitment_params.on_counterparty_tx_csv, self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx(), height); let justice_package = PackageTemplate::build_package( commitment_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), @@ -3563,7 +3566,7 @@ impl ChannelMonitorImpl { // per_commitment_data is corrupt or our commitment signing key leaked! return (claimable_outpoints, to_counterparty_output_info); } - let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, htlc.amount_msat / 1000, htlc.clone(), &self.onchain_tx_handler.channel_transaction_parameters.channel_type_features); + let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, htlc.amount_msat / 1000, htlc.clone(), &self.onchain_tx_handler.channel_transaction_parameters.channel_type_features, height); let counterparty_spendable_height = if htlc.offered { htlc.cltv_expiry } else { @@ -3617,7 +3620,7 @@ impl ChannelMonitorImpl { (htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref())) ), logger); let (htlc_claim_reqs, counterparty_output_info) = - self.get_counterparty_output_claim_info(commitment_number, commitment_txid, Some(tx), per_commitment_option); + self.get_counterparty_output_claim_info(commitment_number, commitment_txid, Some(tx), per_commitment_option, Some(height)); to_counterparty_output_info = counterparty_output_info; for req in htlc_claim_reqs { claimable_outpoints.push(req); @@ -3628,7 +3631,7 @@ impl ChannelMonitorImpl { } /// Returns the HTLC claim package templates and the counterparty output info - fn get_counterparty_output_claim_info(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>, per_commitment_option: Option<&Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>) + fn get_counterparty_output_claim_info(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>, per_commitment_option: Option<&Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>, confirmation_height: Option<u32>) -> (Vec<PackageTemplate>, CommitmentTxCounterpartyOutputInfo) { let mut claimable_outpoints = Vec::new(); let mut to_counterparty_output_info: CommitmentTxCounterpartyOutputInfo = None; @@ -3688,13 +3691,15 @@ impl ChannelMonitorImpl { CounterpartyOfferedHTLCOutput::build(*per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, - preimage.unwrap(), htlc.clone(), self.onchain_tx_handler.channel_type_features().clone())) + preimage.unwrap(), htlc.clone(), self.onchain_tx_handler.channel_type_features().clone(), + confirmation_height)) } else { PackageSolvingData::CounterpartyReceivedHTLCOutput( CounterpartyReceivedHTLCOutput::build(*per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, - htlc.clone(), self.onchain_tx_handler.channel_type_features().clone())) + htlc.clone(), self.onchain_tx_handler.channel_type_features().clone(), + confirmation_height)) }; let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry); claimable_outpoints.push(counterparty_package); @@ -3736,7 +3741,7 @@ impl ChannelMonitorImpl { per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, tx.output[idx].value, self.counterparty_commitment_params.on_counterparty_tx_csv, - false + false, height, ); let justice_package = PackageTemplate::build_package( htlc_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), @@ -3765,7 +3770,7 @@ impl ChannelMonitorImpl { if let Some(transaction_output_index) = htlc.transaction_output_index { let (htlc_output, counterparty_spendable_height) = if htlc.offered { let htlc_output = HolderHTLCOutput::build_offered( - htlc.amount_msat, htlc.cltv_expiry, self.onchain_tx_handler.channel_type_features().clone() + htlc.amount_msat, htlc.cltv_expiry, self.onchain_tx_handler.channel_type_features().clone(), conf_height ); (htlc_output, conf_height) } else { @@ -3776,7 +3781,7 @@ impl ChannelMonitorImpl { continue; }; let htlc_output = HolderHTLCOutput::build_accepted( - payment_preimage, htlc.amount_msat, self.onchain_tx_handler.channel_type_features().clone() + payment_preimage, htlc.amount_msat, self.onchain_tx_handler.channel_type_features().clone(), conf_height ); (htlc_output, htlc.cltv_expiry) }; diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 9253975de42..38385e6f4fe 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -1429,6 +1429,7 @@ mod tests { htlc.amount_msat, htlc.cltv_expiry, ChannelTypeFeatures::only_static_remote_key(), + 0, )), 0, )); diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index bd6912c21f8..90386ba194e 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -122,7 +122,7 @@ const HIGH_FREQUENCY_BUMP_INTERVAL: u32 = 1; /// /// CSV and pubkeys are used as part of a witnessScript redeeming a balance output, amount is used /// as part of the signature hash and revocation secret to generate a satisfying witness. 
-#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct RevokedOutput { per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, @@ -132,10 +132,12 @@ pub(crate) struct RevokedOutput { amount: Amount, on_counterparty_tx_csv: u16, is_counterparty_balance_on_anchors: Option<()>, + // Added in LDK 0.1.4/0.2 and always set since. + outpoint_confirmation_height: Option<u32>, } impl RevokedOutput { - pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, per_commitment_key: SecretKey, amount: Amount, on_counterparty_tx_csv: u16, is_counterparty_balance_on_anchors: bool) -> Self { + pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, per_commitment_key: SecretKey, amount: Amount, on_counterparty_tx_csv: u16, is_counterparty_balance_on_anchors: bool, outpoint_confirmation_height: u32) -> Self { RevokedOutput { per_commitment_point, counterparty_delayed_payment_base_key, @@ -144,13 +146,15 @@ impl RevokedOutput { weight: WEIGHT_REVOKED_OUTPUT, amount, on_counterparty_tx_csv, - is_counterparty_balance_on_anchors: if is_counterparty_balance_on_anchors { Some(()) } else { None } + is_counterparty_balance_on_anchors: if is_counterparty_balance_on_anchors { Some(()) } else { None }, + outpoint_confirmation_height: Some(outpoint_confirmation_height), } } } impl_writeable_tlv_based!(RevokedOutput, { (0, per_commitment_point, required), + (1, outpoint_confirmation_height, option), // Added in 0.1.4/0.2 and always set (2, counterparty_delayed_payment_base_key, required), (4, counterparty_htlc_base_key, required), (6, per_commitment_key, required), @@ -168,7 +172,7 @@ impl_writeable_tlv_based!(RevokedOutput, { /// /// CSV is used as part of a witnessScript redeeming a balance output, amount is used as part /// of the signature hash and revocation secret to generate a satisfying witness. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct RevokedHTLCOutput { per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, @@ -177,10 +181,12 @@ pub(crate) struct RevokedHTLCOutput { weight: u64, amount: u64, htlc: HTLCOutputInCommitment, + // Added in LDK 0.1.4/0.2 and always set since. 
+ outpoint_confirmation_height: Option<u32>, } impl RevokedHTLCOutput { - pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, per_commitment_key: SecretKey, amount: u64, htlc: HTLCOutputInCommitment, channel_type_features: &ChannelTypeFeatures) -> Self { + pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, per_commitment_key: SecretKey, amount: u64, htlc: HTLCOutputInCommitment, channel_type_features: &ChannelTypeFeatures, outpoint_confirmation_height: u32) -> Self { let weight = if htlc.offered { weight_revoked_offered_htlc(channel_type_features) } else { weight_revoked_received_htlc(channel_type_features) }; RevokedHTLCOutput { per_commitment_point, @@ -189,13 +195,15 @@ impl RevokedHTLCOutput { per_commitment_key, weight, amount, - htlc + htlc, + outpoint_confirmation_height: Some(outpoint_confirmation_height), } } } impl_writeable_tlv_based!(RevokedHTLCOutput, { (0, per_commitment_point, required), + (1, outpoint_confirmation_height, option), // Added in 0.1.4/0.2 and always set (2, counterparty_delayed_payment_base_key, required), (4, counterparty_htlc_base_key, required), (6, per_commitment_key, required), @@ -212,7 +220,7 @@ impl_writeable_tlv_based!(RevokedHTLCOutput, { /// The preimage is used as part of the witness. /// /// Note that on upgrades, some features of existing outputs may be missed. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct CounterpartyOfferedHTLCOutput { per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, @@ -220,10 +228,12 @@ pub(crate) struct CounterpartyOfferedHTLCOutput { preimage: PaymentPreimage, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures, + // Added in LDK 0.1.4/0.2 and always set since. 
+ outpoint_confirmation_height: Option<u32>, } impl CounterpartyOfferedHTLCOutput { - pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, preimage: PaymentPreimage, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures) -> Self { + pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, preimage: PaymentPreimage, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures, outpoint_confirmation_height: Option<u32>) -> Self { CounterpartyOfferedHTLCOutput { per_commitment_point, counterparty_delayed_payment_base_key, @@ -231,6 +241,7 @@ impl CounterpartyOfferedHTLCOutput { preimage, htlc, channel_type_features, + outpoint_confirmation_height, } } } @@ -240,6 +251,7 @@ impl Writeable for CounterpartyOfferedHTLCOutput { let legacy_deserialization_prevention_marker = chan_utils::legacy_deserialization_prevention_marker_for_channel_type_features(&self.channel_type_features); write_tlv_fields!(writer, { (0, self.per_commitment_point, required), + (1, self.outpoint_confirmation_height, option), // Added in 0.1.4/0.2, not always set (2, self.counterparty_delayed_payment_base_key, required), (4, self.counterparty_htlc_base_key, required), (6, self.preimage, required), @@ -260,9 +272,11 @@ impl Readable for CounterpartyOfferedHTLCOutput { let mut htlc = RequiredWrapper(None); let mut _legacy_deserialization_prevention_marker: Option<()> = None; let mut channel_type_features = None; + let mut outpoint_confirmation_height = None; read_tlv_fields!(reader, { (0, per_commitment_point, required), + (1, outpoint_confirmation_height, option), // Added in 0.1.4/0.2, not always set (2, counterparty_delayed_payment_base_key, required), (4, counterparty_htlc_base_key, required), (6, preimage, required), @@ -279,7 +293,8 @@ impl Readable for CounterpartyOfferedHTLCOutput { counterparty_htlc_base_key: counterparty_htlc_base_key.0.unwrap(), preimage: preimage.0.unwrap(), htlc: htlc.0.unwrap(), - channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()) + channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()), + outpoint_confirmation_height, }) } } @@ -290,23 +305,25 @@ impl Readable for CounterpartyOfferedHTLCOutput { /// witnessScript. /// /// Note that on upgrades, some features of existing outputs may be missed. 
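The odd TLV type `1` used for `outpoint_confirmation_height` above is what makes this change backwards-compatible: per the BOLT TLV convention LDK follows, readers must reject unknown even types but silently skip unknown odd types, so pre-0.1.4 deserializers simply drop the new field. A minimal standalone sketch of that read rule, assuming toy one-byte type/length framing rather than LDK's actual BigSize encoding:

// Old readers skip unknown odd TLV types (like the new type 1) but must
// reject unknown even types. Framing here is a toy single-byte type/length.
fn read_tlv_stream(mut data: &[u8], known: &[u8]) -> Result<Vec<(u8, Vec<u8>)>, &'static str> {
    let mut records = Vec::new();
    while !data.is_empty() {
        if data.len() < 2 { return Err("truncated header"); }
        let (typ, len) = (data[0], data[1] as usize);
        if data.len() < 2 + len { return Err("truncated value"); }
        if known.contains(&typ) {
            records.push((typ, data[2..2 + len].to_vec()));
        } else if typ % 2 == 0 {
            return Err("unknown even (required) type");
        }
        // Unknown odd type: optional, skipped without error.
        data = &data[2 + len..];
    }
    Ok(records)
}

fn main() {
    // A writer that knows type 1 (the confirmation height) serializes both
    // fields; a reader that only knows type 0 still succeeds and drops type 1.
    let bytes = [0u8, 1, 0x2a, 1, 4, 0, 0, 3, 0x39];
    assert_eq!(read_tlv_stream(&bytes, &[0]).unwrap().len(), 1);
    assert_eq!(read_tlv_stream(&bytes, &[0, 1]).unwrap().len(), 2);
}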
-#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct CounterpartyReceivedHTLCOutput { per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures, + outpoint_confirmation_height: Option<u32>, } impl CounterpartyReceivedHTLCOutput { - pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures) -> Self { + pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures, outpoint_confirmation_height: Option<u32>) -> Self { CounterpartyReceivedHTLCOutput { per_commitment_point, counterparty_delayed_payment_base_key, counterparty_htlc_base_key, htlc, - channel_type_features + channel_type_features, + outpoint_confirmation_height, } } } @@ -316,6 +333,7 @@ impl Writeable for CounterpartyReceivedHTLCOutput { let legacy_deserialization_prevention_marker = chan_utils::legacy_deserialization_prevention_marker_for_channel_type_features(&self.channel_type_features); write_tlv_fields!(writer, { (0, self.per_commitment_point, required), + (1, self.outpoint_confirmation_height, option), // Added in 0.1.4/0.2, not always set (2, self.counterparty_delayed_payment_base_key, required), (4, self.counterparty_htlc_base_key, required), (6, self.htlc, required), @@ -334,9 +352,11 @@ impl Readable for CounterpartyReceivedHTLCOutput { let mut htlc = RequiredWrapper(None); let mut _legacy_deserialization_prevention_marker: Option<()> = None; let mut channel_type_features = None; + let mut outpoint_confirmation_height = None; read_tlv_fields!(reader, { (0, per_commitment_point, required), + (1, outpoint_confirmation_height, option), // Added in 0.1.4/0.2, not always set (2, counterparty_delayed_payment_base_key, required), (4, counterparty_htlc_base_key, required), (6, htlc, required), @@ -351,7 +371,8 @@ impl Readable for CounterpartyReceivedHTLCOutput { counterparty_delayed_payment_base_key: counterparty_delayed_payment_base_key.0.unwrap(), counterparty_htlc_base_key: counterparty_htlc_base_key.0.unwrap(), htlc: htlc.0.unwrap(), - channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()) + channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()), + outpoint_confirmation_height, }) } } @@ -362,31 +383,34 @@ impl Readable for CounterpartyReceivedHTLCOutput { /// Preimage is only included as part of the witness in former case. /// /// Note that on upgrades, some features of existing outputs may be missed. 
-#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct HolderHTLCOutput { preimage: Option<PaymentPreimage>, amount_msat: u64, /// Defaults to 0 for HTLC-Success transactions, which have no expiry cltv_expiry: u32, channel_type_features: ChannelTypeFeatures, + outpoint_confirmation_height: Option<u32>, } impl HolderHTLCOutput { - pub(crate) fn build_offered(amount_msat: u64, cltv_expiry: u32, channel_type_features: ChannelTypeFeatures) -> Self { + pub(crate) fn build_offered(amount_msat: u64, cltv_expiry: u32, channel_type_features: ChannelTypeFeatures, outpoint_confirmation_height: u32) -> Self { HolderHTLCOutput { preimage: None, amount_msat, cltv_expiry, channel_type_features, + outpoint_confirmation_height: Some(outpoint_confirmation_height), } } - pub(crate) fn build_accepted(preimage: PaymentPreimage, amount_msat: u64, channel_type_features: ChannelTypeFeatures) -> Self { + pub(crate) fn build_accepted(preimage: PaymentPreimage, amount_msat: u64, channel_type_features: ChannelTypeFeatures, outpoint_confirmation_height: u32) -> Self { HolderHTLCOutput { preimage: Some(preimage), amount_msat, cltv_expiry: 0, channel_type_features, + outpoint_confirmation_height: Some(outpoint_confirmation_height), } } } @@ -396,6 +420,7 @@ impl Writeable for HolderHTLCOutput { let legacy_deserialization_prevention_marker = chan_utils::legacy_deserialization_prevention_marker_for_channel_type_features(&self.channel_type_features); write_tlv_fields!(writer, { (0, self.amount_msat, required), + (1, self.outpoint_confirmation_height, option), // Added in 0.1.4/0.2 and always set (2, self.cltv_expiry, required), (4, self.preimage, option), (6, legacy_deserialization_prevention_marker, option), @@ -412,9 +437,11 @@ impl Readable for HolderHTLCOutput { let mut preimage = None; let mut _legacy_deserialization_prevention_marker: Option<()> = None; let mut channel_type_features = None; + let mut outpoint_confirmation_height = None; read_tlv_fields!(reader, { (0, amount_msat, required), + (1, outpoint_confirmation_height, option), // Added in 0.1.4/0.2 and always set (2, cltv_expiry, required), (4, preimage, option), (6, _legacy_deserialization_prevention_marker, option), @@ -427,7 +454,8 @@ impl Readable for HolderHTLCOutput { amount_msat: amount_msat.0.unwrap(), cltv_expiry: cltv_expiry.0.unwrap(), preimage, - channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()) + channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()), + outpoint_confirmation_height, }) } } @@ -437,7 +465,7 @@ impl Readable for HolderHTLCOutput { /// witnessScript is used as part of the witness redeeming the funding utxo. /// /// Note that on upgrades, some features of existing outputs may be missed. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct HolderFundingOutput { funding_redeemscript: ScriptBuf, pub(crate) funding_amount: Option<u64>, @@ -496,7 +524,7 @@ impl Readable for HolderFundingOutput { /// /// The generic API offers access to an outputs common attributes or allow transformation such as /// finalizing an input claiming the output. 
-#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) enum PackageSolvingData { RevokedOutput(RevokedOutput), RevokedHTLCOutput(RevokedHTLCOutput), @@ -737,7 +765,7 @@ impl_writeable_tlv_based_enum_legacy!(PackageSolvingData, ; /// That way we avoid claiming in too many discrete transactions while also avoiding /// unnecessarily exposing ourselves to pinning attacks or delaying claims when we could have /// claimed at least part of the available outputs quickly and without risk. -#[derive(Copy, Clone, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] enum AggregationCluster { /// Our counterparty can potentially claim this output. Pinnable, @@ -748,7 +776,7 @@ enum AggregationCluster { /// A malleable package might be aggregated with other packages to save on fees. /// A untractable package has been counter-signed and aggregable will break cached counterparty signatures. -#[derive(Copy, Clone, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] enum PackageMalleability { Malleable(AggregationCluster), Untractable, @@ -763,7 +791,7 @@ enum PackageMalleability { /// /// As packages are time-sensitive, we fee-bump and rebroadcast them at scheduled intervals. /// Failing to confirm a package translate as a loss of funds for the user. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct PackageTemplate { // List of onchain outputs and solving data to generate satisfying witnesses. inputs: Vec<(BitcoinOutPoint, PackageSolvingData)>, @@ -1394,7 +1422,7 @@ mod tests { let secp_ctx = Secp256k1::new(); let dumb_scalar = SecretKey::from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap(); let dumb_point = PublicKey::from_secret_key(&secp_ctx, &dumb_scalar); - PackageSolvingData::RevokedOutput(RevokedOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), dumb_scalar, Amount::ZERO, 0, $is_counterparty_balance_on_anchors)) + PackageSolvingData::RevokedOutput(RevokedOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), dumb_scalar, Amount::ZERO, 0, $is_counterparty_balance_on_anchors, 0)) } } } @@ -1407,7 +1435,7 @@ mod tests { let dumb_point = PublicKey::from_secret_key(&secp_ctx, &dumb_scalar); let hash = PaymentHash([1; 32]); let htlc = HTLCOutputInCommitment { offered: false, amount_msat: 1_000_000, cltv_expiry: 0, payment_hash: hash, transaction_output_index: None }; - PackageSolvingData::RevokedHTLCOutput(RevokedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), dumb_scalar, 1_000_000 / 1_000, htlc, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies())) + PackageSolvingData::RevokedHTLCOutput(RevokedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), dumb_scalar, 1_000_000 / 1_000, htlc, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), 0)) } } } @@ -1420,7 +1448,7 @@ mod tests { let dumb_point = PublicKey::from_secret_key(&secp_ctx, &dumb_scalar); let hash = PaymentHash([1; 32]); let htlc = HTLCOutputInCommitment { offered: true, amount_msat: $amt, cltv_expiry: $expiry, payment_hash: hash, transaction_output_index: None }; - PackageSolvingData::CounterpartyReceivedHTLCOutput(CounterpartyReceivedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), htlc, $features)) + 
PackageSolvingData::CounterpartyReceivedHTLCOutput(CounterpartyReceivedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), htlc, $features, None)) } } } @@ -1434,7 +1462,7 @@ mod tests { let hash = PaymentHash([1; 32]); let preimage = PaymentPreimage([2;32]); let htlc = HTLCOutputInCommitment { offered: false, amount_msat: $amt, cltv_expiry: 0, payment_hash: hash, transaction_output_index: None }; - PackageSolvingData::CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), preimage, htlc, $features)) + PackageSolvingData::CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), preimage, htlc, $features, None)) } } } @@ -1443,7 +1471,7 @@ mod tests { ($features: expr) => { { let preimage = PaymentPreimage([2;32]); - PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build_accepted(preimage, 0, $features)) + PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build_accepted(preimage, 0, $features, 0)) } } } @@ -1451,7 +1479,7 @@ mod tests { macro_rules! dumb_offered_htlc_output { ($cltv_expiry: expr, $features: expr) => { { - PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build_offered(0, $cltv_expiry, $features)) + PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build_offered(0, $cltv_expiry, $features, 0)) } } } From 3463a0cd124794de8fd55c38fabbfd9bc4fa1c58 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 10 Jul 2025 17:04:07 +0000 Subject: [PATCH 17/21] Use outpoint creation height when restoring locktimed packages When we have an outpoint to claim which is lock-timed and the locktime is reached, we add it to `OnchainTxHandler::claimable_outpoints` to indicate the outpoint is now being claimed. However, `claimable_outpoints` is supposed to track when the outpoint first appeared on chain so that we can remove the claim if the outpoint is reorged out. Sadly, in the handling for lock-timed packages, we incorrectly stored the current height in `claimable_outpoints`, causing such claims to be removed in case of a reorg right after they were generated, even if the output we intend to claim isn't removed at all. Here we use the creation-height tracking added in the previous commit to actually address the issue, using the tracked height when adding a claim to `OnchainTxHandler::claimable_outpoints`. In cases where we have no information, we continue to use the current height, retaining the issue for locktimed packages on upgrades, but this simplifies cases where we actually don't have the information available anyway. Trivial conflicts resolved in: * lightning/src/chain/package.rs --- lightning/src/chain/onchaintx.rs | 7 ++++--- lightning/src/chain/package.rs | 35 ++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 38385e6f4fe..ae221c1c61d 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -865,9 +865,10 @@ impl OnchainTxHandler { // Because fuzzing can cause hash collisions, we can end up with conflicting claim // ids here, so we only assert when not fuzzing. 
debug_assert!(cfg!(fuzzing) || self.pending_claim_requests.get(&claim_id).is_none()); - for k in req.outpoints() { - log_info!(logger, "Registering claiming request for {}:{}", k.txid, k.vout); - self.claimable_outpoints.insert(k.clone(), (claim_id, conf_height)); + for (k, outpoint_confirmation_height) in req.outpoints_and_creation_heights() { + let creation_height = outpoint_confirmation_height.unwrap_or(conf_height); + log_info!(logger, "Registering claiming request for {}:{}, which exists as of height {creation_height}", k.txid, k.vout); + self.claimable_outpoints.insert(k.clone(), (claim_id, creation_height)); } self.pending_claim_requests.insert(claim_id, req); } diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 90386ba194e..9fe16915be4 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -603,6 +603,35 @@ impl PackageSolvingData { } } + fn input_confirmation_height(&self) -> Option<u32> { + match self { + PackageSolvingData::RevokedOutput(RevokedOutput { + outpoint_confirmation_height, + .. + }) + | PackageSolvingData::RevokedHTLCOutput(RevokedHTLCOutput { + outpoint_confirmation_height, + .. + }) + | PackageSolvingData::CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput { + outpoint_confirmation_height, + .. + }) + | PackageSolvingData::CounterpartyReceivedHTLCOutput( + CounterpartyReceivedHTLCOutput { outpoint_confirmation_height, .. }, + ) + | PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput { + outpoint_confirmation_height, + .. + }) => *outpoint_confirmation_height, + // We don't bother to track `HolderFundingOutput`'s creation height as it's the funding + // transaction itself and we build `HolderFundingOutput`s before we actually get the + // commitment transaction confirmed. + PackageSolvingData::HolderFundingOutput(_) => None, + } + } + + #[rustfmt::skip] fn as_tx_input(&self, previous_output: BitcoinOutPoint) -> TxIn { let sequence = match self { PackageSolvingData::RevokedOutput(_) => Sequence::ENABLE_RBF_NO_LOCKTIME, @@ -905,6 +934,12 @@ impl PackageTemplate { pub(crate) fn outpoints(&self) -> Vec<&BitcoinOutPoint> { self.inputs.iter().map(|(o, _)| o).collect() } + pub(crate) fn outpoints_and_creation_heights( + &self, + ) -> impl Iterator<Item = (&BitcoinOutPoint, Option<u32>)> { + self.inputs.iter().map(|(o, p)| (o, p.input_confirmation_height())) + } + pub(crate) fn inputs(&self) -> impl ExactSizeIterator<Item = &PackageSolvingData> { self.inputs.iter().map(|(_, i)| i) } From a9597aa88031239a830776ee19ada45f5c00baad Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 10 Jul 2025 00:51:51 +0000 Subject: [PATCH 18/21] Add a test case for the issues fixed in the previous few commits This adds a single test which exercises both the ability to prune locktimed packages when inputs are spent as well as the creation-height tracking for locktimed packages. Trivial conflicts resolved in: * lightning/src/ln/reorg_tests.rs --- lightning/src/ln/reorg_tests.rs | 226 +++++++++++++++++++++++++++++++- 1 file changed, 225 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index b1b4f77c590..56760c510a3 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -10,13 +10,14 @@ //! Further functional tests which test blockchain reorganizations. 
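Condensed, the `claimable_outpoints` change above prefers the height at which the claimed output actually appeared on chain and only falls back to the claim-generation height when the optional creation height is missing (i.e. for data written by older versions). A self-contained sketch of that registration logic, with `ClaimId` and the map layout simplified from `OnchainTxHandler`:

use std::collections::HashMap;

type ClaimId = [u8; 32];
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct OutPoint { txid: [u8; 32], vout: u32 }

// On reorg, entries whose recorded height gets disconnected are dropped.
// Recording the output's true creation height (when known) keeps a
// lock-timed claim alive when only the blocks after it are reorged out.
fn register_claim(
    claimable_outpoints: &mut HashMap<OutPoint, (ClaimId, u32)>,
    claim_id: ClaimId,
    outpoints: &[(OutPoint, Option<u32>)], // (outpoint, creation height if tracked)
    conf_height: u32, // height at which we generated this claim request
) {
    for (outpoint, creation_height) in outpoints {
        let height = creation_height.unwrap_or(conf_height);
        claimable_outpoints.insert(*outpoint, (claim_id, height));
    }
}

fn main() {
    let mut map = HashMap::new();
    let op = OutPoint { txid: [7; 32], vout: 0 };
    // A locktimed claim generated at height 900 for an output created at 800:
    register_claim(&mut map, [1; 32], &[(op, Some(800))], 900);
    assert_eq!(map[&op].1, 800); // survives a reorg of blocks 801..=900
}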
use crate::chain::chaininterface::LowerBoundedFeeEstimator; -use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS}; +use crate::chain::channelmonitor::{ANTI_REORG_DELAY, Balance, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::chain::transaction::OutPoint; use crate::chain::Confirm; use crate::events::{Event, MessageSendEventsProvider, ClosureReason, HTLCDestination, MessageSendEvent}; use crate::ln::msgs::{ChannelMessageHandler, Init}; use crate::ln::types::ChannelId; use crate::sign::OutputSpender; +use crate::types::payment::PaymentHash; use crate::util::ser::Writeable; use crate::util::string::UntrustedString; @@ -897,3 +898,226 @@ fn test_retries_own_commitment_broadcast_after_reorg() { do_test_retries_own_commitment_broadcast_after_reorg(true, false); do_test_retries_own_commitment_broadcast_after_reorg(true, true); } + +fn do_test_split_htlc_expiry_tracking(use_third_htlc: bool, reorg_out: bool) { + // Previously, we had a bug where if there were two HTLCs which expired at different heights, + // and a counterparty commitment transaction confirmed spending both of them, we'd continually + // rebroadcast attempted HTLC claims against the higher-expiry HTLC forever. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + + // This test relies on being able to consolidate HTLC claims into a single transaction, which + // requires anchors: + let mut config = test_default_channel_config(); + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + config.manually_accept_inbound_channels = true; + + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let coinbase_tx = provide_anchor_reserves(&nodes); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let (_, _, chan_id, funding_tx) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0); + + // Route two non-dust HTLCs with different expiry, with a third having the same expiry as the + // second if `use_third_htlc` is set. + let (preimage_a, payment_hash_a, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000_000); + connect_blocks(&nodes[0], 2); + connect_blocks(&nodes[1], 2); + let (preimage_b, payment_hash_b, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000_000); + let payment_hash_c = if use_third_htlc { + route_payment(&nodes[0], &[&nodes[1]], 100_000_000).1 + } else { + PaymentHash([0; 32]) + }; + + // First disconnect peers so that we don't have to deal with messages: + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); + + // Give node B preimages so that it will claim the first two HTLCs on-chain. + nodes[1].node.claim_funds(preimage_a); + expect_payment_claimed!(nodes[1], payment_hash_a, 100_000_000); + nodes[1].node.claim_funds(preimage_b); + expect_payment_claimed!(nodes[1], payment_hash_b, 100_000_000); + check_added_monitors(&nodes[1], 2); + + let err = "Channel force-closed".to_string(); + + // Force-close and fetch node B's commitment transaction and the transaction claiming the first + // two HTLCs. 
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &node_a_id, err).unwrap(); + check_closed_broadcast(&nodes[1], 1, true); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 10_000_000); + + let mut txn = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(txn.len(), 1); + let commitment_tx = txn.pop().unwrap(); + check_spends!(commitment_tx, funding_tx); + + mine_transaction(&nodes[0], &commitment_tx); + check_closed_broadcast(&nodes[0], 1, true); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 10_000_000); + check_added_monitors(&nodes[0], 1); + + mine_transaction(&nodes[1], &commitment_tx); + let mut bump_events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert_eq!(bump_events.len(), 1); + match bump_events.pop().unwrap() { + Event::BumpTransaction(bump_event) => { + nodes[1].bump_tx_handler.handle_event(&bump_event); + }, + ev => panic!("Unexpected event {ev:?}"), + } + + let mut txn = nodes[1].tx_broadcaster.txn_broadcast(); + if nodes[1].connect_style.borrow().updates_best_block_first() { + assert_eq!(txn.len(), 2, "{txn:?}"); + check_spends!(txn[0], funding_tx); + } else { + assert_eq!(txn.len(), 1, "{txn:?}"); + } + let bs_htlc_spend_tx = txn.pop().unwrap(); + check_spends!(bs_htlc_spend_tx, commitment_tx, coinbase_tx); + + // Now connect blocks until the first HTLC expires + assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0); + connect_blocks(&nodes[0], TEST_FINAL_CLTV - 2); + let mut txn = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn.len(), 1); + let as_first_htlc_spend_tx = txn.pop().unwrap(); + check_spends!(as_first_htlc_spend_tx, commitment_tx); + + // But confirm B's dual-HTLC-claim transaction instead. A should now have nothing to broadcast + // as the third HTLC (if there is one) won't expire for another block. + mine_transaction(&nodes[0], &bs_htlc_spend_tx); + let mut txn = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn.len(), 0); + + let sent_events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(sent_events.len(), 4, "{sent_events:?}"); + let mut found_expected_events = [false, false, false, false]; + for event in sent_events { + match event { + Event::PaymentSent { payment_hash, .. }|Event::PaymentPathSuccessful { payment_hash: Some(payment_hash), .. } => { + let path_success = matches!(event, Event::PaymentPathSuccessful { .. 
}); + if payment_hash == payment_hash_a { + found_expected_events[0 + if path_success { 1 } else { 0 }] = true; + } else if payment_hash == payment_hash_b { + found_expected_events[2 + if path_success { 1 } else { 0 }] = true; + } else { + panic!("Wrong payment hash {event:?}"); + } + }, + _ => panic!("Wrong event {event:?}"), + } + } + assert_eq!(found_expected_events, [true, true, true, true]); + + // However if we connect one more block the third HTLC will time out and A should claim it + connect_blocks(&nodes[0], 1); + let mut txn = nodes[0].tx_broadcaster.txn_broadcast(); + if use_third_htlc { + assert_eq!(txn.len(), 1); + let as_third_htlc_spend_tx = txn.pop().unwrap(); + check_spends!(as_third_htlc_spend_tx, commitment_tx); + // Previously, node A would generate a bogus claim here, trying to claim both HTLCs B and C in + // one transaction, so we check that the single input being spent was not already spent in node + // B's HTLC claim transaction. + assert_eq!(as_third_htlc_spend_tx.input.len(), 1, "{as_third_htlc_spend_tx:?}"); + for spent_input in bs_htlc_spend_tx.input.iter() { + let third_htlc_vout = as_third_htlc_spend_tx.input[0].previous_output.vout; + assert_ne!(third_htlc_vout, spent_input.previous_output.vout); + } + + mine_transaction(&nodes[0], &as_third_htlc_spend_tx); + + assert_eq!(&nodes[0].node.get_and_clear_pending_events(), &[]); + } else { + assert_eq!(txn.len(), 0); + // Connect a block so that both cases end with the same height + connect_blocks(&nodes[0], 1); + } + + // At this point all HTLCs have been resolved and no further transactions should be generated. + // We connect blocks until one block before `bs_htlc_spend_tx` reaches `ANTI_REORG_DELAY` + // confirmations. + connect_blocks(&nodes[0], ANTI_REORG_DELAY - 4); + let mut txn = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn.len(), 0); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + + if reorg_out { + // Reorg out bs_htlc_spend_tx, letting node A claim all the HTLCs instead. + disconnect_blocks(&nodes[0], ANTI_REORG_DELAY - 2); + assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0); + + // As soon as bs_htlc_spend_tx is disconnected, node A should consider all HTLCs + // claimable-on-timeout. + disconnect_blocks(&nodes[0], 1); + let balances = nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]); + assert_eq!(balances.len(), if use_third_htlc { 3 } else { 2 }); + for balance in balances { + if let Balance::MaybeTimeoutClaimableHTLC { .. } = balance { + } else { + panic!("Unexpected balance {balance:?}"); + } + } + + connect_blocks(&nodes[0], 100); + let txn = nodes[0].tx_broadcaster.txn_broadcast(); + let mut claiming_outpoints = new_hash_set(); + for tx in txn.iter() { + for input in tx.input.iter() { + claiming_outpoints.insert(input.previous_output); + } + } + assert_eq!(claiming_outpoints.len(), if use_third_htlc { 3 } else { 2 }); + } else { + // Connect a final block, which puts `bs_htlc_spend_tx` at `ANTI_REORG_DELAY` and we wipe + // the claimable balances for the first two HTLCs. + connect_blocks(&nodes[0], 1); + let balances = nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]); + assert_eq!(balances.len(), if use_third_htlc { 1 } else { 0 }); + + // Connect two more blocks to get `as_third_htlc_spend_tx` to `ANTI_REORG_DELAY` confs. 
+ connect_blocks(&nodes[0], 2); + if use_third_htlc { + let failed_events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(failed_events.len(), 2); + let mut found_expected_events = [false, false]; + for event in failed_events { + match event { + Event::PaymentFailed { payment_hash: Some(payment_hash), .. }|Event::PaymentPathFailed { payment_hash, .. } => { + let path_failed = matches!(event, Event::PaymentPathFailed { .. }); + if payment_hash == payment_hash_c { + found_expected_events[if path_failed { 1 } else { 0 }] = true; + } else { + panic!("Wrong payment hash {event:?}"); + } + }, + _ => panic!("Wrong event {event:?}"), + } + } + assert_eq!(found_expected_events, [true, true]); + } + + // Further, there should be no spendable balances. + assert!(nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]).is_empty()); + } +} + +#[test] +fn test_split_htlc_expiry_tracking() { + do_test_split_htlc_expiry_tracking(true, true); + do_test_split_htlc_expiry_tracking(false, true); + do_test_split_htlc_expiry_tracking(true, false); + do_test_split_htlc_expiry_tracking(false, false); +} From 382e71b1d2c659cb569554a630c30d20cf598a57 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Tue, 15 Jul 2025 12:40:33 -0700 Subject: [PATCH 19/21] Correct non-dust HTLC accounting in `next_remote_commit_tx_fee_msat` `next_remote_commit_tx_fee_msat` previously mistakenly classified HTLCs with values equal to the dust limit as dust. This did not cause any force closes because the code that builds commitment transactions for signing correctly trims dust HTLCs. Nonetheless, this can cause `next_remote_commit_tx_fee_msat` to predict a weight for the next remote commitment transaction that is significantly lower than the eventual weight. This allows a malicious channel funder to create an unbroadcastable commitment for the channel fundee by adding HTLCs with values equal to the dust limit to the commitment transaction; according to the fundee, the funder has not exhausted their reserve because all the added HTLCs are dust, while in reality all the HTLCs are non-dust, and the funder does not have the funds to pay the minimum feerate to enter the mempool. Conflicts resolved in: * lightning/src/ln/htlc_reserve_unit_tests.rs which is a new file upstream. The new test was instead moved to lightning/src/ln/functional_tests.rs and rewritten where the upstream API has changed (in some cases nontrivially). --- lightning/src/ln/channel.rs | 6 +- lightning/src/ln/functional_tests.rs | 290 +++++++++++++++++++++++++++ 2 files changed, 293 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index d23363229f4..16803a45bfd 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -3216,7 +3216,7 @@ impl ChannelContext where SP::Target: SignerProvider { /// Creates a set of keys for build_commitment_transaction to generate a transaction which we /// will sign and send to our counterparty. /// If an Err is returned, it is a ChannelError::Close (for get_funding_created) - fn build_remote_transaction_keys(&self) -> TxCreationKeys { + pub fn build_remote_transaction_keys(&self) -> TxCreationKeys { let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint; let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; let counterparty_pubkeys = self.get_counterparty_pubkeys(); @@ -3774,14 +3774,14 @@ impl ChannelContext where SP::Target: SignerProvider { // committed outbound HTLCs, see below. 
let mut included_htlcs = 0; for ref htlc in context.pending_inbound_htlcs.iter() { - if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat { + if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat { continue } included_htlcs += 1; } for ref htlc in context.pending_outbound_htlcs.iter() { - if htlc.amount_msat / 1000 <= real_dust_limit_success_sat { + if htlc.amount_msat / 1000 < real_dust_limit_success_sat { continue } // We only include outbound HTLCs if it will not be included in their next commitment_signed, diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 2535a756c9c..2cbf04a40ff 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -11684,3 +11684,293 @@ fn test_funding_signed_event() { nodes[1].node.get_and_clear_pending_msg_events(); } +#[test] +pub fn test_dust_limit_fee_accounting() { + do_test_dust_limit_fee_accounting(false); + do_test_dust_limit_fee_accounting(true); +} + +pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { + // Test that when a channel funder sends HTLCs exactly on the dust limit + // of the funder, the fundee correctly accounts for the additional fee on the + // funder's commitment transaction due to those additional non-dust HTLCs when + // checking for any infringements on the funder's reserve. + + let channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(); + + let chanmon_cfgs = create_chanmon_cfgs(2); + + let mut default_config = test_default_channel_config(); + default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + default_config.manually_accept_inbound_channels = true; + + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); + + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + + // Set a HTLC amount that is equal to the dust limit of the funder + const HTLC_AMT_SAT: u64 = 354; + + const CHANNEL_VALUE_SAT: u64 = 100_000; + + const FEERATE_PER_KW: u32 = 253; + + let commit_tx_fee_sat = + chan_utils::commit_tx_fee_sat(FEERATE_PER_KW, MIN_AFFORDABLE_HTLC_COUNT, &channel_type); + + // By default the reserve is set to 1% or 1000sat, whichever is higher + let channel_reserve_satoshis = 1_000; + + // Set node 0's balance to pay for exactly MIN_AFFORDABLE_HTLC_COUNT non-dust HTLCs on the channel, minus some offset + let node_0_balance_sat = commit_tx_fee_sat + + channel_reserve_satoshis + + 2 * crate::ln::channel::ANCHOR_OUTPUT_VALUE_SATOSHI + + MIN_AFFORDABLE_HTLC_COUNT as u64 * HTLC_AMT_SAT + - if can_afford { 0 } else { 1 }; + let mut node_1_balance_sat = CHANNEL_VALUE_SAT - node_0_balance_sat; + + let chan_id = create_chan_between_nodes_with_value( + &nodes[0], + &nodes[1], + CHANNEL_VALUE_SAT, + node_1_balance_sat * 1000, + ) + .3; + + { + // Double check the reserve that node 0 has to maintain here + let per_peer_state_lock; + let mut peer_state_lock; + let chan = + get_channel_ref!(nodes[1], nodes[0], per_peer_state_lock, peer_state_lock, chan_id); + assert_eq!( + chan.context().holder_selected_channel_reserve_satoshis, + channel_reserve_satoshis + ); + } + { + // Double check the dust limit on node 0's commitment transactions; when node 0 + // adds a HTLC, node 1 will check that the fee on node 0's commitment transaction + // does not dip under the node 1 selected reserve. 
+ let per_peer_state_lock; + let mut peer_state_lock; + let chan = + get_channel_ref!(nodes[0], nodes[1], per_peer_state_lock, peer_state_lock, chan_id); + assert_eq!(chan.context().holder_dust_limit_satoshis, HTLC_AMT_SAT); + } + + // Precompute the route to skip any router complaints when sending the last HTLC + let (route_0_1, payment_hash_0_1, _, payment_secret_0_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], HTLC_AMT_SAT * 1000); + + let mut htlcs = Vec::new(); + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT - 1 { + let (_payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1]], HTLC_AMT_SAT * 1000); + // Grab a snapshot of these HTLCs to manually build the commitment transaction later... + let accepted_htlc = chan_utils::HTLCOutputInCommitment { + offered: false, + amount_msat: HTLC_AMT_SAT * 1000, + // Hard-coded to match the expected value + cltv_expiry: 81, + payment_hash, + transaction_output_index: None, + }; + htlcs.push((accepted_htlc, ())); + } + + // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() + let secp_ctx = Secp256k1::new(); + let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!"); + + let cur_height = nodes[1].node.best_block.read().unwrap().height + 1; + + let onion_keys = + onion_utils::construct_onion_keys(&secp_ctx, &route_0_1.paths[0], &session_priv).unwrap(); + let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret_0_1); + let (onion_payloads, amount_msat, cltv_expiry) = onion_utils::build_onion_payloads( + &route_0_1.paths[0], + HTLC_AMT_SAT * 1000, + &recipient_onion_fields, + cur_height, + &None, + None, + ) + .unwrap(); + let onion_routing_packet = + onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash_0_1) + .unwrap(); + // Double check the hard-coded value + assert_eq!(cltv_expiry, 81); + let msg = msgs::UpdateAddHTLC { + channel_id: chan_id, + htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64 - 1, + amount_msat, + payment_hash: payment_hash_0_1, + cltv_expiry, + onion_routing_packet, + skimmed_fee_msat: None, + blinding_point: None, + }; + + nodes[1].node.handle_update_add_htlc(node_a_id, &msg); + + if !can_afford { + let err = "Remote HTLC add would put them under remote reserve value".to_string(); + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", &err, 3); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + let reason = ClosureReason::ProcessingError { err }; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], CHANNEL_VALUE_SAT); + check_added_monitors(&nodes[1], 1); + } else { + // Now manually create the commitment_signed message corresponding to the update_add + // nodes[0] just sent. In the code for construction of this message, "local" refers + // to the sender of the message, and "remote" refers to the receiver. 
+ + const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; + + let (local_secret, next_local_point) = { + let per_peer_lock; + let mut peer_state_lock; + + let channel = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_id); + let local_chan = if let ChannelPhase::Funded(chan) = &*channel { + chan + } else { + panic!(); + }; + let chan_signer = local_chan.get_signer(); + // Make the signer believe we validated another commitment, so we can release the secret + chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; + + ( + chan_signer + .as_ref() + .release_commitment_secret( + INITIAL_COMMITMENT_NUMBER - MIN_AFFORDABLE_HTLC_COUNT as u64 + 1, + ) + .unwrap(), + chan_signer + .as_ref() + .get_per_commitment_point( + INITIAL_COMMITMENT_NUMBER - MIN_AFFORDABLE_HTLC_COUNT as u64, + &secp_ctx, + ) + .unwrap(), + ) + }; + + // Build the remote commitment transaction so we can sign it, and then later use the + // signature for the commitment_signed message. + let local_chan_balance = node_0_balance_sat + - HTLC_AMT_SAT * MIN_AFFORDABLE_HTLC_COUNT as u64 + - 2 * crate::ln::channel::ANCHOR_OUTPUT_VALUE_SATOSHI + - chan_utils::commit_tx_fee_sat( + FEERATE_PER_KW, + MIN_AFFORDABLE_HTLC_COUNT, + &channel_type, + ); + + let accepted_htlc_info = chan_utils::HTLCOutputInCommitment { + offered: false, + amount_msat: HTLC_AMT_SAT * 1000, + cltv_expiry, + payment_hash: payment_hash_0_1, + transaction_output_index: None, + }; + htlcs.push((accepted_htlc_info, ())); + + let commitment_number = INITIAL_COMMITMENT_NUMBER - MIN_AFFORDABLE_HTLC_COUNT as u64; + + let res = { + let per_peer_lock; + let mut peer_state_lock; + + let channel = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_id); + let chan_signer = if let ChannelPhase::Funded(chan) = &*channel { + chan.get_signer() + } else { + panic!(); + }; + + let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( + commitment_number, + node_1_balance_sat, + local_chan_balance, + channel.context().channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys.funding_pubkey, + channel.context().channel_transaction_parameters.holder_pubkeys.funding_pubkey, + channel.context().build_remote_transaction_keys(), + FEERATE_PER_KW, + &mut htlcs, + &channel.context().channel_transaction_parameters.as_counterparty_broadcastable(), + ); + chan_signer + .as_ecdsa() + .unwrap() + .sign_counterparty_commitment( + &commitment_tx, + Vec::new(), + Vec::new(), + &secp_ctx, + ) + .unwrap() + }; + + let commit_signed_msg = msgs::CommitmentSigned { + channel_id: chan_id, + signature: res.0, + htlc_signatures: res.1, + batch: None, + #[cfg(taproot)] + partial_signature_with_nonce: None, + }; + + // Send the commitment_signed message to the nodes[1]. + nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); + let _ = nodes[1].node.get_and_clear_pending_msg_events(); + + // Send the RAA to nodes[1]. + let raa_msg = msgs::RevokeAndACK { + channel_id: chan_id, + per_commitment_secret: local_secret, + next_per_commitment_point: next_local_point, + #[cfg(taproot)] + next_local_nonce: None, + }; + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); + + // The HTLC actually fails here in `fn validate_commitment_signed` due to a fee spike buffer + // violation. It nonetheless passed all checks in `fn validate_update_add_htlc`. 
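For context on why the HTLC passes `validate_update_add_htlc` but trips the fee spike buffer later: add-time acceptance checks the funder's balance against the predicted next-commitment fee, while commitment-time validation additionally demands headroom for feerate spikes. A rough sketch of that relationship; the 2x multiplier is an illustrative assumption here, not LDK's exact formula:

// Illustrative only: why an HTLC can pass the add-time reserve check yet be
// failed back at commitment time under a fee-spike buffer requirement.
fn add_time_ok(funder_sat: u64, reserve_sat: u64, next_commit_fee_sat: u64) -> bool {
    funder_sat >= reserve_sat + next_commit_fee_sat
}

fn commit_time_ok(funder_sat: u64, reserve_sat: u64, next_commit_fee_sat: u64) -> bool {
    // Commitment-time validation wants spare headroom so a feerate spike
    // can't leave the funder unable to pay for the commitment transaction.
    funder_sat >= reserve_sat + 2 * next_commit_fee_sat
}

fn main() {
    let (funder, reserve, fee) = (2_000u64, 1_000, 600);
    assert!(add_time_ok(funder, reserve, fee));     // HTLC is accepted...
    assert!(!commit_time_ok(funder, reserve, fee)); // ...then failed back.
}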
+ + //expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::FailedPayment { payment_hash: payment_hash_0_1 }] + ); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + // Make sure the HTLC failed in the way we expect. + match events[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, + .. + } => { + assert_eq!(update_fail_htlcs.len(), 1); + update_fail_htlcs[0].clone() + }, + _ => panic!("Unexpected event"), + }; + nodes[1].logger.assert_log("lightning::ln::channel", + format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1); + + check_added_monitors(&nodes[1], 2); + } +} From b6a8fbc97a32e2e9c690fa0b7406d5937755483c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 15 Jul 2025 18:38:47 +0000 Subject: [PATCH 20/21] Add CHANGELOG entry for 0.1.5 --- CHANGELOG.md | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c68aba4c31..e5edcd8eab6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,35 @@ +# 0.1.5 - Jul XXX, 2025 - "Async Path Reduction" + +## Performance Improvements + * `NetworkGraph`'s expensive internal consistency checks have now been + disabled in debug builds in addition to release builds (#3687). + +## Bug Fixes + * Pathfinding which results in a multi-path payment is now substantially + smarter, using fewer paths and better optimizing fees and successes (#3890). + * A counterparty delaying claiming multiple HTLCs with different expiries can + no longer cause our `ChannelMonitor` to continuously rebroadcast invalid + transactions or RBF bump attempts (#3923). + * Reorgs can no longer cause us to fail to claim HTLCs after a counterparty + delayed claiming multiple HTLCs with different expiries (#3923). + * Force-closing a channel while it is blocked on another channel's async + `ChannelMonitorUpdate` can no longer lead to a panic (#3858). + * `ChannelMonitorUpdate`s can no longer be released to storage too early when + doing async updates or on restart. This only impacts async + `ChannelMonitorUpdate` persistence and can lead to loss of funds only in rare + cases with `ChannelMonitorUpdate` persistence order inversions (#3907). + +## Security +0.1.5 fixes a vulnerability which could allow a peer to overdraw their reserve +value, potentially cutting into commitment transaction fees on channels with a +low reserve. + * Due to a bug in checking whether an HTLC is dust during acceptance, near-dust + HTLCs were not counted towards the commitment transaction fee, but did + eventually contribute to it when we built a commitment transaction. This can + be used by a counterparty to overdraw their reserve value, or, for channels + with a low reserve value, cut into the commitment transaction fee (#3933). 
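To make the off-by-one concrete: with the 354 sat dust limit used in the test earlier in this series, an HTLC of exactly 354 sat is non-dust and adds real weight (and thus fee) to the commitment transaction, but the buggy `<=` comparison priced it as dust. A worked sketch, assuming the BOLT 3 anchor-channel base weight of 1124 WU and 172 WU per non-dust HTLC:

// Weight the fundee *predicted* for the funder's next commitment vs. the
// weight the funder must actually pay fee on, with HTLCs exactly at the
// 354 sat dust limit (zero-fee-HTLC anchors, so no second-stage fee term).
const BASE_WEIGHT: u64 = 1124; // anchor commitment base weight (BOLT 3)
const WEIGHT_PER_HTLC: u64 = 172; // per non-dust HTLC (BOLT 3)

fn commit_weight(dust_limit_sat: u64, htlcs_sat: &[u64], buggy: bool) -> u64 {
    let non_dust = htlcs_sat.iter().filter(|amt| {
        // Buggy check treated `amount <= dust_limit` as dust; the fix makes
        // an HTLC equal to the dust limit count as non-dust.
        if buggy { **amt > dust_limit_sat } else { **amt >= dust_limit_sat }
    }).count() as u64;
    BASE_WEIGHT + non_dust * WEIGHT_PER_HTLC
}

fn main() {
    let htlcs = [354u64; 9]; // nine HTLCs exactly at the dust limit
    let predicted = commit_weight(354, &htlcs, true);
    let actual = commit_weight(354, &htlcs, false);
    // 9 * 172 = 1548 WU of commitment weight the reserve check never priced in:
    assert_eq!(actual - predicted, 1548);
}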
+ + # 0.1.4 - May 23, 2025 - "Careful Validation of Bogus States" ## Bug Fixes From 843a69fe624179937bdef62f45238912b3435372 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 15 Jul 2025 18:39:19 +0000 Subject: [PATCH 21/21] Bump the `lightning` crate version to 0.1.5 --- lightning/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index d0c7336653a..75835c92edc 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning" -version = "0.1.4" +version = "0.1.5" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/"