38 changes: 38 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,41 @@
# 0.1.6 - Oct 10, 2025 - "Async Preimage Claims"

## Performance Improvements
* `NetworkGraph::remove_stale_channels_and_tracking` has been sped up by more
than 20x in cases where many entries need to be removed (such as after
initial gossip sync, #4080).

## Bug Fixes
* Delivery of on-chain resolutions of HTLCs to `ChannelManager` has been made
more robust to prevent loss in some exceedingly rare crash cases. This may
marginally increase payment resolution event replays on startup (#3984).
* Corrected forwarding of new gossip to peers to which we are sending an
  initial gossip sync (#4107).
* A rare race condition may have resulted in outbound BOLT12 payments
spuriously failing while processing the `Bolt12Invoice` message (#4078).
* If a channel is updated multiple times after a payment is claimed while using
async persistence of the `ChannelMonitorUpdate`s, and the node then restarts
with a stale copy of its `ChannelManager`, the `PaymentClaimed` event may
  have been lost (#3988).
* If an async-persisted `ChannelMonitorUpdate` for one part of an MPP claim
does not complete before multiple `ChannelMonitorUpdate`s for another channel
in the same MPP claim complete, and the node restarts twice, the preimage may
be lost and the MPP payment part may not be claimed (#3928).

## Security
0.1.6 fixes a denial of service vulnerability and a funds-theft vulnerability.
* When a channel has been force-closed, we have already claimed some of its
HTLCs on-chain, and we later learn a new preimage allowing us to claim
further HTLCs on-chain, we could in some cases generate invalid claim
transactions leading to loss of funds (#4154).
* When a `ChannelMonitor` is created for a channel which is never funded with
a real transaction, the balances returned by
  `ChannelMonitor::get_claimable_balances` would never be empty. As a result,
  `ChannelMonitor::check_and_update_full_resolution_status`
would never indicate the monitor is prunable, and thus
`ChainMonitor::archive_fully_resolved_channel_monitors` would never remove
it. This allows a peer which opens channels without funding them to bloat our
memory and disk space, eventually leading to denial-of-service (#4081).
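To make the second issue concrete, here is a toy model (not LDK's actual types) of the archiving behavior described above. A monitor is only archivable once it reports no remaining claimable balances; before this fix, a monitor for a never-funded channel always reported one, so it was never pruned:

```rust
/// Toy model of a `ChannelMonitor`'s prunability, for illustration only.
struct MonitorModel {
    /// Satoshi amounts still claimable on-chain for this channel.
    claimable_balances: Vec<u64>,
}

impl MonitorModel {
    /// Mirrors the idea behind `check_and_update_full_resolution_status`:
    /// nothing left to claim means the monitor can be archived.
    fn is_prunable(&self) -> bool {
        self.claimable_balances.is_empty()
    }
}

/// Mirrors the idea behind `archive_fully_resolved_channel_monitors`.
fn archive_resolved(monitors: &mut Vec<MonitorModel>) {
    monitors.retain(|m| !m.is_prunable());
}

fn main() {
    let mut monitors = vec![
        // Pre-0.1.6, a never-funded channel's monitor kept a phantom
        // balance forever and was never removed by this call.
        MonitorModel { claimable_balances: vec![] },
        MonitorModel { claimable_balances: vec![10_000] },
    ];
    archive_resolved(&mut monitors);
    assert_eq!(monitors.len(), 1);
}
```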

# 0.1.5 - Jul 16, 2025 - "Async Path Reduction"

## Performance Improvements
2 changes: 1 addition & 1 deletion lightning/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "lightning"
version = "0.1.5"
version = "0.1.6"
authors = ["Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning/"
188 changes: 136 additions & 52 deletions lightning/src/chain/channelmonitor.rs
@@ -3118,7 +3118,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
// First check if a counterparty commitment transaction has been broadcasted:
macro_rules! claim_htlcs {
($commitment_number: expr, $txid: expr, $htlcs: expr) => {
- let (htlc_claim_reqs, _) = self.get_counterparty_output_claim_info($commitment_number, $txid, None, $htlcs, confirmed_spend_height);
+ let htlc_claim_reqs = self.get_counterparty_output_claims_for_preimage(*payment_preimage, $commitment_number, $txid, $htlcs, confirmed_spend_height);
let conf_target = self.closure_conf_target();
self.onchain_tx_handler.update_claims_view_from_requests(htlc_claim_reqs, self.best_block.height, self.best_block.height, broadcaster, conf_target, fee_estimator, logger);
}
@@ -3728,7 +3728,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
(htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref()))
), logger);
let (htlc_claim_reqs, counterparty_output_info) =
- self.get_counterparty_output_claim_info(commitment_number, commitment_txid, Some(tx), per_commitment_option, Some(height));
+ self.get_counterparty_output_claim_info(commitment_number, commitment_txid, tx, per_commitment_claimable_data, Some(height));
to_counterparty_output_info = counterparty_output_info;
for req in htlc_claim_reqs {
claimable_outpoints.push(req);
@@ -3738,78 +3738,162 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
(claimable_outpoints, to_counterparty_output_info)
}

- /// Returns the HTLC claim package templates and the counterparty output info
- fn get_counterparty_output_claim_info(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>, per_commitment_option: Option<&Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>, confirmation_height: Option<u32>)
- -> (Vec<PackageTemplate>, CommitmentTxCounterpartyOutputInfo) {
- let mut claimable_outpoints = Vec::new();
- let mut to_counterparty_output_info: CommitmentTxCounterpartyOutputInfo = None;
+ fn get_point_for_commitment_number(&self, commitment_number: u64) -> Option<PublicKey> {
+ let per_commitment_points = &self.their_cur_per_commitment_points?;
+
+ // If the counterparty commitment tx is the latest valid state, use their latest
+ // per-commitment point
+ if per_commitment_points.0 == commitment_number {
+ Some(per_commitment_points.1)
+ } else if let Some(point) = per_commitment_points.2.as_ref() {
+ // If counterparty commitment tx is the state previous to the latest valid state, use
+ // their previous per-commitment point (non-atomicity of revocation means it's valid for
+ // them to temporarily have two valid commitment txns from our viewpoint)
+ if per_commitment_points.0 == commitment_number + 1 {
+ Some(*point)
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+ }
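A note on the helper extracted above: a counterparty commitment transaction is only claimable against their current per-commitment point or, because revocation is not atomic, the immediately previous one. The following is a minimal standalone sketch of that selection rule, using `u32` stand-ins for the real `PublicKey` values (names here are illustrative, not LDK's):

```rust
/// (current commitment number, current point, previous point), mirroring
/// the shape of `their_cur_per_commitment_points`.
type PerCommitmentPoints = (u64, u32, Option<u32>);

fn point_for_commitment_number(
    points: Option<PerCommitmentPoints>, commitment_number: u64,
) -> Option<u32> {
    let (cur_number, cur_point, prev_point) = points?;
    if cur_number == commitment_number {
        // The broadcast commitment is their latest state.
        Some(cur_point)
    } else if cur_number == commitment_number + 1 {
        // One state behind: still valid until revocation completes.
        prev_point
    } else {
        None
    }
}

fn main() {
    let points = Some((42u64, 1000u32, Some(999u32)));
    assert_eq!(point_for_commitment_number(points, 42), Some(1000));
    assert_eq!(point_for_commitment_number(points, 41), Some(999));
    assert_eq!(point_for_commitment_number(points, 40), None);
}
```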

+ fn get_counterparty_output_claims_for_preimage(
+ &self, preimage: PaymentPreimage, commitment_number: u64,
+ commitment_txid: Txid,
+ per_commitment_option: Option<&Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>,
+ confirmation_height: Option<u32>,
+ ) -> Vec<PackageTemplate> {
let per_commitment_claimable_data = match per_commitment_option {
Some(outputs) => outputs,
- None => return (claimable_outpoints, to_counterparty_output_info),
+ None => return Vec::new(),
};
- let per_commitment_points = match self.their_cur_per_commitment_points {
- Some(points) => points,
- None => return (claimable_outpoints, to_counterparty_output_info),
+ let per_commitment_point = match self.get_point_for_commitment_number(commitment_number) {
+ Some(point) => point,
+ None => return Vec::new(),
};

- let per_commitment_point =
- // If the counterparty commitment tx is the latest valid state, use their latest
- // per-commitment point
- if per_commitment_points.0 == commitment_number { &per_commitment_points.1 }
- else if let Some(point) = per_commitment_points.2.as_ref() {
- // If counterparty commitment tx is the state previous to the latest valid state, use
- // their previous per-commitment point (non-atomicity of revocation means it's valid for
- // them to temporarily have two valid commitment txns from our viewpoint)
- if per_commitment_points.0 == commitment_number + 1 {
- point
- } else { return (claimable_outpoints, to_counterparty_output_info); }
- } else { return (claimable_outpoints, to_counterparty_output_info); };

- if let Some(transaction) = tx {
- let revocation_pubkey = RevocationKey::from_basepoint(
- &self.onchain_tx_handler.secp_ctx, &self.holder_revocation_basepoint, &per_commitment_point);
-
- let delayed_key = DelayedPaymentKey::from_basepoint(&self.onchain_tx_handler.secp_ctx, &self.counterparty_commitment_params.counterparty_delayed_payment_base_key, &per_commitment_point);
-
- let revokeable_p2wsh = chan_utils::get_revokeable_redeemscript(&revocation_pubkey,
- self.counterparty_commitment_params.on_counterparty_tx_csv,
- &delayed_key).to_p2wsh();
- for (idx, outp) in transaction.output.iter().enumerate() {
- if outp.script_pubkey == revokeable_p2wsh {
- to_counterparty_output_info =
- Some((idx.try_into().expect("Can't have > 2^32 outputs"), outp.value));
+ let matching_payment_hash = PaymentHash::from(preimage);
+ per_commitment_claimable_data
+ .iter()
+ .filter_map(|(htlc, _)| {
+ if let Some(transaction_output_index) = htlc.transaction_output_index {
+ if htlc.offered && htlc.payment_hash == matching_payment_hash {
+ let htlc_data = PackageSolvingData::CounterpartyOfferedHTLCOutput(
+ CounterpartyOfferedHTLCOutput::build(
+ per_commitment_point,
+ self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
+ self.counterparty_commitment_params.counterparty_htlc_base_key,
+ preimage,
+ htlc.clone(),
+ self.onchain_tx_handler.channel_type_features().clone(),
+ confirmation_height,
+ ),
+ );
+ Some(PackageTemplate::build_package(
+ commitment_txid,
+ transaction_output_index,
+ htlc_data,
+ htlc.cltv_expiry,
+ ))
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+ })
+ .collect()
+ }
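The new function above is the heart of the #4154 fix: when a preimage is learned after the commitment transaction has already confirmed, only offered HTLC outputs whose payment hash matches that specific preimage become claim packages, rather than every output with a known preimage. A self-contained sketch of the filter, assuming the standard relation `payment_hash = SHA256(preimage)` and using the `sha2` crate as a stand-in for LDK's internal hashing:

```rust
use sha2::{Digest, Sha256};

struct Htlc {
    offered: bool,
    payment_hash: [u8; 32],
    output_index: Option<u32>,
}

// Select only the HTLC outputs the given preimage actually unlocks: offered
// HTLCs whose hash is SHA256(preimage) and which have an on-chain output.
fn claimable_for_preimage(htlcs: &[Htlc], preimage: [u8; 32]) -> Vec<u32> {
    let hash: [u8; 32] = Sha256::digest(preimage).into();
    htlcs
        .iter()
        .filter(|h| h.offered && h.payment_hash == hash)
        .filter_map(|h| h.output_index)
        .collect()
}

fn main() {
    let h = |p: [u8; 32]| -> [u8; 32] { Sha256::digest(p).into() };
    let (p1, p2) = ([1u8; 32], [2u8; 32]);
    let htlcs = vec![
        Htlc { offered: true, payment_hash: h(p1), output_index: Some(0) },
        Htlc { offered: true, payment_hash: h(p2), output_index: Some(1) },
    ];
    // Learning p2 must claim only output 1; re-claiming output 0 would
    // produce an invalid transaction since it may already be spent.
    assert_eq!(claimable_for_preimage(&htlcs, p2), vec![1]);
}
```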

+ /// Returns the HTLC claim package templates and the counterparty output info
+ fn get_counterparty_output_claim_info(
+ &self, commitment_number: u64, commitment_txid: Txid,
+ tx: &Transaction,
+ per_commitment_claimable_data: &[(HTLCOutputInCommitment, Option<Box<HTLCSource>>)],
+ confirmation_height: Option<u32>,
+ ) -> (Vec<PackageTemplate>, CommitmentTxCounterpartyOutputInfo) {
+ let mut claimable_outpoints = Vec::new();
+ let mut to_counterparty_output_info: CommitmentTxCounterpartyOutputInfo = None;
+
+ let per_commitment_point = match self.get_point_for_commitment_number(commitment_number) {
+ Some(point) => point,
+ None => return (claimable_outpoints, to_counterparty_output_info),
+ };
+
+ let revocation_pubkey = RevocationKey::from_basepoint(
+ &self.onchain_tx_handler.secp_ctx,
+ &self.holder_revocation_basepoint,
+ &per_commitment_point,
+ );
+ let delayed_key = DelayedPaymentKey::from_basepoint(
+ &self.onchain_tx_handler.secp_ctx,
+ &self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
+ &per_commitment_point,
+ );
+ let revokeable_p2wsh = chan_utils::get_revokeable_redeemscript(
+ &revocation_pubkey,
+ self.counterparty_commitment_params.on_counterparty_tx_csv,
+ &delayed_key,
+ )
+ .to_p2wsh();
+ for (idx, outp) in tx.output.iter().enumerate() {
+ if outp.script_pubkey == revokeable_p2wsh {
+ to_counterparty_output_info =
+ Some((idx.try_into().expect("Can't have > 2^32 outputs"), outp.value));
+ }
+ }

for &(ref htlc, _) in per_commitment_claimable_data.iter() {
if let Some(transaction_output_index) = htlc.transaction_output_index {
- if let Some(transaction) = tx {
- if transaction_output_index as usize >= transaction.output.len() ||
- transaction.output[transaction_output_index as usize].value != htlc.to_bitcoin_amount() {
- // per_commitment_data is corrupt or our commitment signing key leaked!
- return (claimable_outpoints, to_counterparty_output_info);
- }
+ if transaction_output_index as usize >= tx.output.len()
+ || tx.output[transaction_output_index as usize].value
+ != htlc.to_bitcoin_amount()
+ {
+ // per_commitment_data is corrupt or our commitment signing key leaked!
+ return (claimable_outpoints, to_counterparty_output_info);
+ }
- let preimage = if htlc.offered { if let Some((p, _)) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) } else { None } } else { None };
+ let preimage = if htlc.offered {
+ if let Some((p, _)) = self.payment_preimages.get(&htlc.payment_hash) {
+ Some(*p)
+ } else {
+ None
+ }
+ } else {
+ None
+ };
if preimage.is_some() || !htlc.offered {
let counterparty_htlc_outp = if htlc.offered {
PackageSolvingData::CounterpartyOfferedHTLCOutput(
- CounterpartyOfferedHTLCOutput::build(*per_commitment_point,
+ CounterpartyOfferedHTLCOutput::build(
+ per_commitment_point,
self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
self.counterparty_commitment_params.counterparty_htlc_base_key,
- preimage.unwrap(), htlc.clone(), self.onchain_tx_handler.channel_type_features().clone(),
- confirmation_height))
+ preimage.unwrap(),
+ htlc.clone(),
+ self.onchain_tx_handler.channel_type_features().clone(),
+ confirmation_height,
+ ),
+ )
} else {
PackageSolvingData::CounterpartyReceivedHTLCOutput(
- CounterpartyReceivedHTLCOutput::build(*per_commitment_point,
+ CounterpartyReceivedHTLCOutput::build(
+ per_commitment_point,
self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
self.counterparty_commitment_params.counterparty_htlc_base_key,
- htlc.clone(), self.onchain_tx_handler.channel_type_features().clone(),
- confirmation_height))
+ htlc.clone(),
+ self.onchain_tx_handler.channel_type_features().clone(),
+ confirmation_height,
+ ),
+ )
};
- let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry);
+ let counterparty_package = PackageTemplate::build_package(
+ commitment_txid,
+ transaction_output_index,
+ counterparty_htlc_outp,
+ htlc.cltv_expiry,
+ );
claimable_outpoints.push(counterparty_package);
}
}
6 changes: 0 additions & 6 deletions lightning/src/events/mod.rs
@@ -229,11 +229,6 @@ impl_writeable_tlv_based_enum_legacy!(PaymentPurpose,
/// Information about an HTLC that is part of a payment that can be claimed.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ClaimedHTLC {
- /// The counterparty of the channel.
Contributor: This commit seems like it should go earlier in the PR, probably doesn't matter though.

Collaborator (author): I mean the commit shouldn't have been backported at all, but oh well :)

Contributor: Yeah, that was an oversight :( It's possible we should've broken up that backports PR; it was huge and there were some nontrivial backports. I re-scanned and noticed the indexed_map's public API changed as well in that PR. Though I doubt anyone uses that directly, I think that would also qualify as a semver violation?

Collaborator (author): It's mostly a CI failure. We have a semver check CI job now, and it did fail here (which is why I noticed it). We should have checked it in the previous PR (and maybe should mark it required on the 0.1/0.2 branches).

- ///
- /// This value will always be `None` for objects serialized with LDK versions prior to 0.2 and
- /// `Some` otherwise.
- pub counterparty_node_id: Option<PublicKey>,
/// The `channel_id` of the channel over which the HTLC was received.
pub channel_id: ChannelId,
/// The `user_channel_id` of the channel over which the HTLC was received. This is the value
@@ -264,7 +259,6 @@ impl_writeable_tlv_based!(ClaimedHTLC, {
(0, channel_id, required),
(1, counterparty_skimmed_fee_msat, (default_value, 0u64)),
(2, user_channel_id, required),
- (3, counterparty_node_id, option),
(4, cltv_expiry, required),
(6, value_msat, required),
});
1 change: 0 additions & 1 deletion lightning/src/ln/channelmanager.rs
@@ -435,7 +435,6 @@ struct ClaimableHTLC {
impl From<&ClaimableHTLC> for events::ClaimedHTLC {
fn from(val: &ClaimableHTLC) -> Self {
events::ClaimedHTLC {
- counterparty_node_id: val.prev_hop.counterparty_node_id,
channel_id: val.prev_hop.channel_id,
user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
cltv_expiry: val.cltv_expiry,
82 changes: 82 additions & 0 deletions lightning/src/ln/monitor_tests.rs
@@ -3693,3 +3693,85 @@ fn test_lost_timeout_monitor_events() {
do_test_lost_timeout_monitor_events(CommitmentType::LocalWithLastHTLC, false);
do_test_lost_timeout_monitor_events(CommitmentType::LocalWithLastHTLC, true);
}

#[test]
fn test_ladder_preimage_htlc_claims() {
// Tests that when we learn of a preimage via a monitor update we only claim HTLCs with the
// corresponding payment hash. This test is a reproduction of a scenario that happened in
// production where the second HTLC claim also included the first HTLC (even though it was
// already claimed), resulting in an invalid claim transaction.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

let node_id_0 = nodes[0].node.get_our_node_id();
let node_id_1 = nodes[1].node.get_our_node_id();

let (_, _, channel_id, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);

let (payment_preimage1, payment_hash1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
let (payment_preimage2, payment_hash2, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &node_id_1, "test".to_string()).unwrap();
check_added_monitors(&nodes[0], 1);
check_closed_broadcast(&nodes[0], 1, true);
let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
check_closed_event(&nodes[0], 1, reason, false, &[node_id_1], 1_000_000);

let commitment_tx = {
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
txn.remove(0)
};
mine_transaction(&nodes[0], &commitment_tx);
mine_transaction(&nodes[1], &commitment_tx);

check_closed_broadcast(&nodes[1], 1, true);
check_added_monitors(&nodes[1], 1);
check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[node_id_0], 1_000_000);

nodes[1].node.claim_funds(payment_preimage1);
expect_payment_claimed!(&nodes[1], payment_hash1, 1_000_000);
check_added_monitors(&nodes[1], 1);

let (htlc1, htlc_claim_tx1) = {
let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
let htlc_claim_tx = txn.remove(0);
assert_eq!(htlc_claim_tx.input.len(), 1);
check_spends!(htlc_claim_tx, commitment_tx);
(htlc_claim_tx.input[0].previous_output, htlc_claim_tx)
};
mine_transaction(&nodes[0], &htlc_claim_tx1);
mine_transaction(&nodes[1], &htlc_claim_tx1);

connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);

expect_payment_sent(&nodes[0], payment_preimage1, None, true, false);
check_added_monitors(&nodes[0], 1);

nodes[1].node.claim_funds(payment_preimage2);
expect_payment_claimed!(&nodes[1], payment_hash2, 1_000_000);
check_added_monitors(&nodes[1], 1);

let (htlc2, htlc_claim_tx2) = {
let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1, "{:?}", txn.iter().map(|tx| tx.compute_txid()).collect::<Vec<_>>());
let htlc_claim_tx = txn.remove(0);
assert_eq!(htlc_claim_tx.input.len(), 1);
check_spends!(htlc_claim_tx, commitment_tx);
(htlc_claim_tx.input[0].previous_output, htlc_claim_tx)
};
assert_ne!(htlc1, htlc2);

mine_transaction(&nodes[0], &htlc_claim_tx2);
mine_transaction(&nodes[1], &htlc_claim_tx2);

connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);

expect_payment_sent(&nodes[0], payment_preimage2, None, true, false);
check_added_monitors(&nodes[0], 1);
}