Refactor commitment broadcast to always go through OnchainTxHandler
Currently, our holder commitment broadcast only goes through the
`OnchainTxHandler` for anchor outputs channels because we can actually
bump the commitment transaction fees with it. For non-anchor outputs
channels, we would just broadcast once directly via the
`ChannelForceClosed` monitor update, without going through the
`OnchainTxHandler`.

As we add support for async signing, we need to be tolerant of signing
failures. A signing failure for our holder commitment currently
panics, but once the panic is removed, we must be able to retry signing
once the signer is available. We can easily achieve this via the
existing `OnchainTxHandler::rebroadcast_pending_claims`, but this
requires that we first queue our holder commitment as a claim. This
commit ensures we do so everywhere we need to broadcast a holder
commitment transaction, regardless of the channel type.

Co-authored-by: Rachel Malonson <rachel@lightspark.com>
wpaulino and rmalonson committed Nov 29, 2023 · 1 parent 1876e67 · commit d9422ca
Showing 5 changed files with 160 additions and 130 deletions.
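
To make the retry flow concrete, below is a minimal, self-contained Rust sketch of the queue-then-retry shape described in the commit message. It is not LDK's real API: OnchainTxHandlerSketch, HolderCommitmentClaim, ChannelSigner, Broadcaster, FlakySigner, and LogBroadcaster are illustrative stand-ins, and a real claim would stay tracked until it confirms on chain rather than being dropped after a successful broadcast. The only point it models is the one the commit relies on: once the holder commitment is queued as a claim, a signing failure leaves the claim pending, and a later `rebroadcast_pending_claims`-style call can retry once the signer is available.

// Minimal, self-contained sketch (plain rustc, no external crates). All types here are
// illustrative stand-ins, not LDK's actual API.

struct HolderCommitmentClaim {
    channel_id: u64,
}

// Stand-in for an asynchronous signer that may not have a signature ready yet.
trait ChannelSigner {
    fn sign_holder_commitment(&self, claim: &HolderCommitmentClaim) -> Result<String, ()>;
}

trait Broadcaster {
    fn broadcast(&self, tx: &str);
}

#[derive(Default)]
struct OnchainTxHandlerSketch {
    pending_claims: Vec<HolderCommitmentClaim>,
}

impl OnchainTxHandlerSketch {
    // What this commit does conceptually: queue the holder commitment as a claim for
    // every channel type, instead of broadcasting it directly for non-anchor channels.
    fn queue_holder_commitment_claim(&mut self, claim: HolderCommitmentClaim) {
        self.pending_claims.push(claim);
    }

    // What queuing enables: a claim whose signing failed earlier is simply retried on
    // the next call. (In LDK the claim also stays tracked until it confirms on chain;
    // this sketch drops it after a successful broadcast purely to keep the example short.)
    fn rebroadcast_pending_claims<S: ChannelSigner, B: Broadcaster>(
        &mut self, signer: &S, broadcaster: &B,
    ) {
        self.pending_claims.retain(|claim| match signer.sign_holder_commitment(claim) {
            Ok(signed_tx) => {
                broadcaster.broadcast(&signed_tx);
                false // signed and handed to the broadcaster
            }
            Err(()) => true, // signer not ready yet; keep the claim and retry later
        });
    }
}

struct FlakySigner {
    ready: std::cell::Cell<bool>,
}

impl ChannelSigner for FlakySigner {
    fn sign_holder_commitment(&self, claim: &HolderCommitmentClaim) -> Result<String, ()> {
        if self.ready.get() {
            Ok(format!("signed-holder-commitment-for-channel-{}", claim.channel_id))
        } else {
            Err(())
        }
    }
}

struct LogBroadcaster;

impl Broadcaster for LogBroadcaster {
    fn broadcast(&self, tx: &str) {
        println!("broadcasting {}", tx);
    }
}

fn main() {
    let mut handler = OnchainTxHandlerSketch::default();
    handler.queue_holder_commitment_claim(HolderCommitmentClaim { channel_id: 42 });

    let signer = FlakySigner { ready: std::cell::Cell::new(false) };
    let broadcaster = LogBroadcaster;

    // First attempt: the async signer is not ready, so nothing is broadcast, but the
    // claim stays queued rather than being lost (or causing a panic).
    handler.rebroadcast_pending_claims(&signer, &broadcaster);
    assert_eq!(handler.pending_claims.len(), 1);

    // Once the signer becomes available, a retry signs and broadcasts the commitment.
    signer.ready.set(true);
    handler.rebroadcast_pending_claims(&signer, &broadcaster);
    assert!(handler.pending_claims.is_empty());
}
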
110 changes: 56 additions & 54 deletions lightning/src/chain/channelmonitor.rs
@@ -2604,18 +2604,59 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
}
}

pub(crate) fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &L)
where B::Target: BroadcasterInterface,
L::Target: Logger,
{
let commit_txs = self.get_latest_holder_commitment_txn(logger);
let mut txs = vec![];
for tx in commit_txs.iter() {
log_info!(logger, "Broadcasting local {}", log_tx!(tx));
txs.push(tx);
}
broadcaster.broadcast_transactions(&txs);
fn generate_claimable_outpoints_and_watch_outputs(&mut self) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
let funding_outp = HolderFundingOutput::build(
self.funding_redeemscript.clone(),
self.channel_value_satoshis,
self.onchain_tx_handler.channel_type_features().clone()
);
let commitment_package = PackageTemplate::build_package(
self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
PackageSolvingData::HolderFundingOutput(funding_outp),
self.best_block.height(), self.best_block.height()
);
let mut claimable_outpoints = vec![commitment_package];
self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
// Although we aren't signing the transaction directly here, the transaction will be signed
// in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
// new channel updates.
self.holder_tx_signed = true;
let mut watch_outputs = Vec::new();
// We can't broadcast our HTLC transactions while the commitment transaction is
// unconfirmed. We'll delay doing so until we detect the confirmed commitment in
// `transactions_confirmed`.
if !self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
// Because we're broadcasting a commitment transaction, we should construct the package
// assuming it gets confirmed in the next block. Sadly, we have code which considers
// "not yet confirmed" things as discardable, so we cannot do that here.
let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(
&self.current_holder_commitment_tx, self.best_block.height()
);
let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
let new_outputs = self.get_broadcasted_holder_watch_outputs(
&self.current_holder_commitment_tx, &unsigned_commitment_tx
);
if !new_outputs.is_empty() {
watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
}
claimable_outpoints.append(&mut new_outpoints);
}
(claimable_outpoints, watch_outputs)
}

pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast<B: Deref, F: Deref, L: Deref>(
&mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
)
where
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs();
self.onchain_tx_handler.update_claims_view_from_requests(
claimable_outpoints, self.best_block.height(), self.best_block.height(), broadcaster,
fee_estimator, logger
);
}

pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), ()>
@@ -2703,26 +2744,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
log_trace!(logger, "Avoiding commitment broadcast, already detected confirmed spend onchain");
continue;
}
self.broadcast_latest_holder_commitment_txn(broadcaster, logger);
// If the channel supports anchor outputs, we'll need to emit an external
// event to be consumed such that a child transaction is broadcast with a
// high enough feerate for the parent commitment transaction to confirm.
if self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
let funding_output = HolderFundingOutput::build(
self.funding_redeemscript.clone(), self.channel_value_satoshis,
self.onchain_tx_handler.channel_type_features().clone(),
);
let best_block_height = self.best_block.height();
let commitment_package = PackageTemplate::build_package(
self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
PackageSolvingData::HolderFundingOutput(funding_output),
best_block_height, best_block_height
);
self.onchain_tx_handler.update_claims_view_from_requests(
vec![commitment_package], best_block_height, best_block_height,
broadcaster, &bounded_fee_estimator, logger,
);
}
self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger);
} else if !self.holder_tx_signed {
log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
log_error!(logger, " in channel monitor for channel {}!", &self.funding_info.0.to_channel_id());
@@ -3625,29 +3647,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {

let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
if should_broadcast {
let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone(), self.channel_value_satoshis, self.onchain_tx_handler.channel_type_features().clone());
let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), self.best_block.height());
claimable_outpoints.push(commitment_package);
self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
// Although we aren't signing the transaction directly here, the transaction will be signed
// in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
// new channel updates.
self.holder_tx_signed = true;
// We can't broadcast our HTLC transactions while the commitment transaction is
// unconfirmed. We'll delay doing so until we detect the confirmed commitment in
// `transactions_confirmed`.
if !self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
// Because we're broadcasting a commitment transaction, we should construct the package
// assuming it gets confirmed in the next block. Sadly, we have code which considers
// "not yet confirmed" things as discardable, so we cannot do that here.
let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &unsigned_commitment_tx);
if !new_outputs.is_empty() {
watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
}
claimable_outpoints.append(&mut new_outpoints);
}
let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs();
claimable_outpoints.append(&mut new_outpoints);
watch_outputs.append(&mut new_outputs);
}

// Find which on-chain events have reached their confirmation threshold.
65 changes: 41 additions & 24 deletions lightning/src/ln/functional_tests.rs
@@ -2270,9 +2270,15 @@ fn channel_monitor_network_test() {
nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
{
let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
assert_eq!(node_txn.len(), 1);
mine_transaction(&nodes[1], &node_txn[0]);
if nodes[1].connect_style.borrow().updates_best_block_first() {
let _ = nodes[1].tx_broadcaster.txn_broadcast();
}

mine_transaction(&nodes[0], &node_txn[0]);
check_added_monitors!(nodes[0], 1);
test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
@@ -2281,7 +2287,6 @@ fn channel_monitor_network_test() {
assert_eq!(nodes[0].node.list_channels().len(), 0);
assert_eq!(nodes[1].node.list_channels().len(), 1);
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);

// One pending HTLC is discarded by the force-close:
let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
@@ -3552,7 +3557,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
// connect_style.
return;
}
create_announced_chan_between_nodes(&nodes, 0, 1);
let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;

route_payment(&nodes[0], &[&nodes[1]], 10000000);
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
@@ -3561,11 +3566,12 @@
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);

let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 3);
assert_eq!(node_txn[0].txid(), node_txn[1].txid());
let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
assert_eq!(node_txn.len(), 2);
check_spends!(node_txn[0], funding_tx);
check_spends!(node_txn[1], node_txn[0]);

let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[1].clone()]);
let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]);
connect_block(&nodes[1], &block);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
@@ -3622,7 +3628,7 @@ fn test_force_close_fail_back() {
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
let tx = {
let commitment_tx = {
let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
// Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
// have a use for it unless nodes[2] learns the preimage somehow, the funds will go
@@ -3631,7 +3637,7 @@
node_txn.remove(0)
};

mine_transaction(&nodes[1], &tx);
mine_transaction(&nodes[1], &commitment_tx);

// Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
check_closed_broadcast!(nodes[1], true);
@@ -3643,15 +3649,16 @@
get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
.provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
}
mine_transaction(&nodes[2], &tx);
let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 1);
assert_eq!(node_txn[0].input.len(), 1);
assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
assert_eq!(node_txn[0].lock_time, LockTime::ZERO); // Must be an HTLC-Success
assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
mine_transaction(&nodes[2], &commitment_tx);
let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast();
assert_eq!(node_txn.len(), if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
let htlc_tx = node_txn.pop().unwrap();
assert_eq!(htlc_tx.input.len(), 1);
assert_eq!(htlc_tx.input[0].previous_output.txid, commitment_tx.txid());
assert_eq!(htlc_tx.lock_time, LockTime::ZERO); // Must be an HTLC-Success
assert_eq!(htlc_tx.input[0].witness.len(), 5); // Must be an HTLC-Success

check_spends!(node_txn[0], tx);
check_spends!(htlc_tx, commitment_tx);
}

#[test]
@@ -8876,7 +8883,12 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
assert_eq!(bob_txn.len(), 1);
check_spends!(bob_txn[0], txn_to_broadcast[0]);
} else {
assert_eq!(bob_txn.len(), 2);
if nodes[1].connect_style.borrow().updates_best_block_first() {
assert_eq!(bob_txn.len(), 3);
assert_eq!(bob_txn[0].txid(), bob_txn[1].txid());
} else {
assert_eq!(bob_txn.len(), 2);
}
check_spends!(bob_txn[0], chan_ab.3);
}
}
@@ -8892,15 +8904,16 @@
// If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
// Bob force-closed and broadcasts the commitment transaction along with a
// HTLC-output-claiming transaction.
let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
if broadcast_alice {
assert_eq!(bob_txn.len(), 1);
check_spends!(bob_txn[0], txn_to_broadcast[0]);
assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
} else {
assert_eq!(bob_txn.len(), 2);
check_spends!(bob_txn[1], txn_to_broadcast[0]);
assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight);
assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
let htlc_tx = bob_txn.pop().unwrap();
check_spends!(htlc_tx, txn_to_broadcast[0]);
assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), script_weight);
}
}
}
@@ -9376,8 +9389,12 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
// We should broadcast an HTLC transaction spending our funding transaction first
let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(spending_txn.len(), 2);
assert_eq!(spending_txn[0].txid(), node_txn[0].txid());
check_spends!(spending_txn[1], node_txn[0]);
let htlc_tx = if spending_txn[0].txid() == node_txn[0].txid() {
&spending_txn[1]
} else {
&spending_txn[0]
};
check_spends!(htlc_tx, node_txn[0]);
// We should also generate a SpendableOutputs event with the to_self output (as its
// timelock is up).
let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
Expand All @@ -9387,7 +9404,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
// should immediately fail-backwards the HTLC to the previous hop, without waiting for an
// additional block built on top of the current chain.
nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
&nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
&nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
check_added_monitors!(nodes[1], 1);

