From 8e57ada8474f47c10f65d83e5ef128b7df374a95 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 15:33:44 +0200 Subject: [PATCH 01/12] lightning: introduce singular claim requests Have ChannelMonitor hand singular ClaimRequests to OnchainTxHandler. Convert them to PackageTemplates only after duplicate filtering. This makes the single-outpoint invariant explicit at that boundary. --- lightning/src/chain/channelmonitor.rs | 53 ++++++++++++++------------- lightning/src/chain/onchaintx.rs | 31 ++++++---------- lightning/src/chain/package.rs | 42 +++++++++++++++++++++ 3 files changed, 81 insertions(+), 45 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index c3e20ef5e6f..2442f9c6af1 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -38,8 +38,8 @@ use crate::chain::chaininterface::{ }; use crate::chain::onchaintx::{ClaimEvent, FeerateStrategy, OnchainTxHandler}; use crate::chain::package::{ - CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, - HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedHTLCOutput, RevokedOutput, + ClaimRequest, CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, + HolderFundingOutput, HolderHTLCOutput, PackageSolvingData, RevokedHTLCOutput, RevokedOutput, }; use crate::chain::transaction::{OutPoint, TransactionData}; use crate::chain::{BlockLocator, WatchedOutput}; @@ -3861,7 +3861,7 @@ impl ChannelMonitorImpl { fn generate_claimable_outpoints_and_watch_outputs( &mut self, generate_monitor_event_with_reason: Option, require_funding_seen: bool, - ) -> (Vec, Vec) { + ) -> (Vec, Vec) { let funding = get_confirmed_funding_scope!(self); let holder_commitment_tx = &funding.current_holder_commitment_tx; let funding_outp = HolderFundingOutput::build( @@ -3869,7 +3869,7 @@ impl ChannelMonitorImpl { funding.channel_parameters.clone(), ); let funding_outpoint = 
funding.funding_outpoint(); - let commitment_package = PackageTemplate::build_package( + let commitment_package = ClaimRequest::new( funding_outpoint.txid.clone(), funding_outpoint.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height, @@ -3908,9 +3908,9 @@ impl ChannelMonitorImpl { let zero_fee_commitments = self.channel_type_features().supports_anchor_zero_fee_commitments(); if !zero_fee_htlcs && !zero_fee_commitments { - // Because we're broadcasting a commitment transaction, we should construct the package - // assuming it gets confirmed in the next block. Sadly, we have code which considers - // "not yet confirmed" things as discardable, so we cannot do that here. + // Because we're broadcasting a commitment transaction, we should construct claim + // requests assuming it gets confirmed in the next block. Sadly, we have code which + // considers "not yet confirmed" things as discardable, so we cannot do that here. let (mut new_outpoints, _) = self.get_broadcasted_holder_claims( funding, holder_commitment_tx, self.best_block.height, ); @@ -4759,11 +4759,11 @@ impl ChannelMonitorImpl { /// height > height + CLTV_SHARED_CLAIM_BUFFER. In any case, will install monitoring for /// HTLC-Success/HTLC-Timeout transactions. /// - /// Returns packages to claim the revoked output(s) and general information about the output that - /// is to the counterparty in the commitment transaction. + /// Returns claim requests for the revoked output(s) and general information about the output + /// that is to the counterparty in the commitment transaction. 
#[rustfmt::skip] fn check_spend_counterparty_transaction(&mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) - -> (Vec, CommitmentTxCounterpartyOutputInfo) + -> (Vec, CommitmentTxCounterpartyOutputInfo) { // Most secp and related errors trying to create keys means we have no hope of constructing // a spend transaction...so we return no transactions to broadcast @@ -4803,7 +4803,7 @@ impl ChannelMonitorImpl { per_commitment_point, per_commitment_key, outp.value, funding_spent.channel_parameters.clone(), height, ); - let justice_package = PackageTemplate::build_package( + let justice_package = ClaimRequest::new( commitment_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, @@ -4832,7 +4832,7 @@ impl ChannelMonitorImpl { } else { height }; - let justice_package = PackageTemplate::build_package( + let justice_package = ClaimRequest::new( commitment_txid, transaction_output_index, PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp), @@ -4921,7 +4921,7 @@ impl ChannelMonitorImpl { commitment_txid: Txid, per_commitment_option: Option<&Vec<(HTLCOutputInCommitment, Option>)>>, confirmation_height: Option, - ) -> Vec { + ) -> Vec { let per_commitment_claimable_data = match per_commitment_option { Some(outputs) => outputs, None => return Vec::new(), @@ -4946,7 +4946,7 @@ impl ChannelMonitorImpl { confirmation_height, ), ); - Some(PackageTemplate::build_package( + Some(ClaimRequest::new( commitment_txid, transaction_output_index, htlc_data, @@ -4962,13 +4962,13 @@ impl ChannelMonitorImpl { .collect() } - /// Returns the HTLC claim package templates and the counterparty output info + /// Returns the HTLC claim requests and the counterparty output info. 
fn get_counterparty_output_claim_info( &self, funding_spent: &FundingScope, commitment_number: u64, commitment_txid: Txid, tx: &Transaction, per_commitment_claimable_data: &[(HTLCOutputInCommitment, Option>)], confirmation_height: Option, - ) -> (Vec, CommitmentTxCounterpartyOutputInfo) { + ) -> (Vec, CommitmentTxCounterpartyOutputInfo) { let mut claimable_outpoints = Vec::new(); let mut to_counterparty_output_info: CommitmentTxCounterpartyOutputInfo = None; @@ -5039,7 +5039,7 @@ impl ChannelMonitorImpl { ), ) }; - let counterparty_package = PackageTemplate::build_package( + let counterparty_package = ClaimRequest::new( commitment_txid, transaction_output_index, counterparty_htlc_outp, @@ -5057,7 +5057,7 @@ impl ChannelMonitorImpl { #[rustfmt::skip] fn check_spend_counterparty_htlc( &mut self, tx: &Transaction, commitment_number: u64, commitment_txid: &Txid, height: u32, logger: &L - ) -> (Vec, Option) { + ) -> (Vec, Option) { let secret = if let Some(secret) = self.get_secret(commitment_number) { secret } else { return (Vec::new(), None); }; let per_commitment_key = match SecretKey::from_slice(&secret) { Ok(key) => key, @@ -5088,7 +5088,7 @@ impl ChannelMonitorImpl { per_commitment_point, per_commitment_key, tx.output[idx].value, self.funding.channel_parameters.clone(), height, ); - let justice_package = PackageTemplate::build_package( + let justice_package = ClaimRequest::new( htlc_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, ); @@ -5140,13 +5140,14 @@ impl ChannelMonitorImpl { htlcs } - // Returns (1) `PackageTemplate`s that can be given to the OnchainTxHandler, so that the handler can - // broadcast transactions claiming holder HTLC commitment outputs and (2) a holder revokable - // script so we can detect whether a holder transaction has been seen on-chain. 
+ // Returns (1) `ClaimRequest`s that can be given to the OnchainTxHandler, so that the + // handler can broadcast transactions claiming holder HTLC commitment outputs and (2) a + // holder revokable script so we can detect whether a holder transaction has been seen + // on-chain. #[rustfmt::skip] fn get_broadcasted_holder_claims( &self, funding: &FundingScope, holder_tx: &HolderCommitmentTransaction, conf_height: u32, - ) -> (Vec, Option<(ScriptBuf, PublicKey, RevocationKey)>) { + ) -> (Vec, Option<(ScriptBuf, PublicKey, RevocationKey)>) { let tx = holder_tx.trust(); let keys = tx.keys(); let redeem_script = chan_utils::get_revokeable_redeemscript( @@ -5165,7 +5166,7 @@ impl ChannelMonitorImpl { }; let transaction_output_index = htlc_descriptor.htlc.transaction_output_index .expect("Expected transaction output index for non-dust HTLC"); - PackageTemplate::build_package( + ClaimRequest::new( tx.txid(), transaction_output_index, PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build(htlc_descriptor, conf_height)), counterparty_spendable_height, @@ -5201,7 +5202,7 @@ impl ChannelMonitorImpl { fn check_spend_holder_transaction( &mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L, - ) -> Option<(Vec, TransactionOutputs)> { + ) -> Option<(Vec, TransactionOutputs)> { let funding_spent = get_confirmed_funding_scope!(self); // HTLCs set may differ between last and previous holder commitment txn, in case of one them hitting chain, ensure we cancel all HTLCs backward @@ -5712,7 +5713,7 @@ impl ChannelMonitorImpl { conf_hash: BlockHash, txn_matched: Vec<&Transaction>, mut watch_outputs: Vec, - mut claimable_outpoints: Vec, + mut claimable_outpoints: Vec, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 3eb6d64f3a2..823b81936ce 100644 --- a/lightning/src/chain/onchaintx.rs +++ 
b/lightning/src/chain/onchaintx.rs @@ -27,7 +27,7 @@ use crate::chain::chaininterface::{ BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator, TransactionType, }; use crate::chain::channelmonitor::ANTI_REORG_DELAY; -use crate::chain::package::{PackageSolvingData, PackageTemplate}; +use crate::chain::package::{ClaimRequest, PackageSolvingData, PackageTemplate}; use crate::chain::transaction::MaybeSignedTransaction; use crate::chain::ClaimId; use crate::ln::chan_utils::{ @@ -791,7 +791,7 @@ impl OnchainTxHandler { /// `cur_height`, however it must never be higher than `cur_height`. #[rustfmt::skip] pub(super) fn update_claims_view_from_requests( - &mut self, mut requests: Vec, conf_height: u32, cur_height: u32, + &mut self, mut requests: Vec, conf_height: u32, cur_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) { @@ -801,33 +801,26 @@ impl OnchainTxHandler { // First drop any duplicate claims. 
requests.retain(|req| { - debug_assert_eq!( - req.outpoints().len(), - 1, - "Claims passed to `update_claims_view_from_requests` should not be aggregated" - ); - let mut all_outpoints_claiming = true; - for outpoint in req.outpoints() { - if self.claimable_outpoints.get(outpoint).is_none() { - all_outpoints_claiming = false; - } - } - if all_outpoints_claiming { + let outpoint = req.outpoint(); + if self.claimable_outpoints.get(outpoint).is_some() { log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", - req.outpoints()[0].txid, req.outpoints()[0].vout); + outpoint.txid, outpoint.vout); false } else { let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten() - .find(|locked_package| locked_package.outpoints() == req.outpoints()); + .find(|locked_package| locked_package.outpoints().len() == 1 && locked_package.contains_outpoint(outpoint)); if let Some(package) = timelocked_equivalent_package { log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.", - req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_locktime(cur_height)); + outpoint.txid, outpoint.vout, package.package_locktime(cur_height)); false } else { true } } }); + let mut requests = requests.into_iter() + .map(ClaimRequest::into_package_template) + .collect::>(); // Then try to maximally aggregate `requests`. 
for i in (1..requests.len()).rev() { @@ -1290,7 +1283,7 @@ mod tests { use types::features::ChannelTypeFeatures; use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator}; - use crate::chain::package::{HolderHTLCOutput, PackageSolvingData, PackageTemplate}; + use crate::chain::package::{ClaimRequest, HolderHTLCOutput, PackageSolvingData}; use crate::chain::transaction::OutPoint; use crate::ln::chan_utils::{ ChannelPublicKeys, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, @@ -1412,7 +1405,7 @@ mod tests { let holder_commit_txid = holder_commit.trust().txid(); let mut requests = Vec::new(); for (htlc, counterparty_sig) in holder_commit.nondust_htlcs().iter().zip(holder_commit.counterparty_htlc_sigs.iter()) { - requests.push(PackageTemplate::build_package( + requests.push(ClaimRequest::new( holder_commit_txid, htlc.transaction_output_index.unwrap(), PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build(HTLCDescriptor { diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 0ef8855242b..06be5750367 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -1097,6 +1097,19 @@ enum PackageMalleability { Untractable, } +/// A single on-chain output claim generated by [`ChannelMonitor`]. +/// +/// These requests are converted to [`PackageTemplate`]s once [`OnchainTxHandler`] has deduplicated +/// them and is ready to aggregate compatible claims. +/// +/// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor +/// [`OnchainTxHandler`]: crate::chain::onchaintx::OnchainTxHandler +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct ClaimRequest { + input: (BitcoinOutPoint, PackageSolvingData), + counterparty_spendable_height: u32, +} + /// A structure to describe a package content that is generated by ChannelMonitor and /// used by OnchainTxHandler to generate and broadcast transactions settling onchain claims. 
/// @@ -1179,6 +1192,32 @@ impl PartialEq for PackageTemplate { } } +impl ClaimRequest { + pub(crate) fn new( + txid: Txid, vout: u32, input_solving_data: PackageSolvingData, + counterparty_spendable_height: u32, + ) -> Self { + Self { + input: (BitcoinOutPoint { txid, vout }, input_solving_data), + counterparty_spendable_height, + } + } + + pub(crate) fn outpoint(&self) -> &BitcoinOutPoint { + &self.input.0 + } + + pub(crate) fn into_package_template(self) -> PackageTemplate { + let (outpoint, input_solving_data) = self.input; + PackageTemplate::build_package( + outpoint.txid, + outpoint.vout, + input_solving_data, + self.counterparty_spendable_height, + ) + } +} + impl PackageTemplate { #[rustfmt::skip] pub(crate) fn can_merge_with(&self, other: &PackageTemplate, cur_height: u32) -> bool { @@ -1265,6 +1304,9 @@ impl PackageTemplate { pub(crate) fn outpoints(&self) -> Vec<&BitcoinOutPoint> { self.inputs.iter().map(|(o, _)| o).collect() } + pub(crate) fn contains_outpoint(&self, outpoint: &BitcoinOutPoint) -> bool { + self.inputs.iter().any(|(input, _)| input == outpoint) + } pub(crate) fn outpoints_and_creation_heights( &self, ) -> impl Iterator)> { From c8ce370fcde5729c76ac17b9acdb053d8acd9f5b Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 4 May 2026 10:36:53 +0200 Subject: [PATCH 02/12] lightning: clarify channelmonitor event thresholds Clarify ChannelMonitor comments around on-chain event thresholds. Some events only wait for anti-reorg finality, while CSV-delayed outputs wait until spendable through the same threshold queue. 
--- lightning/src/chain/channelmonitor.rs | 51 ++++++++++++++------------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 2442f9c6af1..e311ebcb059 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -472,7 +472,8 @@ impl Readable for CounterpartyCommitmentParameters { /// An entry for an [`OnchainEvent`], stating the block height and hash when the event was /// observed, as well as the transaction causing it. /// -/// Used to determine when the on-chain event can be considered safe from a chain reorganization. +/// Used to determine when the on-chain event can be considered safe from a chain reorganization +/// or, for CSV-delayed outputs, spendable. #[derive(Clone, PartialEq, Eq)] struct OnchainEventEntry { txid: Txid, @@ -490,14 +491,14 @@ impl OnchainEventEntry { OnchainEvent::MaturingOutput { descriptor: SpendableOutputDescriptor::DelayedPaymentOutput(ref descriptor) } => { - // A CSV'd transaction is confirmable in block (input height) + CSV delay, which means - // it's broadcastable when we see the previous block. + // A CSV-delayed output is spendable in block (input height) + CSV delay, which + // means we can hand it upstream when we see the previous block. conf_threshold = cmp::max(conf_threshold, self.height + descriptor.to_self_delay as u32 - 1); }, OnchainEvent::FundingSpendConfirmation { on_local_output_csv: Some(csv), .. } | OnchainEvent::HTLCSpendConfirmation { on_to_local_output_csv: Some(csv), .. } => { - // A CSV'd transaction is confirmable in block (input height) + CSV delay, which means - // it's broadcastable when we see the previous block. + // A CSV-delayed output is spendable in block (input height) + CSV delay, which + // means we can act on the event when we see the previous block. 
conf_threshold = cmp::max(conf_threshold, self.height + csv as u32 - 1); }, _ => {}, @@ -516,7 +517,7 @@ impl OnchainEventEntry { type CommitmentTxCounterpartyOutputInfo = Option<(u32, Amount)>; /// Upon discovering of some classes of onchain tx by ChannelMonitor, we may have to take actions on it -/// once they mature to enough confirmations (ANTI_REORG_DELAY) +/// once they reach anti-reorg finality or, for CSV-delayed outputs, CSV maturity. #[derive(Clone, PartialEq, Eq)] enum OnchainEvent { /// An outbound HTLC failing after a transaction is confirmed. Used @@ -533,8 +534,8 @@ enum OnchainEvent { /// transaction which appeared on chain. commitment_tx_output_idx: Option, }, - /// An output waiting on [`ANTI_REORG_DELAY`] confirmations before we hand the user the - /// [`SpendableOutputDescriptor`]. + /// An output waiting until it is anti-reorg final and, for CSV-delayed outputs, spendable + /// before we hand the user the [`SpendableOutputDescriptor`]. MaturingOutput { descriptor: SpendableOutputDescriptor }, /// A spend of the funding output, either a commitment transaction or a cooperative closing /// transaction. @@ -565,8 +566,8 @@ enum OnchainEvent { /// If the claim was made by either party with a preimage, this is filled in preimage: Option, /// If the claim was made by us on an inbound HTLC against a local commitment transaction, - /// we set this to the output CSV value which we will have to wait until to spend the - /// output (and generate a SpendableOutput event). + /// this records the CSV delay for the delayed output. While present, the event reaches + /// its threshold once the output is spendable. on_to_local_output_csv: Option, }, /// An alternative funding transaction (due to a splice/RBF) has confirmed but can no longer be @@ -1000,7 +1001,7 @@ impl Balance { } } -/// An HTLC which has been irrevocably resolved on-chain, and has reached ANTI_REORG_DELAY. +/// An HTLC whose on-chain outcome has reached the threshold for irrevocable resolution. 
#[derive(Clone, PartialEq, Eq)] struct IrrevocablyResolvedHTLC { commitment_tx_output_idx: Option, @@ -1286,8 +1287,9 @@ pub(crate) struct ChannelMonitorImpl { pub(super) is_processing_pending_events: bool, // Used to track on-chain events (i.e., transactions part of channels confirmed on chain) on - // which to take actions once they reach enough confirmations. Each entry includes the - // transaction's id and the height when the transaction was confirmed on chain. + // which to take actions once they reach anti-reorg finality or, for CSV-delayed outputs, + // CSV maturity. Each entry includes the transaction's id and the height when the transaction + // was confirmed on chain. onchain_events_awaiting_threshold_conf: Vec, // If we get serialized out and re-read, we need to make sure that the chain monitoring @@ -1324,8 +1326,8 @@ pub(crate) struct ChannelMonitorImpl { /// Added in 0.0.124. holder_pays_commitment_tx_fee: Option, - /// Set to `Some` of the confirmed transaction spending the funding input of the channel after - /// reaching `ANTI_REORG_DELAY` confirmations. + /// Set to `Some` once the confirmed transaction spending the funding input of the channel has + /// reached its event threshold. funding_spend_confirmed: Option, confirmed_commitment_tx_counterparty_output: CommitmentTxCounterpartyOutputInfo, @@ -2745,11 +2747,10 @@ impl ChannelMonitorImpl { source: BalanceSource::Htlc, }); } else if htlc_resolved && !htlc_output_spend_pending { - // Funding transaction spends should be fully confirmed by the time any - // HTLC transactions are resolved, unless we're talking about a holder - // commitment tx, whose resolution is delayed until the CSV timeout is - // reached, even though HTLCs may be resolved after only - // ANTI_REORG_DELAY confirmations. 
+ // Funding transaction spends should have reached their event threshold by the time any + // HTLC transactions are irrevocably resolved, unless we're talking about a holder + // commitment tx, whose resolution is delayed until CSV maturity, even though HTLCs + // may be resolved after anti-reorg finality. debug_assert!(holder_commitment || self.funding_spend_confirmed.is_some()); } else if counterparty_revoked_commitment { let htlc_output_claim_pending = self.onchain_events_awaiting_threshold_conf.iter().any(|event| { @@ -2871,7 +2872,7 @@ impl ChannelMonitor { }); if let Some((txid, conf_thresh)) = funding_spend_pending { debug_assert!(us.funding_spend_confirmed.is_none(), - "We have a pending funding spend awaiting anti-reorg confirmation, we can't have confirmed it already!"); + "We have a pending funding spend awaiting its event threshold, it cannot have reached it already!"); confirmed_txid = Some(txid); pending_commitment_tx_conf_thresh = Some(conf_thresh); } @@ -3329,7 +3330,7 @@ macro_rules! fail_unbroadcast_htlcs { commitment_tx_output_idx: None, }, }; - log_trace!($logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of {} commitment transaction {}, waiting for confirmation (at height {})", + log_trace!($logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of {} commitment transaction {}, event reaches threshold at height {}", &htlc.payment_hash, $commitment_tx, $commitment_tx_type, $commitment_txid_confirmed, entry.confirmation_threshold()); $self.onchain_events_awaiting_threshold_conf.push(entry); @@ -4466,7 +4467,7 @@ impl ChannelMonitorImpl { // event for the same source. self.failed_back_htlc_ids.insert(SentHTLCId::from_source(source)); if let Some(confirmed_txid) = self.funding_spend_confirmed { - // Funding spend already confirmed past ANTI_REORG_DELAY: resolve immediately. + // Funding spend already reached its event threshold: resolve immediately. 
log_trace!( logger, "Failing HTLC from late counterparty commitment update immediately \ @@ -4502,7 +4503,7 @@ impl ChannelMonitorImpl { log_trace!( logger, "Failing HTLC from late counterparty commitment update, \ - waiting for confirmation (at height {})", + event reaches threshold at height {}", entry.confirmation_threshold() ); self.onchain_events_awaiting_threshold_conf.push(entry); @@ -6357,7 +6358,7 @@ impl ChannelMonitorImpl { commitment_tx_output_idx: Some(input.previous_output.vout), }, }; - log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", &payment_hash, entry.confirmation_threshold()); + log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, event reaches threshold at height {}", &payment_hash, entry.confirmation_threshold()); self.onchain_events_awaiting_threshold_conf.push(entry); } } From ac00ab6d245751df1b1cb5857a25c7ce782a8daa Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 16:02:00 +0200 Subject: [PATCH 03/12] lightning: refactor onchain tx handler tests Move repeated OnchainTxHandler setup into shared test helpers so the claim-replay coverage can focus on the behavior under test. --- lightning/src/chain/onchaintx.rs | 111 +++++++++++++++++-------------- 1 file changed, 62 insertions(+), 49 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 823b81936ce..e559f093922 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -1298,12 +1298,9 @@ mod tests { use super::OnchainTxHandler; - // Test that all claims with locktime equal to or less than the current height are broadcast - // immediately while claims with locktime greater than the current height are only broadcast - // once the locktime is reached. 
- #[test] - #[rustfmt::skip] - fn test_broadcast_height() { + fn new_test_tx_handler( + channel_type_features: ChannelTypeFeatures, nondust_htlcs: Vec, + ) -> OnchainTxHandler { let secp_ctx = Secp256k1::new(); let signer = InMemorySigner::new( SecretKey::from_slice(&[41; 32]).unwrap(), @@ -1340,9 +1337,6 @@ mod tests { )), }; let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::MAX }; - - // Use non-anchor channels so that HTLC-Timeouts are broadcast immediately instead of sent - // to the user for external funding. let chan_params = ChannelTransactionParameters { holder_pubkeys: signer.pubkeys(&secp_ctx), holder_selected_contest_delay: 66, @@ -1353,66 +1347,45 @@ mod tests { }), funding_outpoint: Some(funding_outpoint), splice_parent_funding_txid: None, - channel_type_features: ChannelTypeFeatures::only_static_remote_key(), + channel_type_features, channel_value_satoshis: 0, }; - - // Create an OnchainTxHandler for a commitment containing HTLCs with CLTV expiries of 0, 1, - // and 2 blocks. 
- let mut nondust_htlcs = Vec::new(); - for i in 0..3 { - let preimage = PaymentPreimage([i; 32]); - let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array()); - nondust_htlcs.push( - HTLCOutputInCommitment { - offered: true, - amount_msat: 10000, - cltv_expiry: i as u32, - payment_hash: hash, - transaction_output_index: Some(i as u32), - } - ); - } - let holder_commit = HolderCommitmentTransaction::dummy(1000000, funding_outpoint, nondust_htlcs); - let destination_script = ScriptBuf::new(); + let holder_commit = + HolderCommitmentTransaction::dummy(1000000, funding_outpoint, nondust_htlcs); let counterparty_node_id = PublicKey::from_slice(&[2; 33]).unwrap(); - let mut tx_handler = OnchainTxHandler::new( + OnchainTxHandler::new( ChannelId::from_bytes([0; 32]), counterparty_node_id, 1000000, [0; 32], - destination_script.clone(), + ScriptBuf::new(), signer, chan_params, holder_commit, secp_ctx, - ); - - // Create a broadcaster with current block height 1. - let broadcaster = TestBroadcaster::new(Network::Testnet); - { - let mut blocks = broadcaster.blocks.lock().unwrap(); - let genesis_hash = blocks[0].0.block_hash(); - blocks.push((create_dummy_block(genesis_hash, 0, Vec::new()), 1)); - } - - let fee_estimator = TestFeeEstimator::new(253); - let fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator); - let logger = TestLogger::new(); + ) + } - // Request claiming of each HTLC on the holder's commitment, with current block height 1. 
+ fn build_offered_holder_htlc_requests( + tx_handler: &OnchainTxHandler, + ) -> Vec { let holder_commit = tx_handler.current_holder_commitment_tx(); let holder_commit_txid = holder_commit.trust().txid(); let mut requests = Vec::new(); - for (htlc, counterparty_sig) in holder_commit.nondust_htlcs().iter().zip(holder_commit.counterparty_htlc_sigs.iter()) { + for (htlc, counterparty_sig) in + holder_commit.nondust_htlcs().iter().zip(holder_commit.counterparty_htlc_sigs.iter()) + { requests.push(ClaimRequest::new( holder_commit_txid, htlc.transaction_output_index.unwrap(), - PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build(HTLCDescriptor { + PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build( + HTLCDescriptor { channel_derivation_parameters: ChannelDerivationParameters { value_satoshis: tx_handler.channel_value_satoshis, keys_id: tx_handler.channel_keys_id, - transaction_parameters: tx_handler.channel_transaction_parameters.clone(), + transaction_parameters: tx_handler + .channel_transaction_parameters + .clone(), }, commitment_txid: holder_commit_txid, per_commitment_number: holder_commit.commitment_number(), @@ -1422,11 +1395,51 @@ mod tests { preimage: None, counterparty_sig: *counterparty_sig, }, - 0 + 0, )), 0, )); } + requests + } + + // Test that all claims with locktime equal to or less than the current height are broadcast + // immediately while claims with locktime greater than the current height are only broadcast + // once the locktime is reached. + #[test] + fn test_broadcast_height() { + // Create an OnchainTxHandler for a commitment containing HTLCs with CLTV expiries of 0, 1, + // and 2 blocks. 
+ let mut nondust_htlcs = Vec::new(); + for i in 0..3 { + let preimage = PaymentPreimage([i; 32]); + let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array()); + nondust_htlcs.push(HTLCOutputInCommitment { + offered: true, + amount_msat: 10000, + cltv_expiry: i as u32, + payment_hash: hash, + transaction_output_index: Some(i as u32), + }); + } + let destination_script = ScriptBuf::new(); + let mut tx_handler = + new_test_tx_handler(ChannelTypeFeatures::only_static_remote_key(), nondust_htlcs); + + // Create a broadcaster with current block height 1. + let broadcaster = TestBroadcaster::new(Network::Testnet); + { + let mut blocks = broadcaster.blocks.lock().unwrap(); + let genesis_hash = blocks[0].0.block_hash(); + blocks.push((create_dummy_block(genesis_hash, 0, Vec::new()), 1)); + } + + let fee_estimator = TestFeeEstimator::new(253); + let fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator); + let logger = TestLogger::new(); + + // Request claiming of each HTLC on the holder's commitment, with current block height 1. + let requests = build_offered_holder_htlc_requests(&tx_handler); tx_handler.update_claims_view_from_requests( requests, 1, From 685e989a95b546b388e5de8643f26c6449c163f3 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 4 May 2026 11:10:15 +0200 Subject: [PATCH 04/12] lightning: cover delayed preimage claim balance Add a monitor test for an inbound HTLC claimed by preimage from a holder commitment. Confirm that the claimable balance remains unchanged after the HTLC-success spend reaches anti-reorg finality but before the CSV-delayed output is spendable. 
--- lightning/src/ln/monitor_tests.rs | 67 +++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index f52f093917b..4fd40df3a45 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -115,6 +115,73 @@ fn test_spendable_output<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, spendable_t } else { panic!(); } } +#[test] +fn preimage_claim_balance_unchanged_between_anti_reorg_and_csv() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let (_, _, chan_id, funding_tx) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); + // Route an inbound HTLC to node 0 so its preimage claim spends an HTLC output from node 0's + // holder commitment and creates a CSV-delayed output. 
+ let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 12_000_000); + nodes[1].node.send_payment_with_route(route, payment_hash, + RecipientOnionFields::secret_only(payment_secret, 12_000_000), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors(&nodes[1], 1); + let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); + expect_and_process_pending_htlcs(&nodes[0], false); + expect_payment_claimable!(nodes[0], payment_hash, payment_secret, 12_000_000); + + // Confirm node 0's holder commitment before claiming the HTLC so the preimage claim has a + // delayed output that remains tracked as an HTLC balance until it becomes spendable. + let message = "Channel force-closed".to_owned(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); + check_added_monitors(&nodes[0], 1); + check_closed_broadcast(&nodes[0], 1, true); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1000000); + let commitment_txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); + assert_eq!(commitment_txn.len(), 1); + check_spends!(commitment_txn[0], funding_tx); + mine_transaction(&nodes[0], &commitment_txn[0]); + nodes[0].tx_broadcaster.clear(); + + // Claiming the HTLC with the preimage broadcasts the HTLC-Success transaction. Once it + // confirms, the resulting delayed output should be reported as an HTLC balance awaiting + // confirmations. 
+ nodes[0].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[0], 1); + expect_payment_claimed!(nodes[0], payment_hash, 12_000_000); + let htlc_claim_txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); + assert_eq!(htlc_claim_txn.len(), 1); + check_spends!(htlc_claim_txn[0], commitment_txn[0]); + mine_transaction(&nodes[0], &htlc_claim_txn[0]); + + let htlc_claim_balances = sorted_vec(nodes[0].chain_monitor.chain_monitor + .get_monitor(chan_id).unwrap().get_claimable_balances()); + assert!(htlc_claim_balances.iter().any(|balance| matches!(balance, + Balance::ClaimableAwaitingConfirmations { + amount_satoshis: 12_000, + source: BalanceSource::Htlc, + .. + } + ))); + + // Advance only to anti-reorg finality for the HTLC-Success transaction. The CSV-delayed + // output is not spendable yet, so the claimable HTLC balance should remain unchanged. + connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); + assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + assert_eq!(htlc_claim_balances, sorted_vec(nodes[0].chain_monitor.chain_monitor + .get_monitor(chan_id).unwrap().get_claimable_balances())); +} + #[test] fn revoked_output_htlc_resolution_timing() { // Tests that HTLCs which were present in a broadcasted remote revoked commitment transaction From e83d6fe9bc3a9dc93fa4a15ad07816e6810b4750 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 4 May 2026 09:45:31 +0200 Subject: [PATCH 05/12] lightning: resolve HTLC spends at anti-reorg finality Treat HTLCSpendConfirmation entries as irrevocably resolved once the commitment HTLC output spend reaches anti-reorg finality. Do not wait for CSV maturity of any delayed output created by that spend. Delayed outputs remain tracked separately as MaturingOutput entries, keeping claimable balances alive until they are CSV-mature and can be surfaced as SpendableOutputs. 
--- lightning/src/chain/channelmonitor.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index e311ebcb059..877d601373b 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -495,8 +495,7 @@ impl OnchainEventEntry { // means we can hand it upstream when we see the previous block. conf_threshold = cmp::max(conf_threshold, self.height + descriptor.to_self_delay as u32 - 1); }, - OnchainEvent::FundingSpendConfirmation { on_local_output_csv: Some(csv), .. } | - OnchainEvent::HTLCSpendConfirmation { on_to_local_output_csv: Some(csv), .. } => { + OnchainEvent::FundingSpendConfirmation { on_local_output_csv: Some(csv), .. } => { // A CSV-delayed output is spendable in block (input height) + CSV delay, which // means we can act on the event when we see the previous block. conf_threshold = cmp::max(conf_threshold, self.height + csv as u32 - 1); @@ -566,8 +565,9 @@ enum OnchainEvent { /// If the claim was made by either party with a preimage, this is filled in preimage: Option, /// If the claim was made by us on an inbound HTLC against a local commitment transaction, - /// this records the CSV delay for the delayed output. While present, the event reaches - /// its threshold once the output is spendable. + /// this records the CSV delay for the delayed output. The CSV-mature output remains + /// tracked via the corresponding [`OnchainEvent::MaturingOutput`]; the HTLC spend itself + /// reaches anti-reorg finality. 
on_to_local_output_csv: Option<u16>, }, /// An alternative funding transaction (due to a splice/RBF) has confirmed but can no longer be @@ -1331,9 +1331,10 @@ pub(crate) struct ChannelMonitorImpl { funding_spend_confirmed: Option<Txid>, confirmed_commitment_tx_counterparty_output: CommitmentTxCounterpartyOutputInfo, - /// The set of HTLCs which have been either claimed or failed on chain and have reached - /// the requisite confirmations on the claim/fail transaction (either ANTI_REORG_DELAY or the - /// spending CSV for revocable outputs). + /// The set of HTLCs whose on-chain claim or fail outcome is irrevocably resolved because the + /// commitment transaction HTLC output spend has reached anti-reorg finality. Any resulting + /// output that is still waiting on CSV maturity is tracked separately as an + /// [`OnchainEvent::MaturingOutput`]. htlcs_resolved_on_chain: Vec<IrrevocablyResolvedHTLC>, /// When a payment is resolved through an on-chain transaction, we tell the `ChannelManager` @@ -6252,10 +6253,9 @@ impl ChannelMonitorImpl { commitment_tx_output_idx: input.previous_output.vout, preimage: if accepted_preimage_claim || offered_preimage_claim { Some(payment_preimage) } else { None }, - // If this is a payment to us (ie !outbound_htlc), wait for - // the CSV delay before dropping the HTLC from claimable - // balance if the claim was an HTLC-Success transaction (ie - // accepted_preimage_claim). + // If this is a payment to us (ie !outbound_htlc), keep a + // record of the CSV delay. The delayed output is tracked + // separately as a MaturingOutput until it is spendable. on_to_local_output_csv: if accepted_preimage_claim && !outbound_htlc { Some(self.on_holder_tx_csv) } else { None }, }, From b34121be960a31a01f45c21b77fbdf73e3dcb100 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 6 May 2026 17:05:12 +0200 Subject: [PATCH 06/12] f: assert delayed output for HTLC spends Check that any HTLCSpendConfirmation carrying a local-output CSV has a matching delayed MaturingOutput.
Scan spendable outputs before recording HTLC spend confirmations so the invariant is present when the assertion runs. --- lightning/src/chain/channelmonitor.rs | 32 ++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 877d601373b..07f88eca4f0 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -5680,9 +5680,9 @@ impl ChannelMonitorImpl { break; } } - self.is_resolving_htlc_output(&tx, height, &block_hash, logger); - self.check_tx_and_push_spendable_outputs(&tx, height, &block_hash, logger); + + self.is_resolving_htlc_output(&tx, height, &block_hash, logger); } } @@ -6161,6 +6161,7 @@ impl ChannelMonitorImpl { &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithContext, ) { let funding_spent = get_confirmed_funding_scope!(self); + let txid = tx.compute_txid(); 'outer_loop: for input in &tx.input { let mut payment_data = None; @@ -6247,8 +6248,17 @@ impl ChannelMonitorImpl { if payment_data.is_none() { log_claim!($tx_info, $holder_tx, htlc_output, false); let outbound_htlc = $holder_tx == htlc_output.offered; + let on_to_local_output_csv = if accepted_preimage_claim && !outbound_htlc { + Some(self.on_holder_tx_csv) } else { None }; + #[cfg(debug_assertions)] + if let Some(csv) = on_to_local_output_csv { + debug_assert!( + self.has_delayed_maturing_output_for_tx(txid, csv), + "CSV-delayed HTLC spend confirmation should have a matching MaturingOutput" + ); + } self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry { - txid: tx.compute_txid(), height, block_hash: Some(*block_hash), transaction: Some(tx.clone()), + txid, height, block_hash: Some(*block_hash), transaction: Some(tx.clone()), event: OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx: input.previous_output.vout, preimage: if accepted_preimage_claim || offered_preimage_claim { @@ -6256,8 +6266,7 
@@ impl ChannelMonitorImpl { // If this is a payment to us (ie !outbound_htlc), keep a // record of the CSV delay. The delayed output is tracked // separately as a MaturingOutput until it is spendable. - on_to_local_output_csv: if accepted_preimage_claim && !outbound_htlc { - Some(self.on_holder_tx_csv) } else { None }, + on_to_local_output_csv, }, }); continue 'outer_loop; @@ -6410,6 +6419,19 @@ impl ChannelMonitorImpl { spendable_outputs } + #[cfg(debug_assertions)] + fn has_delayed_maturing_output_for_tx(&self, txid: Txid, csv: u16) -> bool { + self.onchain_events_awaiting_threshold_conf.iter().any(|entry| { + entry.txid == txid + && match &entry.event { + OnchainEvent::MaturingOutput { + descriptor: SpendableOutputDescriptor::DelayedPaymentOutput(descriptor), + } => descriptor.to_self_delay == csv, + _ => false, + } + }) + } + /// Checks if the confirmed transaction is paying funds back to some address we can assume to /// own. #[rustfmt::skip] From c002e2a207ec46fc835d0af3f1547f0d6885c0bc Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 14:21:04 +0200 Subject: [PATCH 07/12] lightning: dedupe delayed claims by outpoint coverage A replayed holder HTLC claim may arrive as a single-outpoint request after earlier requests were merged into a delayed package. Check whether an existing delayed package already covers the new request instead of requiring exact outpoint-set equality. Add focused OnchainTxHandler coverage and a ChannelMonitor regression through claim_funds for both current anchor variants. 
--- lightning/src/chain/onchaintx.rs | 95 ++++++++++++++++++++++++- lightning/src/ln/monitor_tests.rs | 113 ++++++++++++++++++++++++++++++ 2 files changed, 205 insertions(+), 3 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index e559f093922..2cdad08de1e 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -807,9 +807,10 @@ impl OnchainTxHandler { outpoint.txid, outpoint.vout); false } else { - let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten() - .find(|locked_package| locked_package.outpoints().len() == 1 && locked_package.contains_outpoint(outpoint)); - if let Some(package) = timelocked_equivalent_package { + let timelocked_covering_package = self.locktimed_packages.values() + .flat_map(|packages| packages.iter()) + .find(|locked_package| locked_package.contains_outpoint(outpoint)); + if let Some(package) = timelocked_covering_package { log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.", outpoint.txid, outpoint.vout, package.package_locktime(cur_height)); false @@ -1480,4 +1481,92 @@ mod tests { assert_eq!(txs_broadcasted.len(), 1); assert_eq!(txs_broadcasted[0].lock_time.to_consensus_u32(), 2); } + + #[test] + fn test_duplicate_pending_claim_request_after_force_close_replay() { + let claim_height = 21; + let locktime = 42; + let mut nondust_htlcs = Vec::new(); + for i in 0..2 { + let preimage = PaymentPreimage([i + 1; 32]); + let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array()); + nondust_htlcs.push(HTLCOutputInCommitment { + offered: true, + amount_msat: 10000, + cltv_expiry: locktime, + payment_hash: hash, + transaction_output_index: Some(i as u32), + }); + } + + let mut tx_handler = new_test_tx_handler( + ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), + nondust_htlcs, + ); + let requests = 
build_offered_holder_htlc_requests(&tx_handler); + let destination_script = ScriptBuf::new(); + let broadcaster = TestBroadcaster::new(Network::Testnet); + let fee_estimator = TestFeeEstimator::new(253); + let fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator); + let logger = TestLogger::new(); + + // Simulate the force-close path registering the two holder HTLC claims as + // a single delayed package. + tx_handler.update_claims_view_from_requests( + requests.clone(), + claim_height, + claim_height, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + assert_eq!( + tx_handler.locktimed_packages.get(&locktime).map(|packages| packages.len()), + Some(1), + ); + + // Replaying the same per-HTLC claim requests must match by outpoint + // coverage, otherwise each single-outpoint request would be added again. + tx_handler.update_claims_view_from_requests( + requests, + claim_height, + claim_height, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + assert_eq!( + tx_handler.locktimed_packages.get(&locktime).map(|packages| packages.len()), + Some(1), + ); + + // At locktime, the delayed package should still yield one bump event + // covering both HTLCs. + tx_handler.update_claims_view_from_requests( + Vec::new(), + locktime, + locktime, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + + let pending_events = tx_handler.get_and_clear_pending_claim_events(); + assert_eq!(pending_events.len(), 1); + assert_eq!(tx_handler.pending_claim_requests.len(), 1); + assert_eq!(tx_handler.claimable_outpoints.len(), 2); + match &pending_events[0].1 { + super::ClaimEvent::BumpHTLC { htlcs, tx_lock_time, .. 
} => { + assert_eq!(htlcs.len(), 2); + assert_eq!(tx_lock_time.to_consensus_u32(), locktime); + }, + _ => panic!("expected a single HTLC bump event"), + } + } } diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 4fd40df3a45..436bb01c907 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -2455,6 +2455,119 @@ fn test_restored_packages_retry() { do_test_restored_packages_retry(true); } +fn do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(p2a_anchor: bool) { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut anchors_config = test_default_channel_config(); + anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + anchors_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let coinbase_tx = provide_anchor_reserves(&nodes); + let (_, _, chan_id, funding_tx) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 50_000_000); + + // Seed two unresolved outbound HTLCs that will be aggregated into one + // delayed holder-commitment package after force close. + route_payment(&nodes[0], &[&nodes[1]], 10_000_000); + route_payment(&nodes[0], &[&nodes[1]], 11_000_000); + + // Add a third incoming HTLC which will later be claimed by preimage after + // the commitment transaction confirms, reproducing the replay path. 
+ let (claim_route, claim_hash, claim_preimage, claim_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 12_000_000); + nodes[1] + .node + .send_payment_with_route( + claim_route, + claim_hash, + RecipientOnionFields::secret_only(claim_secret, 12_000_000), + PaymentId(claim_hash.0), + ) + .unwrap(); + check_added_monitors(&nodes[1], 1); + let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + nodes[0] + .node + .handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); + expect_and_process_pending_htlcs(&nodes[0], false); + expect_payment_claimable!(nodes[0], claim_hash, claim_secret, 12_000_000); + + // Force-close node 0 so its holder commitment hits chain and its HTLC + // claims are fed into OnchainTxHandler as delayed requests. + let message = "Channel force-closed".to_owned(); + nodes[0] + .node + .force_close_broadcasting_latest_txn( + &chan_id, + &nodes[1].node.get_our_node_id(), + message.clone(), + ) + .unwrap(); + check_added_monitors(&nodes[0], 1); + check_closed_broadcast(&nodes[0], 1, true); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1_000_000); + handle_bump_close_event(&nodes[0]); + + let (commitment_tx, anchor_tx) = { + let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); + assert_eq!(txn.len(), if p2a_anchor { 2 } else { 1 }); + let anchor_tx = p2a_anchor.then(|| txn.pop().unwrap()); + let commitment_tx = txn.pop().unwrap(); + check_spends!(commitment_tx, funding_tx); + if p2a_anchor { + check_spends!(anchor_tx.as_ref().unwrap(), commitment_tx, coinbase_tx); + } + (commitment_tx, anchor_tx) + }; + + let _ = mine_transaction(&nodes[0], &commitment_tx); + if p2a_anchor { + let _ = mine_transaction(&nodes[0], anchor_tx.as_ref().unwrap()); + } + + // 
Claim the incoming HTLC after the commitment is confirmed. This + // regenerates a single-outpoint claim request alongside the existing + // delayed package covering the two earlier HTLCs. + nodes[0].node.claim_funds(claim_preimage); + check_added_monitors(&nodes[0], 1); + expect_payment_claimed!(nodes[0], claim_hash, 12_000_000); + + // Once all holder HTLCs reach their timelock, we should see the original two-HTLC + // delayed package plus the replayed single-HTLC claim, not duplicates of + // the delayed package's outpoints. + connect_blocks(&nodes[0], TEST_FINAL_CLTV + 1); + + let mut htlc_event_sizes = nodes[0] + .chain_monitor + .chain_monitor + .get_and_clear_pending_events() + .into_iter() + .filter_map(|event| { + if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { + htlc_descriptors, .. + }) = event + { + Some(htlc_descriptors.len()) + } else { + None + } + }) + .collect::>(); + htlc_event_sizes.sort_unstable(); + assert_eq!(htlc_event_sizes, vec![1, 2]); +} + +#[test] +fn test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay() { + do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(false); + do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(true); +} + fn do_test_monitor_rebroadcast_pending_claims(keyed_anchors: bool, p2a_anchor: bool) { // Test that we will retry broadcasting pending claims for a force-closed channel on every // `ChainMonitor::rebroadcast_pending_claims` call. From c7d923dd2440da5cf3525bd720f0a97032f5497e Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 14:23:24 +0200 Subject: [PATCH 08/12] lightning: ignore claims for pending spent outpoints When a transaction spends one outpoint from a delayed package, the split outpoint is tracked as a ContentiousOutpoint until the spend reaches anti-reorg finality. Reject replayed claim requests for those pending-spent outpoints so they are not added back before the spend reaches anti-reorg finality or reorgs out. 
Add an OnchainTxHandler regression that replays a holder claim during that pending-spent window and verifies reorg resurrection still works. --- lightning/src/chain/onchaintx.rs | 140 ++++++++++++++++++++++++++++++- 1 file changed, 137 insertions(+), 3 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 2cdad08de1e..1a5ec61b0c7 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -576,6 +576,16 @@ impl OnchainTxHandler { self.pending_claim_requests.len() != 0 } + fn is_outpoint_spend_waiting_threshold_conf(&self, outpoint: &BitcoinOutPoint) -> bool { + self.onchain_events_awaiting_threshold_conf.iter().any(|entry| { + if let OnchainEvent::ContentiousOutpoint { ref package } = entry.event { + package.contains_outpoint(outpoint) + } else { + false + } + }) + } + /// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty /// onchain) lays on the assumption of claim transactions getting confirmed before timelock /// expiration (CSV or CLTV following cases). In case of high-fee spikes, claim tx may get stuck @@ -802,7 +812,15 @@ impl OnchainTxHandler { // First drop any duplicate claims. requests.retain(|req| { let outpoint = req.outpoint(); - if self.claimable_outpoints.get(outpoint).is_some() { + if self.is_outpoint_spend_waiting_threshold_conf(outpoint) { + // This is a package-layer guard. ChannelMonitor filters regenerated + // HTLC claims using HTLC resolution state, while this keeps outpoints + // split from an existing package from being re-added during the reorg + // window. 
+ log_info!(logger, "Ignoring claim for outpoint {}:{}, it is already spent by a transaction awaiting anti-reorg finality", + outpoint.txid, outpoint.vout); + false + } else if self.claimable_outpoints.get(outpoint).is_some() { log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", outpoint.txid, outpoint.vout); false @@ -1276,11 +1294,14 @@ impl OnchainTxHandler { #[cfg(test)] mod tests { - use bitcoin::hash_types::Txid; + use bitcoin::hash_types::{BlockHash, Txid}; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; + use bitcoin::locktime::absolute::LockTime; + use bitcoin::transaction::{OutPoint as BitcoinOutPoint, Version}; use bitcoin::Network; - use bitcoin::{key::Secp256k1, secp256k1::PublicKey, secp256k1::SecretKey, ScriptBuf}; + use bitcoin::{key::Secp256k1, secp256k1::PublicKey, secp256k1::SecretKey}; + use bitcoin::{Amount, ScriptBuf, Transaction, TxIn, TxOut}; use types::features::ChannelTypeFeatures; use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator}; @@ -1404,6 +1425,18 @@ mod tests { requests } + fn locked_outpoints( + tx_handler: &OnchainTxHandler, locktime: u32, + ) -> Vec { + tx_handler + .locktimed_packages + .get(&locktime) + .into_iter() + .flat_map(|packages| packages.iter()) + .flat_map(|package| package.outpoints().into_iter().map(|outpoint| *outpoint)) + .collect() + } + // Test that all claims with locktime equal to or less than the current height are broadcast // immediately while claims with locktime greater than the current height are only broadcast // once the locktime is reached. 
@@ -1569,4 +1602,105 @@ mod tests { _ => panic!("expected a single HTLC bump event"), } } + + #[test] + fn test_replayed_claim_ignored_for_pending_spent_outpoint() { + let claim_height = 21; + let spend_height = 22; + let locktime = 42; + let mut nondust_htlcs = Vec::new(); + for i in 0..2 { + let preimage = PaymentPreimage([i + 1; 32]); + let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array()); + nondust_htlcs.push(HTLCOutputInCommitment { + offered: true, + amount_msat: 10000, + cltv_expiry: locktime, + payment_hash: hash, + transaction_output_index: Some(i as u32), + }); + } + + let mut tx_handler = new_test_tx_handler( + ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), + nondust_htlcs, + ); + let requests = build_offered_holder_htlc_requests(&tx_handler); + let spent_outpoint = *requests[0].outpoint(); + let still_delayed_outpoint = *requests[1].outpoint(); + let destination_script = ScriptBuf::new(); + let broadcaster = TestBroadcaster::new(Network::Testnet); + let fee_estimator = TestFeeEstimator::new(253); + let fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator); + let logger = TestLogger::new(); + + // Register both holder HTLC claims as one delayed package before any + // individual outpoint spends are observed. + tx_handler.update_claims_view_from_requests( + requests.clone(), + claim_height, + claim_height, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + assert_eq!(locked_outpoints(&tx_handler, locktime).len(), 2); + + // Spend one outpoint before the package reaches its timelock. The handler + // should split it into a ContentiousOutpoint until the spend reaches + // anti-reorg finality. 
+ let spend_tx = Transaction { + version: Version::TWO, + lock_time: LockTime::ZERO, + input: vec![TxIn { previous_output: spent_outpoint, ..Default::default() }], + output: vec![TxOut { value: Amount::from_sat(1000), script_pubkey: ScriptBuf::new() }], + }; + tx_handler.update_claims_view_from_matched_txn( + &[&spend_tx], + spend_height, + BlockHash::all_zeros(), + spend_height, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + let locked = locked_outpoints(&tx_handler, locktime); + assert_eq!(locked, vec![still_delayed_outpoint]); + + // Replaying both original claim requests during that window must not + // re-add the already-spent outpoint to the delayed package. + tx_handler.update_claims_view_from_requests( + requests, + spend_height, + spend_height, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + let locked = locked_outpoints(&tx_handler, locktime); + assert_eq!(locked, vec![still_delayed_outpoint]); + assert!(tx_handler.pending_claim_requests.is_empty()); + assert!(tx_handler.claimable_outpoints.is_empty()); + + // If the spend reorgs out, the contentious outpoint is resurrected into + // the delayed package. + tx_handler.blocks_disconnected( + spend_height - 1, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + let locked = locked_outpoints(&tx_handler, locktime); + assert_eq!(locked.len(), 2); + assert!(locked.contains(&spent_outpoint)); + assert!(locked.contains(&still_delayed_outpoint)); + } } From 41af7d9ea64c1f0791d8cc4a01d5efd5badf82f6 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 6 May 2026 17:57:23 +0200 Subject: [PATCH 09/12] f: fold timelocked outpoint claim check Classify duplicate outpoint state in one helper. Preserve existing filter ordering and timelock logging. 
--- lightning/src/chain/onchaintx.rs | 73 +++++++++++++++++++++----------- 1 file changed, 48 insertions(+), 25 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 1a5ec61b0c7..e4d23d77e88 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -91,6 +91,12 @@ enum OnchainEvent { ContentiousOutpoint { package: PackageTemplate }, } +enum OutpointClaimState { + WaitingThresholdConf, + ClaimingRequestRegistered, + WaitingTimelock(u32), +} + impl Writeable for OnchainEventEntry { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> { write_tlv_fields!(writer, { @@ -576,14 +582,30 @@ impl OnchainTxHandler { self.pending_claim_requests.len() != 0 } - fn is_outpoint_spend_waiting_threshold_conf(&self, outpoint: &BitcoinOutPoint) -> bool { - self.onchain_events_awaiting_threshold_conf.iter().any(|entry| { + fn outpoint_claim_state( + &self, outpoint: &BitcoinOutPoint, cur_height: u32, + ) -> Option<OutpointClaimState> { + if self.onchain_events_awaiting_threshold_conf.iter().any(|entry| { if let OnchainEvent::ContentiousOutpoint { ref package } = entry.event { package.contains_outpoint(outpoint) } else { false } - }) + }) { + return Some(OutpointClaimState::WaitingThresholdConf); + } + + if self.claimable_outpoints.get(outpoint).is_some() { + return Some(OutpointClaimState::ClaimingRequestRegistered); + } + + self.locktimed_packages + .values() + .flat_map(|packages| packages.iter()) + .find(|locked_package| locked_package.contains_outpoint(outpoint)) + .map(|package| { + OutpointClaimState::WaitingTimelock(package.package_locktime(cur_height)) + }) } /// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty @@ -812,29 +834,30 @@ impl OnchainTxHandler { // First drop any duplicate claims. requests.retain(|req| { let outpoint = req.outpoint(); - if self.is_outpoint_spend_waiting_threshold_conf(outpoint) { - // This is a package-layer guard.
ChannelMonitor filters regenerated - // HTLC claims using HTLC resolution state, while this keeps outpoints - // split from an existing package from being re-added during the reorg - // window. - log_info!(logger, "Ignoring claim for outpoint {}:{}, it is already spent by a transaction awaiting anti-reorg finality", - outpoint.txid, outpoint.vout); - false - } else if self.claimable_outpoints.get(outpoint).is_some() { - log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", - outpoint.txid, outpoint.vout); - false - } else { - let timelocked_covering_package = self.locktimed_packages.values() - .flat_map(|packages| packages.iter()) - .find(|locked_package| locked_package.contains_outpoint(outpoint)); - if let Some(package) = timelocked_covering_package { - log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.", - outpoint.txid, outpoint.vout, package.package_locktime(cur_height)); - false - } else { - true + if let Some(claim_state) = self.outpoint_claim_state(outpoint, cur_height) { + match claim_state { + OutpointClaimState::WaitingThresholdConf => { + // This is a package-layer guard. ChannelMonitor filters regenerated + // HTLC claims using HTLC resolution state, while this keeps outpoints + // split from an existing package from being re-added during the reorg + // window. 
+ log_info!(logger, "Ignoring claim for outpoint {}:{}, it is already spent by a transaction awaiting anti-reorg finality", + outpoint.txid, outpoint.vout); + false + }, + OutpointClaimState::ClaimingRequestRegistered => { + log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", + outpoint.txid, outpoint.vout); + false + }, + OutpointClaimState::WaitingTimelock(locktime) => { + log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.", + outpoint.txid, outpoint.vout, locktime); + false + }, } + } else { + true } }); let mut requests = requests.into_iter() From 8aed7bbf58442883cbb0cd0472a19c751f42ffd7 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 14:29:33 +0200 Subject: [PATCH 10/12] lightning: skip resolved HTLC claim replays Filter regenerated HTLC claim requests once ChannelMonitor has persisted anti-reorg finality for the commitment HTLC output spend. This keeps replayed preimage updates from recreating claims after OnchainTxHandler has cleaned up its active retry state, relying on the monitor's persisted HTLC resolution state. 
--- lightning/src/chain/channelmonitor.rs | 25 +++++++- lightning/src/ln/monitor_tests.rs | 84 ++++++++++++++++++++++++++- 2 files changed, 106 insertions(+), 3 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 07f88eca4f0..8bd78815c85 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -4938,7 +4938,10 @@ impl ChannelMonitorImpl { .iter() .filter_map(|(htlc, _)| { if let Some(transaction_output_index) = htlc.transaction_output_index { - if htlc.offered && htlc.payment_hash == matching_payment_hash { + if htlc.offered + && htlc.payment_hash == matching_payment_hash + && !self.is_htlc_output_resolved_on_chain(htlc) + { let htlc_data = PackageSolvingData::CounterpartyOfferedHTLCOutput( CounterpartyOfferedHTLCOutput::build( per_commitment_point, @@ -4964,6 +4967,20 @@ impl ChannelMonitorImpl { .collect() } + fn is_htlc_output_resolved_on_chain(&self, htlc: &HTLCOutputInCommitment) -> bool { + if let Some(transaction_output_index) = htlc.transaction_output_index { + // Only suppress claims once the commitment HTLC output spend has + // reached anti-reorg finality. Any output created by that spend may + // still be CSV-delayed, but the original HTLC outpoint should not be + // re-claimed. + self.htlcs_resolved_on_chain.iter().any(|resolved_htlc| { + resolved_htlc.commitment_tx_output_idx == Some(transaction_output_index) + }) + } else { + false + } + } + /// Returns the HTLC claim requests and the counterparty output info. fn get_counterparty_output_claim_info( &self, funding_spent: &FundingScope, commitment_number: u64, commitment_txid: Txid, @@ -5011,6 +5028,9 @@ impl ChannelMonitorImpl { // per_commitment_data is corrupt or our commitment signing key leaked! 
return (claimable_outpoints, to_counterparty_output_info); } + if self.is_htlc_output_resolved_on_chain(htlc) { + continue; + } let preimage = if htlc.offered { if let Some((p, _)) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) @@ -5112,6 +5132,9 @@ impl ChannelMonitorImpl { let mut htlcs = Vec::with_capacity(holder_tx.nondust_htlcs().len()); debug_assert_eq!(holder_tx.nondust_htlcs().len(), holder_tx.counterparty_htlc_sigs.len()); for (htlc, counterparty_sig) in holder_tx.nondust_htlcs().iter().zip(holder_tx.counterparty_htlc_sigs.iter()) { + if self.is_htlc_output_resolved_on_chain(htlc) { + continue; + } assert!(htlc.transaction_output_index.is_some(), "Expected transaction output index for non-dust HTLC"); let preimage = if htlc.offered { diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 436bb01c907..111d1fbfd81 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -2458,12 +2458,15 @@ fn test_restored_packages_retry() { fn do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(p2a_anchor: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_deserialized; let mut anchors_config = test_default_channel_config(); anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; anchors_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config)]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let coinbase_tx = provide_anchor_reserves(&nodes); let (_, _, chan_id, funding_tx) = @@ -2542,11 +2545,14 @@ fn do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(p2a_anc // the delayed package's outpoints. 
connect_blocks(&nodes[0], TEST_FINAL_CLTV + 1); - let mut htlc_event_sizes = nodes[0] + let events = nodes[0] .chain_monitor .chain_monitor .get_and_clear_pending_events() .into_iter() + .collect::>(); + let mut htlc_event_sizes = events + .iter() .filter_map(|event| { if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { htlc_descriptors, .. @@ -2560,6 +2566,80 @@ fn do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(p2a_anc .collect::>(); htlc_event_sizes.sort_unstable(); assert_eq!(htlc_event_sizes, vec![1, 2]); + + // Drive only the replayed single-HTLC event on-chain so we can replay the + // preimage once the spend is anti-reorg final, then again after reload. + for event in events { + if let Event::BumpTransaction(event) = event { + let is_single_htlc = if let BumpTransactionEvent::HTLCResolution { + ref htlc_descriptors, + .. + } = event + { + htlc_descriptors.len() == 1 + } else { + false + }; + if is_single_htlc { + nodes[0].bump_tx_handler.handle_event(&event); + break; + } + } + } + let mut htlc_txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); + assert_eq!(htlc_txn.len(), 1); + let htlc_tx = htlc_txn.pop().unwrap(); + mine_transaction(&nodes[0], &htlc_tx); + connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); + assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + + // The spend has passed anti-reorg finality, but its CSV-delayed output is + // not yet spendable. Replaying the preimage in this window must not create + // a new conflicting claim for the already-spent commitment HTLC output. 
+ get_monitor!(nodes[0], chan_id).provide_payment_preimage_unsafe_legacy( + &claim_hash, + &claim_preimage, + &node_cfgs[0].tx_broadcaster, + &LowerBoundedFeeEstimator::new(node_cfgs[0].fee_estimator), + &nodes[0].logger, + ); + assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + let balances = nodes[0] + .chain_monitor + .chain_monitor + .get_monitor(chan_id) + .unwrap() + .get_claimable_balances(); + assert!(balances.iter().any(|balance| matches!( + balance, + Balance::ClaimableAwaitingConfirmations { + amount_satoshis: 12_000, + source: BalanceSource::Htlc, + .. + } + ))); + + connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - ANTI_REORG_DELAY); + let _ = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); + + // Reload before replaying the preimage so the regression covers persisted + // resolution state, not only in-memory filtering. + let serialized_channel_manager = nodes[0].node.encode(); + let serialized_monitor = get_monitor!(nodes[0], chan_id).encode(); + reload_node!( + nodes[0], &serialized_channel_manager, &[&serialized_monitor], persister, + new_chain_monitor, node_deserialized + ); + + // Replaying the preimage update must not regenerate a claim for the HTLC + // whose commitment output has anti-reorg persisted resolution state. 
+ get_monitor!(nodes[0], chan_id).provide_payment_preimage_unsafe_legacy( + &claim_hash, &claim_preimage, &node_cfgs[0].tx_broadcaster, + &LowerBoundedFeeEstimator::new(node_cfgs[0].fee_estimator), &nodes[0].logger, + ); + assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + expect_payment_claimed!(nodes[0], claim_hash, 12_000_000); + check_added_monitors(&nodes[0], 1); } #[test] From 50b52e84557aa12758ea7948d496a6405b8fb5b1 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 6 May 2026 17:20:10 +0200 Subject: [PATCH 11/12] f: log resolved HTLC preimage losses Log when a replayed preimage claim is skipped because the HTLC output reached anti-reorg finality without that preimage. --- lightning/src/chain/channelmonitor.rs | 78 ++++++++++++++++++++++----- 1 file changed, 64 insertions(+), 14 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 8bd78815c85..8597d236111 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -3795,7 +3795,7 @@ impl ChannelMonitorImpl { // First check if a counterparty commitment transaction has been broadcasted: macro_rules! 
claim_htlcs { ($commitment_number: expr, $txid: expr, $htlcs: expr) => { - let htlc_claim_reqs = self.get_counterparty_output_claims_for_preimage(*payment_preimage, funding_spent, $commitment_number, $txid, $htlcs, confirmed_spend_height); + let htlc_claim_reqs = self.get_counterparty_output_claims_for_preimage(*payment_preimage, funding_spent, $commitment_number, $txid, $htlcs, confirmed_spend_height, logger); let conf_target = self.closure_conf_target(); self.onchain_tx_handler.update_claims_view_from_requests( htlc_claim_reqs, self.best_block.height, self.best_block.height, broadcaster, @@ -3844,6 +3844,9 @@ impl ChannelMonitorImpl { None }; if let Some(holder_commitment_tx) = holder_commitment_tx { + self.log_holder_preimage_claim_after_htlc_resolved_on_chain( + logger, holder_commitment_tx, *payment_preimage, + ); // Assume that the broadcasted commitment transaction confirmed in the current best // block. Even if not, its a reasonable metric for the bump criteria on the HTLC // transactions. 
@@ -4918,11 +4921,11 @@ impl ChannelMonitorImpl { } } - fn get_counterparty_output_claims_for_preimage( + fn get_counterparty_output_claims_for_preimage( &self, preimage: PaymentPreimage, funding_spent: &FundingScope, commitment_number: u64, commitment_txid: Txid, per_commitment_option: Option<&Vec<(HTLCOutputInCommitment, Option>)>>, - confirmation_height: Option, + confirmation_height: Option, logger: &L, ) -> Vec { let per_commitment_claimable_data = match per_commitment_option { Some(outputs) => outputs, @@ -4938,10 +4941,17 @@ impl ChannelMonitorImpl { .iter() .filter_map(|(htlc, _)| { if let Some(transaction_output_index) = htlc.transaction_output_index { - if htlc.offered - && htlc.payment_hash == matching_payment_hash - && !self.is_htlc_output_resolved_on_chain(htlc) - { + if htlc.offered && htlc.payment_hash == matching_payment_hash { + if let Some(resolved_htlc) = self.htlc_output_resolution_on_chain(htlc) { + self.log_preimage_claim_after_htlc_resolved_on_chain( + logger, + commitment_txid, + htlc, + preimage, + resolved_htlc, + ); + return None; + } let htlc_data = PackageSolvingData::CounterpartyOfferedHTLCOutput( CounterpartyOfferedHTLCOutput::build( per_commitment_point, @@ -4967,17 +4977,57 @@ impl ChannelMonitorImpl { .collect() } - fn is_htlc_output_resolved_on_chain(&self, htlc: &HTLCOutputInCommitment) -> bool { - if let Some(transaction_output_index) = htlc.transaction_output_index { + fn htlc_output_resolution_on_chain( + &self, htlc: &HTLCOutputInCommitment, + ) -> Option<&IrrevocablyResolvedHTLC> { + htlc.transaction_output_index.and_then(|transaction_output_index| { // Only suppress claims once the commitment HTLC output spend has // reached anti-reorg finality. Any output created by that spend may // still be CSV-delayed, but the original HTLC outpoint should not be // re-claimed. 
- self.htlcs_resolved_on_chain.iter().any(|resolved_htlc| { + self.htlcs_resolved_on_chain.iter().find(|resolved_htlc| { resolved_htlc.commitment_tx_output_idx == Some(transaction_output_index) }) - } else { - false + }) + } + + fn log_preimage_claim_after_htlc_resolved_on_chain( + &self, logger: &L, commitment_txid: Txid, htlc: &HTLCOutputInCommitment, + preimage: PaymentPreimage, resolved_htlc: &IrrevocablyResolvedHTLC, + ) { + if resolved_htlc.payment_preimage == Some(preimage) { + return; + } + if let Some(transaction_output_index) = htlc.transaction_output_index { + let logger = WithContext::from(logger, None, None, Some(htlc.payment_hash)); + if let Some(resolving_txid) = resolved_htlc.resolving_txid.as_ref() { + log_error!(logger, "WE HAVE LIKELY LOST FUNDS: HTLC output {}:{} was irrevocably resolved on-chain by transaction {} without the payment preimage we now know; not replaying the claim", + commitment_txid, transaction_output_index, resolving_txid); + } else { + log_error!(logger, "WE HAVE LIKELY LOST FUNDS: HTLC output {}:{} was irrevocably resolved on-chain by an unknown transaction without the payment preimage we now know; not replaying the claim", + commitment_txid, transaction_output_index); + } + } + } + + fn log_holder_preimage_claim_after_htlc_resolved_on_chain( + &self, logger: &L, holder_tx: &HolderCommitmentTransaction, preimage: PaymentPreimage, + ) { + let matching_payment_hash = PaymentHash::from(preimage); + let tx = holder_tx.trust(); + for htlc in holder_tx.nondust_htlcs() { + if htlc.offered || htlc.payment_hash != matching_payment_hash { + continue; + } + if let Some(resolved_htlc) = self.htlc_output_resolution_on_chain(htlc) { + self.log_preimage_claim_after_htlc_resolved_on_chain( + logger, + tx.txid(), + htlc, + preimage, + resolved_htlc, + ); + } } } @@ -5028,7 +5078,7 @@ impl ChannelMonitorImpl { // per_commitment_data is corrupt or our commitment signing key leaked! 
return (claimable_outpoints, to_counterparty_output_info); } - if self.is_htlc_output_resolved_on_chain(htlc) { + if self.htlc_output_resolution_on_chain(htlc).is_some() { continue; } let preimage = if htlc.offered { @@ -5132,7 +5182,7 @@ impl ChannelMonitorImpl { let mut htlcs = Vec::with_capacity(holder_tx.nondust_htlcs().len()); debug_assert_eq!(holder_tx.nondust_htlcs().len(), holder_tx.counterparty_htlc_sigs.len()); for (htlc, counterparty_sig) in holder_tx.nondust_htlcs().iter().zip(holder_tx.counterparty_htlc_sigs.iter()) { - if self.is_htlc_output_resolved_on_chain(htlc) { + if self.htlc_output_resolution_on_chain(htlc).is_some() { continue; } assert!(htlc.transaction_output_index.is_some(), "Expected transaction output index for non-dust HTLC"); From f73f75c29e36039f9d32c70be3108214963e7cc8 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 14:33:27 +0200 Subject: [PATCH 12/12] lightning: canonicalize htlc claim ids Hash HTLC claim outpoints in canonical order so the same logical HTLC set produces the same ClaimId regardless of descriptor order. Add a unit test covering reversed descriptor order. 
--- lightning/src/chain/mod.rs | 64 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 3 deletions(-) diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index d72d58b3149..5ff96e46953 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -563,10 +563,18 @@ pub struct ClaimId(pub [u8; 32]); impl ClaimId { pub(crate) fn from_htlcs(htlcs: &[HTLCDescriptor]) -> ClaimId { + let mut htlc_outpoints = htlcs + .iter() + .map(|htlc| { + (htlc.commitment_txid.to_byte_array(), htlc.htlc.transaction_output_index.unwrap()) + }) + .collect::>(); + htlc_outpoints.sort_unstable(); + let mut engine = Sha256::engine(); - for htlc in htlcs { - engine.input(&htlc.commitment_txid.to_byte_array()); - engine.input(&htlc.htlc.transaction_output_index.unwrap().to_be_bytes()); + for (commitment_txid, transaction_output_index) in htlc_outpoints { + engine.input(&commitment_txid); + engine.input(&transaction_output_index.to_be_bytes()); } ClaimId(Sha256::from_engine(engine).to_byte_array()) } @@ -581,8 +589,45 @@ impl ClaimId { #[cfg(test)] mod tests { use super::*; + use crate::ln::chan_utils::{ + ChannelTransactionParameters, HTLCOutputInCommitment, HolderCommitmentTransaction, + }; + use crate::sign::ChannelDerivationParameters; + use crate::types::payment::{PaymentHash, PaymentPreimage}; use bitcoin::hashes::Hash; + fn dummy_htlc_descriptor( + commitment_txid: Txid, transaction_output_index: u32, + ) -> HTLCDescriptor { + let channel_parameters = ChannelTransactionParameters::test_dummy(100_000); + let htlc = HTLCOutputInCommitment { + offered: true, + amount_msat: 1000, + cltv_expiry: 100, + payment_hash: PaymentHash::from(PaymentPreimage([1; 32])), + transaction_output_index: Some(transaction_output_index), + }; + let funding_outpoint = channel_parameters.funding_outpoint.unwrap(); + let commitment_tx = + HolderCommitmentTransaction::dummy(100_000, funding_outpoint, vec![htlc.clone()]); + let trusted_tx = 
commitment_tx.trust(); + + HTLCDescriptor { + channel_derivation_parameters: ChannelDerivationParameters { + value_satoshis: channel_parameters.channel_value_satoshis, + keys_id: [1; 32], + transaction_parameters: channel_parameters, + }, + commitment_txid, + per_commitment_number: trusted_tx.commitment_number(), + per_commitment_point: trusted_tx.per_commitment_point(), + feerate_per_kw: trusted_tx.negotiated_feerate_per_kw(), + htlc, + preimage: None, + counterparty_sig: commitment_tx.counterparty_htlc_sigs[0], + } + } + #[test] fn test_best_block() { let hash1 = BlockHash::from_slice(&[1; 32]).unwrap(); @@ -618,4 +663,17 @@ mod tests { let chain_c = BlockLocator::new(hash_other, 200); assert_eq!(chain_a.find_common_ancestor(&chain_c), None); } + + #[test] + fn test_htlc_claim_id_is_descriptor_order_independent() { + // Use opposite txid and vout ordering so the assertion would fail if + // ClaimId still hashed descriptors in caller-provided order. + let first = dummy_htlc_descriptor(Txid::from_slice(&[1; 32]).unwrap(), 2); + let second = dummy_htlc_descriptor(Txid::from_slice(&[2; 32]).unwrap(), 1); + + assert_eq!( + ClaimId::from_htlcs(&[first.clone(), second.clone()]), + ClaimId::from_htlcs(&[second, first]) + ); + } }