From ae6edc577e32d88919cd8203b0a870369fc29cb8 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 5 Mar 2026 17:00:54 -0500 Subject: [PATCH 01/12] Helper for closed channel post-event monitor updates Useful in the next commit because we'll need to repurpose the same handling code for a new monitor update that is similar to ReleasePaymentComplete, but for inbound payments to avoid regenerating redundant PaymentClaimed events. --- lightning/src/ln/channelmanager.rs | 102 ++++++++++++++++------------- 1 file changed, 57 insertions(+), 45 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ada27af749f..93409ec903a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -14809,56 +14809,68 @@ impl< htlc_id, }, ) => { - let per_peer_state = self.per_peer_state.read().unwrap(); - let mut peer_state_lock = per_peer_state - .get(&counterparty_node_id) - .map(|state| state.lock().unwrap()) - .expect("Channels originating a payment resolution must have peer state"); - let peer_state = &mut *peer_state_lock; - let update_id = peer_state - .closed_channel_monitor_update_ids - .get_mut(&channel_id) - .expect("Channels originating a payment resolution must have a monitor"); - // Note that for channels closed pre-0.1, the latest update_id is `u64::MAX`. 
- *update_id = update_id.saturating_add(1); - - let update = ChannelMonitorUpdate { - update_id: *update_id, - channel_id: Some(channel_id), - updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { - htlc: htlc_id, - }], - }; - - let during_startup = - !self.background_events_processed_since_startup.load(Ordering::Acquire); - if during_startup { - let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, - funding_txo: channel_funding_outpoint, - channel_id, - update, - }; - self.pending_background_events.lock().unwrap().push(event); - } else { - if let Some(actions) = self.handle_post_close_monitor_update( - &mut peer_state.in_flight_monitor_updates, - &mut peer_state.monitor_update_blocked_actions, - channel_funding_outpoint, - update, - counterparty_node_id, - channel_id, - ) { - mem::drop(peer_state_lock); - mem::drop(per_peer_state); - self.handle_monitor_update_completion_actions(actions); - } - } + let update_step = + ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id }; + self.handle_closed_channel_monitor_update_for_event( + counterparty_node_id, + channel_funding_outpoint, + channel_id, + update_step, + ); }, } } } + /// Helper for handling closed-channel monitor updates triggered by [`EventCompletionAction`]s. 
+ fn handle_closed_channel_monitor_update_for_event( + &self, counterparty_node_id: PublicKey, funding_outpoint: OutPoint, channel_id: ChannelId, + update_step: ChannelMonitorUpdateStep, + ) { + let per_peer_state = self.per_peer_state.read().unwrap(); + let mut peer_state_lock = per_peer_state + .get(&counterparty_node_id) + .map(|state| state.lock().unwrap()) + .expect("Channels originating a payment resolution must have peer state"); + let peer_state = &mut *peer_state_lock; + let update_id = peer_state + .closed_channel_monitor_update_ids + .get_mut(&channel_id) + .expect("Channels originating a payment resolution must have a monitor"); + // Note that for channels closed pre-0.1, the latest update_id is `u64::MAX`. + *update_id = update_id.saturating_add(1); + + let update = ChannelMonitorUpdate { + update_id: *update_id, + channel_id: Some(channel_id), + updates: vec![update_step], + }; + + let during_startup = + !self.background_events_processed_since_startup.load(Ordering::Acquire); + if during_startup { + let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id, + funding_txo: funding_outpoint, + channel_id, + update, + }; + self.pending_background_events.lock().unwrap().push(event); + } else { + if let Some(actions) = self.handle_post_close_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + funding_outpoint, + update, + counterparty_node_id, + channel_id, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_monitor_update_completion_actions(actions); + } + } + } /// Processes any events asynchronously in the order they were generated since the last call /// using the given event handler. 
/// From a4b5c7ec9396b16b65ab3ac085a53dd8e18df080 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 6 Mar 2026 11:44:50 -0500 Subject: [PATCH 02/12] Add ChannelMonitorUpdateStep::InboundPaymentClaimed When an Event::PaymentClaimed is processed by the user, we need to track that so we don't keep regenerating the event redundantly on startup. Here we add a monitor update for that. Note that this update will only be generated for closed channels -- if the channel is open, the inbound payment is pruned automatically when the HTLC is no longer present in any unrevoked commitment transaction, which stops the redundant event regeneration. Upcoming commits will add generation and handling of this monitor update --- lightning/src/chain/channelmonitor.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index a8d055a9c5b..1b43159630d 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -706,6 +706,17 @@ pub(crate) enum ChannelMonitorUpdateStep { ReleasePaymentComplete { htlc: SentHTLCId, }, + /// When an [`Event::PaymentClaimed`] is processed by the user, we need to track that so we don't + /// keep regenerating the event redundantly on startup. + /// + /// This will remove the HTLC from [`ChannelMonitor::get_stored_preimages`]. + /// + /// Note that this is only generated for closed channels -- if the channel is open, the inbound + /// payment is pruned automatically when the HTLC is no longer present in any unrevoked + /// commitment transaction. + InboundPaymentClaimed { + payment_hash: PaymentHash, + }, } impl ChannelMonitorUpdateStep { @@ -723,6 +734,7 @@ impl ChannelMonitorUpdateStep { ChannelMonitorUpdateStep::RenegotiatedFunding { .. } => "RenegotiatedFunding", ChannelMonitorUpdateStep::RenegotiatedFundingLocked { .. } => "RenegotiatedFundingLocked", ChannelMonitorUpdateStep::ReleasePaymentComplete { .. 
} => "ReleasePaymentComplete", + ChannelMonitorUpdateStep::InboundPaymentClaimed { .. } => "InboundPaymentClaimed", } } } @@ -769,6 +781,9 @@ impl_writeable_tlv_based_enum_upgradable!(ChannelMonitorUpdateStep, (3, htlc_data, required), (5, claimed_htlcs, required_vec), }, + (9, InboundPaymentClaimed) => { + (1, payment_hash, required), + }, (10, RenegotiatedFunding) => { (1, channel_parameters, (required: ReadableArgs, None)), (3, holder_commitment_tx, required), @@ -4150,6 +4165,7 @@ impl ChannelMonitorImpl { assert_eq!(updates.updates.len(), 1); match updates.updates[0] { ChannelMonitorUpdateStep::ReleasePaymentComplete { .. } => {}, + ChannelMonitorUpdateStep::InboundPaymentClaimed { .. } => {}, ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {}, // We should have already seen a `ChannelForceClosed` update if we're trying to // provide a preimage at this point. @@ -4281,6 +4297,8 @@ impl ChannelMonitorImpl { log_trace!(logger, "HTLC {htlc:?} permanently and fully resolved"); self.htlcs_resolved_to_user.insert(*htlc); }, + ChannelMonitorUpdateStep::InboundPaymentClaimed { payment_hash } => { + }, } } @@ -4313,6 +4331,7 @@ impl ChannelMonitorImpl { ChannelMonitorUpdateStep::PaymentPreimage { .. } => {}, ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {}, ChannelMonitorUpdateStep::ReleasePaymentComplete { .. } => {}, + ChannelMonitorUpdateStep::InboundPaymentClaimed { .. } => {}, } } From e74429823ad534c34b02580638fc28446860424d Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 6 Mar 2026 11:53:45 -0500 Subject: [PATCH 03/12] ChannelMonitor: track user-processed payment claims When an Event::PaymentClaimed is processed by the user, we need to track that so we don't keep regenerating the event redundantly on startup. Here we add a map to claims in this state, similar to the existing htlcs_resolved_to_user map. 
Note that this map will only be updated for closed channels -- if the channel is open, the inbound payment is pruned automatically when the HTLC is no longer present in any unrevoked commitment transaction, which stops the redundant event regeneration. Upcoming commits will add the actual tracking that uses this map, as well as generating the relevant monitor update to trigger this tracking --- lightning/src/chain/channelmonitor.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 1b43159630d..87d2c6e3343 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1357,6 +1357,10 @@ pub(crate) struct ChannelMonitorImpl { /// this and we'll store the set of fully resolved payments here. htlcs_resolved_to_user: HashSet, + /// The set of inbound payments for which the user has processed an [`Event::PaymentClaimed`]. + /// This is used to avoid regenerating the event redundantly on restart for closed channels. + inbound_payments_claimed: HashSet, + /// The set of `SpendableOutput` events which we have already passed upstream to be claimed. /// These are tracked explicitly to ensure that we don't generate the same events redundantly /// if users duplicatively confirm old transactions. 
Specifically for transactions claiming a @@ -1770,6 +1774,7 @@ pub(crate) fn write_chanmon_internal( (34, channel_monitor.alternative_funding_confirmed, option), (35, channel_monitor.is_manual_broadcast, required), (37, channel_monitor.funding_seen_onchain, required), + (39, channel_monitor.inbound_payments_claimed, required), }); Ok(()) @@ -1969,6 +1974,7 @@ impl ChannelMonitor { confirmed_commitment_tx_counterparty_output: None, htlcs_resolved_on_chain: Vec::new(), htlcs_resolved_to_user: new_hash_set(), + inbound_payments_claimed: new_hash_set(), spendable_txids_confirmed: Vec::new(), best_block, @@ -6523,6 +6529,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let mut funding_spend_confirmed = None; let mut htlcs_resolved_on_chain = Some(Vec::new()); let mut htlcs_resolved_to_user = Some(new_hash_set()); + let mut inbound_payments_claimed = Some(new_hash_set()); let mut funding_spend_seen = Some(false); let mut counterparty_node_id = None; let mut confirmed_commitment_tx_counterparty_output = None; @@ -6562,6 +6569,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP (34, alternative_funding_confirmed, option), (35, is_manual_broadcast, (default_value, false)), (37, funding_seen_onchain, (default_value, true)), + (39, inbound_payments_claimed, option), }); // Note that `payment_preimages_with_info` was added (and is always written) in LDK 0.1, so // we can use it to determine if this monitor was last written by LDK 0.1 or later. 
@@ -6727,6 +6735,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP confirmed_commitment_tx_counterparty_output, htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(), htlcs_resolved_to_user: htlcs_resolved_to_user.unwrap(), + inbound_payments_claimed: inbound_payments_claimed.unwrap(), spendable_txids_confirmed: spendable_txids_confirmed.unwrap(), best_block, From f761051d637cc4c21ea87fc1772d4ff25076e9bb Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 6 Mar 2026 12:29:36 -0500 Subject: [PATCH 04/12] Add EventCompletionAction::InboundPaymentClaimedChannelMonitorUpdate When an Event::PaymentClaimed is processed by the user, we need to track that so we don't keep regenerating the event redundantly on startup. Here we add an event completion action for that. Note that this action will only be generated for closed channels -- if the channel is open, the inbound payment is pruned automatically when the HTLC is no longer present in any unrevoked commitment transaction, which stops the redundant event regeneration. Similar to EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate, but for inbound payments. Actual generation and handling of this completion action is added in an upcoming commit. 
--- lightning/src/ln/channelmanager.rs | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 93409ec903a..810d572c98a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1475,6 +1475,21 @@ impl_writeable_tlv_based!(PaymentCompleteUpdate, { (7, htlc_id, required), }); +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct InboundPaymentClaimedUpdate { + pub counterparty_node_id: PublicKey, + pub channel_funding_outpoint: OutPoint, + pub channel_id: ChannelId, + pub payment_hash: PaymentHash, +} + +impl_writeable_tlv_based!(InboundPaymentClaimedUpdate, { + (1, counterparty_node_id, required), + (3, channel_funding_outpoint, required), + (5, channel_id, required), + (7, payment_hash, required), +}); + #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) enum EventCompletionAction { ReleaseRAAChannelMonitorUpdate { @@ -1489,6 +1504,12 @@ pub(crate) enum EventCompletionAction { /// fully-resolved in the [`ChannelMonitor`], which we do via this action. /// Note that this action will be dropped on downgrade to LDK prior to 0.2! ReleasePaymentCompleteChannelMonitorUpdate(PaymentCompleteUpdate), + + /// When a payment's resolution is communicated to the downstream logic via + /// [`Event::PaymentClaimed`], we may want to mark the payment as fully-resolved in the + /// [`ChannelMonitor`], which we do via this action. + /// Note that this action will be dropped on downgrade to LDK prior to 0.3! 
+ InboundPaymentClaimedChannelMonitorUpdate(InboundPaymentClaimedUpdate), } impl_writeable_tlv_based_enum!(EventCompletionAction, (0, ReleaseRAAChannelMonitorUpdate) => { @@ -1500,8 +1521,9 @@ impl_writeable_tlv_based_enum!(EventCompletionAction, } ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.unwrap()) })), - } + }, {1, ReleasePaymentCompleteChannelMonitorUpdate} => (), + {3, InboundPaymentClaimedChannelMonitorUpdate} => (), ); /// The source argument which is passed to [`ChannelManager::claim_mpp_part`]. @@ -14818,6 +14840,7 @@ impl< update_step, ); }, + EventCompletionAction::InboundPaymentClaimedChannelMonitorUpdate(_) => {}, } } } From 8271ebd4d3539a7519593c3950151e0e735c005d Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 6 Mar 2026 12:41:29 -0500 Subject: [PATCH 05/12] Prevent redundant PaymentClaimeds for closed channels Previously, if we had an inbound payment on a closed channel that we started claiming but did not end up removing from the commitment tx, we would generate a PaymentClaimed event for the payment on every restart until the channelmonitor was archived. The past few commits have laid the groundwork to get rid of this redundancy -- here we now generate an event completion action for when the PaymentClaimed is processed by the user, at which point a monitor update will be released that tells the channel monitor that the user has processed the PaymentClaimed event. The monitor tracks this internally such that the pending claim will no longer be returned to the ChannelManager when reconstructing the set of mid-claim inbound payments on startup, and thus no longer generate the redundant event. 
--- lightning/src/chain/channelmonitor.rs | 16 ++++++ lightning/src/ln/chanmon_update_fail_tests.rs | 1 + lightning/src/ln/channelmanager.rs | 51 ++++++++++++++++--- lightning/src/ln/monitor_tests.rs | 30 ++++++++--- lightning/src/ln/reload_tests.rs | 10 ++-- 5 files changed, 91 insertions(+), 17 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 87d2c6e3343..c4b20e72e4b 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -3270,6 +3270,20 @@ impl ChannelMonitor { pub(crate) fn get_stored_preimages( &self, + ) -> HashMap)> { + let inner = self.inner.lock().unwrap(); + inner + .payment_preimages + .iter() + .filter(|(hash, _)| !inner.inbound_payments_claimed.contains(*hash)) + .map(|(hash, value)| (*hash, value.clone())) + .collect() + } + + /// Used in tests to verify preimage propagation. + #[cfg(test)] + pub(crate) fn test_get_all_stored_preimages( + &self, ) -> HashMap)> { self.inner.lock().unwrap().payment_preimages.clone() } @@ -4304,6 +4318,8 @@ impl ChannelMonitorImpl { self.htlcs_resolved_to_user.insert(*htlc); }, ChannelMonitorUpdateStep::InboundPaymentClaimed { payment_hash } => { + log_trace!(logger, "Inbound payment {} claimed", payment_hash); + self.inbound_payments_claimed.insert(*payment_hash); }, } } diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index cd32d219b93..b43320ce324 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -4598,6 +4598,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { // available. 
nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2); expect_payment_claimed!(nodes[1], payment_hash, 1_000_000); + check_added_monitors(&nodes[1], 1); } #[test] diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 810d572c98a..ea51ebf02b4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10004,11 +10004,34 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let action = if let Some((outpoint, counterparty_node_id, channel_id)) = durable_preimage_channel { - Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate { - channel_funding_outpoint: Some(outpoint), - counterparty_node_id, - channel_id, - }) + let per_peer_state = self.per_peer_state.read().unwrap(); + let is_channel_closed = per_peer_state + .get(&counterparty_node_id) + .map(|peer_state_mutex| { + let peer_state = peer_state_mutex.lock().unwrap(); + !peer_state.channel_by_id.contains_key(&channel_id) + }) + .unwrap_or(true); + // For open channels, we use ReleaseRAAChannelMonitorUpdate to maintain the blocking + // behavior (RAA updates are blocked until the PaymentClaimed event is handled). + // For closed channels, we use InboundPaymentClaimedChannelMonitorUpdate to persist + // that the PaymentClaimed event has been handled, preventing regeneration on restart. 
+ if is_channel_closed { + Some(EventCompletionAction::InboundPaymentClaimedChannelMonitorUpdate( + InboundPaymentClaimedUpdate { + channel_funding_outpoint: outpoint, + counterparty_node_id, + channel_id, + payment_hash, + }, + )) + } else { + Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate { + channel_funding_outpoint: Some(outpoint), + counterparty_node_id, + channel_id, + }) + } } else { None }; @@ -14840,7 +14863,23 @@ impl< update_step, ); }, - EventCompletionAction::InboundPaymentClaimedChannelMonitorUpdate(_) => {}, + EventCompletionAction::InboundPaymentClaimedChannelMonitorUpdate( + InboundPaymentClaimedUpdate { + counterparty_node_id, + channel_funding_outpoint, + channel_id, + payment_hash, + }, + ) => { + let update_step = + ChannelMonitorUpdateStep::InboundPaymentClaimed { payment_hash }; + self.handle_closed_channel_monitor_update_for_event( + counterparty_node_id, + channel_funding_outpoint, + channel_id, + update_step, + ); + }, } } } diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 18a976871a6..0527010102b 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -225,6 +225,9 @@ fn archive_fully_resolved_monitors() { nodes[1].node.claim_funds(payment_preimage); check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 10_000_000); + // Processing PaymentClaimed on a closed channel generates a monitor update to mark the claim as + // resolved to the user. + check_added_monitors(&nodes[1], 1); let htlc_claim_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(htlc_claim_tx.len(), 1); @@ -3282,22 +3285,29 @@ fn test_update_replay_panics() { nodes[1].node.claim_funds(payment_preimage_1); check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); + // Processing PaymentClaimed on a closed channel generates a monitor update to mark the claim as + // resolved to the user. 
+ check_added_monitors(&nodes[1], 1); nodes[1].node.claim_funds(payment_preimage_2); check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000); + check_added_monitors(&nodes[1], 1); let mut updates = nodes[1].chain_monitor.monitor_updates.lock().unwrap().get_mut(&chan.2).unwrap().split_off(0); - // Update `monitor` until there's just one normal updates, an FC update, and a post-FC claim - // update pending - for update in updates.drain(..updates.len() - 4) { + // Update `monitor` until there's just one normal updates, an FC update, a post-FC claim + // and InboundPaymentClaimed updates pending. + // Updates are: [normal, FC, preimage1, inbound_claimed1, preimage2, inbound_claimed2] + for update in updates.drain(..updates.len() - 6) { monitor.update_monitor(&update, &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap(); } - assert_eq!(updates.len(), 4); + assert_eq!(updates.len(), 6); assert!(matches!(updates[1].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. })); assert!(matches!(updates[2].updates[0], ChannelMonitorUpdateStep::PaymentPreimage { .. })); - assert!(matches!(updates[3].updates[0], ChannelMonitorUpdateStep::PaymentPreimage { .. })); + assert!(matches!(updates[3].updates[0], ChannelMonitorUpdateStep::InboundPaymentClaimed { .. })); + assert!(matches!(updates[4].updates[0], ChannelMonitorUpdateStep::PaymentPreimage { .. })); + assert!(matches!(updates[5].updates[0], ChannelMonitorUpdateStep::InboundPaymentClaimed { .. 
})); // Ensure applying the force-close update skipping the last normal update fails let poisoned_monitor = monitor.clone(); @@ -3384,11 +3394,13 @@ fn test_claim_event_never_handled() { let chan_0_monitor_serialized = get_monitor!(nodes[1], chan.2).encode(); let mons = &[&chan_0_monitor_serialized[..]]; reload_node!(nodes[1], &init_node_ser, mons, persister, new_chain_mon, nodes_1_reload); + check_added_monitors(&nodes[1], 0); expect_payment_claimed!(nodes[1], payment_hash_a, 1_000_000); - // The reload logic spuriously generates a redundant payment preimage-containing - // `ChannelMonitorUpdate`. - check_added_monitors(&nodes[1], 2); + // The reload logic spuriously generates 2 redundant payment preimage-containing + // `ChannelMonitorUpdate`s, plus we get a monitor update once the PaymentClaimed event is + // processed. + check_added_monitors(&nodes[1], 3); } fn do_test_lost_preimage_monitor_events(on_counterparty_tx: bool, p2a_anchor: bool) { @@ -3863,6 +3875,7 @@ fn test_ladder_preimage_htlc_claims() { check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[node_id_0], 1_000_000); nodes[1].node.claim_funds(payment_preimage1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(&nodes[1], payment_hash1, 1_000_000); check_added_monitors(&nodes[1], 1); @@ -3884,6 +3897,7 @@ fn test_ladder_preimage_htlc_claims() { check_added_monitors(&nodes[0], 1); nodes[1].node.claim_funds(payment_preimage2); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(&nodes[1], payment_hash2, 1_000_000); check_added_monitors(&nodes[1], 1); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index bb730f8fba8..bc78c2a50a1 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -853,16 +853,20 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest if persist_both_monitors { if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. 
} = events[2] { } else { panic!(); } if let Event::PaymentClaimed { amount_msat: 15_000_000, .. } = events[3] { } else { panic!(); } - check_added_monitors(&nodes[3], 4); + // 4 monitors for preimage updates + 1 for InboundPaymentClaimed marking the payment as + // claimed in the closed channel's monitor. + check_added_monitors(&nodes[3], 5); } else { if let Event::PaymentClaimed { amount_msat: 15_000_000, .. } = events[2] { } else { panic!(); } + // Only one channel closed; the durable_preimage_channel is the live one, so no extra + // InboundPaymentClaimed update is generated. check_added_monitors(&nodes[3], 3); } // Now that we've processed background events, the preimage should have been copied into the // non-persisted monitor: - assert!(get_monitor!(nodes[3], chan_id_persisted).get_stored_preimages().contains_key(&payment_hash)); - assert!(get_monitor!(nodes[3], chan_id_not_persisted).get_stored_preimages().contains_key(&payment_hash)); + assert!(get_monitor!(nodes[3], chan_id_persisted).test_get_all_stored_preimages().contains_key(&payment_hash)); + assert!(get_monitor!(nodes[3], chan_id_not_persisted).test_get_all_stored_preimages().contains_key(&payment_hash)); // On restart, we should also get a duplicate PaymentClaimed event as we persisted the // ChannelManager prior to handling the original one. From 51d69318aface000e7ab7f71a0e3bd87a0d27974 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 26 Feb 2026 12:31:14 -0500 Subject: [PATCH 06/12] Rename claimable maps to _legacy on read Upcoming commits work towards deprecating persistence of these maps, in favor of reconstructing them from Channel{Monitor} data. 
--- lightning/src/ln/channelmanager.rs | 63 ++++++++++++++++-------------- 1 file changed, 33 insertions(+), 30 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ea51ebf02b4..32ac5ae790c 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17550,13 +17550,13 @@ impl< decode_update_add_htlcs_opt = Some(decode_update_add_htlcs); } - let claimable_payments = self.claimable_payments.lock().unwrap(); + let claimable_payments_legacy = self.claimable_payments.lock().unwrap(); let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap(); let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new(); let mut htlc_onion_fields: Vec> = Vec::new(); - (claimable_payments.claimable_payments.len() as u64).write(writer)?; - for (payment_hash, payment) in claimable_payments.claimable_payments.iter() { + (claimable_payments_legacy.claimable_payments.len() as u64).write(writer)?; + for (payment_hash, payment) in claimable_payments_legacy.claimable_payments.iter() { payment_hash.write(writer)?; (payment.htlcs.len() as u64).write(writer)?; for htlc in payment.htlcs.iter() { @@ -17706,7 +17706,7 @@ impl< pending_intercepted_htlcs = Some(our_pending_intercepts); } - let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments); + let mut pending_claiming_payments = Some(&claimable_payments_legacy.pending_claiming_payments); if pending_claiming_payments.as_ref().unwrap().is_empty() { // LDK versions prior to 0.0.113 do not know how to read the pending claimed payments // map. Thus, if there are no entries we skip writing a TLV for it. 
@@ -17816,12 +17816,12 @@ pub(super) struct ChannelManagerData { best_block_height: u32, best_block_hash: BlockHash, channels: Vec>, - claimable_payments: HashMap, + claimable_payments_legacy: HashMap, peer_init_features: Vec<(PublicKey, InitFeatures)>, pending_events_read: VecDeque<(events::Event, Option)>, highest_seen_timestamp: u32, pending_outbound_payments: HashMap, - pending_claiming_payments: HashMap, + pending_claiming_payments_legacy: HashMap, received_network_pubkey: Option, monitor_update_blocked_actions_per_peer: Vec<(PublicKey, BTreeMap>)>, @@ -17899,7 +17899,7 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> }; let claimable_htlcs_count: u64 = Readable::read(reader)?; - let mut claimable_htlcs_list = + let mut claimable_htlcs_list_legacy = Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128)); for _ in 0..claimable_htlcs_count { let payment_hash = Readable::read(reader)?; @@ -17921,7 +17921,7 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> previous_hops.push(htlc); } let total_mpp_value_msat = total_mpp_value_msat.ok_or(DecodeError::InvalidValue)?; - claimable_htlcs_list.push((payment_hash, previous_hops, total_mpp_value_msat)); + claimable_htlcs_list_legacy.push((payment_hash, previous_hops, total_mpp_value_msat)); } let peer_count: u64 = Readable::read(reader)?; @@ -18003,11 +18003,11 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> let mut received_network_pubkey: Option = None; let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; let mut probing_cookie_secret: Option<[u8; 32]> = None; - let mut claimable_htlc_purposes = None; - let mut amountless_claimable_htlc_onion_fields: Option< + let mut claimable_htlc_purposes_legacy = None; + let mut amountless_claimable_htlc_onion_fields_legacy: Option< Vec>, > = None; - let mut pending_claiming_payments = Some(new_hash_map()); + let mut pending_claiming_payments_legacy = Some(new_hash_map()); let mut monitor_update_blocked_actions_per_peer: Option>)>> 
= None; let mut events_override = None; @@ -18026,15 +18026,15 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> (1, pending_outbound_payments_no_retry, option), (2, pending_intercepted_htlcs_legacy, option), (3, pending_outbound_payments, option), - (4, pending_claiming_payments, option), + (4, pending_claiming_payments_legacy, option), (5, received_network_pubkey, option), (6, monitor_update_blocked_actions_per_peer, option), (7, fake_scid_rand_bytes, option), (8, events_override, option), - (9, claimable_htlc_purposes, optional_vec), + (9, claimable_htlc_purposes_legacy, optional_vec), (10, legacy_in_flight_monitor_updates, option), (11, probing_cookie_secret, option), - (13, amountless_claimable_htlc_onion_fields, optional_vec), + (13, amountless_claimable_htlc_onion_fields_legacy, optional_vec), (14, decode_update_add_htlcs_legacy, option), (15, inbound_payment_id_secret, option), (17, in_flight_monitor_updates, option), @@ -18091,18 +18091,19 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> let pending_events_read = events_override.unwrap_or(pending_events_read); // Combine claimable_htlcs_list with their purposes and onion fields. 
- let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len()); - if let Some(purposes) = claimable_htlc_purposes { - if purposes.len() != claimable_htlcs_list.len() { + let mut claimable_payments_legacy = + hash_map_with_capacity(claimable_htlcs_list_legacy.len()); + if let Some(purposes) = claimable_htlc_purposes_legacy { + if purposes.len() != claimable_htlcs_list_legacy.len() { return Err(DecodeError::InvalidValue); } - if let Some(onion_fields) = amountless_claimable_htlc_onion_fields { - if onion_fields.len() != claimable_htlcs_list.len() { + if let Some(onion_fields) = amountless_claimable_htlc_onion_fields_legacy { + if onion_fields.len() != claimable_htlcs_list_legacy.len() { return Err(DecodeError::InvalidValue); } for (purpose, (onion, (payment_hash, htlcs, total_mpp_value_msat))) in purposes .into_iter() - .zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter())) + .zip(onion_fields.into_iter().zip(claimable_htlcs_list_legacy.into_iter())) { let onion_fields = if let Some(mut onion) = onion { if onion.0.total_mpp_amount_msat != 0 @@ -18116,12 +18117,13 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> return Err(DecodeError::InvalidValue); }; let claimable = ClaimablePayment { purpose, htlcs, onion_fields }; - let existing_payment = claimable_payments.insert(payment_hash, claimable); + let existing_payment = + claimable_payments_legacy.insert(payment_hash, claimable); if existing_payment.is_some() { return Err(DecodeError::InvalidValue); } } - } else if !purposes.is_empty() || !claimable_htlcs_list.is_empty() { + } else if !purposes.is_empty() || !claimable_htlcs_list_legacy.is_empty() { // `amountless_claimable_htlc_onion_fields` was first written in LDK 0.0.115. We // haven't supported upgrade from 0.0.115 with pending HTLCs since 0.1. 
return Err(DecodeError::InvalidValue); @@ -18138,14 +18140,15 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> best_block_hash, channels, forward_htlcs_legacy, - claimable_payments, + claimable_payments_legacy, peer_init_features, pending_events_read, highest_seen_timestamp, pending_intercepted_htlcs_legacy: pending_intercepted_htlcs_legacy .unwrap_or_else(new_hash_map), pending_outbound_payments, - pending_claiming_payments: pending_claiming_payments.unwrap_or_else(new_hash_map), + pending_claiming_payments_legacy: pending_claiming_payments_legacy + .unwrap_or_else(new_hash_map), received_network_pubkey, monitor_update_blocked_actions_per_peer: monitor_update_blocked_actions_per_peer .unwrap_or_else(Vec::new), @@ -18443,13 +18446,13 @@ impl< best_block_hash, channels, mut forward_htlcs_legacy, - claimable_payments, + claimable_payments_legacy, peer_init_features, mut pending_events_read, highest_seen_timestamp, mut pending_intercepted_htlcs_legacy, pending_outbound_payments, - pending_claiming_payments, + pending_claiming_payments_legacy, received_network_pubkey, monitor_update_blocked_actions_per_peer, mut fake_scid_rand_bytes, @@ -19479,7 +19482,7 @@ impl< // Similar to the above cases for forwarded payments, if we have any pending inbound HTLCs // which haven't yet been claimed, we may be missing counterparty_node_id info and would // panic if we attempted to claim them at this point. - for (payment_hash, payment) in claimable_payments.iter() { + for (payment_hash, payment) in claimable_payments_legacy.iter() { for htlc in payment.htlcs.iter() { if htlc.prev_hop.counterparty_node_id.is_some() { continue; @@ -19671,7 +19674,7 @@ impl< } // See above comment on `failed_htlcs`. 
- for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for htlcs in claimable_payments_legacy.values().map(|pmt| &pmt.htlcs) { for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs, @@ -19764,8 +19767,8 @@ impl< forward_htlcs: Mutex::new(forward_htlcs), decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { - claimable_payments, - pending_claiming_payments, + claimable_payments: claimable_payments_legacy, + pending_claiming_payments: pending_claiming_payments_legacy, }), outbound_scid_aliases: Mutex::new(outbound_scid_aliases), short_to_chan_info: FairRwLock::new(short_to_chan_info), From 1c8a34efc29db5a0d38f45bd649ecf86a91a7df1 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 3 Mar 2026 14:52:43 -0500 Subject: [PATCH 07/12] Tweak reload_node test macro to be more flexible We'll need this in the next commit, and may be useful in the future to avoid having a bunch of macro variants. --- lightning/src/ln/functional_test_utils.rs | 106 +++++++++++----------- lightning/src/ln/reload_tests.rs | 6 +- 2 files changed, 58 insertions(+), 54 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 2d971c3a100..c7e83b27e97 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1369,10 +1369,36 @@ pub fn _reload_node<'a, 'b, 'c>( } #[macro_export] -macro_rules! _reload_node_inner { - ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: - ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr +macro_rules! 
reload_node { + // Reload the node with the new provided `UserConfig` + ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { + $crate::reload_node!( + $node, + $chanman_encoded, + $monitors_encoded, + $persister, + $new_chain_monitor, + $new_channelmanager, + TestReloadNodeCfg::new().with_cfg($new_config) + ); + }; + // Reload the node using the node's current config + ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { + $crate::reload_node!( + $node, + $chanman_encoded, + $monitors_encoded, + $persister, + $new_chain_monitor, + $new_channelmanager, + TestReloadNodeCfg::new() + ); + }; + // Base implementation - only called via internal recursive macro calls + ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: + ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reload_cfg: expr ) => { + let TestReloadNodeCfg { config_override, reconstruct_pending_htlcs } = $reload_cfg; let chanman_encoded = $chanman_encoded; $persister = $crate::util::test_utils::TestPersister::new(); @@ -1388,10 +1414,10 @@ macro_rules! _reload_node_inner { $new_channelmanager = $crate::ln::functional_test_utils::_reload_node( &$node, - $new_config, + config_override.unwrap_or_else(|| $node.node.get_current_config()), &chanman_encoded, $monitors_encoded, - $reconstruct_pending_htlcs, + reconstruct_pending_htlcs, ); $node.node = &$new_channelmanager; $node.onion_messenger.set_offers_handler(&$new_channelmanager); @@ -1399,52 +1425,30 @@ macro_rules! _reload_node_inner { }; } -#[macro_export] -macro_rules! 
reload_node { - // Reload the node using the node's current config - ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { - let config = $node.node.get_current_config(); - $crate::_reload_node_inner!( - $node, - config, - $chanman_encoded, - $monitors_encoded, - $persister, - $new_chain_monitor, - $new_channelmanager, - None - ); - }; - // Reload the node with the new provided config - ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { - $crate::_reload_node_inner!( - $node, - $new_config, - $chanman_encoded, - $monitors_encoded, - $persister, - $new_chain_monitor, - $new_channelmanager, - None - ); - }; - // Reload the node and have the `ChannelManager` use new codepaths that reconstruct its set of - // pending HTLCs from `Channel{Monitor}` data. - ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: - ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr - ) => { - let config = $node.node.get_current_config(); - $crate::_reload_node_inner!( - $node, - config, - $chanman_encoded, - $monitors_encoded, - $persister, - $new_chain_monitor, - $new_channelmanager, - $reconstruct_pending_htlcs - ); - }; +/// Knobs for [`reload_node`]. +pub struct TestReloadNodeCfg { + /// Override the `ChannelManager`'s [`UserConfig`] on reload. Otherwise, the node's pre-reload + /// config will be used. + pub config_override: Option, + /// Sets [`ChannelManagerReadArgs::reconstruct_manager_from_monitors`]. + pub reconstruct_pending_htlcs: Option, +} + +impl TestReloadNodeCfg { + /// Sets [`Self::config_override`] and [`Self::reconstruct_pending_htlcs`] to `None`. 
+ pub fn new() -> Self { + Self { config_override: None, reconstruct_pending_htlcs: None } + } + /// Sets [`Self::config_override`] + pub fn with_cfg(mut self, cfg: UserConfig) -> Self { + self.config_override = Some(cfg); + self + } + /// Sets [`Self::reconstruct_pending_htlcs`] + pub fn with_reconstruct_htlcs(mut self, reconstruct_htlcs: bool) -> Self { + self.reconstruct_pending_htlcs = Some(reconstruct_htlcs); + self + } } pub fn create_funding_transaction<'a, 'b, 'c>( diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index bc78c2a50a1..264720b16f7 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1968,7 +1968,7 @@ fn test_reload_node_with_preimage_in_monitor_claims_htlc() { persister, new_chain_monitor, nodes_1_deserialized, - Some(true) + TestReloadNodeCfg::new().with_reconstruct_htlcs(true) ); // When the claim is reconstructed during reload, a PaymentForwarded event is generated. @@ -2071,7 +2071,7 @@ fn test_reload_node_without_preimage_fails_htlc() { persister, new_chain_monitor, nodes_1_deserialized, - Some(true) + TestReloadNodeCfg::new().with_reconstruct_htlcs(true) ); // After reload, nodes[1] should have generated an HTLCHandlingFailed event. @@ -2218,7 +2218,7 @@ fn test_reload_with_mpp_claims_on_same_channel() { persister, new_chain_monitor, nodes_1_deserialized, - Some(true) + TestReloadNodeCfg::new().with_reconstruct_htlcs(true) ); // When the claims are reconstructed during reload, PaymentForwarded events are regenerated. From 5be07709d3cacde456deb37b3bcd82accd244cc2 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 3 Mar 2026 15:00:11 -0500 Subject: [PATCH 08/12] In 0.5, reconstruct claimable_payments from Channel data We're working on reducing the requirement to persist the ChannelManager, via reconstructing the manager from Channel{Monitor} data on read. Here we stop relying on persistence of the claimable_payments map. 
This is trivial since we're building on previous work that stored inbound committed HTLC onions in Channels as part of getting rid of forwarded HTLC map persistence -- rather than persisting forwards explicitly, we started persisting the corresponding inbound payment onions -> putting them into ChannelManager::decode_update_add_htlcs on read --> the forwards will then be re-decoded and the forward_htlcs maps will be repopulated on the next call to process_pending_htlc_forwards. The process for claimable_htlcs is the same. The onions for these committed inbound receives are put back into ChannelManager::decode_update_add_htlcs, and will repopulate themselves into the claimable_payments map on the next call to process_pending_htlc_forwards. --- lightning/src/ln/channelmanager.rs | 19 ++++--------------- lightning/src/ln/payment_tests.rs | 27 +++++++++++++++++++++++++-- lightning/src/ln/splicing_tests.rs | 13 ++++++++++--- 3 files changed, 39 insertions(+), 20 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 32ac5ae790c..3ba1a6463df 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -19672,28 +19672,17 @@ impl< prune_forwarded_htlc(&mut already_forwarded_htlcs, prev_hop_data, payment_hash); } } - - // See above comment on `failed_htlcs`. 
- for htlcs in claimable_payments_legacy.values().map(|pmt| &pmt.htlcs) { - for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was already decoded and marked as a claimable payment", - &args.logger, - ); - } - } } - let (decode_update_add_htlcs, forward_htlcs, pending_intercepted_htlcs) = + let (decode_update_add_htlcs, forward_htlcs, pending_intercepted_htlcs, claimable_payments) = if reconstruct_manager_from_monitors { - (decode_update_add_htlcs, new_hash_map(), new_hash_map()) + (decode_update_add_htlcs, new_hash_map(), new_hash_map(), new_hash_map()) } else { ( decode_update_add_htlcs_legacy, forward_htlcs_legacy, pending_intercepted_htlcs_legacy, + claimable_payments_legacy, ) }; @@ -19767,7 +19756,7 @@ impl< forward_htlcs: Mutex::new(forward_htlcs), decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { - claimable_payments: claimable_payments_legacy, + claimable_payments, pending_claiming_payments: pending_claiming_payments_legacy, }), outbound_scid_aliases: Mutex::new(outbound_scid_aliases), diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index b5cbe0fee98..ced5b042323 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -1455,7 +1455,9 @@ fn test_fulfill_restart_failure() { let node_b_id = nodes[1].node.get_our_node_id(); let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000); + let amt_msat = 100_000; + let (payment_preimage, payment_hash, payment_secret, ..) = + route_payment(&nodes[0], &[&nodes[1]], amt_msat); // The simplest way to get a failure after a fulfill is to reload nodes[1] from a state // pre-fulfill, which we do by serializing it here. 
@@ -1472,11 +1474,29 @@ fn test_fulfill_restart_failure() { expect_payment_sent(&nodes[0], payment_preimage, None, false, false); // Now reload nodes[1]... - reload_node!(nodes[1], &node_b_ser, &[&mon_ser], persister, chain_monitor, node_b_reload); + reload_node!( + nodes[1], + &node_b_ser, + &[&mon_ser], + persister, + chain_monitor, + node_b_reload, + TestReloadNodeCfg::new().with_reconstruct_htlcs(true) + ); nodes[0].node.peer_disconnected(node_b_id); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + nodes[1].node.process_pending_htlc_forwards(); + expect_payment_claimable!( + &nodes[1], + payment_hash, + payment_secret, + amt_msat, + None, + nodes[1].node.get_our_node_id() + ); + nodes[1].node.fail_htlc_backwards(&payment_hash); let fail_type = HTLCHandlingFailureType::Receive { payment_hash }; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[fail_type]); @@ -4890,6 +4910,9 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { reload_node!(nodes[3], config, &node_d_ser, &mons[..], persister, chain_mon, node_d_reload); nodes[1].node.peer_disconnected(node_d_id); reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[3])); + // After reload, HTLCs need to be reprocessed since claimable_payments + // is no longer persisted. This is an incomplete MPP, so no event is generated. + nodes[3].node.process_pending_htlc_forwards(); } let mut reconnect_args = ReconnectArgs::new(&nodes[2], &nodes[3]); reconnect_args.send_channel_ready = (true, true); diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index 486e386be87..b4df9e96fef 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -1924,7 +1924,8 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { let prev_funding_script = get_monitor!(nodes[0], channel_id).get_funding_script(); // Keep a pending HTLC throughout the reestablish flow to make sure we can handle them. 
- route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let amt_msat = 1_000_000; + let (_, hash, secret, ..) = route_payment(&nodes[0], &[&nodes[1]], amt_msat); // Negotiate the splice up until the nodes exchange `tx_complete`. let outputs = vec![ @@ -1967,8 +1968,11 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { &[&encoded_monitor_1], persister_1a, chain_monitor_1a, - node_1a + node_1a, + TestReloadNodeCfg::new().with_reconstruct_htlcs(true) ); + nodes[1].node.process_pending_htlc_forwards(); + expect_payment_claimable!(&nodes[1], hash, secret, amt_msat); // We should have another signing event generated upon reload as they're not persisted. let _ = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); if async_monitor_update { @@ -2067,8 +2071,11 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { &[&encoded_monitor_1], persister_1b, chain_monitor_1b, - node_1b + node_1b, + TestReloadNodeCfg::new().with_reconstruct_htlcs(true) ); + nodes[1].node.process_pending_htlc_forwards(); + expect_payment_claimable!(&nodes[1], hash, secret, amt_msat); } else { nodes[0].node.peer_disconnected(node_id_1); nodes[1].node.peer_disconnected(node_id_0); From 250332718ae7cc3286dc6c9fed1d0b29a910d503 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 3 Mar 2026 10:43:56 -0500 Subject: [PATCH 09/12] Removed nested for-loop over monitor claims In an upcoming commit, we'll be iterating over the payment preimage claims in in-flight monitor updates as well as monitors, so update the loop now as a prefactor. 
--- lightning/src/ln/channelmanager.rs | 502 +++++++++++++++-------------- 1 file changed, 254 insertions(+), 248 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 3ba1a6463df..562fa62c0ca 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -19804,273 +19804,279 @@ impl< }; let mut processed_claims: HashSet> = new_hash_set(); - for (channel_id, monitor) in args.channel_monitors.iter() { - for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() + + let monitor_preimages = args.channel_monitors.iter().flat_map(|(channel_id, monitor)| { + let counterparty_node_id = monitor.get_counterparty_node_id(); + monitor.get_stored_preimages().into_iter().map( + move |(payment_hash, (payment_preimage, payment_claims))| { + ( + *channel_id, + counterparty_node_id, + payment_hash, + payment_preimage, + payment_claims, + ) + }, + ) + }); + + for (channel_id, counterparty_node_id, payment_hash, payment_preimage, payment_claims) in + monitor_preimages + { + // If we have unresolved inbound committed HTLCs that were already forwarded to the + // outbound edge and removed via claim, we need to make sure to claim them backwards via + // adding them to `pending_claims_to_replay`. + if let Some(forwarded_htlcs) = + already_forwarded_htlcs.remove(&(channel_id, payment_hash)) { - // If we have unresolved inbound committed HTLCs that were already forwarded to the - // outbound edge and removed via claim, we need to make sure to claim them backwards via - // adding them to `pending_claims_to_replay`. 
- if let Some(forwarded_htlcs) = - already_forwarded_htlcs.remove(&(*channel_id, payment_hash)) - { - for (prev_hop, next_hop) in forwarded_htlcs { - let new_pending_claim = - !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _, _)| { - matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == prev_hop.htlc_id && hop.channel_id == prev_hop.channel_id) + for (prev_hop, next_hop) in forwarded_htlcs { + let new_pending_claim = + !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _, _)| { + matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == prev_hop.htlc_id && hop.channel_id == prev_hop.channel_id) + }); + if new_pending_claim { + let is_downstream_closed = channel_manager + .per_peer_state + .read() + .unwrap() + .get(&next_hop.node_id) + .map_or(true, |peer_state_mtx| { + !peer_state_mtx + .lock() + .unwrap() + .channel_by_id + .contains_key(&next_hop.channel_id) }); - if new_pending_claim { - let is_downstream_closed = channel_manager - .per_peer_state - .read() - .unwrap() - .get(&next_hop.node_id) - .map_or(true, |peer_state_mtx| { - !peer_state_mtx - .lock() - .unwrap() - .channel_by_id - .contains_key(&next_hop.channel_id) - }); - pending_claims_to_replay.push(( - HTLCSource::PreviousHopData(prev_hop), - payment_preimage, - next_hop.amt_msat, - is_downstream_closed, - next_hop.node_id, - next_hop.funding_txo, - next_hop.channel_id, - Some(next_hop.user_channel_id), - )); - } + pending_claims_to_replay.push(( + HTLCSource::PreviousHopData(prev_hop), + payment_preimage, + next_hop.amt_msat, + is_downstream_closed, + next_hop.node_id, + next_hop.funding_txo, + next_hop.channel_id, + Some(next_hop.user_channel_id), + )); } } - if !payment_claims.is_empty() { - for payment_claim in payment_claims { - if processed_claims.contains(&payment_claim.mpp_parts) { - // We might get the same payment a few times from different channels - // that the MPP payment was received using. 
There's no point in trying - // to claim the same payment again and again, so we check if the HTLCs - // are the same and skip the payment here. - continue; - } - if payment_claim.mpp_parts.is_empty() { - return Err(DecodeError::InvalidValue); - } - { - let payments = channel_manager.claimable_payments.lock().unwrap(); - if !payments.claimable_payments.contains_key(&payment_hash) { - if let Some(payment) = - payments.pending_claiming_payments.get(&payment_hash) - { - if payment.payment_id - == payment_claim.claiming_payment.payment_id - { - // If this payment already exists and was marked as - // being-claimed then the serialized state must contain all - // of the pending `ChannelMonitorUpdate`s required to get - // the preimage on disk in all MPP parts. Thus we can skip - // the replay below. - continue; - } + } + if !payment_claims.is_empty() { + for payment_claim in payment_claims { + if processed_claims.contains(&payment_claim.mpp_parts) { + // We might get the same payment a few times from different channels + // that the MPP payment was received using. There's no point in trying + // to claim the same payment again and again, so we check if the HTLCs + // are the same and skip the payment here. + continue; + } + if payment_claim.mpp_parts.is_empty() { + return Err(DecodeError::InvalidValue); + } + { + let payments = channel_manager.claimable_payments.lock().unwrap(); + if !payments.claimable_payments.contains_key(&payment_hash) { + if let Some(payment) = + payments.pending_claiming_payments.get(&payment_hash) + { + if payment.payment_id == payment_claim.claiming_payment.payment_id { + // If this payment already exists and was marked as + // being-claimed then the serialized state must contain all + // of the pending `ChannelMonitorUpdate`s required to get + // the preimage on disk in all MPP parts. Thus we can skip + // the replay below. 
+ continue; } } } + } - let mut channels_without_preimage = payment_claim - .mpp_parts - .iter() - .map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.channel_id)) - .collect::>(); - // If we have multiple MPP parts which were received over the same channel, - // we only track it once as once we get a preimage durably in the - // `ChannelMonitor` it will be used for all HTLCs with a matching hash. - channels_without_preimage.sort_unstable(); - channels_without_preimage.dedup(); - let pending_claims = PendingMPPClaim { - channels_without_preimage, - channels_with_preimage: Vec::new(), - }; - let pending_claim_ptr_opt = Some(Arc::new(Mutex::new(pending_claims))); - - // While it may be duplicative to generate a PaymentClaimed here, trying to - // figure out if the user definitely saw it before shutdown would require some - // nontrivial logic and may break as we move away from regularly persisting - // ChannelManager. Instead, we rely on the users' event handler being - // idempotent and just blindly generate one no matter what, letting the - // preimages eventually timing out from ChannelMonitors to prevent us from - // doing so forever. 
- - let claim_found = channel_manager - .claimable_payments - .lock() - .unwrap() - .begin_claiming_payment( - payment_hash, - &channel_manager.node_signer, - &channel_manager.logger, - &channel_manager.inbound_payment_id_secret, - true, - ); - if claim_found.is_err() { - let mut claimable_payments = - channel_manager.claimable_payments.lock().unwrap(); - match claimable_payments.pending_claiming_payments.entry(payment_hash) { - hash_map::Entry::Occupied(_) => { - debug_assert!( - false, - "Entry was added in begin_claiming_payment" - ); - return Err(DecodeError::InvalidValue); - }, - hash_map::Entry::Vacant(entry) => { - entry.insert(payment_claim.claiming_payment); - }, - } + let mut channels_without_preimage = payment_claim + .mpp_parts + .iter() + .map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.channel_id)) + .collect::>(); + // If we have multiple MPP parts which were received over the same channel, + // we only track it once as once we get a preimage durably in the + // `ChannelMonitor` it will be used for all HTLCs with a matching hash. + channels_without_preimage.sort_unstable(); + channels_without_preimage.dedup(); + let pending_claims = PendingMPPClaim { + channels_without_preimage, + channels_with_preimage: Vec::new(), + }; + let pending_claim_ptr_opt = Some(Arc::new(Mutex::new(pending_claims))); + + // While it may be duplicative to generate a PaymentClaimed here, trying to + // figure out if the user definitely saw it before shutdown would require some + // nontrivial logic and may break as we move away from regularly persisting + // ChannelManager. Instead, we rely on the users' event handler being + // idempotent and just blindly generate one no matter what, letting the + // preimages eventually timing out from ChannelMonitors to prevent us from + // doing so forever. 
+ + let claim_found = + channel_manager.claimable_payments.lock().unwrap().begin_claiming_payment( + payment_hash, + &channel_manager.node_signer, + &channel_manager.logger, + &channel_manager.inbound_payment_id_secret, + true, + ); + if claim_found.is_err() { + let mut claimable_payments = + channel_manager.claimable_payments.lock().unwrap(); + match claimable_payments.pending_claiming_payments.entry(payment_hash) { + hash_map::Entry::Occupied(_) => { + debug_assert!(false, "Entry was added in begin_claiming_payment"); + return Err(DecodeError::InvalidValue); + }, + hash_map::Entry::Vacant(entry) => { + entry.insert(payment_claim.claiming_payment); + }, } + } - for part in payment_claim.mpp_parts.iter() { - let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| { + for part in payment_claim.mpp_parts.iter() { + let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| { + ( + part.counterparty_node_id, + part.channel_id, + PendingMPPClaimPointer(Arc::clone(&ptr)), + ) + }); + let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr| { + RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { + pending_claim: PendingMPPClaimPointer(Arc::clone(&ptr)), + } + }); + // Note that we don't need to pass the `payment_info` here - its + // already (clearly) durably on disk in the `ChannelMonitor` so there's + // no need to worry about getting it into others. + // + // We don't encode any attribution data, because the required onion shared secret isn't + // available here. 
+ channel_manager.claim_mpp_part( + part.into(), + payment_preimage, + None, + None, + |_, _| { ( - part.counterparty_node_id, - part.channel_id, - PendingMPPClaimPointer(Arc::clone(&ptr)), + Some(MonitorUpdateCompletionAction::PaymentClaimed { + payment_hash, + pending_mpp_claim, + }), + pending_claim_ptr, ) - }); - let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr| { - RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { - pending_claim: PendingMPPClaimPointer(Arc::clone(&ptr)), - } - }); - // Note that we don't need to pass the `payment_info` here - its - // already (clearly) durably on disk in the `ChannelMonitor` so there's - // no need to worry about getting it into others. - // - // We don't encode any attribution data, because the required onion shared secret isn't - // available here. - channel_manager.claim_mpp_part( - part.into(), - payment_preimage, - None, - None, - |_, _| { - ( - Some(MonitorUpdateCompletionAction::PaymentClaimed { - payment_hash, - pending_mpp_claim, - }), - pending_claim_ptr, - ) - }, - ); - } - processed_claims.insert(payment_claim.mpp_parts); + }, + ); } - } else { - let per_peer_state = channel_manager.per_peer_state.read().unwrap(); - let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap(); - let payment = claimable_payments.claimable_payments.remove(&payment_hash); - mem::drop(claimable_payments); - if let Some(payment) = payment { - log_info!(channel_manager.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash); - let mut claimable_amt_msat = 0; - let mut receiver_node_id = Some(our_network_pubkey); - let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret; - if phantom_shared_secret.is_some() { - let phantom_pubkey = channel_manager - .node_signer - .get_node_id(Recipient::PhantomNode) - .expect("Failed to get node_id for phantom node recipient"); - receiver_node_id = Some(phantom_pubkey) - } - for 
claimable_htlc in &payment.htlcs { - claimable_amt_msat += claimable_htlc.value; - - // Add a holding-cell claim of the payment to the Channel, which should be - // applied ~immediately on peer reconnection. Because it won't generate a - // new commitment transaction we can just provide the payment preimage to - // the corresponding ChannelMonitor and nothing else. - // - // We do so directly instead of via the normal ChannelMonitor update - // procedure as the ChainMonitor hasn't yet been initialized, implying - // we're not allowed to call it directly yet. Further, we do the update - // without incrementing the ChannelMonitor update ID as there isn't any - // reason to. - // If we were to generate a new ChannelMonitor update ID here and then - // crash before the user finishes block connect we'd end up force-closing - // this channel as well. On the flip side, there's no harm in restarting - // without the new monitor persisted - we'll end up right back here on - // restart. - let previous_channel_id = claimable_htlc.prev_hop.channel_id; - let peer_node_id = monitor.get_counterparty_node_id(); - { - let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap(); - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - if let Some(channel) = peer_state - .channel_by_id - .get_mut(&previous_channel_id) - .and_then(Channel::as_funded_mut) - { - let logger = WithChannelContext::from( - &channel_manager.logger, - &channel.context, - Some(payment_hash), - ); - channel - .claim_htlc_while_disconnected_dropping_mon_update_legacy( - claimable_htlc.prev_hop.htlc_id, - payment_preimage, - &&logger, - ); - } - } - if let Some(previous_hop_monitor) = - args.channel_monitors.get(&claimable_htlc.prev_hop.channel_id) + processed_claims.insert(payment_claim.mpp_parts); + } + } else { + let per_peer_state = channel_manager.per_peer_state.read().unwrap(); + let mut claimable_payments = 
channel_manager.claimable_payments.lock().unwrap(); + let payment = claimable_payments.claimable_payments.remove(&payment_hash); + mem::drop(claimable_payments); + if let Some(payment) = payment { + log_info!(channel_manager.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash); + let mut claimable_amt_msat = 0; + let mut receiver_node_id = Some(our_network_pubkey); + let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret; + if phantom_shared_secret.is_some() { + let phantom_pubkey = channel_manager + .node_signer + .get_node_id(Recipient::PhantomNode) + .expect("Failed to get node_id for phantom node recipient"); + receiver_node_id = Some(phantom_pubkey) + } + for claimable_htlc in &payment.htlcs { + claimable_amt_msat += claimable_htlc.value; + + // Add a holding-cell claim of the payment to the Channel, which should be + // applied ~immediately on peer reconnection. Because it won't generate a + // new commitment transaction we can just provide the payment preimage to + // the corresponding ChannelMonitor and nothing else. + // + // We do so directly instead of via the normal ChannelMonitor update + // procedure as the ChainMonitor hasn't yet been initialized, implying + // we're not allowed to call it directly yet. Further, we do the update + // without incrementing the ChannelMonitor update ID as there isn't any + // reason to. + // If we were to generate a new ChannelMonitor update ID here and then + // crash before the user finishes block connect we'd end up force-closing + // this channel as well. On the flip side, there's no harm in restarting + // without the new monitor persisted - we'll end up right back here on + // restart. 
+ let previous_channel_id = claimable_htlc.prev_hop.channel_id; + { + let peer_state_mutex = + per_peer_state.get(&counterparty_node_id).unwrap(); + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let Some(channel) = peer_state + .channel_by_id + .get_mut(&previous_channel_id) + .and_then(Channel::as_funded_mut) { - // Note that this is unsafe as we no longer require the - // `ChannelMonitor`s to be re-persisted prior to this - // `ChannelManager` being persisted after we get started running. - // If this `ChannelManager` gets persisted first then we crash, we - // won't have the `claimable_payments` entry we need to re-enter - // this code block, causing us to not re-apply the preimage to this - // `ChannelMonitor`. - // - // We should never be here with modern payment claims, however, as - // they should always include the HTLC list. Instead, this is only - // for nodes during upgrade, and we explicitly require the old - // persistence semantics on upgrade in the release notes. 
- previous_hop_monitor.provide_payment_preimage_unsafe_legacy( - &payment_hash, - &payment_preimage, - &channel_manager.tx_broadcaster, - &channel_manager.fee_estimator, + let logger = WithChannelContext::from( &channel_manager.logger, + &channel.context, + Some(payment_hash), + ); + channel.claim_htlc_while_disconnected_dropping_mon_update_legacy( + claimable_htlc.prev_hop.htlc_id, + payment_preimage, + &&logger, ); } } - let mut pending_events = channel_manager.pending_events.lock().unwrap(); - let payment_id = - payment.inbound_payment_id(&inbound_payment_id_secret.unwrap()); - let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(); - let sender_intended_total_msat = payment.onion_fields.total_mpp_amount_msat; - pending_events.push_back(( - events::Event::PaymentClaimed { - receiver_node_id, - payment_hash, - purpose: payment.purpose, - amount_msat: claimable_amt_msat, - htlcs, - sender_intended_total_msat: Some(sender_intended_total_msat), - onion_fields: Some(payment.onion_fields), - payment_id: Some(payment_id), - }, - // Note that we don't bother adding a EventCompletionAction here to - // ensure the `PaymentClaimed` event is durable processed as this - // should only be hit for particularly old channels and we don't have - // enough information to generate such an action. - None, - )); + if let Some(previous_hop_monitor) = + args.channel_monitors.get(&claimable_htlc.prev_hop.channel_id) + { + // Note that this is unsafe as we no longer require the + // `ChannelMonitor`s to be re-persisted prior to this + // `ChannelManager` being persisted after we get started running. + // If this `ChannelManager` gets persisted first then we crash, we + // won't have the `claimable_payments` entry we need to re-enter + // this code block, causing us to not re-apply the preimage to this + // `ChannelMonitor`. + // + // We should never be here with modern payment claims, however, as + // they should always include the HTLC list. 
Instead, this is only
+ for nodes during upgrade, and we explicitly require the old
+ persistence semantics on upgrade in the release notes.
+ previous_hop_monitor.provide_payment_preimage_unsafe_legacy(
+ &payment_hash,
+ &payment_preimage,
+ &channel_manager.tx_broadcaster,
+ &channel_manager.fee_estimator,
+ &channel_manager.logger,
+ );
+ }
 }
+ let mut pending_events = channel_manager.pending_events.lock().unwrap();
+ let payment_id =
+ payment.inbound_payment_id(&inbound_payment_id_secret.unwrap());
+ let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
+ let sender_intended_total_msat = payment.onion_fields.total_mpp_amount_msat;
+ pending_events.push_back((
+ events::Event::PaymentClaimed {
+ receiver_node_id,
+ payment_hash,
+ purpose: payment.purpose,
+ amount_msat: claimable_amt_msat,
+ htlcs,
+ sender_intended_total_msat: Some(sender_intended_total_msat),
+ onion_fields: Some(payment.onion_fields),
+ payment_id: Some(payment_id),
+ },
+ // Note that we don't bother adding a EventCompletionAction here to
+ // ensure the `PaymentClaimed` event is durable processed as this
+ // should only be hit for particularly old channels and we don't have
+ // enough information to generate such an action.
+ None,
+ ));
 }
 }
 }

From 4d316b2ea3f4b74258772ff42301892c3284f03e Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Wed, 4 Mar 2026 15:32:45 -0500
Subject: [PATCH 10/12] Avoid redundant closed channel preimage updates

Previously, if we restart with the ChannelManager behind a ChannelMonitor, and that monitor has already claimed an HTLC but the manager thinks the HTLC is yet-to-be-claimed, we will generate a redundant preimage monitor update. That was fine since the manager being behind the monitor is rare, but in upcoming commits we are moving towards not persisting the ChannelManager's pending_claiming_payments map, and instead rebuilding its contents from the monitors.
As a consequence of these changes, our existing safeguard that prevents generating redundant preimage monitor updates in the case that the manager is *not* behind the monitor will no longer work, because we can no longer check the pending_claiming_payments map to short circuit the logic that generates the (possibly redundant) preimage monitor updates. So here we implement more rigorous checks to see whether a preimage update for a closed channel is redundant or not before generating it, which prevents the current manager-behind cases as well as upcoming cases when we start reconstructing the pending_claiming_payments map. --- lightning/src/ln/channelmanager.rs | 85 +++++++++++++++++++++--------- lightning/src/ln/monitor_tests.rs | 7 ++- lightning/src/ln/reload_tests.rs | 21 +++++--- 3 files changed, 78 insertions(+), 35 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 562fa62c0ca..902bd618e8c 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9506,7 +9506,43 @@ impl< } } + // Below, we always queue up the monitor update completion action because we don't have any + // idea if it's duplicative. This may result in a duplicate `Event`, but note that `Event`s are + // generally always allowed to be duplicative (and it's specifically noted in + // `PaymentForwarded`). + let (action_opt, raa_blocker_opt) = completion_action(None, false); + + let needs_post_close_monitor_update = + raa_blocker_opt.as_ref().map_or(true, |raa_blocker| match raa_blocker { + RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim } => { + // If this monitor already has the preimage, there's no need to generate a redundant update. + let claim = pending_claim.0.lock().unwrap(); + claim + .channels_without_preimage + .contains(&(prev_hop.counterparty_node_id, chan_id)) + }, + RAAMonitorUpdateBlockingAction::ForwardedPaymentInboundClaim { .. 
} => true, + }); + let peer_state = &mut *peer_state_lock; + if let Some(raa_blocker) = raa_blocker_opt { + peer_state + .actions_blocking_raa_monitor_updates + .entry(prev_hop.channel_id) + .or_default() + .push(raa_blocker); + } + + if !needs_post_close_monitor_update { + // If there's no need for a monitor update, just run the (possibly duplicative) completion + // action. + if let Some(action) = action_opt { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_monitor_update_completion_actions(core::iter::once(action)); + return; + } + } let update_id = if let Some(latest_update_id) = peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id) @@ -9530,21 +9566,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ channel_id: Some(prev_hop.channel_id), }; - // We don't have any idea if this is a duplicate claim without interrogating the - // `ChannelMonitor`, so we just always queue up the completion action after the - // `ChannelMonitorUpdate` we're about to generate. This may result in a duplicate `Event`, - // but note that `Event`s are generally always allowed to be duplicative (and it's - // specifically noted in `PaymentForwarded`). - let (action_opt, raa_blocker_opt) = completion_action(None, false); - - if let Some(raa_blocker) = raa_blocker_opt { - peer_state - .actions_blocking_raa_monitor_updates - .entry(prev_hop.channel_id) - .or_default() - .push(raa_blocker); - } - // Given the fact that we're in a bit of a weird edge case, its worth hashing the preimage // to include the `payment_hash` in the log metadata here. 
let payment_hash = payment_preimage.into(); @@ -19820,6 +19841,15 @@ impl< ) }); + // Build the set of channels where the preimage is durably persisted, for use below + let mut channels_with_durable_preimage: HashSet<(ChannelId, PaymentHash)> = new_hash_set(); + for (channel_id, monitor) in args.channel_monitors.iter() { + for (payment_hash, (_, claims)) in monitor.get_stored_preimages() { + if !claims.is_empty() { + channels_with_durable_preimage.insert((*channel_id, payment_hash)); + } + } + } for (channel_id, counterparty_node_id, payment_hash, payment_preimage, payment_claims) in monitor_preimages { @@ -19890,20 +19920,27 @@ impl< } } - let mut channels_without_preimage = payment_claim - .mpp_parts - .iter() - .map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.channel_id)) - .collect::>(); + let mut channels_with_preimage = Vec::new(); + let mut channels_without_preimage = Vec::new(); + for htlc_info in payment_claim.mpp_parts.iter() { + if channels_with_durable_preimage + .contains(&(htlc_info.channel_id, payment_hash)) + { + channels_with_preimage + .push((htlc_info.counterparty_node_id, htlc_info.channel_id)); + } else { + channels_without_preimage + .push((htlc_info.counterparty_node_id, htlc_info.channel_id)); + } + } + // If we have multiple MPP parts which were received over the same channel, // we only track it once as once we get a preimage durably in the // `ChannelMonitor` it will be used for all HTLCs with a matching hash. 
channels_without_preimage.sort_unstable(); channels_without_preimage.dedup(); - let pending_claims = PendingMPPClaim { - channels_without_preimage, - channels_with_preimage: Vec::new(), - }; + let pending_claims = + PendingMPPClaim { channels_without_preimage, channels_with_preimage }; let pending_claim_ptr_opt = Some(Arc::new(Mutex::new(pending_claims))); // While it may be duplicative to generate a PaymentClaimed here, trying to diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 0527010102b..74255749be2 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -3397,10 +3397,9 @@ fn test_claim_event_never_handled() { check_added_monitors(&nodes[1], 0); expect_payment_claimed!(nodes[1], payment_hash_a, 1_000_000); - // The reload logic spuriously generates 2 redundant payment preimage-containing - // `ChannelMonitorUpdate`s, plus we get a monitor update once the PaymentClaimed event is - // processed. - check_added_monitors(&nodes[1], 3); + // One monitor update for the outdated channel force-closure, one for the PaymentClaimed event + // being handled + check_added_monitors(&nodes[1], 2); } fn do_test_lost_preimage_monitor_events(on_counterparty_tx: bool, p2a_anchor: bool) { diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 264720b16f7..e8db75c17a8 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -853,14 +853,12 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest if persist_both_monitors { if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); } if let Event::PaymentClaimed { amount_msat: 15_000_000, .. } = events[3] { } else { panic!(); } - // 4 monitors for preimage updates + 1 for InboundPaymentClaimed marking the payment as - // claimed in the closed channel's monitor. 
- check_added_monitors(&nodes[3], 5); + // One update per channel closure + an update for PaymentClaimed being processed + check_added_monitors(&nodes[3], 3); } else { if let Event::PaymentClaimed { amount_msat: 15_000_000, .. } = events[2] { } else { panic!(); } - // Only one channel closed; the durable_preimage_channel is the live one, so no extra - // InboundPaymentClaimed update is generated. - check_added_monitors(&nodes[3], 3); + // One update for channel closure, one for preimage replay to non-persisted monitor + check_added_monitors(&nodes[3], 2); } // Now that we've processed background events, the preimage should have been copied into the @@ -929,10 +927,19 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest } #[test] -fn test_partial_claim_before_restart() { +fn test_partial_claim_before_restart_a() { do_test_partial_claim_before_restart(false, false); +} +#[test] +fn test_partial_claim_before_restart_b() { do_test_partial_claim_before_restart(false, true); +} +#[test] +fn test_partial_claim_before_restart_c() { do_test_partial_claim_before_restart(true, false); +} +#[test] +fn test_partial_claim_before_restart_d() { do_test_partial_claim_before_restart(true, true); } From 1cb6e766fe7dcfcc27554a4e5b72a7bfdd215f75 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 4 Mar 2026 16:15:01 -0500 Subject: [PATCH 11/12] Reconstruct pending_claiming_payments from monitor data We are working on reducing reliance on regular persistence of the ChannelManager, and instead reconstructing (at least parts of) it using ChannelMonitor data on startup. As part of this, here we stop persisting the manager's pending_claiming_payments map, and reconstruct it from ChannelMonitor data. We already had most of the logic to do this that was in place for cases where the manager has fallen behind the monitors. The only missing piece was checking in-flight monitor updates in case they have preimages that tell us we're mid-claim. 
--- lightning/src/ln/channelmanager.rs | 35 ++++- lightning/src/ln/reload_tests.rs | 215 +++++++++++++++++++++++++++++ 2 files changed, 248 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 902bd618e8c..411c2764832 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18499,6 +18499,33 @@ impl< is_connected: false, }; + // Extract preimage data from in_flight_monitor_updates before it's consumed by the loop below. + // We need this for reconstructing pending_claiming_payments claims on restart. + let in_flight_preimages: Vec<_> = in_flight_monitor_updates + .iter() + .flat_map(|((counterparty_id, channel_id), updates)| { + updates.iter().flat_map(move |update| { + update.updates.iter().filter_map(move |step| { + if let ChannelMonitorUpdateStep::PaymentPreimage { + payment_preimage, + payment_info: Some(details), + } = step + { + Some(( + *channel_id, + *counterparty_id, + (*payment_preimage).into(), + *payment_preimage, + vec![details.clone()], + )) + } else { + None + } + }) + }) + }) + .collect(); + const MAX_ALLOC_SIZE: usize = 1024 * 64; let mut failed_htlcs = Vec::new(); let channel_count = channels.len(); @@ -19778,7 +19805,7 @@ impl< decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, - pending_claiming_payments: pending_claiming_payments_legacy, + pending_claiming_payments: new_hash_map(), }), outbound_scid_aliases: Mutex::new(outbound_scid_aliases), short_to_chan_info: FairRwLock::new(short_to_chan_info), @@ -19850,8 +19877,12 @@ impl< } } } + + // Because we are rebuilding `ClaimablePayments::pending_claiming_payments` here, we need to + // iterate over all the preimages in all the monitors as well as the preimages in in-flight + // monitor updates to get a complete picture of which channels/payments are mid-claim. 
for (channel_id, counterparty_node_id, payment_hash, payment_preimage, payment_claims) in - monitor_preimages + monitor_preimages.chain(in_flight_preimages.into_iter()) { // If we have unresolved inbound committed HTLCs that were already forwarded to the // outbound edge and removed via claim, we need to make sure to claim them backwards via diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index e8db75c17a8..54531c23afa 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -18,6 +18,7 @@ use crate::routing::router::{PaymentParameters, RouteParameters}; use crate::sign::EntropySource; use crate::chain::transaction::OutPoint; use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; +use crate::ln::chan_utils::HTLCClaim; use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder}; use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::msgs; @@ -2247,3 +2248,217 @@ fn test_reload_with_mpp_claims_on_same_channel() { // nodes[0] should now have received both fulfills and generate PaymentSent. expect_payment_sent(&nodes[0], payment_preimage, None, true, true); } + +#[test] +fn test_reload_with_in_flight_preimage_claim() { + do_test_reload_with_in_flight_preimage_claim(false); + do_test_reload_with_in_flight_preimage_claim(true); +} + +fn do_test_reload_with_in_flight_preimage_claim(close_channel: bool) { + // Test that if a node receives a payment and calls `claim_funds`, but the + // `ChannelMonitorUpdate` containing the preimage is still in-flight (not yet persisted), + // then after a restart the payment claim completes correctly using the preimage from the + // in-flight monitor update. + // + // If close_channel is set, the channel is force-closed before reload to test that in-flight + // monitor updates are preserved across channel closure. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let persister_2; + let new_chain_monitor_2; + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes_1_deserialized; + let nodes_1_deserialized_2; + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + + let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; + + // Send a payment from nodes[0] to nodes[1]. + let amt_msat = 1_000_000; + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], amt_msat); + send_along_route_with_secret( + &nodes[0], route, &[&[&nodes[1]]], amt_msat, payment_hash, payment_secret, + ); + + // Serialize the monitor before claiming so it doesn't have the preimage update. + let mon_serialized_pre_claim = get_monitor!(nodes[1], chan_id).encode(); + + // Set the persister to return InProgress so the preimage monitor update will be stored as + // in-flight. + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + + nodes[1].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[1], 1); + + // The PaymentClaimed event is held back until the monitor update completes. + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + + // Disconnect peers before reload/close. + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + let (commitment_tx, coinbase_tx) = if close_channel { + // Provide anchor reserves for fee bumping (anchors are enabled by default). 
+ let coinbase_tx = provide_anchor_reserves(&nodes); + + // Force close the channel - the in-flight preimage update should be preserved + nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &node_0_id, "test".to_string()).unwrap(); + check_closed_broadcast(&nodes[1], 1, false); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message: "test".to_string() }; + check_closed_event(&nodes[1], 1, reason, &[node_0_id], 100_000); + // Handle the bump event to broadcast the commitment tx (anchors are enabled by default). + handle_bump_close_event(&nodes[1]); + let txn = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(txn.len(), 1); + (Some(txn.into_iter().next().unwrap()), Some(coinbase_tx)) + } else { + (None, None) + }; + + // Serialize the ChannelManager containing the in-flight preimage monitor update. + let node_1_serialized = nodes[1].node.encode(); + + reload_node!( + nodes[1], + node_1_serialized, + &[&mon_serialized_pre_claim], + persister, + new_chain_monitor, + nodes_1_deserialized + ); + + // The PaymentClaimed event should be regenerated from the in-flight update. + expect_payment_claimed!(nodes[1], payment_hash, amt_msat); + + if close_channel { + check_added_monitors(&nodes[1], 4); + { + let monitor_updates = nodes[1].chain_monitor.monitor_updates.lock().unwrap(); + let updates = monitor_updates.get(&chan_id).unwrap(); + for (i, update) in updates.iter().rev().take(4).enumerate() { + match i { + 0 => { + // The latest update should be because we processed PaymentClaimed on a closed channel. + assert_eq!(update.updates.len(), 1); + assert!(matches!(update.updates[0], ChannelMonitorUpdateStep::InboundPaymentClaimed { .. 
})); + }, + 1 => { + // Because pre-reload our preimage update was in-flight, we will still generate a + // redundant one on startup + assert_eq!(update.updates.len(), 1); + assert!(matches!(update.updates[0], ChannelMonitorUpdateStep::PaymentPreimage { payment_info: None, .. })) + }, + 2 => { + // The force close update + assert_eq!(update.updates.len(), 1); + assert!(matches!(update.updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true })); + }, + 3 => { + // The original in-flight claim with full payment info and counterparty commitment + assert_eq!(update.updates.len(), 2); + assert!(matches!(update.updates[0], ChannelMonitorUpdateStep::PaymentPreimage { payment_info: Some(_), .. })); + assert!(matches!(update.updates[1], ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { .. })); + }, + _ => panic!("Unexpected update index"), + } + } + } + } else { + check_added_monitors(&nodes[1], 1); + { + let monitor_updates = nodes[1].chain_monitor.monitor_updates.lock().unwrap(); + let updates = monitor_updates.get(&chan_id).unwrap(); + let update = updates.last().unwrap(); + assert_eq!(update.updates.len(), 2); + assert!(matches!(update.updates[0], ChannelMonitorUpdateStep::PaymentPreimage { payment_info: Some(_), .. })); + assert!(matches!(update.updates[1], ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { .. })); + } + } + + // Verify the monitor now has the preimage (the in-flight update was applied during reload). + assert!( + get_monitor!(nodes[1], chan_id).test_get_all_stored_preimages().contains_key(&payment_hash), + "Monitor should have preimage after in-flight update replay" + ); + + // Second reload to test for redundant PaymentClaimed events. 
+ let node_1_serialized_2 = nodes[1].node.encode(); + let mon_serialized_2 = get_monitor!(nodes[1], chan_id).encode(); + + reload_node!( + nodes[1], + node_1_serialized_2, + &[&mon_serialized_2], + persister_2, + new_chain_monitor_2, + nodes_1_deserialized_2 + ); + + // The second reload should not replay any monitor updates (they were already applied). + check_added_monitors(&nodes[1], 0); + + if !close_channel { + // If the channel is still open, there will be a redundant PaymentClaimed event generated each + // restart until the HTLC is removed. + expect_payment_claimed!(nodes[1], payment_hash, amt_msat); + + // Complete the payment. Use pending_htlc_claims instead of pending_cell_htlc_claims + // because the latter expects a monitor update, but the claim is already in the monitor. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_htlc_claims = (0, 1); + reconnect_nodes(reconnect_args); + + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); + } else { + // No redundant PaymentClaimed event. + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + + // Mine the commitment tx on both nodes so nodes[0] sees the channel is closed. + let commitment_tx = commitment_tx.unwrap(); + let coinbase_tx = coinbase_tx.unwrap(); + mine_transaction(&nodes[0], &commitment_tx); + mine_transaction(&nodes[1], &commitment_tx); + + // Peers are disconnected, so no error message is sent. + check_closed_broadcast(&nodes[0], 1, false); + check_added_monitors(&nodes[0], 1); + check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[node_1_id], 100_000); + + // nodes[1] broadcasts HTLC claim tx with the preimage. + // We get 2 BumpTransaction events: ChannelClose (for anchor) and HTLCResolution. 
+ let events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert!(events.len() <= 2); + for event in events { + if let Event::BumpTransaction(bump) = event { + nodes[1].bump_tx_handler.handle_event(&bump); + } else { + panic!("Unexpected event: {:?}", event); + } + } + // Filter for HTLC claim tx by checking for preimage in the witness. + let htlc_claim_txn: Vec<_> = nodes[1] + .tx_broadcaster + .txn_broadcast() + .into_iter() + .filter(|tx| { + tx.input.iter().any(|inp| { + matches!(HTLCClaim::from_witness(&inp.witness), Some(HTLCClaim::AcceptedPreimage)) + }) + }) + .collect(); + assert_eq!(htlc_claim_txn.len(), 1); + check_spends!(htlc_claim_txn[0], commitment_tx, coinbase_tx); + + // Mine the HTLC claim on nodes[0] - it learns the preimage and generates PaymentSent. + mine_transaction(&nodes[0], &htlc_claim_txn[0]); + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); + } +} From f9e4f7c480ea8a6c8e6df66248e3cb9c33b97416 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 4 Mar 2026 16:15:47 -0500 Subject: [PATCH 12/12] Remove legacy ChannelManagerData::pending_claiming_payments Cleanup from previous commit --- lightning/src/ln/channelmanager.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 411c2764832..c633e90c293 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17842,7 +17842,6 @@ pub(super) struct ChannelManagerData { pending_events_read: VecDeque<(events::Event, Option)>, highest_seen_timestamp: u32, pending_outbound_payments: HashMap, - pending_claiming_payments_legacy: HashMap, received_network_pubkey: Option, monitor_update_blocked_actions_per_peer: Vec<(PublicKey, BTreeMap>)>, @@ -18028,7 +18027,9 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> let mut amountless_claimable_htlc_onion_fields_legacy: Option< Vec>, > = None; - let mut 
pending_claiming_payments_legacy = Some(new_hash_map()); + // As of 0.4 we reconstruct this map using `ChannelMonitor` data on read. + let mut _pending_claiming_payments_legacy: Option> = + None; let mut monitor_update_blocked_actions_per_peer: Option>)>> = None; let mut events_override = None; @@ -18047,7 +18048,7 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> (1, pending_outbound_payments_no_retry, option), (2, pending_intercepted_htlcs_legacy, option), (3, pending_outbound_payments, option), - (4, pending_claiming_payments_legacy, option), + (4, _pending_claiming_payments_legacy, option), (5, received_network_pubkey, option), (6, monitor_update_blocked_actions_per_peer, option), (7, fake_scid_rand_bytes, option), @@ -18168,8 +18169,6 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> pending_intercepted_htlcs_legacy: pending_intercepted_htlcs_legacy .unwrap_or_else(new_hash_map), pending_outbound_payments, - pending_claiming_payments_legacy: pending_claiming_payments_legacy - .unwrap_or_else(new_hash_map), received_network_pubkey, monitor_update_blocked_actions_per_peer: monitor_update_blocked_actions_per_peer .unwrap_or_else(Vec::new), @@ -18473,7 +18472,6 @@ impl< highest_seen_timestamp, mut pending_intercepted_htlcs_legacy, pending_outbound_payments, - pending_claiming_payments_legacy, received_network_pubkey, monitor_update_blocked_actions_per_peer, mut fake_scid_rand_bytes,