diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index e52b05036ec..5f1280db0c7 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -2104,6 +2104,33 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 		self.inner.lock().unwrap().get_latest_update_id()
 	}
 
+	/// Forces the monitor's `latest_update_id` to the given value.
+	///
+	/// This is used to resync a stale monitor with the `ChannelManager` when accepting stale
+	/// channel monitors on startup (see [`ChannelManagerReadArgs::accept_stale_channel_monitors`]).
+	///
+	/// # Warning
+	///
+	/// This skips the normal sequential `update_id` validation. Only use when deliberately
+	/// accepting a stale monitor, e.g. after a migration overwrote a newer monitor with older data.
+	/// The monitor's commitment state will remain stale until the next real channel update
+	/// (e.g. triggered by a fee update round-trip).
+	///
+	/// [`ChannelManagerReadArgs::accept_stale_channel_monitors`]: crate::ln::channelmanager::ChannelManagerReadArgs::accept_stale_channel_monitors
+	pub fn force_set_latest_update_id(&self, update_id: u64) {
+		let mut inner = self.inner.lock().unwrap();
+		inner.latest_update_id = update_id;
+		// Reset the counterparty commitment secrets store so that new secrets
+		// from healing round-trips can build a fresh, consistent derivation tree.
+		// The stale secrets would fail cross-tree validation against new secrets
+		// (the old and new indices occupy different subtrees in the BOLT-3 tree).
+		// Losing the old secrets means we cannot punish revoked commitments from
+		// before the stale point, but the monitor lacks commitment transaction data
+		// for the gap period regardless — the stale secrets provide no actionable
+		// security benefit.
+		inner.commitment_secrets = CounterpartyCommitmentSecrets::new();
+	}
+
 	/// Gets the funding transaction outpoint of the channel this ChannelMonitor is monitoring for.
 	pub fn get_funding_txo(&self) -> OutPoint {
 		self.inner.lock().unwrap().get_funding_txo()
diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs
index 13b39f6cabd..7e09c7794fe 100644
--- a/lightning/src/ln/chan_utils.rs
+++ b/lightning/src/ln/chan_utils.rs
@@ -468,6 +468,9 @@ impl CounterpartyCommitmentSecrets {
 		let pos = Self::place_secret(idx);
 		for i in 0..pos {
 			let (old_secret, old_idx) = self.old_secrets[i as usize];
+			if old_idx == 1 << 48 {
+				continue; // Uninitialized slot — no real secret to validate against
+			}
 			if Self::derive_secret(secret, pos, old_idx) != old_secret {
 				return Err(());
 			}
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 816eaee8db2..9d424ee6d06 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -16628,6 +16628,19 @@ pub struct ChannelManagerReadArgs<
 	/// This is not exported to bindings users because we have no HashMap bindings
 	pub channel_monitors:
 		HashMap<ChannelId, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
+
+	/// If set to `true`, stale channel monitors will be accepted on startup instead of returning
+	/// [`DecodeError::DangerousValue`]. When a stale monitor is detected, its `update_id` will be
+	/// force-synced to match the `ChannelManager`'s expected value. The monitor's commitment state
+	/// remains stale until the next real channel update (e.g. a fee update round-trip).
+	///
+	/// Use this for recovery after monitor data was overwritten by a migration or backup restore.
+	/// The caller should trigger a commitment round-trip after startup (e.g. via `update_fee`)
+	/// to heal the monitor's commitment state and recover revocation secrets via the derivation
+	/// tree.
+	///
+	/// Default: `false`.
+	pub accept_stale_channel_monitors: bool,
 }
 
 impl<
@@ -16676,6 +16689,7 @@ where
 			channel_monitors: hash_map_from_iter(
 				channel_monitors.drain(..).map(|monitor| (monitor.channel_id(), monitor)),
 			),
+			accept_stale_channel_monitors: false,
 		}
 	}
 }
@@ -17351,20 +17365,30 @@ where
 				if funded_chan.get_latest_unblocked_monitor_update_id()
 					> max_in_flight_update_id
 				{
-					// If the channel is ahead of the monitor, return DangerousValue:
-					log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
-					log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
-						chan_id, monitor.get_latest_update_id(), max_in_flight_update_id);
-					log_error!(
-						logger,
-						" but the ChannelManager is at update_id {}.",
-						funded_chan.get_latest_unblocked_monitor_update_id()
-					);
-					log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
-					log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
-					log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
-					log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
-					return Err(DecodeError::DangerousValue);
+					if args.accept_stale_channel_monitors {
+						let target_id = funded_chan.get_latest_unblocked_monitor_update_id();
+						log_warn!(logger,
+							"Accepting stale ChannelMonitor for channel {}: monitor at update_id {} \
+							but ChannelManager at {}. Forcing update_id sync. Monitor state will \
+							self-heal on next channel update.",
+							chan_id, monitor.get_latest_update_id(), target_id
+						);
+						monitor.force_set_latest_update_id(target_id);
+					} else {
+						log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
+						log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
+							chan_id, monitor.get_latest_update_id(), max_in_flight_update_id);
+						log_error!(
+							logger,
+							" but the ChannelManager is at update_id {}.",
+							funded_chan.get_latest_unblocked_monitor_update_id()
+						);
+						log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+						log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+						log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+						log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+						return Err(DecodeError::DangerousValue);
+					}
 				}
 			} else {
 				// We shouldn't have persisted (or read) any unfunded channel types so none should have been
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index bd4403fd3fe..1dcb7accb77 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -908,6 +908,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
 					tx_broadcaster: &broadcaster,
 					logger: &self.logger,
 					channel_monitors,
+					accept_stale_channel_monitors: false,
 				},
 			)
 			.unwrap();
@@ -1361,6 +1362,7 @@ pub fn _reload_node<'a, 'b, 'c>(
 			tx_broadcaster: node.tx_broadcaster,
 			logger: node.logger,
 			channel_monitors,
+			accept_stale_channel_monitors: false,
 		},
 	)
 	.unwrap()
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 9e169d176e6..f3b4aff8112 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -437,6 +437,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 		tx_broadcaster: nodes[0].tx_broadcaster,
 		logger: &logger,
 		channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(),
+		accept_stale_channel_monitors: false,
 	}) { } else {
 		panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return");
 	};
@@ -455,6 +456,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 		tx_broadcaster: nodes[0].tx_broadcaster,
 		logger: &logger,
 		channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(),
+		accept_stale_channel_monitors: false,
 	}).unwrap();
 	nodes_0_deserialized = nodes_0_deserialized_tmp;
 	assert!(nodes_0_read.is_empty());