Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions lightning/src/chain/channelmonitor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2104,6 +2104,33 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
self.inner.lock().unwrap().get_latest_update_id()
}

/// Forces the monitor's `latest_update_id` to the given value.
///
/// Used to bring a stale monitor back in sync with the `ChannelManager` when stale
/// channel monitors are deliberately accepted on startup (see
/// [`ChannelManagerReadArgs::accept_stale_channel_monitors`]).
///
/// # Safety
///
/// This bypasses the usual sequential `update_id` validation. It must only be called
/// when a stale monitor is knowingly accepted, e.g. after a migration or restore
/// replaced a newer monitor with older data. The monitor's commitment state stays
/// stale until the next real channel update (such as a fee update round-trip).
///
/// [`ChannelManagerReadArgs::accept_stale_channel_monitors`]: crate::ln::channelmanager::ChannelManagerReadArgs::accept_stale_channel_monitors
pub fn force_set_latest_update_id(&self, update_id: u64) {
	let mut locked_state = self.inner.lock().unwrap();
	locked_state.latest_update_id = update_id;
	// Wipe the counterparty commitment secret store so secrets received during
	// healing round-trips can seed a fresh, internally-consistent derivation tree.
	// Keeping the stale secrets would break cross-tree validation against newly
	// received ones (old and new indices live in different subtrees of the BOLT-3
	// tree). Dropping them forfeits punishment of revoked commitments predating
	// the stale point, but the monitor is missing the commitment transaction data
	// for that gap anyway, so those secrets carry no actionable security value.
	locked_state.commitment_secrets = CounterpartyCommitmentSecrets::new();
}

/// Gets the funding transaction outpoint of the channel this ChannelMonitor is monitoring for.
pub fn get_funding_txo(&self) -> OutPoint {
self.inner.lock().unwrap().get_funding_txo()
Expand Down
3 changes: 3 additions & 0 deletions lightning/src/ln/chan_utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -468,6 +468,9 @@ impl CounterpartyCommitmentSecrets {
let pos = Self::place_secret(idx);
for i in 0..pos {
let (old_secret, old_idx) = self.old_secrets[i as usize];
if old_idx == 1 << 48 {
continue; // Uninitialized slot — no real secret to validate against
}
if Self::derive_secret(secret, pos, old_idx) != old_secret {
return Err(());
}
Expand Down
52 changes: 38 additions & 14 deletions lightning/src/ln/channelmanager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16628,6 +16628,19 @@ pub struct ChannelManagerReadArgs<
/// This is not exported to bindings users because we have no HashMap bindings
pub channel_monitors:
HashMap<ChannelId, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,

/// If set to `true`, stale channel monitors will be accepted on startup instead of returning
/// [`DecodeError::DangerousValue`]. When a stale monitor is detected, its `update_id` will be
/// force-synced to match the `ChannelManager`'s expected value. The monitor's commitment state
/// remains stale until the next real channel update (e.g. a fee update round-trip).
///
/// Use this for recovery after monitor data was overwritten by a migration or backup restore.
/// The caller should trigger a commitment round-trip after startup (e.g. via `update_fee`)
/// to heal the monitor's commitment state and recover revocation secrets via the derivation
/// tree.
///
/// Default: `false`.
pub accept_stale_channel_monitors: bool,
}

impl<
Expand Down Expand Up @@ -16676,6 +16689,7 @@ where
channel_monitors: hash_map_from_iter(
channel_monitors.drain(..).map(|monitor| (monitor.channel_id(), monitor)),
),
accept_stale_channel_monitors: false,
}
}
}
Expand Down Expand Up @@ -17351,20 +17365,30 @@ where
if funded_chan.get_latest_unblocked_monitor_update_id()
> max_in_flight_update_id
{
// If the channel is ahead of the monitor, return DangerousValue:
log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
chan_id, monitor.get_latest_update_id(), max_in_flight_update_id);
log_error!(
logger,
" but the ChannelManager is at update_id {}.",
funded_chan.get_latest_unblocked_monitor_update_id()
);
log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
return Err(DecodeError::DangerousValue);
if args.accept_stale_channel_monitors {
let target_id = funded_chan.get_latest_unblocked_monitor_update_id();
log_warn!(logger,
"Accepting stale ChannelMonitor for channel {}: monitor at update_id {} \
but ChannelManager at {}. Forcing update_id sync. Monitor state will \
self-heal on next channel update.",
chan_id, monitor.get_latest_update_id(), target_id
);
monitor.force_set_latest_update_id(target_id);
} else {
log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
chan_id, monitor.get_latest_update_id(), max_in_flight_update_id);
log_error!(
logger,
" but the ChannelManager is at update_id {}.",
funded_chan.get_latest_unblocked_monitor_update_id()
);
log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
return Err(DecodeError::DangerousValue);
}
}
} else {
// We shouldn't have persisted (or read) any unfunded channel types so none should have been
Expand Down
2 changes: 2 additions & 0 deletions lightning/src/ln/functional_test_utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -908,6 +908,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
tx_broadcaster: &broadcaster,
logger: &self.logger,
channel_monitors,
accept_stale_channel_monitors: false,
},
)
.unwrap();
Expand Down Expand Up @@ -1361,6 +1362,7 @@ pub fn _reload_node<'a, 'b, 'c>(
tx_broadcaster: node.tx_broadcaster,
logger: node.logger,
channel_monitors,
accept_stale_channel_monitors: false,
},
)
.unwrap()
Expand Down
2 changes: 2 additions & 0 deletions lightning/src/ln/reload_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -437,6 +437,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
tx_broadcaster: nodes[0].tx_broadcaster,
logger: &logger,
channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(),
accept_stale_channel_monitors: false,
}) { } else {
panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return");
};
Expand All @@ -455,6 +456,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
tx_broadcaster: nodes[0].tx_broadcaster,
logger: &logger,
channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(),
accept_stale_channel_monitors: false,
}).unwrap();
nodes_0_deserialized = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
Expand Down