17 changes: 17 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,20 @@
# 0.2.2 - Feb 6, 2025 - "An Async Splicing Production"

## API Updates
* The `SplicePrototype` feature flag has been updated to refer to feature bit
  63 - the same bit as `SpliceProduction`. This resolves a compatibility issue
  with eclair nodes, which were already using the previous `SplicePrototype`
  feature bit (155) for a pre-standardization version of splicing (#4387). See
  the feature-bit mapping sketch below.
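
To make the updated feature-flag test expectations later in this diff easier to
follow, here is a minimal, self-contained sketch (my illustration, not LDK's
`Features` API; `set_feature_bit` is a hypothetical helper) of how a feature bit
number maps into the byte-indexed `flags` vector asserted on in
`lightning-types/src/features.rs`: bit `n` lives in byte `n / 8` under mask
`1 << (n % 8)`, so the old prototype bit 155 needed a 20-byte vector while bit
63 fits in byte 7.

```rust
// Sketch only: the bit -> byte/mask mapping behind the features.rs assertions below.
fn set_feature_bit(flags: &mut Vec<u8>, bit: usize) {
    let byte = bit / 8;
    if flags.len() <= byte {
        flags.resize(byte + 1, 0);
    }
    flags[byte] |= 1u8 << (bit % 8);
}

fn main() {
    // Old `SplicePrototype` bit 155: byte 19, mask 0b00001000 (a 20-byte vector).
    let mut old = Vec::new();
    set_feature_bit(&mut old, 155);
    assert_eq!(old.len(), 20);
    assert_eq!(old[19], 0b00001000);

    // New bit 63 (shared with `SpliceProduction`): byte 7, mask 0b10000000.
    let mut new = Vec::new();
    set_feature_bit(&mut new, 63);
    assert_eq!(new.len(), 8);
    assert_eq!(new[7], 0b10000000);
}
```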

## Bug Fixes
* Async `ChannelMonitorUpdate` persistence operations which completed, but were
  not marked as complete in a persisted `ChannelManager` prior to restart,
  followed immediately by a block connection and then another restart, could
  result in some channel operations hanging, leading to force-closures (#4377).
* A debug assertion failure that was reachable when receiving an invalid
  splicing message from a peer has been fixed (#4383). See the sketch below.
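
The corresponding change in `lightning/src/ln/channel.rs` further down in this
diff swaps `.abs()` compared against `SignedAmount::MAX_MONEY` for
`.unsigned_abs()` compared against `Amount::MAX_MONEY`. As an illustration only
(using plain `i64` rather than the `bitcoin` amount types), the sketch below
shows the general `abs()` vs `unsigned_abs()` distinction in Rust, which is
presumably what makes the new check robust against extreme peer-supplied
values; `MAX_MONEY_SATS` is a stand-in constant introduced for this example.

```rust
// Illustration only (plain i64, not SignedAmount/Amount): why unsigned_abs() is
// the safer way to range-check a possibly attacker-controlled signed value.
fn main() {
    let contribution: i64 = i64::MIN;

    // unsigned_abs() never overflows: it returns a u64.
    assert_eq!(contribution.unsigned_abs(), 1u64 << 63);

    // abs() on i64::MIN overflows (panicking in debug builds); checked_abs()
    // surfaces that case without panicking.
    assert!(contribution.checked_abs().is_none());

    // The range check itself, in the style of the fixed code. MAX_MONEY_SATS is
    // a stand-in for this example (21M BTC in satoshis).
    const MAX_MONEY_SATS: u64 = 21_000_000 * 100_000_000;
    assert!(contribution.unsigned_abs() > MAX_MONEY_SATS);
}
```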


# 0.2.1 - Jan 29, 2025 - "Electrum Confirmations Logged"

## API Updates
2 changes: 1 addition & 1 deletion lightning-types/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "lightning-types"
version = "0.3.0"
version = "0.3.1"
authors = ["Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning/"
28 changes: 8 additions & 20 deletions lightning-types/src/features.rs
@@ -166,15 +166,15 @@ mod sealed {
// Byte 6
ZeroConf,
// Byte 7
Trampoline | SimpleClose | SpliceProduction,
Trampoline | SimpleClose | SpliceProduction | SplicePrototype,
// Byte 8 - 16
,,,,,,,,,
// Byte 17
AnchorZeroFeeCommitmentsStaging,
// Byte 18
,
// Byte 19
HtlcHold | SplicePrototype,
HtlcHold,
]
);
define_context!(
@@ -195,15 +195,15 @@ mod sealed {
// Byte 6
ZeroConf | Keysend,
// Byte 7
Trampoline | SimpleClose | SpliceProduction,
Trampoline | SimpleClose | SpliceProduction | SplicePrototype,
// Byte 8 - 16
,,,,,,,,,
// Byte 17
AnchorZeroFeeCommitmentsStaging,
// Byte 18
,
// Byte 19
HtlcHold | SplicePrototype,
HtlcHold,
// Byte 20 - 31
,,,,,,,,,,,,
// Byte 32
@@ -722,7 +722,7 @@ mod sealed {
requires_htlc_hold
);
define_feature!(
155, // Splice prototype feature bit as listed in https://github.com/lightning/bolts/issues/605#issuecomment-877237519.
63, // Actually the SpliceProduction feature
SplicePrototype,
[InitContext, NodeContext],
"Feature flags for channel splicing.",
@@ -1441,28 +1441,16 @@ mod tests {
// - onion_messages
// - option_channel_type | option_scid_alias
// - option_zeroconf
// - option_simple_close | option_splice
assert_eq!(node_features.flags.len(), 20);
// - option_simple_close
assert_eq!(node_features.flags.len(), 8);
assert_eq!(node_features.flags[0], 0b00000001);
assert_eq!(node_features.flags[1], 0b01010001);
assert_eq!(node_features.flags[2], 0b10001010);
assert_eq!(node_features.flags[3], 0b00001010);
assert_eq!(node_features.flags[4], 0b10001000);
assert_eq!(node_features.flags[5], 0b10100000);
assert_eq!(node_features.flags[6], 0b00001000);
assert_eq!(node_features.flags[7], 0b00100000);
assert_eq!(node_features.flags[8], 0b00000000);
assert_eq!(node_features.flags[9], 0b00000000);
assert_eq!(node_features.flags[10], 0b00000000);
assert_eq!(node_features.flags[11], 0b00000000);
assert_eq!(node_features.flags[12], 0b00000000);
assert_eq!(node_features.flags[13], 0b00000000);
assert_eq!(node_features.flags[14], 0b00000000);
assert_eq!(node_features.flags[15], 0b00000000);
assert_eq!(node_features.flags[16], 0b00000000);
assert_eq!(node_features.flags[17], 0b00000000);
assert_eq!(node_features.flags[18], 0b00000000);
assert_eq!(node_features.flags[19], 0b00001000);
assert_eq!(node_features.flags[7], 0b10100000);
}

// Check that cleared flags are kept blank when converting back:
2 changes: 1 addition & 1 deletion lightning/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "lightning"
version = "0.2.1"
version = "0.2.2"
authors = ["Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning/"
73 changes: 33 additions & 40 deletions lightning/src/ln/chanmon_update_fail_tests.rs

Large diffs are not rendered by default.

8 changes: 4 additions & 4 deletions lightning/src/ln/channel.rs
@@ -2521,8 +2521,8 @@ impl FundingScope {
where
SP::Target: SignerProvider,
{
debug_assert!(our_funding_contribution.abs() <= SignedAmount::MAX_MONEY);
debug_assert!(their_funding_contribution.abs() <= SignedAmount::MAX_MONEY);
debug_assert!(our_funding_contribution.unsigned_abs() <= Amount::MAX_MONEY);
debug_assert!(their_funding_contribution.unsigned_abs() <= Amount::MAX_MONEY);

let post_channel_value = prev_funding.compute_post_splice_value(
our_funding_contribution.to_sat(),
@@ -12137,15 +12137,15 @@ where
fn validate_splice_contributions(
&self, our_funding_contribution: SignedAmount, their_funding_contribution: SignedAmount,
) -> Result<(), String> {
if our_funding_contribution.abs() > SignedAmount::MAX_MONEY {
if our_funding_contribution.unsigned_abs() > Amount::MAX_MONEY {
return Err(format!(
"Channel {} cannot be spliced; our {} contribution exceeds the total bitcoin supply",
self.context.channel_id(),
our_funding_contribution,
));
}

if their_funding_contribution.abs() > SignedAmount::MAX_MONEY {
if their_funding_contribution.unsigned_abs() > Amount::MAX_MONEY {
return Err(format!(
"Channel {} cannot be spliced; their {} contribution exceeds the total bitcoin supply",
self.context.channel_id(),
102 changes: 75 additions & 27 deletions lightning/src/ln/channelmanager.rs
Expand Up @@ -1275,7 +1275,11 @@ enum BackgroundEvent {
/// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have
/// them marked pending, thus we need to run any [`MonitorUpdateCompletionAction`] (s) pending
/// on a channel.
MonitorUpdatesComplete { counterparty_node_id: PublicKey, channel_id: ChannelId },
MonitorUpdatesComplete {
counterparty_node_id: PublicKey,
channel_id: ChannelId,
highest_update_id_completed: u64,
},
}

/// A pointer to a channel that is unblocked when an event is surfaced
@@ -8025,9 +8029,11 @@ where
/// Free the background events, generally called from [`PersistenceNotifierGuard`] constructors.
///
/// Expects the caller to have a total_consistency_lock read lock.
#[rustfmt::skip]
fn process_background_events(&self) -> NotifyOption {
debug_assert_ne!(self.total_consistency_lock.held_by_thread(), LockHeldState::NotHeldByThread);
debug_assert_ne!(
self.total_consistency_lock.held_by_thread(),
LockHeldState::NotHeldByThread
);

self.background_events_processed_since_startup.store(true, Ordering::Release);

@@ -8039,11 +8045,34 @@

for event in background_events.drain(..) {
match event {
BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id,
funding_txo,
channel_id,
update,
} => {
self.apply_post_close_monitor_update(
counterparty_node_id,
channel_id,
funding_txo,
update,
);
},
BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
self.channel_monitor_updated(&channel_id, None, &counterparty_node_id);
BackgroundEvent::MonitorUpdatesComplete {
counterparty_node_id,
channel_id,
highest_update_id_completed,
} => {
// Now that we can finally handle the background event, remove all in-flight
// monitor updates for this channel that we've known to complete, as they have
// already been persisted to the monitor and can be applied to our internal
// state such that the channel resumes operation if no new updates have been
// made since.
self.channel_monitor_updated(
&channel_id,
Some(highest_update_id_completed),
&counterparty_node_id,
);
},
}
}
@@ -17224,39 +17253,58 @@
($counterparty_node_id: expr, $chan_in_flight_upds: expr, $monitor: expr,
$peer_state: expr, $logger: expr, $channel_info_log: expr
) => { {
// When all in-flight updates have completed after we were last serialized, we
// need to remove them. However, we can't guarantee that the next serialization
// will have happened after processing the
// `BackgroundEvent::MonitorUpdatesComplete`, so removing them now could lead to the
// channel never being resumed as the event would not be regenerated after another
// reload. At the same time, we don't want to resume the channel now because there
// may be post-update actions to handle. Therefore, we're forced to keep tracking
// the completed in-flight updates (but only when they have all completed) until we
// are processing the `BackgroundEvent::MonitorUpdatesComplete`.
let mut max_in_flight_update_id = 0;
let starting_len = $chan_in_flight_upds.len();
$chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
if $chan_in_flight_upds.len() < starting_len {
let num_updates_completed = $chan_in_flight_upds
.iter()
.filter(|update| {
max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
update.update_id <= $monitor.get_latest_update_id()
})
.count();
if num_updates_completed > 0 {
log_debug!(
$logger,
"{} ChannelMonitorUpdates completed after ChannelManager was last serialized",
starting_len - $chan_in_flight_upds.len()
num_updates_completed,
);
}
let all_updates_completed = num_updates_completed == $chan_in_flight_upds.len();

let funding_txo = $monitor.get_funding_txo();
for update in $chan_in_flight_upds.iter() {
log_debug!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
update.update_id, $channel_info_log, &$monitor.channel_id());
max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
pending_background_events.push(
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: $counterparty_node_id,
funding_txo: funding_txo,
channel_id: $monitor.channel_id(),
update: update.clone(),
});
}
if $chan_in_flight_upds.is_empty() {
// We had some updates to apply, but it turns out they had completed before we
// were serialized, we just weren't notified of that. Thus, we may have to run
// the completion actions for any monitor updates, but otherwise are done.
if all_updates_completed {
log_debug!($logger, "All monitor updates completed since the ChannelManager was last serialized");
pending_background_events.push(
BackgroundEvent::MonitorUpdatesComplete {
counterparty_node_id: $counterparty_node_id,
channel_id: $monitor.channel_id(),
highest_update_id_completed: max_in_flight_update_id,
});
} else {
$chan_in_flight_upds.retain(|update| {
let replay = update.update_id > $monitor.get_latest_update_id();
if replay {
log_debug!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
update.update_id, $channel_info_log, &$monitor.channel_id());
pending_background_events.push(
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: $counterparty_node_id,
funding_txo: funding_txo,
channel_id: $monitor.channel_id(),
update: update.clone(),
}
);
}
replay
});
$peer_state.closed_channel_monitor_update_ids.entry($monitor.channel_id())
.and_modify(|v| *v = cmp::max(max_in_flight_update_id, *v))
.or_insert(max_in_flight_update_id);
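
As a reading aid for the in-flight-update handling macro above, here is a
simplified, self-contained sketch (hypothetical types, not LDK's) of the
reload-time decision it implements: an in-flight update counts as completed
once its `update_id` is at or below the monitor's latest persisted update id;
if every in-flight update has completed, a `MonitorUpdatesComplete`-style event
carrying the highest completed id is queued (with the updates kept around for
tracking until that event runs), otherwise only the still-pending updates are
replayed.

```rust
// Simplified sketch (hypothetical types) of the reload-time replay decision.
#[derive(Clone)]
struct InFlightUpdate {
    update_id: u64,
}

enum ReplayAction {
    // Everything already completed; record the highest completed id so the
    // completion actions can run during background event processing.
    AllComplete { highest_update_id_completed: u64 },
    // Some updates are still pending; replay exactly those against the monitor.
    Replay(Vec<InFlightUpdate>),
}

fn plan_replay(in_flight: &[InFlightUpdate], monitor_latest_update_id: u64) -> ReplayAction {
    let max_in_flight_update_id = in_flight.iter().map(|u| u.update_id).max().unwrap_or(0);
    let all_completed = in_flight.iter().all(|u| u.update_id <= monitor_latest_update_id);
    if all_completed {
        ReplayAction::AllComplete { highest_update_id_completed: max_in_flight_update_id }
    } else {
        let pending = in_flight
            .iter()
            .filter(|u| u.update_id > monitor_latest_update_id)
            .cloned()
            .collect();
        ReplayAction::Replay(pending)
    }
}

fn main() {
    // The monitor persisted through update 5, but updates 4 and 5 were still
    // marked in-flight in the serialized ChannelManager: both completed, so only
    // the completion event is needed.
    let in_flight = vec![InFlightUpdate { update_id: 4 }, InFlightUpdate { update_id: 5 }];
    match plan_replay(&in_flight, 5) {
        ReplayAction::AllComplete { highest_update_id_completed } => {
            assert_eq!(highest_update_id_completed, 5);
        },
        ReplayAction::Replay(_) => panic!("nothing should need replaying"),
    }
}
```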
81 changes: 81 additions & 0 deletions lightning/src/ln/reload_tests.rs
@@ -1420,3 +1420,84 @@ fn test_peer_storage() {
assert!(res.is_err());
}

#[test]
fn test_hold_completed_inflight_monitor_updates_upon_manager_reload() {
// Test that if a `ChannelMonitorUpdate` completes after the `ChannelManager` is serialized,
// but before it is deserialized, we hold any completed in-flight updates until background event
// processing. Previously, we would remove completed monitor updates from
// `in_flight_monitor_updates` during deserialization, relying on
// [`ChannelManager::process_background_events`] to eventually be called before the
// `ChannelManager` is serialized again such that the channel is resumed and further updates can
// be made.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let (persister_a, persister_b);
let (chain_monitor_a, chain_monitor_b);

let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes_0_deserialized_a;
let nodes_0_deserialized_b;

let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

send_payment(&nodes[0], &[&nodes[1]], 1_000_000);

chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

// Send a payment that will be pending due to an async monitor update.
let (route, payment_hash, _, payment_secret) =
get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);
let payment_id = PaymentId(payment_hash.0);
let onion = RecipientOnionFields::secret_only(payment_secret);
nodes[0].node.send_payment_with_route(route, payment_hash, onion, payment_id).unwrap();
check_added_monitors(&nodes[0], 1);

assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

// Serialize the ChannelManager while the monitor update is still in-flight.
let node_0_serialized = nodes[0].node.encode();

// Now complete the monitor update by calling force_channel_monitor_updated.
// This updates the monitor's state, but the ChannelManager still thinks it's pending.
let (_, latest_update_id) = nodes[0].chain_monitor.get_latest_mon_update_id(chan_id);
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update_id);
let monitor_serialized_updated = get_monitor!(nodes[0], chan_id).encode();

// Reload the node with the updated monitor. Upon deserialization, the ChannelManager will
// detect that the monitor update completed (monitor's update_id >= the in-flight update_id)
// and queue a `BackgroundEvent::MonitorUpdatesComplete`.
nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
reload_node!(
nodes[0],
test_default_channel_config(),
&node_0_serialized,
&[&monitor_serialized_updated[..]],
persister_a,
chain_monitor_a,
nodes_0_deserialized_a
);

// If we serialize again, even though we haven't processed any background events yet, we should
// still see the `BackgroundEvent::MonitorUpdatesComplete` be regenerated on startup.
let node_0_serialized = nodes[0].node.encode();
reload_node!(
nodes[0],
test_default_channel_config(),
&node_0_serialized,
&[&monitor_serialized_updated[..]],
persister_b,
chain_monitor_b,
nodes_0_deserialized_b
);

// Reconnect the nodes. We should finally see the `update_add_htlc` go out, as the reconnection
// should first process `BackgroundEvent::MonitorUpdatesComplete`, allowing the channel to be
// resumed.
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
reconnect_args.pending_htlc_adds = (0, 1);
reconnect_nodes(reconnect_args);
}

5 changes: 5 additions & 0 deletions lightning/src/util/test_utils.rs
@@ -550,6 +550,11 @@ impl<'a> TestChainMonitor<'a> {
self.added_monitors.lock().unwrap().push((channel_id, monitor));
self.chain_monitor.load_existing_monitor(channel_id, new_monitor)
}

pub fn get_latest_mon_update_id(&self, channel_id: ChannelId) -> (u64, u64) {
let monitor_id_state = self.latest_monitor_update_id.lock().unwrap();
monitor_id_state.get(&channel_id).unwrap().clone()
}
}
impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
fn watch_channel(
Expand Down