@@ -501,9 +501,11 @@ struct ClaimablePayments {
 /// for some reason. They are handled in timer_tick_occurred, so may be processed with
 /// quite some time lag.
 enum BackgroundEvent {
-	/// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
-	/// commitment transaction.
-	ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
+	/// Handle a ChannelMonitorUpdate
+	///
+	/// Note that any such events are lost on shutdown, so in general they must be updates which
+	/// are regenerated on startup.
+	MonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
 }
 
 #[derive(Debug)]
@@ -3774,7 +3776,7 @@ where
 
 		for event in background_events.drain(..) {
 			match event {
-				BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
+				BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
 					// The channel has already been closed, so no use bothering to care about the
 					// monitor updating completing.
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
@@ -5694,7 +5696,7 @@ where
 			if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
 				assert!(should_broadcast);
 			} else { unreachable!(); }
-			self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
+			self.pending_background_events.lock().unwrap().push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)));
 		}
 		self.finish_force_close_channel(failure);
 	}
@@ -7448,17 +7450,12 @@ where
 			}
 		}
 
-		let background_events = self.pending_background_events.lock().unwrap();
-		(background_events.len() as u64).write(writer)?;
-		for event in background_events.iter() {
-			match event {
-				BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)) => {
-					0u8.write(writer)?;
-					funding_txo.write(writer)?;
-					monitor_update.write(writer)?;
-				},
-			}
-		}
+		// LDK versions prior to 0.0.116 wrote the `pending_background_events`
+		// `MonitorUpdateRegeneratedOnStartup`s here, however there was never a reason to do so -
+		// the closing monitor updates were always effectively replayed on startup (either directly
+		// by calling `broadcast_latest_holder_commitment_txn` on a `ChannelMonitor` during
+		// deserialization or, in 0.0.115, by regenerating the monitor update itself).
+		0u64.write(writer)?;
 
 		// Prior to 0.0.111 we tracked node_announcement serials here, however that now happens in
 		// `PeerManager`, and thus we simply write the `highest_seen_timestamp` twice, which is
@@ -7773,7 +7770,7 @@ where
 						log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
 					let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
 					if let Some(monitor_update) = monitor_update {
-						pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
+						pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup(monitor_update));
 					}
 					failed_htlcs.append(&mut new_failed_htlcs);
 					channel_closures.push_back((events::Event::ChannelClosed {
@@ -7848,7 +7845,7 @@ where
 					update_id: CLOSED_CHANNEL_UPDATE_ID,
 					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
 				};
-				pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
+				pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
 			}
 		}
 
@@ -7905,13 +7902,11 @@ where
 		for _ in 0..background_event_count {
 			match <u8 as Readable>::read(reader)? {
 				0 => {
-					let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
-					if pending_background_events.iter().find(|e| {
-						let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
-						*pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
-					}).is_none() {
-						pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
-					}
+					// LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here,
+					// however we really don't (and never did) need them - we regenerate all
+					// on-startup monitor updates.
+					let _: OutPoint = Readable::read(reader)?;
+					let _: ChannelMonitorUpdate = Readable::read(reader)?;
 				}
 				_ => return Err(DecodeError::InvalidValue),
 			}