@@ -3923,11 +3923,10 @@ where
 	}
 
 	/// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
-	#[must_use]
 	fn apply_post_close_monitor_update(
 		&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
 		monitor_update: ChannelMonitorUpdate,
-	) -> ChannelMonitorUpdateStatus {
+	) {
 		// Note that there may be some post-close updates which need to be well-ordered with
 		// respect to the `update_id`, so we hold the `peer_state` lock here.
 		let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3938,16 +3937,21 @@ where
 		match peer_state.channel_by_id.entry(channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase) => {
 				if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
-					let completed = handle_new_monitor_update!(self, funding_txo,
+					handle_new_monitor_update!(self, funding_txo,
 						monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
-					return if completed { ChannelMonitorUpdateStatus::Completed } else { ChannelMonitorUpdateStatus::InProgress } ;
+					return;
 				} else {
 					debug_assert!(false, "We shouldn't have an update for a non-funded channel");
 				}
 			},
 			hash_map::Entry::Vacant(_) => {},
 		}
-		self.chain_monitor.update_channel(funding_txo, &monitor_update)
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
+
+		handle_new_monitor_update!(
+			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
+			logger, channel_id, POST_CHANNEL_CLOSE
+		);
 	}
 
 	/// When a channel is removed, two things need to happen:
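For context, a minimal, self-contained sketch of the pattern this hunk moves to, using hypothetical simplified types rather than LDK's actual `handle_new_monitor_update!` expansion: instead of returning a `ChannelMonitorUpdateStatus` to the caller, a post-close update is recorded as in-flight, keyed by the funding outpoint, and only dropped from that set once persistence completes.

```rust
// Sketch only: simplified stand-ins for LDK's types, illustrating in-flight
// tracking of a post-close monitor update rather than returning its status.
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Debug)]
enum UpdateStatus { Completed, InProgress }

#[derive(Clone, Debug)]
struct MonitorUpdate { update_id: u64 }

#[derive(Default)]
struct PeerState {
	// Keyed by funding outpoint in the real code; a String stands in here.
	in_flight_monitor_updates: HashMap<String, Vec<MonitorUpdate>>,
}

fn apply_post_close_update(
	peer_state: &mut PeerState, funding_txo: &str, update: MonitorUpdate,
	persist: impl Fn(&MonitorUpdate) -> UpdateStatus,
) {
	// Record the update as in-flight before handing it to the persister, so a
	// later completion can be matched back to it.
	peer_state.in_flight_monitor_updates
		.entry(funding_txo.to_owned())
		.or_insert_with(Vec::new)
		.push(update.clone());

	if persist(&update) == UpdateStatus::Completed {
		// Immediate completion: drop the in-flight entry again. (In the real
		// code, queued completion actions would also run at this point.)
		if let Some(updates) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) {
			updates.retain(|u| u.update_id != update.update_id);
		}
	}
	// Nothing is returned; an `InProgress` update simply stays in-flight.
}

fn main() {
	let mut peer_state = PeerState::default();
	apply_post_close_update(&mut peer_state, "txo:0", MonitorUpdate { update_id: 1 },
		|_| UpdateStatus::InProgress);
	assert_eq!(peer_state.in_flight_monitor_updates["txo:0"].len(), 1);
}
```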
@@ -3976,7 +3980,7 @@ where
 		}
 		if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
 			debug_assert!(false, "This should have been handled in `locked_close_channel`");
-			let _ = self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
+			self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
 		}
 		if self.background_events_processed_since_startup.load(Ordering::Acquire) {
 			// If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are
@@ -6293,9 +6297,7 @@ where
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
 				},
 				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
-					// The monitor update will be replayed on startup if it doesnt complete, so no
-					// use bothering to care about the monitor update completing.
-					let _ = self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
+					self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
 				},
 				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
 					let per_peer_state = self.per_peer_state.read().unwrap();
@@ -7226,32 +7228,31 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let payment_hash = payment_preimage.into();
 		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));
 
-		if !during_init {
-			if let Some(action) = action_opt {
-				log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
-					chan_id, action);
-				peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
-			}
+		if let Some(action) = action_opt {
+			log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
+				chan_id, action);
+			peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+		}
 
+		if !during_init {
 			handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
 		} else {
 			// If we're running during init we cannot update a monitor directly - they probably
 			// haven't actually been loaded yet. Instead, push the monitor update as a background
 			// event.
-			// TODO: Track this update as pending and only complete the completion action when it
-			// finishes.
+
+			let in_flight_updates = peer_state.in_flight_monitor_updates
+				.entry(prev_hop.funding_txo)
+				.or_insert_with(Vec::new);
+			in_flight_updates.push(preimage_update.clone());
+
 			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 				counterparty_node_id,
 				funding_txo: prev_hop.funding_txo,
 				channel_id: prev_hop.channel_id,
 				update: preimage_update,
 			};
 			self.pending_background_events.lock().unwrap().push(event);
-
-			mem::drop(peer_state);
-			mem::drop(per_peer_state);
-
-			self.handle_monitor_update_completion_actions(action_opt);
 		}
 	}
 
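The during-init branch above defers the actual monitor persistence to a `BackgroundEvent` (replayed by the background-event processing hunk earlier) while still recording the update as in-flight, so the completion action is no longer run immediately. A rough, self-contained sketch of that flow, again with hypothetical simplified types rather than LDK's:

```rust
// Sketch only: a monitor update queued as a background event during init and
// replayed after startup, while the update is tracked as in-flight throughout.

#[derive(Clone, Debug)]
struct MonitorUpdate { update_id: u64 }

#[derive(Debug)]
enum BackgroundEvent {
	MonitorUpdateRegeneratedOnStartup { channel_id: u64, update: MonitorUpdate },
}

#[derive(Default)]
struct Manager {
	during_init: bool,
	in_flight_updates: Vec<MonitorUpdate>,
	pending_background_events: Vec<BackgroundEvent>,
}

impl Manager {
	fn claim_on_closed_channel(&mut self, channel_id: u64, update: MonitorUpdate) {
		// The update is tracked as in-flight either way; completion actions only
		// run once it is later marked complete.
		self.in_flight_updates.push(update.clone());
		if self.during_init {
			// Monitors may not be loaded yet, so defer persistence to a
			// background event instead of applying the update directly.
			self.pending_background_events
				.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { channel_id, update });
		} else {
			// Outside init the update would be handed straight to the chain monitor.
			println!("persisting update {} now", update.update_id);
		}
	}

	fn process_background_events(&mut self) {
		for event in self.pending_background_events.drain(..) {
			match event {
				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { channel_id, update } => {
					// Replayed once startup has finished.
					println!("replaying update {} for channel {}", update.update_id, channel_id);
				}
			}
		}
	}
}

fn main() {
	let mut mgr = Manager { during_init: true, ..Default::default() };
	mgr.claim_on_closed_channel(42, MonitorUpdate { update_id: 7 });
	mgr.during_init = false;
	mgr.process_background_events();
}
```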