@@ -3939,11 +3939,10 @@ where
 }

 /// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
-#[must_use]
 fn apply_post_close_monitor_update(
     &self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
     monitor_update: ChannelMonitorUpdate,
-) -> ChannelMonitorUpdateStatus {
+) {
     // Note that there may be some post-close updates which need to be well-ordered with
     // respect to the `update_id`, so we hold the `peer_state` lock here.
     let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3954,16 +3953,21 @@ where
     match peer_state.channel_by_id.entry(channel_id) {
         hash_map::Entry::Occupied(mut chan_phase) => {
             if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
-                let completed = handle_new_monitor_update!(self, funding_txo,
+                handle_new_monitor_update!(self, funding_txo,
                     monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
-                return if completed { ChannelMonitorUpdateStatus::Completed } else { ChannelMonitorUpdateStatus::InProgress };
+                return;
             } else {
                 debug_assert!(false, "We shouldn't have an update for a non-funded channel");
             }
         },
         hash_map::Entry::Vacant(_) => {},
     }
-    self.chain_monitor.update_channel(funding_txo, &monitor_update)
+    let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
+
+    handle_new_monitor_update!(
+        self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
+        logger, channel_id, POST_CHANNEL_CLOSE
+    );
 }

 /// When a channel is removed, two things need to happen:
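For context on the signature change above: `apply_post_close_monitor_update` no longer returns a `ChannelMonitorUpdateStatus` for the caller to interpret; both branches now go through `handle_new_monitor_update!` (using the `POST_CHANNEL_CLOSE` variant when no funded channel is found), which tracks in-progress updates itself. Below is a minimal standalone sketch of that pattern using hypothetical names (`Manager`, `apply_post_close_update`, `UpdateStatus`, `persist`), not LDK's real types or macros:

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct ChannelId(u64);

#[derive(Clone, Debug)]
struct MonitorUpdate { update_id: u64 }

#[derive(Clone, Copy, Debug)]
enum UpdateStatus { Completed, InProgress }

#[derive(Default)]
struct Manager {
    // Updates handed to persistence that have not yet completed, keyed by channel.
    in_flight: HashMap<ChannelId, Vec<MonitorUpdate>>,
}

impl Manager {
    // Stand-in for handing the update to the chain monitor; real persistence may be async.
    fn persist(&self, update: &MonitorUpdate) -> UpdateStatus {
        if update.update_id % 2 == 0 { UpdateStatus::Completed } else { UpdateStatus::InProgress }
    }

    // Returns `()`: an in-progress update is tracked here until `update_completed` fires,
    // so callers never need to branch on a status value.
    fn apply_post_close_update(&mut self, chan: ChannelId, update: MonitorUpdate) {
        match self.persist(&update) {
            UpdateStatus::Completed => self.run_completion_actions(chan),
            UpdateStatus::InProgress => self.in_flight.entry(chan).or_default().push(update),
        }
    }

    // Called once persistence signals completion for `update_id`.
    fn update_completed(&mut self, chan: ChannelId, update_id: u64) {
        let all_done = match self.in_flight.get_mut(&chan) {
            Some(updates) => {
                updates.retain(|u| u.update_id != update_id);
                updates.is_empty()
            },
            None => false,
        };
        if all_done { self.run_completion_actions(chan); }
    }

    fn run_completion_actions(&mut self, chan: ChannelId) {
        println!("completion actions for {:?}", chan);
    }
}

fn main() {
    let mut mgr = Manager::default();
    mgr.apply_post_close_update(ChannelId(1), MonitorUpdate { update_id: 1 }); // stays in flight
    mgr.update_completed(ChannelId(1), 1); // only now do the completion actions run
}

The point of the shape is that call sites can simply invoke the apply function and move on; ordering and completion are owned by the in-flight map and the completion callback rather than by each caller.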
@@ -3992,7 +3996,7 @@ where
     }
     if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
         debug_assert!(false, "This should have been handled in `locked_close_channel`");
-        let _ = self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
+        self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
     }
     if self.background_events_processed_since_startup.load(Ordering::Acquire) {
         // If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are
@@ -6309,9 +6313,7 @@ where
             let _ = self.chain_monitor.update_channel(funding_txo, &update);
         },
         BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
-            // The monitor update will be replayed on startup if it doesnt complete, so no
-            // use bothering to care about the monitor update completing.
-            let _ = self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
+            self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
         },
         BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
             let per_peer_state = self.per_peer_state.read().unwrap();
@@ -7242,32 +7244,31 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
     let payment_hash = payment_preimage.into();
     let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));

-    if !during_init {
-        if let Some(action) = action_opt {
-            log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
-                chan_id, action);
-            peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
-        }
+    if let Some(action) = action_opt {
+        log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
+            chan_id, action);
+        peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+    }

+    if !during_init {
         handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
     } else {
         // If we're running during init we cannot update a monitor directly - they probably
         // haven't actually been loaded yet. Instead, push the monitor update as a background
         // event.
-        // TODO: Track this update as pending and only complete the completion action when it
-        // finishes.
+
+        let in_flight_updates = peer_state.in_flight_monitor_updates
+            .entry(prev_hop.funding_txo)
+            .or_insert_with(Vec::new);
+        in_flight_updates.push(preimage_update.clone());
+
         let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
             counterparty_node_id,
             funding_txo: prev_hop.funding_txo,
             channel_id: prev_hop.channel_id,
             update: preimage_update,
         };
         self.pending_background_events.lock().unwrap().push(event);
-
-        mem::drop(peer_state);
-        mem::drop(per_peer_state);
-
-        self.handle_monitor_update_completion_actions(action_opt);
     }
 }

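The new `else` branch above replaces the removed TODO: the preimage-claim `ChannelMonitorUpdate` is recorded in `in_flight_monitor_updates` and queued as a `BackgroundEvent::MonitorUpdateRegeneratedOnStartup`, and the blocked completion action is no longer run immediately. Below is a minimal standalone sketch of that flow, with hypothetical simplified types (`Node`, `FundingOutpoint`, `claim_preimage`) rather than LDK's actual structures:

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct FundingOutpoint(u64);

#[derive(Clone, Debug)]
struct MonitorUpdate { update_id: u64 }

#[derive(Debug)]
enum BackgroundEvent {
    MonitorUpdateRegeneratedOnStartup { funding: FundingOutpoint, update: MonitorUpdate },
}

#[derive(Default)]
struct Node {
    during_init: bool,
    in_flight_monitor_updates: HashMap<FundingOutpoint, Vec<MonitorUpdate>>,
    pending_background_events: Vec<BackgroundEvent>,
    blocked_actions: HashMap<FundingOutpoint, Vec<String>>,
}

impl Node {
    // Record the completion action up front; it only runs once the monitor update completes.
    fn claim_preimage(&mut self, funding: FundingOutpoint, update: MonitorUpdate, action: String) {
        self.blocked_actions.entry(funding).or_default().push(action);
        if self.during_init {
            // Monitors are not loaded yet: mark the update in-flight and queue it for replay.
            self.in_flight_monitor_updates.entry(funding).or_default().push(update.clone());
            self.pending_background_events.push(
                BackgroundEvent::MonitorUpdateRegeneratedOnStartup { funding, update });
        } else {
            // Outside init the update would go to the chain monitor; assume it completes here.
            self.monitor_update_completed(funding, update.update_id);
        }
    }

    // Replays queued updates after startup, like the background-event handler earlier in the diff.
    fn replay_background_events(&mut self) {
        for event in std::mem::take(&mut self.pending_background_events) {
            match event {
                BackgroundEvent::MonitorUpdateRegeneratedOnStartup { funding, update } => {
                    // Pretend the chain monitor persisted the replayed update synchronously.
                    self.monitor_update_completed(funding, update.update_id);
                },
            }
        }
    }

    fn monitor_update_completed(&mut self, funding: FundingOutpoint, update_id: u64) {
        if let Some(updates) = self.in_flight_monitor_updates.get_mut(&funding) {
            updates.retain(|u| u.update_id != update_id);
        }
        let all_done = self.in_flight_monitor_updates.get(&funding).map_or(true, |u| u.is_empty());
        if all_done {
            for action in self.blocked_actions.remove(&funding).unwrap_or_default() {
                println!("running deferred completion action: {action}");
            }
        }
    }
}

fn main() {
    let mut node = Node { during_init: true, ..Default::default() };
    node.claim_preimage(FundingOutpoint(7), MonitorUpdate { update_id: 1 }, "fulfill HTLC".into());
    node.during_init = false;
    node.replay_background_events(); // only now does the deferred action run
}

In this sketch the deferred action fires only in `monitor_update_completed`, i.e. after the replayed update finishes, which mirrors the ordering the diff is aiming for.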