@@ -3957,11 +3957,10 @@ where
 	}
 
 	/// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
-	#[must_use]
 	fn apply_post_close_monitor_update(
 		&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
 		monitor_update: ChannelMonitorUpdate,
-	) -> ChannelMonitorUpdateStatus {
+	) {
 		// Note that there may be some post-close updates which need to be well-ordered with
 		// respect to the `update_id`, so we hold the `peer_state` lock here.
 		let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3972,16 +3971,21 @@ where
 		match peer_state.channel_by_id.entry(channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase) => {
 				if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
-					let in_flight = handle_new_monitor_update!(self, funding_txo,
+					handle_new_monitor_update!(self, funding_txo,
 						monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
-					return if in_flight { ChannelMonitorUpdateStatus::InProgress } else { ChannelMonitorUpdateStatus::Completed };
+					return;
 				} else {
 					debug_assert!(false, "We shouldn't have an update for a non-funded channel");
 				}
 			},
 			hash_map::Entry::Vacant(_) => {},
 		}
-		self.chain_monitor.update_channel(funding_txo, &monitor_update)
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
+
+		handle_new_monitor_update!(
+			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
+			logger, channel_id, POST_CHANNEL_CLOSE
+		);
 	}
 
 	/// When a channel is removed, two things need to happen:
@@ -4010,7 +4014,7 @@ where
 		}
 		if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
 			debug_assert!(false, "This should have been handled in `locked_close_channel`");
-			let _ = self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
+			self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
 		}
 		if self.background_events_processed_since_startup.load(Ordering::Acquire) {
 			// If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are
@@ -6359,9 +6363,7 @@ where
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
 				},
 				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
-					// The monitor update will be replayed on startup if it doesnt complete, so no
-					// use bothering to care about the monitor update completing.
-					let _ = self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
+					self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
 				},
 				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
 					let per_peer_state = self.per_peer_state.read().unwrap();
@@ -7305,32 +7307,31 @@ where
 		let payment_hash = payment_preimage.into();
 		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));
 
-		if !during_init {
-			if let Some(action) = action_opt {
-				log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
-					chan_id, action);
-				peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
-			}
+		if let Some(action) = action_opt {
+			log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
+				chan_id, action);
+			peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+		}
 
+		if !during_init {
 			handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
 		} else {
 			// If we're running during init we cannot update a monitor directly - they probably
 			// haven't actually been loaded yet. Instead, push the monitor update as a background
 			// event.
-			// TODO: Track this update as pending and only complete the completion action when it
-			// finishes.
+
+			let in_flight_updates = peer_state.in_flight_monitor_updates
+				.entry(prev_hop.funding_txo)
+				.or_insert_with(Vec::new);
+			in_flight_updates.push(preimage_update.clone());
+
 			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 				counterparty_node_id,
 				funding_txo: prev_hop.funding_txo,
 				channel_id: prev_hop.channel_id,
 				update: preimage_update,
 			};
 			self.pending_background_events.lock().unwrap().push(event);
-
-			mem::drop(peer_state);
-			mem::drop(per_peer_state);
-
-			self.handle_monitor_update_completion_actions(action_opt);
 		}
 	}
 
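As a rough illustration of what the last hunk does, here is a minimal, self-contained Rust sketch of the during-init path. All types and names below (MonitorUpdate, BackgroundEvent, PeerState, queue_update_during_init) are simplified stand-ins, not LDK's actual ChannelManager internals: the update is recorded as in-flight so its completion action stays blocked, and the update itself is queued as a background event to be replayed once the monitors have been loaded.

use std::collections::HashMap;

// Simplified stand-in for a ChannelMonitorUpdate.
#[derive(Clone, Debug)]
struct MonitorUpdate {
	update_id: u64,
}

// Simplified stand-in for the background event replayed on startup.
#[derive(Debug)]
enum BackgroundEvent {
	MonitorUpdateRegeneratedOnStartup { channel_id: u64, update: MonitorUpdate },
}

#[derive(Default)]
struct PeerState {
	// Updates handed off but not yet confirmed complete by the chain monitor.
	in_flight_monitor_updates: HashMap<u64, Vec<MonitorUpdate>>,
}

fn queue_update_during_init(
	peer_state: &mut PeerState, pending_background_events: &mut Vec<BackgroundEvent>,
	channel_id: u64, update: MonitorUpdate,
) {
	// Track the update as in-flight so completion actions for this channel stay
	// blocked until the replayed update actually completes.
	peer_state.in_flight_monitor_updates.entry(channel_id).or_insert_with(Vec::new).push(update.clone());
	// Queue the update to be applied once startup has finished loading monitors.
	pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { channel_id, update });
}

fn main() {
	let mut peer_state = PeerState::default();
	let mut events = Vec::new();
	queue_update_during_init(&mut peer_state, &mut events, 42, MonitorUpdate { update_id: 1 });
	assert_eq!(peer_state.in_flight_monitor_updates[&42].len(), 1);
	println!("queued: {:?}", events);
}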