@@ -3922,39 +3922,6 @@ where
39223922 self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
39233923 }
39243924
3925- fn set_closed_chan_next_monitor_update_id(
3926- peer_state: &mut PeerState<SP>, channel_id: ChannelId, monitor_update: &mut ChannelMonitorUpdate,
3927- ) {
3928- match peer_state.closed_channel_monitor_update_ids.entry(channel_id) {
3929- btree_map::Entry::Vacant(entry) => {
3930- let is_closing_unupdated_monitor = monitor_update.update_id == 1
3931- && monitor_update.updates.len() == 1
3932- && matches!(&monitor_update.updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. });
3933- // If the ChannelMonitorUpdate is closing a channel that never got past initial
3934- // funding (to have any commitment updates), we'll skip inserting in
3935- // `locked_close_channel`, allowing us to avoid keeping around the PeerState for
3936- // that peer. In that specific case we expect no entry in the map here. In any
3937-	// other case, this is a bug, but in production we go ahead and recover by
3938-	// inserting the update_id and hoping it's right.
3939- debug_assert!(is_closing_unupdated_monitor, "Expected closing monitor against an unused channel, got {:?}", monitor_update);
3940- if !is_closing_unupdated_monitor {
3941- entry.insert(monitor_update.update_id);
3942- }
3943- },
3944- btree_map::Entry::Occupied(entry) => {
3945-	// If we're running in a threaded environment it's possible we generate updates for
3946- // a channel that is closing, then apply some preimage update, then go back and
3947- // apply the close monitor update here. In order to ensure the updates are still
3948- // well-ordered, we have to use the `closed_channel_monitor_update_ids` map to
3949- // override the `update_id`, taking care to handle old monitors where the
3950- // `latest_update_id` is already `u64::MAX`.
3951- let latest_update_id = entry.into_mut();
3952- *latest_update_id = latest_update_id.saturating_add(1);
3953- monitor_update.update_id = *latest_update_id;
3954- }
3955- }
3956- }
3957-
39583925 /// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
39593926 #[must_use]
39603927 fn apply_post_close_monitor_update(
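
The helper removed above stamped each closed-channel `ChannelMonitorUpdate` with the next per-channel `update_id`, but the caller then dropped the peer lock before actually applying the update, leaving a window in which another update could be stamped and applied first. Below is a minimal, self-contained sketch (hypothetical types, not LDK's actual API) of the monotonic-counter pattern involved; the change in this commit keeps the id assignment and the hand-off to the monitor under the same `peer_state` lock so that window disappears.

```rust
use std::collections::BTreeMap;
use std::sync::Mutex;

// Hypothetical stand-ins for ChannelId and ChannelMonitorUpdate, for illustration only.
type ChannelId = [u8; 32];
struct MonitorUpdate { update_id: u64 }

struct PeerStateSketch {
    // Mirrors `closed_channel_monitor_update_ids`: the last update_id issued for
    // each channel already removed from `channel_by_id`.
    closed_channel_monitor_update_ids: BTreeMap<ChannelId, u64>,
}

// Stamp the update with the next id and hand it off while the lock is still
// held. If the lock were released between stamping and applying (the removed
// pattern), a concurrently stamped update could reach the monitor first and
// the ids would arrive out of order.
fn stamp_and_apply(
    peer: &Mutex<PeerStateSketch>, channel_id: ChannelId, mut update: MonitorUpdate,
    apply: impl FnOnce(MonitorUpdate),
) {
    let mut peer = peer.lock().unwrap();
    let id = peer.closed_channel_monitor_update_ids.entry(channel_id).or_insert(0);
    // Saturate so legacy monitors whose latest_update_id is already u64::MAX don't wrap.
    *id = id.saturating_add(1);
    update.update_id = *id;
    apply(update);
}
```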
@@ -7220,38 +7187,53 @@ where
72207187 let mut peer_state = peer_state_opt.expect("peer_state_opt is always Some when the counterparty_node_id is Some");
72217188
72227189 let mut preimage_update = ChannelMonitorUpdate {
7223- update_id: 0, // set in set_closed_chan_next_monitor_update_id
7224-		counterparty_node_id: prev_hop.counterparty_node_id,
7190+ update_id: 0, // set below
7191+		counterparty_node_id: Some(counterparty_node_id),
72257192 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
72267193 payment_preimage,
72277194 payment_info,
72287195 }],
72297196 channel_id: Some(prev_hop.channel_id),
72307197 };
72317198
7232- // Note that the below is race-y - we set the `update_id` here and then drop the peer_state
7233- // lock before applying the update in `apply_post_close_monitor_update` (or via the
7234- // background events pipeline). During that time, some other update could be created and
7235-	// then applied, resulting in `ChannelMonitorUpdate`s being applied out of order and causing
7236- // a panic.
7237- Self::set_closed_chan_next_monitor_update_id(&mut *peer_state, prev_hop.channel_id, &mut preimage_update);
7199+ if let Some(latest_update_id) = peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id) {
7200+ *latest_update_id = latest_update_id.saturating_add(1);
7201+ preimage_update.update_id = *latest_update_id;
7202+ } else {
7203+ let err = "We need the latest ChannelMonitorUpdate ID to build a new update.
7204+ This should have been checked for availability on startup but somehow it is no longer available.
7205+ This indicates a bug inside LDK. Please report this error at https://github.com/lightningdevkit/rust-lightning/issues/new";
7206+ log_error!(self.logger, "{}", err);
7207+ panic!("{}", err);
7208+ }
72387209
7239- mem::drop(peer_state);
7240- mem::drop(per_peer_state);
7210+ // Note that we do process the completion action here. This totally could be a
7211+ // duplicate claim, but we have no way of knowing without interrogating the
7212+ // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
7213+ // generally always allowed to be duplicative (and it's specifically noted in
7214+ // `PaymentForwarded`).
7215+ let (action_opt, raa_blocker_opt) = completion_action(None, false);
7216+
7217+ if let Some(raa_blocker) = raa_blocker_opt {
7218+ peer_state.actions_blocking_raa_monitor_updates
7219+ .entry(prev_hop.channel_id)
7220+ .or_default()
7221+ .push(raa_blocker);
7222+ }
7223+
7224+	// Given that we're in a bit of a weird edge case, it's worth hashing the preimage
7225+ // to include the `payment_hash` in the log metadata here.
7226+ let payment_hash = payment_preimage.into();
7227+ let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));
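
For context on the `payment_preimage.into()` above: a lightning payment hash is simply SHA-256 of the 32-byte preimage, which is why the hash can be recovered here purely for log metadata. A minimal sketch of that relationship, using hypothetical newtypes rather than LDK's own `PaymentPreimage`/`PaymentHash` (whose `From` impl this mirrors), and assuming the `bitcoin` crate's hashes API:

```rust
use bitcoin::hashes::{sha256, Hash};

// Hypothetical 32-byte newtypes standing in for LDK's PaymentPreimage / PaymentHash.
struct Preimage([u8; 32]);
struct Hash256([u8; 32]);

impl From<Preimage> for Hash256 {
    fn from(preimage: Preimage) -> Self {
        // The payment hash committed to in the HTLC is SHA-256 of the preimage.
        Hash256(sha256::Hash::hash(&preimage.0).to_byte_array())
    }
}
```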
72417228
72427229 if !during_init {
7243- // We update the ChannelMonitor on the backward link, after
7244- // receiving an `update_fulfill_htlc` from the forward link.
7245- let update_res = self.apply_post_close_monitor_update(counterparty_node_id, prev_hop.channel_id, prev_hop.funding_txo, preimage_update);
7246- if update_res != ChannelMonitorUpdateStatus::Completed {
7247- // TODO: This needs to be handled somehow - if we receive a monitor update
7248- // with a preimage we *must* somehow manage to propagate it to the upstream
7249- // channel, or we must have an ability to receive the same event and try
7250- // again on restart.
7251- log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id), None),
7252- "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
7253- payment_preimage, update_res);
7230+ if let Some(action) = action_opt {
7231+ log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
7232+ chan_id, action);
7233+ peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
72547234 }
7235+
7236+ handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
72557237 } else {
72567238 // If we're running during init we cannot update a monitor directly - they probably
72577239 // haven't actually been loaded yet. Instead, push the monitor update as a background
@@ -7265,39 +7247,12 @@ where
72657247 update: preimage_update,
72667248 };
72677249 self.pending_background_events.lock().unwrap().push(event);
7268- }
72697250
7270- // Note that we do process the completion action here. This totally could be a
7271- // duplicate claim, but we have no way of knowing without interrogating the
7272- // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
7273- // generally always allowed to be duplicative (and it's specifically noted in
7274- // `PaymentForwarded`).
7275- let (action_opt, raa_blocker_opt) = completion_action(None, false);
7276-
7277- if let Some(raa_blocker) = raa_blocker_opt {
7278- // TODO: Avoid always blocking the world for the write lock here.
7279- let mut per_peer_state = self.per_peer_state.write().unwrap();
7280- let peer_state_mutex = per_peer_state.entry(counterparty_node_id).or_insert_with(||
7281- Mutex::new(PeerState {
7282- channel_by_id: new_hash_map(),
7283- inbound_channel_request_by_id: new_hash_map(),
7284- latest_features: InitFeatures::empty(),
7285- pending_msg_events: Vec::new(),
7286- in_flight_monitor_updates: BTreeMap::new(),
7287- monitor_update_blocked_actions: BTreeMap::new(),
7288- actions_blocking_raa_monitor_updates: BTreeMap::new(),
7289- closed_channel_monitor_update_ids: BTreeMap::new(),
7290- is_connected: false,
7291- }));
7292- let mut peer_state = peer_state_mutex.lock().unwrap();
7251+ mem::drop(peer_state);
7252+ mem::drop(per_peer_state);
72937253
7294- peer_state.actions_blocking_raa_monitor_updates
7295- .entry(prev_hop.channel_id)
7296- .or_default()
7297- .push(raa_blocker);
7254+ self.handle_monitor_update_completion_actions(action_opt);
72987255 }
7299-
7300- self.handle_monitor_update_completion_actions(action_opt);
73017256 }
73027257
73037258 fn finalize_claims(&self, sources: Vec<HTLCSource>) {
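
As a recap of the restructured tail of this function: outside of init the stamped update goes through `handle_new_monitor_update!` while `peer_state` is still locked, whereas during init (when `ChannelMonitor`s may not be loaded yet) it is queued as a background event, the locks are dropped, and the completion actions are run directly. A condensed, hypothetical sketch of that branch structure follows, with simplified types, stand-in method names, and the macro replaced by a plain function call.

```rust
use std::sync::Mutex;

// Simplified stand-ins for LDK's ChannelMonitorUpdate and
// BackgroundEvent::MonitorUpdateRegeneratedOnStartup.
struct MonitorUpdate { update_id: u64 }
enum BackgroundEvent {
    MonitorUpdateRegeneratedOnStartup { channel_id: [u8; 32], update: MonitorUpdate },
}

struct ManagerSketch {
    pending_background_events: Mutex<Vec<BackgroundEvent>>,
}

impl ManagerSketch {
    // Sketch of the tail of the closed-channel claim path.
    fn finish_closed_channel_claim(
        &self, during_init: bool, channel_id: [u8; 32], update: MonitorUpdate,
        completion_action: Option<String>,
    ) {
        if !during_init {
            // Normal operation: the completion action is queued under the peer
            // lock (elided here) and the update is handed straight to the
            // chain monitor before that lock is released.
            self.apply_monitor_update(channel_id, update);
        } else {
            // During startup the ChannelMonitors may not be loaded yet, so the
            // update is queued to be replayed later; the peer locks are then
            // dropped and the completion action runs immediately.
            self.pending_background_events.lock().unwrap().push(
                BackgroundEvent::MonitorUpdateRegeneratedOnStartup { channel_id, update },
            );
            self.handle_completion_action(completion_action);
        }
    }

    fn apply_monitor_update(&self, _channel_id: [u8; 32], _update: MonitorUpdate) { /* stand-in */ }
    fn handle_completion_action(&self, _action: Option<String>) { /* stand-in */ }
}
```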