@@ -2953,31 +2953,9 @@ macro_rules! handle_error {
 /// [`ChannelMonitor`]/channel funding transaction) to begin with.
 macro_rules! locked_close_channel {
 	($self: ident, $peer_state: expr, $channel_context: expr, $shutdown_res_mut: expr) => {{
-		if let Some((counterparty_node_id, funding_txo, channel_id, update)) = $shutdown_res_mut.monitor_update.take() {
-			if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
-				handle_new_monitor_update!($self, funding_txo, update, $peer_state,
-					$channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
-			} else {
-				// We want to track the in-flight update both in `in_flight_monitor_updates` and in
-				// `pending_background_events` to avoid a race condition during
-				// `pending_background_events` processing where we complete one
-				// `ChannelMonitorUpdate` (but there are more pending as background events) but we
-				// conclude that all pending `ChannelMonitorUpdate`s have completed and it's safe to
-				// run post-completion actions. We could work around that with some effort, but it's
-				// simpler to just track updates twice.
-				let in_flight_updates = $peer_state.in_flight_monitor_updates.entry(funding_txo)
-					.or_insert_with(Vec::new);
-				if !in_flight_updates.contains(&update) {
-					in_flight_updates.push(update.clone());
-				}
-				let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-					counterparty_node_id,
-					funding_txo,
-					channel_id,
-					update,
-				};
-				$self.pending_background_events.lock().unwrap().push(event);
-			}
+		if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() {
+			handle_new_monitor_update!($self, funding_txo, update, $peer_state,
+				$channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
 		}
 		// If there's a possibility that we need to generate further monitor updates for this
 		// channel, we need to store the last update_id of it. However, we don't want to insert
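
The `else` branch removed above duplicated logic that this commit centralizes in `handle_new_monitor_update!` (see the `_internal_outer` hunk below), but the dual-tracking idea its comment describes survives unchanged. Below is a minimal, self-contained sketch of that idea, using simplified stand-in types; `MonitorUpdate`, `Manager`, and `queue_update` are illustrative names, not LDK API:

```rust
use std::collections::HashMap;
use std::sync::Mutex;
use std::sync::atomic::{AtomicBool, Ordering};

// Illustrative stand-ins; the real LDK types carry far more state.
#[derive(Clone, PartialEq)]
struct MonitorUpdate { update_id: u64 }

struct Manager {
	background_events_processed_since_startup: AtomicBool,
	// Keyed by a simplified funding-outpoint identifier.
	in_flight_monitor_updates: Mutex<HashMap<u64, Vec<MonitorUpdate>>>,
	pending_background_events: Mutex<Vec<MonitorUpdate>>,
}

impl Manager {
	// Before startup replay completes, an update is tracked in *both*
	// `in_flight_monitor_updates` and `pending_background_events`. If it
	// lived only in the background queue, completing one update could make
	// the in-flight list look empty while more background events remain,
	// so post-completion actions would run too early.
	fn queue_update(&self, funding_key: u64, update: MonitorUpdate) {
		let mut in_flight = self.in_flight_monitor_updates.lock().unwrap();
		let updates = in_flight.entry(funding_key).or_insert_with(Vec::new);
		if !updates.contains(&update) {
			updates.push(update.clone());
		}
		if !self.background_events_processed_since_startup.load(Ordering::Acquire) {
			self.pending_background_events.lock().unwrap().push(update);
		}
	}
}

fn main() {
	let mgr = Manager {
		background_events_processed_since_startup: AtomicBool::new(false),
		in_flight_monitor_updates: Mutex::new(HashMap::new()),
		pending_background_events: Mutex::new(Vec::new()),
	};
	mgr.queue_update(0, MonitorUpdate { update_id: 1 });
	mgr.queue_update(0, MonitorUpdate { update_id: 1 }); // deduped in-flight
	assert_eq!(mgr.in_flight_monitor_updates.lock().unwrap()[&0].len(), 1);
}
```
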
@@ -3289,8 +3267,8 @@ macro_rules! handle_new_monitor_update {
 	};
 	(
 		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
-		$chan_id: expr, $in_flight_updates: ident, $update_idx: ident, _internal_outer,
-		$completed: expr
+		$chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident,
+		_internal_outer, $completed: expr
 	) => { {
 		$in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
 			.or_insert_with(Vec::new);
@@ -3302,31 +3280,55 @@ macro_rules! handle_new_monitor_update {
 			$in_flight_updates.push($update);
 			$in_flight_updates.len() - 1
 		});
-		let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]);
-		handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
+		if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
+			let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]);
+			handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
+		} else {
+			// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
+			// fail to persist it. This is a fairly safe assumption, however, since anything we do
+			// during the startup sequence should be replayed exactly if we immediately crash.
+			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+				counterparty_node_id: $counterparty_node_id,
+				funding_txo: $funding_txo,
+				channel_id: $chan_id,
+				update: $in_flight_updates[$update_idx].clone(),
+			};
+			// We want to track the in-flight update both in `in_flight_monitor_updates` and in
+			// `pending_background_events` to avoid a race condition during
+			// `pending_background_events` processing where we complete one
+			// `ChannelMonitorUpdate` (but there are more pending as background events) but we
+			// conclude that all pending `ChannelMonitorUpdate`s have completed and it's safe to
+			// run post-completion actions.
+			// We could work around that with some effort, but it's simpler to just track updates
+			// twice.
+			$self.pending_background_events.lock().unwrap().push(event);
+			false
+		}
 	} };
 	(
 		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr,
 		REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
 	) => { {
 		let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
 		let chan_id = $chan_context.channel_id();
+		let counterparty_node_id = $chan_context.get_counterparty_node_id();
 		let in_flight_updates;
 		let idx;
 		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
-			in_flight_updates, idx, _internal_outer,
+			counterparty_node_id, in_flight_updates, idx, _internal_outer,
 			{
 				let _ = in_flight_updates.remove(idx);
 			})
 	} };
 	(
 		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
-		$per_peer_state_lock: expr, $logger: expr, $channel_id: expr, POST_CHANNEL_CLOSE
+		$per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr, POST_CHANNEL_CLOSE
 	) => { {
+		let logger = WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
 		let in_flight_updates;
 		let idx;
-		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, $logger,
-			$channel_id, in_flight_updates, idx, _internal_outer,
+		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger,
+			$channel_id, $counterparty_node_id, in_flight_updates, idx, _internal_outer,
 			{
 				let _ = in_flight_updates.remove(idx);
 				if in_flight_updates.is_empty() {
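
The context lines at the top of this hunk show the dedup-or-reuse idiom the macro relies on: if an identical `ChannelMonitorUpdate` is already in flight, its existing index is reused instead of pushing a duplicate. A standalone sketch of the idiom, with a hypothetical `Update` stand-in type:

```rust
#[derive(Clone, PartialEq, Debug)]
struct Update(u64);

// Returns the index of `update` in `in_flight`, appending it only if an
// identical update is not already queued (mirrors the
// `iter().position(..).unwrap_or_else(..)` shape in the macro above).
fn track(in_flight: &mut Vec<Update>, update: Update) -> usize {
	in_flight.iter().position(|upd| *upd == update).unwrap_or_else(|| {
		in_flight.push(update);
		in_flight.len() - 1
	})
}

fn main() {
	let mut in_flight = vec![Update(1)];
	assert_eq!(track(&mut in_flight, Update(1)), 0); // duplicate: reuses slot 0
	assert_eq!(track(&mut in_flight, Update(2)), 1); // new: appended at index 1
	assert_eq!(in_flight.len(), 2);
}
```
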
@@ -3346,10 +3348,11 @@ macro_rules! handle_new_monitor_update {
 	) => { {
 		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
 		let chan_id = $chan.context.channel_id();
+		let counterparty_node_id = $chan.context.get_counterparty_node_id();
 		let in_flight_updates;
 		let idx;
 		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
-			in_flight_updates, idx, _internal_outer,
+			counterparty_node_id, in_flight_updates, idx, _internal_outer,
 			{
 				let _ = in_flight_updates.remove(idx);
 				if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
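
Every arm touched in these hunks is selected by a bare marker token (`REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER`, `POST_CHANNEL_CLOSE`, `_internal_outer`, `_internal`): an identifier in a `macro_rules!` pattern matches only itself, so callers pick an arm by naming it. A minimal sketch of the dispatch style (the macro and marker names here are invented for illustration):

```rust
// Bare identifiers in a macro pattern act as literal tokens, so the caller
// selects an arm at expansion time, much like `POST_CHANNEL_CLOSE` above.
macro_rules! handle_update {
	($val: expr, REMAIN_LOCKED) => { $val + 1 };
	($val: expr, POST_CLOSE) => { $val + 2 };
}

fn main() {
	assert_eq!(handle_update!(0, REMAIN_LOCKED), 1);
	assert_eq!(handle_update!(0, POST_CLOSE), 2);
}
```
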
@@ -3946,11 +3949,10 @@ where
 			},
 			hash_map::Entry::Vacant(_) => {},
 		}
-		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
 
 		handle_new_monitor_update!(
 			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
-			logger, channel_id, POST_CHANNEL_CLOSE
+			counterparty_node_id, channel_id, POST_CHANNEL_CLOSE
 		);
 	}
 
@@ -7080,7 +7082,6 @@ where
 		let peer_state = &mut **peer_state_lock;
 		if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
 			if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-				let counterparty_node_id = chan.context.get_counterparty_node_id();
 				let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 				let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger);
 
@@ -7095,21 +7096,8 @@ where
 					if let Some(raa_blocker) = raa_blocker_opt {
 						peer_state.actions_blocking_raa_monitor_updates.entry(chan_id).or_insert_with(Vec::new).push(raa_blocker);
 					}
-					if !during_init {
-						handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
-							peer_state, per_peer_state, chan);
-					} else {
-						// If we're running during init we cannot update a monitor directly -
-						// they probably haven't actually been loaded yet. Instead, push the
-						// monitor update as a background event.
-						self.pending_background_events.lock().unwrap().push(
-							BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-								counterparty_node_id,
-								funding_txo: prev_hop.funding_txo,
-								channel_id: prev_hop.channel_id,
-								update: monitor_update.clone(),
-							});
-					}
+					handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
+						peer_state, per_peer_state, chan);
 				}
 				UpdateFulfillCommitFetch::DuplicateClaim {} => {
 					let (action_opt, raa_blocker_opt) = completion_action(None, true);
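
With the startup gate folded into the macro, call sites like this claim path no longer carry their own `during_init` branch. A minimal sketch of the gate pattern itself, assuming a hypothetical `Gate` type and `apply_or_defer` helper (not LDK API):

```rust
use std::sync::Mutex;
use std::sync::atomic::{AtomicBool, Ordering};

struct Gate {
	// Flipped to true exactly once, after startup background events replay.
	ready: AtomicBool,
	deferred: Mutex<Vec<u64>>,
}

impl Gate {
	// Returns true if the update was applied directly, false if deferred.
	fn apply_or_defer(&self, update_id: u64) -> bool {
		// Acquire pairs with the Release store made when startup completes,
		// so a true reading also observes all state written before the flip.
		if self.ready.load(Ordering::Acquire) {
			// Apply immediately, e.g. persist via the chain monitor.
			true
		} else {
			// Defer: anything done during startup must be regenerated
			// identically if the node crashes before this persists.
			self.deferred.lock().unwrap().push(update_id);
			false
		}
	}
}

fn main() {
	let gate = Gate { ready: AtomicBool::new(false), deferred: Mutex::new(Vec::new()) };
	assert!(!gate.apply_or_defer(7)); // deferred during startup
	gate.ready.store(true, Ordering::Release);
	assert!(gate.apply_or_defer(8)); // applied directly afterwards
	assert_eq!(gate.deferred.lock().unwrap().len(), 1);
}
```
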
@@ -7234,26 +7222,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new).push(action);
 		}
 
-		if !during_init {
-			handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
-		} else {
-			// If we're running during init we cannot update a monitor directly - they probably
-			// haven't actually been loaded yet. Instead, push the monitor update as a background
-			// event.
-
-			let in_flight_updates = peer_state.in_flight_monitor_updates
-				.entry(prev_hop.funding_txo)
-				.or_insert_with(Vec::new);
-			in_flight_updates.push(preimage_update.clone());
-
-			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-				counterparty_node_id,
-				funding_txo: prev_hop.funding_txo,
-				channel_id: prev_hop.channel_id,
-				update: preimage_update,
-			};
-			self.pending_background_events.lock().unwrap().push(event);
-		}
+		handle_new_monitor_update!(
+			self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state,
+			counterparty_node_id, chan_id, POST_CHANNEL_CLOSE
+		);
 	}
 
 	fn finalize_claims(&self, sources: Vec<HTLCSource>) {