diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 00696515724..37595ba8fc0 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -600,7 +600,7 @@ fn do_test_async_raa_peer_disconnect( } // Expect the RAA - let (_, revoke_and_ack, commitment_signed, resend_order) = + let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _) = handle_chan_reestablish_msgs!(dst, src); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { assert!(revoke_and_ack.is_none()); @@ -616,14 +616,15 @@ fn do_test_async_raa_peer_disconnect( dst.node.signer_unblocked(Some((src_node_id, chan_id))); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { - let (_, revoke_and_ack, commitment_signed, resend_order) = + let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_some()); assert!(commitment_signed.is_some()); assert!(resend_order == RAACommitmentOrder::RevokeAndACKFirst); } else { // Make sure we don't double send the RAA. 
- let (_, revoke_and_ack, commitment_signed, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, revoke_and_ack, commitment_signed, _, _, _, _) = + handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_none()); assert!(commitment_signed.is_none()); } @@ -749,7 +750,8 @@ fn do_test_async_commitment_signature_peer_disconnect( } // Expect the RAA - let (_, revoke_and_ack, commitment_signed, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, revoke_and_ack, commitment_signed, _, _, _, _) = + handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_some()); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { assert!(commitment_signed.is_none()); @@ -762,11 +764,11 @@ fn do_test_async_commitment_signature_peer_disconnect( dst.node.signer_unblocked(Some((src_node_id, chan_id))); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { - let (_, _, commitment_signed, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, _, commitment_signed, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(commitment_signed.is_some()); } else { // Make sure we don't double send the CS. 
- let (_, _, commitment_signed, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, _, commitment_signed, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(commitment_signed.is_none()); } } @@ -881,6 +883,9 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.0.is_none()); assert!(as_resp.1.is_none()); assert!(as_resp.2.is_none()); + assert!(as_resp.4.is_none()); + assert!(as_resp.5.is_none()); + assert!(as_resp.6.is_none()); if monitor_update_failure { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); @@ -900,6 +905,9 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.0.is_none()); assert!(as_resp.1.is_none()); assert!(as_resp.2.is_none()); + assert!(as_resp.4.is_none()); + assert!(as_resp.5.is_none()); + assert!(as_resp.6.is_none()); nodes[0].enable_channel_signer_op(&node_b_id, &chan_id, SignerOp::SignCounterpartyCommitment); nodes[0].node.signer_unblocked(Some((node_b_id, chan_id))); @@ -916,6 +924,15 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst); + assert!(as_resp.4.is_none()); + assert!(bs_resp.4.is_none()); + + assert!(as_resp.5.is_none()); + assert!(bs_resp.5.is_none()); + + assert!(as_resp.6.is_none()); + assert!(bs_resp.6.is_none()); + // Now that everything is restored, get the CS + RAA and handle them. 
nodes[1] .node diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 1bc1bfbc2ff..1a9af4f2071 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -206,6 +206,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { nodes[1].node.peer_disconnected(node_a_id); let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_channel_ready = (true, true); + reconnect_args.send_announcement_sigs = (true, true); reconnect_nodes(reconnect_args); } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 1ca067f43f4..f11aa3b6d19 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8568,16 +8568,38 @@ where } } - fn on_tx_signatures_exchange(&mut self, funding_tx: Transaction) { + fn on_tx_signatures_exchange<'a, L: Deref>( + &mut self, funding_tx: Transaction, best_block_height: u32, + logger: &WithChannelContext<'a, L>, + ) -> Option + where + L::Target: Logger, + { debug_assert!(!self.context.channel_state.is_monitor_update_in_progress()); debug_assert!(!self.context.channel_state.is_awaiting_remote_revoke()); + let mut splice_locked = None; if let Some(pending_splice) = self.pending_splice.as_mut() { if let Some(FundingNegotiation::AwaitingSignatures { mut funding }) = pending_splice.funding_negotiation.take() { funding.funding_transaction = Some(funding_tx); pending_splice.negotiated_candidates.push(funding); + splice_locked = pending_splice.check_get_splice_locked( + &self.context, + pending_splice.negotiated_candidates.len() - 1, + best_block_height, + ); + if let Some(splice_txid) = + splice_locked.as_ref().map(|splice_locked| splice_locked.splice_txid) + { + log_info!( + logger, + "Sending 0conf splice_locked txid {} to our peer for channel {}", + splice_txid, + &self.context.channel_id + ); + } } else { debug_assert!(false); } @@ -8587,11 +8609,20 @@ where 
self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); } + + splice_locked } - pub fn funding_transaction_signed( - &mut self, funding_txid_signed: Txid, witnesses: Vec, - ) -> Result<(Option, Option), APIError> { + pub fn funding_transaction_signed( + &mut self, funding_txid_signed: Txid, witnesses: Vec, best_block_height: u32, + logger: &L, + ) -> Result< + (Option, Option, Option), + APIError, + > + where + L::Target: Logger, + { let signing_session = if let Some(signing_session) = self.context.interactive_tx_signing_session.as_mut() { if let Some(pending_splice) = self.pending_splice.as_ref() { @@ -8605,6 +8636,12 @@ where .unwrap_or(false)); } + if signing_session.holder_tx_signatures().is_some() { + // Our `tx_signatures` either should've been sent the first time we processed + // them, or we're waiting for our counterparty to send theirs first. + return Ok((None, None, None)); + } + signing_session } else { let err = @@ -8647,17 +8684,31 @@ where .provide_holder_witnesses(tx_signatures, &self.context.secp_ctx) .map_err(|err| APIError::APIMisuseError { err })?; - if let Some(funding_tx) = funding_tx_opt.clone() { - debug_assert!(tx_signatures_opt.is_some()); - self.on_tx_signatures_exchange(funding_tx); + let logger = WithChannelContext::from(logger, &self.context, None); + if tx_signatures_opt.is_some() { + log_info!( + logger, + "Sending tx_signatures for interactive funding transaction {funding_txid_signed}" + ); } - Ok((tx_signatures_opt, funding_tx_opt)) + let splice_locked_opt = funding_tx_opt.clone().and_then(|funding_tx| { + debug_assert!(tx_signatures_opt.is_some()); + self.on_tx_signatures_exchange(funding_tx, best_block_height, &logger) + }); + + Ok((tx_signatures_opt, splice_locked_opt, funding_tx_opt)) } - pub fn tx_signatures( - &mut self, msg: &msgs::TxSignatures, - ) -> Result<(Option, Option), ChannelError> { + pub fn tx_signatures( + &mut self, msg: &msgs::TxSignatures, best_block_height: u32, 
logger: &L, + ) -> Result< + (Option, Option, Option), + ChannelError, + > + where + L::Target: Logger, + { let signing_session = if let Some(signing_session) = self.context.interactive_tx_signing_session.as_mut() { @@ -8703,11 +8754,18 @@ where let (holder_tx_signatures_opt, funding_tx_opt) = signing_session.received_tx_signatures(msg).map_err(|msg| ChannelError::Warn(msg))?; - if let Some(funding_tx) = funding_tx_opt.clone() { - self.on_tx_signatures_exchange(funding_tx); - } + let logger = WithChannelContext::from(logger, &self.context, None); + log_info!( + logger, + "Received tx_signatures for interactive funding transaction {}", + msg.tx_hash + ); - Ok((holder_tx_signatures_opt, funding_tx_opt)) + let splice_locked_opt = funding_tx_opt.clone().and_then(|funding_tx| { + self.on_tx_signatures_exchange(funding_tx, best_block_height, &logger) + }); + + Ok((holder_tx_signatures_opt, splice_locked_opt, funding_tx_opt)) } /// Queues up an outbound update fee by placing it in the holding cell. 
You should call @@ -8864,7 +8922,18 @@ where } self.context.channel_state.clear_local_stfu_sent(); self.context.channel_state.clear_remote_stfu_sent(); - self.context.channel_state.clear_quiescent(); + if self + .context + .interactive_tx_signing_session + .as_ref() + .map(|signing_session| { + signing_session.has_received_tx_signatures() + && signing_session.holder_tx_signatures().is_some() + }) + .unwrap_or(true) + { + self.context.channel_state.clear_quiescent(); + } } self.context.channel_state.set_peer_disconnected(); @@ -10768,6 +10837,12 @@ where let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, block_height, logger); + if let Some(quiescent_action) = self.quiescent_action.as_ref() { + if matches!(quiescent_action, QuiescentAction::Splice(_)) { + self.context.channel_state.set_awaiting_quiescence(); + } + } + Some(SpliceFundingPromotion { funding_txo, monitor_update, @@ -11002,7 +11077,11 @@ where confirmed_funding_index, height, ) { - log_info!(logger, "Sending a splice_locked to our peer for channel {}", &self.context.channel_id); + log_info!( + logger, "Sending splice_locked txid {} to our peer for channel {}", + splice_locked.splice_txid, + &self.context.channel_id + ); let (funding_txo, monitor_update, announcement_sigs, discarded_funding) = chain_node_signer .and_then(|(chain_hash, node_signer, user_config)| { @@ -11301,6 +11380,10 @@ where .as_ref() .filter(|session| !session.has_received_tx_signatures()) .map(|signing_session| { + if self.pending_splice.is_some() { + debug_assert!(self.context.channel_state.is_quiescent()); + } + let mut next_funding = msgs::NextFunding { txid: signing_session.unsigned_tx().compute_txid(), retransmit_flags: 0, @@ -11430,10 +11513,10 @@ where }); } - if !self.context.is_live() { + if !self.context.is_usable() { return Err(APIError::APIMisuseError { err: format!( - "Channel {} cannot be spliced, as channel is not live", + "Channel {} cannot be spliced as it is either pending 
open/close", self.context.channel_id() ), }); @@ -12579,6 +12662,7 @@ where || self.context.channel_state.is_awaiting_quiescence() || self.context.channel_state.is_local_stfu_sent() { + log_info!(logger, "Channel is either pending or already quiescent"); return Ok(None); } @@ -12586,6 +12670,7 @@ where if self.context.is_live() { Ok(Some(self.send_stfu(logger)?)) } else { + log_info!(logger, "Waiting for peer reconnection to send stfu"); Ok(None) } } @@ -13875,7 +13960,18 @@ where } channel_state.clear_local_stfu_sent(); channel_state.clear_remote_stfu_sent(); - channel_state.clear_quiescent(); + if self + .context + .interactive_tx_signing_session + .as_ref() + .map(|signing_session| { + signing_session.has_received_tx_signatures() + && signing_session.holder_tx_signatures().is_some() + }) + .unwrap_or(true) + { + channel_state.clear_quiescent(); + } }, ChannelState::FundingNegotiated(_) if self.context.interactive_tx_signing_session.is_some() => {}, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 41af9145152..17eef3f7d10 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4675,14 +4675,6 @@ where // Look for the channel match peer_state.channel_by_id.entry(*channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - if !chan_phase_entry.get().context().is_connected() { - // TODO: We should probably support this, but right now `splice_channel` refuses when - // the peer is disconnected, so we just check it here. 
- return Err(APIError::ChannelUnavailable { - err: "Cannot initiate splice while peer is disconnected".to_owned(), - }); - } - let locktime = locktime.unwrap_or_else(|| self.current_best_block().height); if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); @@ -6245,10 +6237,20 @@ where .map(|input| input.witness) .filter(|witness| !witness.is_empty()) .collect(); - match chan.funding_transaction_signed(txid, witnesses) { - Ok((Some(tx_signatures), funding_tx_opt)) => { + let best_block_height = self.best_block.read().unwrap().height; + match chan.funding_transaction_signed( + txid, + witnesses, + best_block_height, + &self.logger, + ) { + Ok((Some(tx_signatures), splice_locked_opt, funding_tx_opt)) => { if let Some(funding_tx) = funding_tx_opt { - self.broadcast_interactive_funding(chan, &funding_tx); + self.broadcast_interactive_funding( + chan, + &funding_tx, + &self.logger, + ); } peer_state.pending_msg_events.push( MessageSendEvent::SendTxSignatures { @@ -6256,6 +6258,14 @@ where msg: tx_signatures, }, ); + if let Some(splice_locked) = splice_locked_opt { + peer_state.pending_msg_events.push( + MessageSendEvent::SendSpliceLocked { + node_id: *counterparty_node_id, + msg: splice_locked, + }, + ); + } return NotifyOption::DoPersist; }, Err(err) => { @@ -6293,8 +6303,14 @@ where } fn broadcast_interactive_funding( - &self, channel: &mut FundedChannel, funding_tx: &Transaction, + &self, channel: &mut FundedChannel, funding_tx: &Transaction, logger: &L, ) { + let logger = WithChannelContext::from(logger, channel.context(), None); + log_info!( + logger, + "Broadcasting signed interactive funding transaction {}", + funding_tx.compute_txid() + ); self.tx_broadcaster.broadcast_transactions(&[funding_tx]); { let mut pending_events = self.pending_events.lock().unwrap(); @@ -9334,6 +9350,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ if let Some(signing_session) = (!channel.is_awaiting_monitor_update()) .then(|| ()) .and_then(|_| channel.context.interactive_tx_signing_session.as_mut()) + .filter(|signing_session| signing_session.has_received_commitment_signed()) .filter(|signing_session| signing_session.holder_tx_signatures().is_none()) { if signing_session.has_local_contribution() { @@ -9354,21 +9371,27 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } else { let txid = signing_session.unsigned_tx().compute_txid(); - match channel.funding_transaction_signed(txid, vec![]) { - Ok((Some(tx_signatures), funding_tx_opt)) => { + let best_block_height = self.best_block.read().unwrap().height; + match channel.funding_transaction_signed(txid, vec![], best_block_height, &self.logger) { + Ok((tx_signatures_opt, splice_locked_opt, funding_tx_opt)) => { if let Some(funding_tx) = funding_tx_opt { - self.broadcast_interactive_funding(channel, &funding_tx); + self.broadcast_interactive_funding(channel, &funding_tx, &self.logger); } if channel.context.is_connected() { - pending_msg_events.push(MessageSendEvent::SendTxSignatures { - node_id: counterparty_node_id, - msg: tx_signatures, - }); + if let Some(tx_signatures) = tx_signatures_opt { + pending_msg_events.push(MessageSendEvent::SendTxSignatures { + node_id: counterparty_node_id, + msg: tx_signatures, + }); + } + if let Some(splice_locked) = splice_locked_opt { + pending_msg_events.push(MessageSendEvent::SendSpliceLocked { + node_id: counterparty_node_id, + msg: splice_locked, + }); + } } }, - Ok((None, _)) => { - debug_assert!(false, "If our tx_signatures is empty, then we should send it first!"); - }, Err(err) => { log_warn!(logger, "Failed signing interactive funding transaction: {err:?}"); }, @@ -10315,19 +10338,22 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ hash_map::Entry::Occupied(mut chan_entry) => { match chan_entry.get_mut().as_funded_mut() { Some(chan) => { - let (tx_signatures_opt, funding_tx_opt) = try_channel_entry!(self, peer_state, chan.tx_signatures(msg), chan_entry); + let best_block_height = self.best_block.read().unwrap().height; + let (tx_signatures_opt, splice_locked_opt, funding_tx_opt) = try_channel_entry!(self, peer_state, chan.tx_signatures(msg, best_block_height, &self.logger), chan_entry); if let Some(tx_signatures) = tx_signatures_opt { peer_state.pending_msg_events.push(MessageSendEvent::SendTxSignatures { node_id: *counterparty_node_id, msg: tx_signatures, }); } + if let Some(splice_locked) = splice_locked_opt { + peer_state.pending_msg_events.push(MessageSendEvent::SendSpliceLocked { + node_id: *counterparty_node_id, + msg: splice_locked, + }); + } if let Some(ref funding_tx) = funding_tx_opt { - self.tx_broadcaster.broadcast_transactions(&[funding_tx]); - { - let mut pending_events = self.pending_events.lock().unwrap(); - emit_channel_pending_event!(pending_events, chan); - } + self.broadcast_interactive_funding(chan, funding_tx, &self.logger); } }, None => { diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index f26ef03a74f..cf304da5923 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1549,6 +1549,14 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, initiator_config: Option, +) -> (bitcoin::Transaction, ChannelId) { + open_zero_conf_channel_with_value(initiator, receiver, initiator_config, 100_000, 10_001) +} + +// Receiver must have been initialized with manually_accept_inbound_channels set to true. 
+pub fn open_zero_conf_channel_with_value<'a, 'b, 'c, 'd>( + initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, + initiator_config: Option, channel_value_sat: u64, push_msat: u64, ) -> (bitcoin::Transaction, ChannelId) { let initiator_channels = initiator.node.list_usable_channels().len(); let receiver_channels = receiver.node.list_usable_channels().len(); @@ -1558,7 +1566,7 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( initiator .node - .create_channel(receiver_node_id, 100_000, 10_001, 42, None, initiator_config) + .create_channel(receiver_node_id, channel_value_sat, push_msat, 42, None, initiator_config) .unwrap(); let open_channel = get_event_msg!(initiator, MessageSendEvent::SendOpenChannel, receiver_node_id); @@ -1587,7 +1595,7 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( initiator.node.handle_accept_channel(receiver_node_id, &accept_channel); let (temporary_channel_id, tx, _) = - create_funding_transaction(&initiator, &receiver_node_id, 100_000, 42); + create_funding_transaction(&initiator, &receiver_node_id, channel_value_sat, 42); initiator .node .funding_transaction_generated(temporary_channel_id, receiver_node_id, tx.clone()) @@ -4771,11 +4779,14 @@ macro_rules! handle_chan_reestablish_msgs { None }; - if let Some(&MessageSendEvent::SendAnnouncementSignatures { ref node_id, msg: _ }) = + let mut announcement_sigs = None; // May be now or later + if let Some(&MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg }) = msg_events.get(idx) { idx += 1; assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + assert!(announcement_sigs.is_none()); + announcement_sigs = Some(msg.clone()); } let mut had_channel_update = false; // ChannelUpdate may be now or later, but not both @@ -4786,6 +4797,13 @@ macro_rules! 
handle_chan_reestablish_msgs { had_channel_update = true; } + let mut stfu = None; + if let Some(&MessageSendEvent::SendStfu { ref node_id, ref msg }) = msg_events.get(idx) { + idx += 1; + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + stfu = Some(msg.clone()); + } + let mut revoke_and_ack = None; let mut commitment_update = None; let order = if let Some(ev) = msg_events.get(idx) { @@ -4834,6 +4852,15 @@ macro_rules! handle_chan_reestablish_msgs { } } + let mut tx_signatures = None; + if let Some(&MessageSendEvent::SendTxSignatures { ref node_id, ref msg }) = + msg_events.get(idx) + { + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + tx_signatures = Some(msg.clone()); + idx += 1; + } + if let Some(&MessageSendEvent::SendChannelUpdate { ref node_id, .. }) = msg_events.get(idx) { assert_eq!(*node_id, $dst_node.node.get_our_node_id()); @@ -4841,9 +4868,26 @@ macro_rules! handle_chan_reestablish_msgs { assert!(!had_channel_update); } - assert_eq!(msg_events.len(), idx); + if let Some(&MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg }) = + msg_events.get(idx) + { + idx += 1; + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + assert!(announcement_sigs.is_none()); + announcement_sigs = Some(msg.clone()); + } + + assert_eq!(msg_events.len(), idx, "{msg_events:?}"); - (channel_ready, revoke_and_ack, commitment_update, order) + ( + channel_ready, + revoke_and_ack, + commitment_update, + order, + announcement_sigs, + tx_signatures, + stfu, + ) }}; } @@ -4851,6 +4895,11 @@ pub struct ReconnectArgs<'a, 'b, 'c, 'd> { pub node_a: &'a Node<'b, 'c, 'd>, pub node_b: &'a Node<'b, 'c, 'd>, pub send_channel_ready: (bool, bool), + pub send_announcement_sigs: (bool, bool), + pub send_stfu: (bool, bool), + pub send_interactive_tx_commit_sig: (bool, bool), + pub send_interactive_tx_sigs: (bool, bool), + pub expect_renegotiated_funding_locked_monitor_update: (bool, bool), pub pending_responding_commitment_signed: (bool, bool), /// Indicates 
that the pending responding commitment signed will be a dup for the recipient, /// and no monitor update is expected @@ -4869,6 +4918,11 @@ impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> { node_a, node_b, send_channel_ready: (false, false), + send_announcement_sigs: (false, false), + send_stfu: (false, false), + send_interactive_tx_commit_sig: (false, false), + send_interactive_tx_sigs: (false, false), + expect_renegotiated_funding_locked_monitor_update: (false, false), pending_responding_commitment_signed: (false, false), pending_responding_commitment_signed_dup_monitor: (false, false), pending_htlc_adds: (0, 0), @@ -4888,6 +4942,11 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_a, node_b, send_channel_ready, + send_announcement_sigs, + send_stfu, + send_interactive_tx_commit_sig, + send_interactive_tx_sigs, + expect_renegotiated_funding_locked_monitor_update, pending_htlc_adds, pending_htlc_claims, pending_htlc_fails, @@ -4938,7 +4997,11 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_b.node.handle_channel_reestablish(node_a_id, &msg); resp_1.push(handle_chan_reestablish_msgs!(node_b, node_a)); } - if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 { + + if pending_cell_htlc_claims.0 != 0 + || pending_cell_htlc_fails.0 != 0 + || expect_renegotiated_funding_locked_monitor_update.1 + { check_added_monitors!(node_b, 1); } else { check_added_monitors!(node_b, 0); @@ -4949,7 +5012,10 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_a.node.handle_channel_reestablish(node_b_id, &msg); resp_2.push(handle_chan_reestablish_msgs!(node_a, node_b)); } - if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 { + if pending_cell_htlc_claims.1 != 0 + || pending_cell_htlc_fails.1 != 0 + || expect_renegotiated_funding_locked_monitor_update.0 + { check_added_monitors!(node_a, 1); } else { check_added_monitors!(node_a, 0); @@ -4969,7 
+5035,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { && pending_cell_htlc_fails.1 == 0) ); - for chan_msgs in resp_1.drain(..) { + for mut chan_msgs in resp_1.drain(..) { if send_channel_ready.0 { node_a.node.handle_channel_ready(node_b_id, &chan_msgs.0.unwrap()); let announcement_event = node_a.node.get_and_clear_pending_msg_events(); @@ -4984,6 +5050,39 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { } else { assert!(chan_msgs.0.is_none()); } + if send_announcement_sigs.0 { + let announcement_sigs = chan_msgs.4.take().unwrap(); + node_a.node.handle_announcement_signatures(node_b_id, &announcement_sigs); + let msg_events = node_a.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = msg_events[0] { + } else { + panic!("Unexpected event! {:?}", msg_events[0]); + } + } else { + assert!(chan_msgs.4.is_none()); + } + if send_stfu.0 { + let stfu = chan_msgs.6.take().unwrap(); + node_a.node.handle_stfu(node_b_id, &stfu); + } else { + assert!(chan_msgs.6.is_none()); + } + if send_interactive_tx_commit_sig.0 { + assert!(chan_msgs.1.is_none()); + let commitment_update = chan_msgs.2.take().unwrap(); + assert_eq!(commitment_update.commitment_signed.len(), 1); + node_a.node.handle_commitment_signed_batch_test( + node_b_id, + &commitment_update.commitment_signed, + ) + } + if send_interactive_tx_sigs.0 { + let tx_signatures = chan_msgs.5.take().unwrap(); + node_a.node.handle_tx_signatures(node_b_id, &tx_signatures); + } else { + assert!(chan_msgs.5.is_none()); + } if pending_raa.0 { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); node_a.node.handle_revoke_and_ack(node_b_id, &chan_msgs.1.unwrap()); @@ -5048,7 +5147,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { } } - for chan_msgs in resp_2.drain(..) { + for mut chan_msgs in resp_2.drain(..) 
{ if send_channel_ready.1 { node_b.node.handle_channel_ready(node_a_id, &chan_msgs.0.unwrap()); let announcement_event = node_b.node.get_and_clear_pending_msg_events(); @@ -5063,6 +5162,39 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { } else { assert!(chan_msgs.0.is_none()); } + if send_announcement_sigs.1 { + let announcement_sigs = chan_msgs.4.take().unwrap(); + node_b.node.handle_announcement_signatures(node_a_id, &announcement_sigs); + let mut msg_events = node_b.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = msg_events.remove(0) { + } else { + panic!(); + } + } else { + assert!(chan_msgs.4.is_none()); + } + if send_stfu.1 { + let stfu = chan_msgs.6.take().unwrap(); + node_b.node.handle_stfu(node_a_id, &stfu); + } else { + assert!(chan_msgs.6.is_none()); + } + if send_interactive_tx_commit_sig.1 { + assert!(chan_msgs.1.is_none()); + let commitment_update = chan_msgs.2.take().unwrap(); + assert_eq!(commitment_update.commitment_signed.len(), 1); + node_b.node.handle_commitment_signed_batch_test( + node_a_id, + &commitment_update.commitment_signed, + ) + } + if send_interactive_tx_sigs.1 { + let tx_signatures = chan_msgs.5.take().unwrap(); + node_b.node.handle_tx_signatures(node_a_id, &tx_signatures); + } else { + assert!(chan_msgs.5.is_none()); + } if pending_raa.1 { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); node_b.node.handle_revoke_and_ack(node_a_id, &chan_msgs.1.unwrap()); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index aabce4a55a3..6451ce5e31e 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -2588,6 +2588,7 @@ pub fn test_simple_peer_disconnect() { nodes[1].node.peer_disconnected(node_a_id); let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_channel_ready = (true, 
true); + reconnect_args.send_announcement_sigs = (true, true); reconnect_nodes(reconnect_args); let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).0; @@ -2745,22 +2746,29 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken // received on either side, both sides will need to resend them. let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_channel_ready = (true, true); + if simulate_broken_lnd || messages_delivered > 0 { + reconnect_args.send_announcement_sigs.0 = true; + } + reconnect_args.send_announcement_sigs.1 = true; reconnect_args.pending_htlc_adds.1 = 1; reconnect_nodes(reconnect_args); } else if messages_delivered == 3 { // nodes[0] still wants its RAA + commitment_signed let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_announcement_sigs = (true, true); reconnect_args.pending_responding_commitment_signed.0 = true; reconnect_args.pending_raa.0 = true; reconnect_nodes(reconnect_args); } else if messages_delivered == 4 { // nodes[0] still wants its commitment_signed let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_announcement_sigs.0 = true; reconnect_args.pending_responding_commitment_signed.0 = true; reconnect_nodes(reconnect_args); } else if messages_delivered == 5 { // nodes[1] still wants its final RAA let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_announcement_sigs.0 = true; reconnect_args.pending_raa.1 = true; reconnect_nodes(reconnect_args); } else if messages_delivered == 6 { @@ -2781,7 +2789,16 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); - reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + if !simulate_broken_lnd + && 
(messages_delivered == 0 || (messages_delivered > 2 && messages_delivered < 6)) + { + reconnect_args.send_announcement_sigs.0 = true; + } + if messages_delivered < 4 { + reconnect_args.send_announcement_sigs.1 = true; + } + reconnect_nodes(reconnect_args); nodes[1].node.process_pending_htlc_forwards(); @@ -2879,6 +2896,10 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken nodes[1].node.peer_disconnected(node_a_id); if messages_delivered < 2 { let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + if !simulate_broken_lnd && messages_delivered == 0 { + reconnect_args.send_announcement_sigs.0 = true; + } + reconnect_args.send_announcement_sigs.1 = true; reconnect_args.pending_htlc_claims.0 = 1; reconnect_nodes(reconnect_args); if messages_delivered < 1 { @@ -2889,12 +2910,14 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken } else if messages_delivered == 2 { // nodes[0] still wants its RAA + commitment_signed let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_announcement_sigs.1 = true; reconnect_args.pending_responding_commitment_signed.1 = true; reconnect_args.pending_raa.1 = true; reconnect_nodes(reconnect_args); } else if messages_delivered == 3 { // nodes[0] still wants its commitment_signed let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_announcement_sigs.1 = true; reconnect_args.pending_responding_commitment_signed.1 = true; reconnect_nodes(reconnect_args); } else if messages_delivered == 4 { @@ -2914,7 +2937,15 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); } - reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + if !simulate_broken_lnd { + if messages_delivered == 0 { + 
reconnect_args.send_announcement_sigs.0 = true; + } else if messages_delivered == 2 || messages_delivered == 3 { + reconnect_args.send_announcement_sigs.1 = true; + } + } + reconnect_nodes(reconnect_args); if messages_delivered > 2 { expect_payment_path_successful!(nodes[0]); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index d2479bbb0e5..9eb85173a83 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -4858,6 +4858,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { } let mut reconnect_args = ReconnectArgs::new(&nodes[2], &nodes[3]); reconnect_args.send_channel_ready = (true, true); + reconnect_args.send_announcement_sigs = (true, true); reconnect_nodes(reconnect_args); // Create a new channel between C and D as A will refuse to retry on the existing one because diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 9c99337dea3..64e87a72997 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -333,6 +333,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ // generate an error message we can handle below. 
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_channel_ready = (true, true); + reconnect_args.send_announcement_sigs = (true, true); reconnect_nodes(reconnect_args); } } diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index 1537a36cc8b..c3a1e2ca41d 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -10,6 +10,7 @@ use crate::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::chain::transaction::OutPoint; +use crate::chain::ChannelMonitorUpdateStatus; use crate::events::bump_transaction::sync::WalletSourceSync; use crate::events::{ClosureReason, Event, FundingInfo, HTLCHandlingFailureType}; use crate::ln::chan_utils; @@ -19,7 +20,9 @@ use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; use crate::ln::types::ChannelId; use crate::util::errors::APIError; +use crate::util::ser::Writeable; +use bitcoin::secp256k1::PublicKey; use bitcoin::{Amount, OutPoint as BitcoinOutPoint, ScriptBuf, Transaction, TxOut}; #[test] @@ -62,6 +65,65 @@ fn test_v1_splice_in_negative_insufficient_inputs() { } } +fn negotiate_splice_tx_with_init<'a, 'b, 'c, 'd>( + initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, + initiator_contribution: SpliceContribution, splice_init: &msgs::SpliceInit, +) -> msgs::CommitmentSigned { + let node_id_initiator = initiator.node.get_our_node_id(); + let node_id_acceptor = acceptor.node.get_our_node_id(); + + acceptor.node.handle_splice_init(node_id_initiator, &splice_init); + let splice_ack = get_event_msg!(acceptor, MessageSendEvent::SendSpliceAck, node_id_initiator); + initiator.node.handle_splice_ack(node_id_acceptor, &splice_ack); + + let new_funding_script = chan_utils::make_funding_redeemscript( + 
&splice_init.funding_pubkey, + &splice_ack.funding_pubkey, + ) + .to_p2wsh(); + + complete_interactive_funding_negotiation( + initiator, + acceptor, + channel_id, + initiator_contribution, + new_funding_script, + ) +} + +fn negotiate_splice_tx<'a, 'b, 'c, 'd>( + initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, + initiator_contribution: SpliceContribution, +) -> msgs::CommitmentSigned { + let node_id_initiator = initiator.node.get_our_node_id(); + let node_id_acceptor = acceptor.node.get_our_node_id(); + + initiator + .node + .splice_channel( + &channel_id, + &node_id_acceptor, + initiator_contribution.clone(), + FEERATE_FLOOR_SATS_PER_KW, + None, + ) + .unwrap(); + + let stfu_init = get_event_msg!(initiator, MessageSendEvent::SendStfu, node_id_acceptor); + acceptor.node.handle_stfu(node_id_initiator, &stfu_init); + let stfu_ack = get_event_msg!(acceptor, MessageSendEvent::SendStfu, node_id_initiator); + initiator.node.handle_stfu(node_id_acceptor, &stfu_ack); + + let splice_init = get_event_msg!(initiator, MessageSendEvent::SendSpliceInit, node_id_acceptor); + negotiate_splice_tx_with_init( + initiator, + acceptor, + channel_id, + initiator_contribution, + &splice_init, + ) +} + fn complete_interactive_funding_negotiation<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, initiator_contribution: SpliceContribution, new_funding_script: ScriptBuf, @@ -151,25 +213,25 @@ fn complete_interactive_funding_negotiation<'a, 'b, 'c, 'd>( } } -fn sign_interactive_funding_transaction<'a, 'b, 'c, 'd>( +fn sign_interactive_funding_tx<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, - initial_commit_sig_for_acceptor: msgs::CommitmentSigned, -) { + initial_commit_sig_for_acceptor: msgs::CommitmentSigned, is_0conf: bool, +) -> (Transaction, Option<(msgs::SpliceLocked, PublicKey)>) { let node_id_initiator = initiator.node.get_our_node_id(); let 
node_id_acceptor = acceptor.node.get_our_node_id(); assert!(initiator.node.get_and_clear_pending_msg_events().is_empty()); acceptor.node.handle_commitment_signed(node_id_initiator, &initial_commit_sig_for_acceptor); - let mut msg_events = acceptor.node.get_and_clear_pending_msg_events(); + let msg_events = acceptor.node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 2, "{msg_events:?}"); - if let MessageSendEvent::UpdateHTLCs { mut updates, .. } = msg_events.remove(0) { - let commitment_signed = updates.commitment_signed.remove(0); - initiator.node.handle_commitment_signed(node_id_acceptor, &commitment_signed); + if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = &msg_events[0] { + let commitment_signed = &updates.commitment_signed[0]; + initiator.node.handle_commitment_signed(node_id_acceptor, commitment_signed); } else { panic!(); } - if let MessageSendEvent::SendTxSignatures { ref msg, .. } = msg_events.remove(0) { + if let MessageSendEvent::SendTxSignatures { ref msg, .. } = &msg_events[1] { initiator.node.handle_tx_signatures(node_id_acceptor, msg); } else { panic!(); @@ -189,98 +251,89 @@ fn sign_interactive_funding_transaction<'a, 'b, 'c, 'd>( .funding_transaction_signed(&channel_id, &counterparty_node_id, partially_signed_tx) .unwrap(); } - let tx_signatures = - get_event_msg!(initiator, MessageSendEvent::SendTxSignatures, node_id_acceptor); - acceptor.node.handle_tx_signatures(node_id_initiator, &tx_signatures); + let mut msg_events = initiator.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), if is_0conf { 2 } else { 1 }, "{msg_events:?}"); + if let MessageSendEvent::SendTxSignatures { ref msg, .. } = &msg_events[0] { + acceptor.node.handle_tx_signatures(node_id_initiator, msg); + } else { + panic!(); + } + let splice_locked = if is_0conf { + if let MessageSendEvent::SendSpliceLocked { msg, .. 
} = msg_events.remove(1) { + Some((msg, node_id_acceptor)) + } else { + panic!(); + } + } else { + None + }; check_added_monitors(&initiator, 1); check_added_monitors(&acceptor, 1); -} - -fn splice_channel<'a, 'b, 'c, 'd>( - initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, - initiator_contribution: SpliceContribution, -) -> Transaction { - let node_id_initiator = initiator.node.get_our_node_id(); - let node_id_acceptor = acceptor.node.get_our_node_id(); - - initiator - .node - .splice_channel( - &channel_id, - &node_id_acceptor, - initiator_contribution.clone(), - FEERATE_FLOOR_SATS_PER_KW, - None, - ) - .unwrap(); - let stfu_init = get_event_msg!(initiator, MessageSendEvent::SendStfu, node_id_acceptor); - acceptor.node.handle_stfu(node_id_initiator, &stfu_init); - let stfu_ack = get_event_msg!(acceptor, MessageSendEvent::SendStfu, node_id_initiator); - initiator.node.handle_stfu(node_id_acceptor, &stfu_ack); - - let splice_init = get_event_msg!(initiator, MessageSendEvent::SendSpliceInit, node_id_acceptor); - acceptor.node.handle_splice_init(node_id_initiator, &splice_init); - let splice_ack = get_event_msg!(acceptor, MessageSendEvent::SendSpliceAck, node_id_initiator); - initiator.node.handle_splice_ack(node_id_acceptor, &splice_ack); - - let new_funding_script = chan_utils::make_funding_redeemscript( - &splice_init.funding_pubkey, - &splice_ack.funding_pubkey, - ) - .to_p2wsh(); - - let initial_commit_sig_for_acceptor = complete_interactive_funding_negotiation( - initiator, - acceptor, - channel_id, - initiator_contribution, - new_funding_script, - ); - sign_interactive_funding_transaction(initiator, acceptor, initial_commit_sig_for_acceptor); - - let splice_tx = { + let tx = { let mut initiator_txn = initiator.tx_broadcaster.txn_broadcast(); assert_eq!(initiator_txn.len(), 1); let acceptor_txn = acceptor.tx_broadcaster.txn_broadcast(); - assert_eq!(initiator_txn, acceptor_txn); + assert_eq!(initiator_txn, acceptor_txn,); 
initiator_txn.remove(0) }; + (tx, splice_locked) +} + +fn splice_channel<'a, 'b, 'c, 'd>( + initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, + initiator_contribution: SpliceContribution, +) -> Transaction { + let initial_commit_sig_for_acceptor = + negotiate_splice_tx(initiator, acceptor, channel_id, initiator_contribution); + let (splice_tx, splice_locked) = + sign_interactive_funding_tx(initiator, acceptor, initial_commit_sig_for_acceptor, false); + assert!(splice_locked.is_none()); splice_tx } fn lock_splice_after_blocks<'a, 'b, 'c, 'd>( - node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, - num_blocks: u32, + node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, num_blocks: u32, +) { + connect_blocks(node_a, num_blocks); + connect_blocks(node_b, num_blocks); + + let node_id_b = node_b.node.get_our_node_id(); + let splice_locked_for_node_b = + get_event_msg!(node_a, MessageSendEvent::SendSpliceLocked, node_id_b); + lock_splice(node_a, node_b, &splice_locked_for_node_b, false); +} + +fn lock_splice<'a, 'b, 'c, 'd>( + node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, + splice_locked_for_node_b: &msgs::SpliceLocked, is_0conf: bool, ) { let (prev_funding_outpoint, prev_funding_script) = node_a .chain_monitor .chain_monitor - .get_monitor(channel_id) + .get_monitor(splice_locked_for_node_b.channel_id) .map(|monitor| (monitor.get_funding_txo(), monitor.get_funding_script())) .unwrap(); - connect_blocks(node_a, num_blocks); - connect_blocks(node_b, num_blocks); - let node_id_a = node_a.node.get_our_node_id(); let node_id_b = node_b.node.get_our_node_id(); - let splice_locked_a = get_event_msg!(node_a, MessageSendEvent::SendSpliceLocked, node_id_b); - node_b.node.handle_splice_locked(node_id_a, &splice_locked_a); + node_b.node.handle_splice_locked(node_id_a, splice_locked_for_node_b); let mut msg_events = node_b.node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 
2, "{msg_events:?}"); + assert_eq!(msg_events.len(), if is_0conf { 1 } else { 2 }, "{msg_events:?}"); if let MessageSendEvent::SendSpliceLocked { msg, .. } = msg_events.remove(0) { node_a.node.handle_splice_locked(node_id_b, &msg); } else { panic!(); } - if let MessageSendEvent::SendAnnouncementSignatures { msg, .. } = msg_events.remove(0) { - node_a.node.handle_announcement_signatures(node_id_b, &msg); - } else { - panic!(); + if !is_0conf { + if let MessageSendEvent::SendAnnouncementSignatures { msg, .. } = msg_events.remove(0) { + node_a.node.handle_announcement_signatures(node_id_b, &msg); + } else { + panic!(); + } } expect_channel_ready_event(&node_a, &node_id_b); @@ -288,23 +341,25 @@ fn lock_splice_after_blocks<'a, 'b, 'c, 'd>( expect_channel_ready_event(&node_b, &node_id_a); check_added_monitors(&node_b, 1); - let mut msg_events = node_a.node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 2, "{msg_events:?}"); - if let MessageSendEvent::SendAnnouncementSignatures { msg, .. } = msg_events.remove(0) { - node_b.node.handle_announcement_signatures(node_id_a, &msg); - } else { - panic!(); - } - if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = msg_events.remove(0) { - } else { - panic!(); - } + if !is_0conf { + let mut msg_events = node_a.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2, "{msg_events:?}"); + if let MessageSendEvent::SendAnnouncementSignatures { msg, .. } = msg_events.remove(0) { + node_b.node.handle_announcement_signatures(node_id_a, &msg); + } else { + panic!(); + } + if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = msg_events.remove(0) { + } else { + panic!(); + } - let mut msg_events = node_b.node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1, "{msg_events:?}"); - if let MessageSendEvent::BroadcastChannelAnnouncement { .. 
} = msg_events.remove(0) { - } else { - panic!(); + let mut msg_events = node_b.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = msg_events.remove(0) { + } else { + panic!(); + } } // Remove the corresponding outputs and transactions the chain source is watching for the @@ -349,7 +404,7 @@ fn test_splice_in() { assert!(htlc_limit_msat < initial_channel_value_sat * 1000); let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); - lock_splice_after_blocks(&nodes[0], &nodes[1], channel_id, ANTI_REORG_DELAY - 1); + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; assert!(htlc_limit_msat > initial_channel_value_sat); @@ -392,7 +447,7 @@ fn test_splice_out() { assert!(htlc_limit_msat < initial_channel_value_sat / 2 * 1000); let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); - lock_splice_after_blocks(&nodes[0], &nodes[1], channel_id, ANTI_REORG_DELAY - 1); + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; assert!(htlc_limit_msat < initial_channel_value_sat / 2 * 1000); @@ -452,7 +507,7 @@ fn do_test_splice_commitment_broadcast(splice_status: SpliceStatus, claim_htlcs: mine_transaction(&nodes[1], &splice_tx); } if splice_status == SpliceStatus::Locked { - lock_splice_after_blocks(&nodes[0], &nodes[1], channel_id, ANTI_REORG_DELAY - 1); + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); } if claim_htlcs { @@ -602,3 +657,553 @@ fn do_test_splice_commitment_broadcast(splice_status: SpliceStatus, claim_htlcs: } } } + +#[test] +fn test_splice_reestablish() { + do_test_splice_reestablish(false, false); + do_test_splice_reestablish(false, true); + do_test_splice_reestablish(true, false); + 
do_test_splice_reestablish(true, true); +} + +fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { + // Test that we're able to reestablish the channel successfully throughout the lifecycle of a splice. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let (persister_0a, persister_0b, persister_1a, persister_1b); + let (chain_monitor_0a, chain_monitor_0b, chain_monitor_1a, chain_monitor_1b); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let (node_0a, node_0b, node_1a, node_1b); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_id_0 = nodes[0].node.get_our_node_id(); + let node_id_1 = nodes[1].node.get_our_node_id(); + + let initial_channel_value_sat = 100_000; + let (_, _, channel_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, initial_channel_value_sat, 0); + + let prev_funding_outpoint = get_monitor!(nodes[0], channel_id).get_funding_txo(); + let prev_funding_script = get_monitor!(nodes[0], channel_id).get_funding_script(); + + // Keep a pending HTLC throughout the reestablish flow to make sure we can handle them. + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + + // Negotiate the splice up until the nodes exchange `tx_complete`. 
+ let initiator_contribution = SpliceContribution::SpliceOut { + outputs: vec![ + TxOut { + value: Amount::from_sat(initial_channel_value_sat / 4), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: Amount::from_sat(initial_channel_value_sat / 4), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }, + ], + }; + let initial_commit_sig_for_acceptor = + negotiate_splice_tx(&nodes[0], &nodes[1], channel_id, initiator_contribution); + assert_eq!(initial_commit_sig_for_acceptor.htlc_signatures.len(), 1); + let initial_commit_sig_for_initiator = get_htlc_update_msgs!(&nodes[1], node_id_0); + assert_eq!(initial_commit_sig_for_initiator.commitment_signed.len(), 1); + assert_eq!(initial_commit_sig_for_initiator.commitment_signed[0].htlc_signatures.len(), 1); + + macro_rules! reconnect_nodes { + ($f: expr) => { + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + $f(&mut reconnect_args); + reconnect_nodes(reconnect_args); + }; + } + + // Reestablishing now should force both nodes to retransmit their initial `commitment_signed` + // message as they were never delivered. 
+ if reload { + let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); + reload_node!( + nodes[0], + nodes[0].node.encode(), + &[&encoded_monitor_0], + persister_0a, + chain_monitor_0a, + node_0a + ); + let encoded_monitor_1 = get_monitor!(nodes[1], channel_id).encode(); + reload_node!( + nodes[1], + nodes[1].node.encode(), + &[&encoded_monitor_1], + persister_1a, + chain_monitor_1a, + node_1a + ); + if async_monitor_update { + persister_0a.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + persister_1a.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + } + } else { + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + if async_monitor_update { + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + } + } + + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_interactive_tx_commit_sig = (true, true); + reconnect_nodes(reconnect_args); + + // The `commitment_signed` messages were delivered in the reestablishment, so we should expect + // to see a `RenegotiatedFunding` monitor update on both nodes. + check_added_monitors(&nodes[0], 1); + check_added_monitors(&nodes[1], 1); + + if async_monitor_update { + // Reconnecting again should result in no messages/events being generated as the monitor + // update is pending. 
+ reconnect_nodes!(|_| {}); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + nodes[0].chain_monitor.complete_sole_pending_chan_update(&channel_id); + nodes[1].chain_monitor.complete_sole_pending_chan_update(&channel_id); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); + } + + // Node 0 should have a signing event to handle since they had a contribution in the splice. + // Node 1 won't and will immediately send `tx_signatures`. + let _ = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + let _ = get_event_msg!(nodes[1], MessageSendEvent::SendTxSignatures, node_id_0); + + // Reconnecting now should force node 1 to retransmit their `tx_signatures` since it was never + // delivered. Node 0 still hasn't called back with `funding_transaction_signed`, so its + // `tx_signatures` is not ready yet. + reconnect_nodes!(|reconnect_args: &mut ReconnectArgs| { + reconnect_args.send_interactive_tx_sigs = (true, false); + }); + let _ = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); + + // Reconnect again to make sure node 1 doesn't retransmit `tx_signatures` unnecessarily as it + // was delivered in the previous reestablishment. + reconnect_nodes!(|_| {}); + + // Have node 0 sign, we should see its `tx_signatures` go out. + let event = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); + if let Event::FundingTransactionReadyForSigning { unsigned_transaction, .. 
} = event { + let tx = nodes[0].wallet_source.sign_tx(unsigned_transaction).unwrap(); + nodes[0].node.funding_transaction_signed(&channel_id, &node_id_1, tx).unwrap(); + } + let _ = get_event_msg!(nodes[0], MessageSendEvent::SendTxSignatures, node_id_1); + + // Reconnect to make sure node 0 retransmits its `tx_signatures` as it was never delivered. + reconnect_nodes!(|reconnect_args: &mut ReconnectArgs| { + reconnect_args.send_interactive_tx_sigs = (false, true); + }); + + // Reestablish the channel again to make sure node 0 doesn't retransmit `tx_signatures` + // unnecessarily as it was delivered in the previous reestablishment. + if reload { + let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); + reload_node!( + nodes[0], + nodes[0].node.encode(), + &[&encoded_monitor_0], + persister_0b, + chain_monitor_0b, + node_0b + ); + let encoded_monitor_1 = get_monitor!(nodes[1], channel_id).encode(); + reload_node!( + nodes[1], + nodes[1].node.encode(), + &[&encoded_monitor_1], + persister_1b, + chain_monitor_1b, + node_1b + ); + } else { + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + } + reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + + // The channel should no longer be quiescent with `tx_signatures` exchanged. We should expect to + // see the splice transaction broadcast. + let splice_tx = { + let mut txn_0 = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn_0.len(), 1); + let txn_1 = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(txn_0, txn_1); + txn_0.remove(0) + }; + + // Make sure we can still send payments. + send_payment(&nodes[0], &[&nodes[1]], 1_000_000); + + // Lock in the splice on node 0. We should see its `splice_locked` sent. + confirm_transaction(&nodes[0], &splice_tx); + let _ = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceLocked, node_id_1); + + // Confirm the splice but with one less confirmation than required on node 1. 
Its + // `splice_locked` should no be sent yet. + mine_transaction(&nodes[1], &splice_tx); + connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Reconnect the nodes. Node 1 should assume node 0's `splice_locked` via + // `ChannelReestablish::my_current_funding_locked`. + reconnect_nodes!(|_| {}); + + if async_monitor_update { + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + } + + // Mine the remaining block on node 1 for the splice to be locked. Since `splice_locked` has now + // been exchanged on node 1, we should see its `announcement_signatures` sent as well, and the + // `RenegotiatedFundingLocked` monitor update. + connect_blocks(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); + let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2, "{msg_events:?}"); + if let MessageSendEvent::SendSpliceLocked { .. } = msg_events.remove(0) { + } else { + panic!() + } + if let MessageSendEvent::SendAnnouncementSignatures { .. } = msg_events.remove(0) { + } else { + panic!() + } + expect_channel_ready_event(&nodes[1], &node_id_0); + + // Reconnect the nodes to ensure node 1 retransmits its `splice_locked` (implicitly via + // `my_current_funding_locked`) and `announcement_signatures` to node 0. 
+ reconnect_nodes!(|reconnect_args: &mut ReconnectArgs| { + reconnect_args.expect_renegotiated_funding_locked_monitor_update = (true, false); + reconnect_args.send_announcement_sigs = (true, true); + }); + expect_channel_ready_event(&nodes[0], &node_id_1); + + if async_monitor_update { + nodes[0].chain_monitor.complete_sole_pending_chan_update(&channel_id); + nodes[1].chain_monitor.complete_sole_pending_chan_update(&channel_id); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); + } + + // We shouldn't have any further events or messages to process. + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Make sure we can still send payments. + send_payment(&nodes[0], &[&nodes[1]], 1_000_000); + + // Remove the previous funding info the chain source was watching to avoid failing the + // end-of-test sanity checks. + nodes[0] + .chain_source + .remove_watched_txn_and_outputs(prev_funding_outpoint, prev_funding_script.clone()); + nodes[1] + .chain_source + .remove_watched_txn_and_outputs(prev_funding_outpoint, prev_funding_script); +} + +#[test] +fn test_propose_splice_while_disconnected() { + do_test_propose_splice_while_disconnected(false, false); + do_test_propose_splice_while_disconnected(false, true); + do_test_propose_splice_while_disconnected(true, false); + do_test_propose_splice_while_disconnected(true, true); +} + +fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { + // Test that both nodes are able to propose a splice while the counterparty is disconnected, and + // whoever doesn't go first due to the quiescence tie-breaker, will retry their splice after the + // first one becomes locked. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let (persister_0a, persister_0b, persister_1a, persister_1b); + let (chain_monitor_0a, chain_monitor_0b, chain_monitor_1a, chain_monitor_1b); + let mut config = test_default_channel_config(); + if use_0conf { + config.manually_accept_inbound_channels = true; + config.channel_handshake_limits.trust_own_funding_0conf = true; + } + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let (node_0a, node_0b, node_1a, node_1b); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_id_0 = nodes[0].node.get_our_node_id(); + let node_id_1 = nodes[1].node.get_our_node_id(); + + let initial_channel_value_sat = 1_000_000; + let push_msat = initial_channel_value_sat / 2 * 1000; + let channel_id = if use_0conf { + let (funding_tx, channel_id) = open_zero_conf_channel_with_value( + &nodes[0], + &nodes[1], + None, + initial_channel_value_sat, + push_msat, + ); + mine_transaction(&nodes[0], &funding_tx); + mine_transaction(&nodes[1], &funding_tx); + channel_id + } else { + let (_, _, channel_id, _) = create_announced_chan_between_nodes_with_value( + &nodes, + 0, + 1, + initial_channel_value_sat, + push_msat, + ); + channel_id + }; + + // Start with the nodes disconnected, and have each one attempt a splice. 
+ nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + + let splice_out_sat = initial_channel_value_sat / 4; + let node_0_contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(splice_out_sat), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }], + }; + nodes[0] + .node + .splice_channel( + &channel_id, + &node_id_1, + node_0_contribution.clone(), + FEERATE_FLOOR_SATS_PER_KW, + None, + ) + .unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + let node_1_contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(splice_out_sat), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }], + }; + nodes[1] + .node + .splice_channel( + &channel_id, + &node_id_0, + node_1_contribution.clone(), + FEERATE_FLOOR_SATS_PER_KW, + None, + ) + .unwrap(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + if reload { + let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); + reload_node!( + nodes[0], + nodes[0].node.encode(), + &[&encoded_monitor_0], + persister_0a, + chain_monitor_0a, + node_0a + ); + let encoded_monitor_1 = get_monitor!(nodes[1], channel_id).encode(); + reload_node!( + nodes[1], + nodes[1].node.encode(), + &[&encoded_monitor_1], + persister_1a, + chain_monitor_1a, + node_1a + ); + } + + // Reconnect the nodes. Both nodes should attempt quiescence as the initiator, but only one will + // be it via the tie-breaker. 
+ let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_channel_ready = (true, true); + if !use_0conf { + reconnect_args.send_announcement_sigs = (true, true); + } + reconnect_args.send_stfu = (true, true); + reconnect_nodes(reconnect_args); + let splice_init = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceInit, node_id_1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + let (prev_funding_outpoint, prev_funding_script) = nodes[0] + .chain_monitor + .chain_monitor + .get_monitor(channel_id) + .map(|monitor| (monitor.get_funding_txo(), monitor.get_funding_script())) + .unwrap(); + + // Negotiate the first splice to completion. + let initial_commit_sig = negotiate_splice_tx_with_init( + &nodes[0], + &nodes[1], + channel_id, + node_0_contribution, + &splice_init, + ); + let (splice_tx, splice_locked) = + sign_interactive_funding_tx(&nodes[0], &nodes[1], initial_commit_sig, use_0conf); + + let splice_locked = if use_0conf { + let (splice_locked, for_node_id) = splice_locked.unwrap(); + assert_eq!(for_node_id, node_id_1); + splice_locked + } else { + assert!(splice_locked.is_none()); + + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + + // Mine enough blocks for the first splice to become locked. + connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); + connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); + + get_event_msg!(nodes[0], MessageSendEvent::SendSpliceLocked, node_id_1) + }; + nodes[1].node.handle_splice_locked(node_id_0, &splice_locked); + + // We should see the node which lost the tie-breaker attempt their splice now by first + // negotiating quiescence, but their `stfu` won't be sent until after another reconnection. + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), if use_0conf { 2 } else { 3 }, "{msg_events:?}"); + if let MessageSendEvent::SendSpliceLocked { ref msg, .. 
} = &msg_events[0] { + nodes[0].node.handle_splice_locked(node_id_1, msg); + if use_0conf { + // TODO(splicing): Revisit splice transaction rebroadcasts. + let txn_0 = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn_0.len(), 1); + assert_eq!(&txn_0[0], &splice_tx); + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + } + } else { + panic!("Unexpected event {:?}", &msg_events[0]); + } + if !use_0conf { + if let MessageSendEvent::SendAnnouncementSignatures { ref msg, .. } = &msg_events[1] { + nodes[0].node.handle_announcement_signatures(node_id_1, msg); + } else { + panic!("Unexpected event {:?}", &msg_events[1]); + } + } + assert!(matches!( + &msg_events[if use_0conf { 1 } else { 2 }], + MessageSendEvent::SendStfu { .. } + )); + + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), if use_0conf { 0 } else { 2 }, "{msg_events:?}"); + if !use_0conf { + if let MessageSendEvent::SendAnnouncementSignatures { ref msg, .. } = &msg_events[0] { + nodes[1].node.handle_announcement_signatures(node_id_0, msg); + } else { + panic!("Unexpected event {:?}", &msg_events[1]); + } + assert!(matches!(&msg_events[1], MessageSendEvent::BroadcastChannelAnnouncement { .. })); + } + + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), if use_0conf { 0 } else { 1 }, "{msg_events:?}"); + if !use_0conf { + assert!(matches!(&msg_events[0], MessageSendEvent::BroadcastChannelAnnouncement { .. })); + } + + expect_channel_ready_event(&nodes[0], &node_id_1); + check_added_monitors(&nodes[0], 1); + expect_channel_ready_event(&nodes[1], &node_id_0); + check_added_monitors(&nodes[1], 1); + + // Remove the corresponding outputs and transactions the chain source is watching for the + // old funding as it is no longer being tracked. 
+ nodes[0] + .chain_source + .remove_watched_txn_and_outputs(prev_funding_outpoint, prev_funding_script.clone()); + nodes[1] + .chain_source + .remove_watched_txn_and_outputs(prev_funding_outpoint, prev_funding_script); + + // Reconnect the nodes. This should trigger the node which lost the tie-breaker to resend `stfu` + // for their splice attempt. + if reload { + let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); + reload_node!( + nodes[0], + nodes[0].node.encode(), + &[&encoded_monitor_0], + persister_0b, + chain_monitor_0b, + node_0b + ); + let encoded_monitor_1 = get_monitor!(nodes[1], channel_id).encode(); + reload_node!( + nodes[1], + nodes[1].node.encode(), + &[&encoded_monitor_1], + persister_1b, + chain_monitor_1b, + node_1b + ); + } else { + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + } + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_channel_ready = (true, true); + if !use_0conf { + reconnect_args.send_announcement_sigs = (true, true); + } + reconnect_args.send_stfu = (true, false); + reconnect_nodes(reconnect_args); + + // Drive the second splice to completion. + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + if let MessageSendEvent::SendStfu { ref msg, .. 
} = msg_events[0] { + nodes[1].node.handle_stfu(node_id_0, msg); + } else { + panic!("Unexpected event {:?}", &msg_events[0]); + } + + let splice_init = get_event_msg!(nodes[1], MessageSendEvent::SendSpliceInit, node_id_0); + let initial_commit_sig = negotiate_splice_tx_with_init( + &nodes[1], + &nodes[0], + channel_id, + node_1_contribution, + &splice_init, + ); + let (splice_tx, splice_locked) = + sign_interactive_funding_tx(&nodes[1], &nodes[0], initial_commit_sig, use_0conf); + + if use_0conf { + let (splice_locked, for_node_id) = splice_locked.unwrap(); + assert_eq!(for_node_id, node_id_0); + lock_splice(&nodes[1], &nodes[0], &splice_locked, true); + } else { + assert!(splice_locked.is_none()); + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + lock_splice_after_blocks(&nodes[1], &nodes[0], ANTI_REORG_DELAY - 1); + } + + // Sanity check that we can still make a test payment. + send_payment(&nodes[0], &[&nodes[1]], 1_000_000); +}