From 1ade599d4f6dcf0a1f2fc6bb487f3408c7dae8f7 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 19 Jun 2025 17:16:07 -0500 Subject: [PATCH 01/11] Add features for splicing While splicing is not yet fully supported, checking if the feature has been negotiated is needed for changes to the channel_reestablish logic. --- lightning-types/src/features.rs | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/lightning-types/src/features.rs b/lightning-types/src/features.rs index aca4bb6e5a9..79f6fa0ce92 100644 --- a/lightning-types/src/features.rs +++ b/lightning-types/src/features.rs @@ -80,6 +80,8 @@ //! (see [BOLT-2](https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#channel-quiescence) for more information). //! - `ZeroFeeCommitments` - A channel type which always uses zero transaction fee on commitment transactions. //! (see [BOLT PR #1228](https://github.com/lightning/bolts/pull/1228) for more info). +//! - `Splice` - Allows replacing the funding transaction with a new one +//! (see [BOLT PR #1160](https://github.com/lightning/bolts/pull/1160) for more information). //! //! LDK knows about the following features, but does not support them: //! - `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be @@ -163,7 +165,7 @@ mod sealed { // Byte 6 ZeroConf, // Byte 7 - Trampoline | SimpleClose, + Trampoline | SimpleClose | Splice, ] ); define_context!( @@ -184,7 +186,7 @@ mod sealed { // Byte 6 ZeroConf | Keysend, // Byte 7 - Trampoline | SimpleClose, + Trampoline | SimpleClose | Splice, // Byte 8 - 31 ,,,,,,,,,,,,,,,,,,,,,,,, // Byte 32 @@ -673,9 +675,20 @@ mod sealed { supports_simple_close, requires_simple_close ); - // By default, allocate enough bytes to cover up to SimpleClose. Update this as new features are + define_feature!( + 63, + Splice, + [InitContext, NodeContext], + "Feature flags for channel splicing.", + set_splicing_optional, + set_splicing_required, + clear_splicing, + supports_splicing, + requires_splicing + ); + // By default, allocate enough bytes to cover up to Splice. Update this as new features are // added which we expect to appear commonly across contexts. 
- pub(super) const MIN_FEATURES_ALLOCATION_BYTES: usize = (61 + 7) / 8; + pub(super) const MIN_FEATURES_ALLOCATION_BYTES: usize = (63 + 7) / 8; define_feature!( 259, DnsResolver, @@ -1369,6 +1382,7 @@ mod tests { init_features.set_zero_conf_optional(); init_features.set_quiescence_optional(); init_features.set_simple_close_optional(); + init_features.set_splicing_optional(); assert!(init_features.initial_routing_sync()); assert!(!init_features.supports_upfront_shutdown_script()); @@ -1384,7 +1398,7 @@ mod tests { // - onion_messages // - option_channel_type | option_scid_alias // - option_zeroconf - // - option_simple_close + // - option_simple_close | option_splice assert_eq!(node_features.flags.len(), 8); assert_eq!(node_features.flags[0], 0b00000001); assert_eq!(node_features.flags[1], 0b01010001); @@ -1393,7 +1407,7 @@ mod tests { assert_eq!(node_features.flags[4], 0b10001000); assert_eq!(node_features.flags[5], 0b10100000); assert_eq!(node_features.flags[6], 0b00001000); - assert_eq!(node_features.flags[7], 0b00100000); + assert_eq!(node_features.flags[7], 0b10100000); } // Check that cleared flags are kept blank when converting back: From 39ae79ead4049767ee62bafa6ff64d54eb78a89b Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 5 Jun 2025 15:27:36 -0500 Subject: [PATCH 02/11] Add funding_locked_txid TLVs to channel_reestablish The splicing spec extends the channel_reestablish message with two more TLVs indicating which funding txid the sender has sent/received either explicitly via splice_locked or implicitly via channel_ready. This allows peers to detect if a splice_locked was lost during disconnection and must be retransmitted. This commit updates channel_reestablish with the TLVs. Subsequent commits will implement the spec requirements. --- lightning/src/ln/channel.rs | 2 + lightning/src/ln/channelmanager.rs | 2 + lightning/src/ln/msgs.rs | 83 ++++++++++++++++++++++++++++++ 3 files changed, 87 insertions(+) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index b516961d100..8aa916cc9af 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -10201,6 +10201,8 @@ where your_last_per_commitment_secret: remote_last_secret, my_current_per_commitment_point: dummy_pubkey, next_funding_txid: self.maybe_get_next_funding_txid(), + your_last_funding_locked_txid: None, + my_current_funding_locked_txid: None, } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 9e7031bf8cb..db6c21b8036 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10091,6 +10091,8 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ your_last_per_commitment_secret: [1u8; 32], my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(), next_funding_txid: None, + your_last_funding_locked_txid: None, + my_current_funding_locked_txid: None, }, }); return Err(MsgHandleErrInternal::send_err_msg_no_close( diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index c3913d31e9c..6129b72bfd5 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -928,6 +928,16 @@ pub struct ChannelReestablish { /// * `channel_reestablish`-sending node: https:///github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L2466-L2470 /// * `channel_reestablish`-receiving node: https:///github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L2520-L2531 pub next_funding_txid: Option, + /// The last funding txid received by the sending node, which may be: + /// - the txid of the last `splice_locked` it received, otherwise + /// - the txid of the funding transaction if it received `channel_ready`, or else + /// - `None` if it has never received `channel_ready` or `splice_locked` + pub your_last_funding_locked_txid: Option, + /// The last funding txid sent by the sending node, which may be: + /// - the txid of the last `splice_locked` it sent, otherwise + /// - the txid of the funding transaction if it sent `channel_ready`, or else + /// - `None` if it has never sent `channel_ready` or `splice_locked` + pub my_current_funding_locked_txid: Option, } /// An [`announcement_signatures`] message to be sent to or received from a peer. @@ -2805,6 +2815,8 @@ impl_writeable_msg!(ChannelReestablish, { my_current_per_commitment_point, }, { (0, next_funding_txid, option), + (1, your_last_funding_locked_txid, option), + (3, my_current_funding_locked_txid, option), }); impl_writeable_msg!(ClosingSigned, @@ -4275,6 +4287,8 @@ mod tests { your_last_per_commitment_secret: [9; 32], my_current_per_commitment_point: public_key, next_funding_txid: None, + your_last_funding_locked_txid: None, + my_current_funding_locked_txid: None, }; let encoded_value = cr.encode(); @@ -4326,6 +4340,8 @@ mod tests { ]) .unwrap(), )), + your_last_funding_locked_txid: None, + my_current_funding_locked_txid: None, }; let encoded_value = cr.encode(); @@ -4349,6 +4365,73 @@ mod tests { ); } + #[test] + fn encoding_channel_reestablish_with_funding_locked_txid() { + let public_key = { + let secp_ctx = Secp256k1::new(); + PublicKey::from_secret_key( + &secp_ctx, + &SecretKey::from_slice( + &>::from_hex( + "0101010101010101010101010101010101010101010101010101010101010101", + ) + .unwrap()[..], + ) + .unwrap(), + ) + }; + + let cr = msgs::ChannelReestablish { + channel_id: ChannelId::from_bytes([ + 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, + 0, 0, 0, 0, + ]), + next_local_commitment_number: 3, + next_remote_commitment_number: 4, + your_last_per_commitment_secret: [9; 32], + my_current_per_commitment_point: public_key, + next_funding_txid: None, + your_last_funding_locked_txid: Some(Txid::from_raw_hash( + bitcoin::hashes::Hash::from_slice(&[ + 48, 167, 250, 69, 152, 48, 103, 172, 164, 99, 59, 19, 23, 11, 92, 84, 15, 80, + 4, 12, 98, 82, 75, 31, 201, 11, 91, 23, 98, 23, 53, 124, + ]) + .unwrap(), + )), + my_current_funding_locked_txid: Some(Txid::from_raw_hash( + bitcoin::hashes::Hash::from_slice(&[ + 21, 167, 250, 69, 152, 48, 103, 172, 164, 99, 59, 19, 23, 11, 92, 84, 15, 80, + 4, 12, 98, 82, 75, 31, 201, 11, 91, 23, 98, 23, 53, 124, + ]) + 
.unwrap(), + )), + }; + + let encoded_value = cr.encode(); + assert_eq!( + encoded_value, + vec![ + 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, + 0, 0, 0, 0, // channel_id + 0, 0, 0, 0, 0, 0, 0, 3, // next_local_commitment_number + 0, 0, 0, 0, 0, 0, 0, 4, // next_remote_commitment_number + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, // your_last_per_commitment_secret + 3, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, 101, 215, 30, + 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, + 143, // my_current_per_commitment_point + 1, // Type (your_last_funding_locked_txid) + 32, // Length + 48, 167, 250, 69, 152, 48, 103, 172, 164, 99, 59, 19, 23, 11, 92, 84, 15, 80, 4, + 12, 98, 82, 75, 31, 201, 11, 91, 23, 98, 23, 53, 124, // Value + 3, // Type (my_current_funding_locked_txid) + 32, // Length + 21, 167, 250, 69, 152, 48, 103, 172, 164, 99, 59, 19, 23, 11, 92, 84, 15, 80, 4, + 12, 98, 82, 75, 31, 201, 11, 91, 23, 98, 23, 53, 124, // Value + ] + ); + } + macro_rules! get_keys_from { ($slice: expr, $secp_ctx: expr) => {{ let privkey = SecretKey::from_slice(&>::from_hex($slice).unwrap()[..]).unwrap(); From 77978248188b9b936e9b31d529bbf76c33948704 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Sat, 7 Jun 2025 19:22:48 -0500 Subject: [PATCH 03/11] Set funding_locked_txid TLVs in channel_reestablish The previous commit extended the channel_reestablish message with your_last_funding_locked_txid and my_current_funding_locked_txid for use as described there. This commit sets those fields to the funding txid most recently sent/received accordingly. --- lightning/src/ln/channel.rs | 63 ++++++++++++++++++++++++++---- lightning/src/ln/channelmanager.rs | 3 +- 2 files changed, 58 insertions(+), 8 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 8aa916cc9af..85003f24fd1 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1662,12 +1662,12 @@ where /// send our peer to begin the channel reconnection process. 
#[rustfmt::skip] pub fn peer_connected_get_handshake<L: Deref>( - &mut self, chain_hash: ChainHash, logger: &L, + &mut self, chain_hash: ChainHash, features: &InitFeatures, logger: &L, ) -> ReconnectionMsg where L::Target: Logger { match &mut self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::Funded(chan) => - ReconnectionMsg::Reestablish(chan.get_channel_reestablish(logger)), + ReconnectionMsg::Reestablish(chan.get_channel_reestablish(features, logger)), ChannelPhase::UnfundedOutboundV1(chan) => { chan.get_open_channel(chain_hash, logger) .map(|msg| ReconnectionMsg::Open(OpenChannelMessage::V1(msg))) @@ -9403,6 +9403,13 @@ where false } + /// Returns true if their channel_ready has been received + #[cfg(splicing)] + pub fn is_their_channel_ready(&self) -> bool { + matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)) + || matches!(self.context.channel_state, ChannelState::ChannelReady(_)) + } + /// Returns true if our channel_ready has been sent pub fn is_our_channel_ready(&self) -> bool { matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) @@ -10150,10 +10157,52 @@ where } } + #[cfg(splicing)] + fn maybe_get_your_last_funding_locked_txid(&self, features: &InitFeatures) -> Option<Txid> { + if !features.supports_splicing() { + return None; + } + + self.pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.received_funding_txid) + .or_else(|| { + self.is_their_channel_ready().then(|| self.funding.get_funding_txid()).flatten() + }) + } + #[cfg(not(splicing))] + fn maybe_get_your_last_funding_locked_txid(&self, _features: &InitFeatures) -> Option<Txid> { + None + } + + #[cfg(splicing)] + fn maybe_get_my_current_funding_locked_txid(&self, features: &InitFeatures) -> Option<Txid> { + if !features.supports_splicing() { + return None; + } + + self.pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.sent_funding_txid) + .or_else(|| { + self.is_our_channel_ready().then(|| self.funding.get_funding_txid()).flatten() + }) + } + + #[cfg(not(splicing))] + fn maybe_get_my_current_funding_locked_txid(&self, _features: &InitFeatures) -> Option<Txid> { + None + } + /// May panic if called on a channel that wasn't immediately-previously /// self.remove_uncommitted_htlcs_and_mark_paused()'d #[rustfmt::skip] - fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger { + fn get_channel_reestablish<L: Deref>( + &mut self, features: &InitFeatures, logger: &L, + ) -> msgs::ChannelReestablish + where + L::Target: Logger, + { assert!(self.context.channel_state.is_peer_disconnected()); assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER); // This is generally the first function which gets called on any given channel once we're @@ -10201,8 +10250,8 @@ where your_last_per_commitment_secret: remote_last_secret, my_current_per_commitment_point: dummy_pubkey, next_funding_txid: self.maybe_get_next_funding_txid(), - your_last_funding_locked_txid: None, - my_current_funding_locked_txid: None, + your_last_funding_locked_txid: self.maybe_get_your_last_funding_locked_txid(features), + my_current_funding_locked_txid: self.maybe_get_my_current_funding_locked_txid(features), } } @@ -13690,7 +13739,7 @@ mod tests { // Now disconnect the two nodes and check that the commitment point in // Node B's channel_reestablish message is sane.
assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok()); - let msg = node_b_chan.get_channel_reestablish(&&logger); + let msg = node_b_chan.get_channel_reestablish(&channelmanager::provided_init_features(&config), &&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number assert_eq!(msg.your_last_per_commitment_secret, [0; 32]); @@ -13698,7 +13747,7 @@ mod tests { // Check that the commitment point in Node A's channel_reestablish message // is sane. assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok()); - let msg = node_a_chan.get_channel_reestablish(&&logger); + let msg = node_a_chan.get_channel_reestablish(&channelmanager::provided_init_features(&config), &&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number assert_eq!(msg.your_last_per_commitment_secret, [0; 32]); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index db6c21b8036..e54ee60fd00 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -11937,8 +11937,9 @@ where } for (_, chan) in peer_state.channel_by_id.iter_mut() { + let features = &peer_state.latest_features; let logger = WithChannelContext::from(&self.logger, &chan.context(), None); - match chan.peer_connected_get_handshake(self.chain_hash, &&logger) { + match chan.peer_connected_get_handshake(self.chain_hash, features, &&logger) { ReconnectionMsg::Reestablish(msg) => pending_msg_events.push(MessageSendEvent::SendChannelReestablish { node_id: chan.context().get_counterparty_node_id(), From cbd9bd2741d22e7c33d3335d59de202ad711557f Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 19 Jun 2025 17:24:59 -0500 Subject: [PATCH 04/11] Update channel_reestablish logic for channel_ready When splicing is negotiated, channel_ready must be retransmitted when your_last_funding_locked is not set. Further, the current logic for retransmitting channel_ready is only applicable when splicing is not negotiated. 
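As a rough standalone sketch of the resulting decision (a hypothetical helper with simplified types, not the actual channel code; the legacy condition is reduced to plain commitment numbers):

    // Txid is simplified to a byte array so the sketch stays self-contained.
    type Txid = [u8; 32];

    /// Whether channel_ready must be retransmitted when handling channel_reestablish.
    fn should_retransmit_channel_ready(
        splicing_negotiated: bool,
        your_last_funding_locked_txid: Option<Txid>,
        next_local_commitment_number: u64,
        our_next_commitment_number: u64,
    ) -> bool {
        if splicing_negotiated {
            // With option_splice, retransmit whenever the peer did not set
            // your_last_funding_locked in its channel_reestablish.
            your_last_funding_locked_txid.is_none()
        } else {
            // Legacy rule: only retransmit while both sides are still on the first
            // commitment transaction.
            next_local_commitment_number == 1 && our_next_commitment_number == 1
        }
    }

    fn main() {
        assert!(should_retransmit_channel_ready(true, None, 5, 5));
        assert!(!should_retransmit_channel_ready(false, None, 5, 5));
    }

The diff below applies the same rule against the real channel state.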
--- lightning/src/ln/channel.rs | 15 +++++++++++++-- lightning/src/ln/channelmanager.rs | 3 ++- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 85003f24fd1..6c9eadd7900 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8305,7 +8305,8 @@ where #[rustfmt::skip] pub fn channel_reestablish( &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS, - chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock + chain_hash: ChainHash, features: &InitFeatures, user_config: &UserConfig, + best_block: &BestBlock, ) -> Result where L::Target: Logger, @@ -8429,9 +8430,19 @@ where let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke(); let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 }; - let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.holder_commitment_point.transaction_number() == 1 { + let splicing_negotiated = features.supports_splicing(); + let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.holder_commitment_point.transaction_number() == 1 && !splicing_negotiated { // We should never have to worry about MonitorUpdateInProgress resending ChannelReady self.get_channel_ready(logger) + } else if splicing_negotiated { + // A node: + // - if `option_splice` was negotiated and `your_last_funding_locked` is not + // set in the `channel_reestablish` it received: + // - MUST retransmit `channel_ready`. + msg.your_last_funding_locked_txid + .is_none() + .then(|| ()) + .and_then(|_| self.get_channel_ready(logger)) } else { None }; if msg.next_local_commitment_number == next_counterparty_commitment_number { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e54ee60fd00..9dfc3fb5af3 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10025,12 +10025,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { if let Some(chan) = chan_entry.get_mut().as_funded_mut() { + let features = &peer_state.latest_features; // Currently, we expect all holding cell update_adds to be dropped on peer // disconnect, so Channel's reestablish will never hand us any holding cell // freed HTLCs to fail backwards. If in the future we no longer drop pending // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here. let responses = try_channel_entry!(self, peer_state, chan.channel_reestablish( - msg, &&logger, &self.node_signer, self.chain_hash, + msg, &&logger, &self.node_signer, self.chain_hash, features, &self.default_configuration, &*self.best_block.read().unwrap()), chan_entry); let mut channel_update = None; if let Some(msg) = responses.shutdown_msg { From bbe88b612d1c78ec9a724e9fa23ed1302e7fde48 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Mon, 23 Jun 2025 15:16:50 -0500 Subject: [PATCH 05/11] Update next_funding_txid logic for channel_reestablish The splicing spec updates the logic pertaining to next_funding_txid when handling a channel_reestablish message. 
Specifically: A receiving node: - if `next_funding_txid` is set: - if `next_funding_txid` matches the latest interactive funding transaction or the current channel funding transaction: - if `next_commitment_number` is equal to the commitment number of the `commitment_signed` message it sent for this funding transaction: - MUST retransmit its `commitment_signed` for that funding transaction. - if it has already received `commitment_signed` and it should sign first, as specified in the [`tx_signatures` requirements](#the-tx_signatures-message): - MUST send its `tx_signatures` for that funding transaction. - if it has already received `tx_signatures` for that funding transaction: - MUST send its `tx_signatures` for that funding transaction. - if it also sets `next_funding_txid` in its own `channel_reestablish`, but the values don't match: - MUST send an `error` and fail the channel. - otherwise: - MUST send `tx_abort` to let the sending node know that they can forget this funding transaction. This commit updates FundedChannel::channel_reestablish accordingly. Co-authored-by: Wilmer Paulino Co-authored-by: Jeffrey Czyz --- lightning/src/ln/channel.rs | 174 ++++++++++++++++++++---------------- 1 file changed, 97 insertions(+), 77 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 6c9eadd7900..fff7e4d29ee 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8445,89 +8445,104 @@ where .and_then(|_| self.get_channel_ready(logger)) } else { None }; - if msg.next_local_commitment_number == next_counterparty_commitment_number { - if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { - log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id()); - } else { - log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id()); - } + let mut commitment_update = None; + let mut tx_signatures = None; + let mut tx_abort = None; + + // if next_funding_txid is set: + if let Some(next_funding_txid) = msg.next_funding_txid { + // - if `next_funding_txid` matches the latest interactive funding transaction + // or the current channel funding transaction: + if let Some(session) = &self.interactive_tx_signing_session { + let our_next_funding_txid = self.maybe_get_next_funding_txid(); + if let Some(our_next_funding_txid) = our_next_funding_txid { + if our_next_funding_txid != next_funding_txid { + return Err(ChannelError::close(format!( + "Unexpected next_funding_txid: {}; expected: {}", + next_funding_txid, our_next_funding_txid, + ))); + } - // if next_funding_txid is set: - let (commitment_update, tx_signatures, tx_abort) = if let Some(next_funding_txid) = msg.next_funding_txid { - if let Some(session) = &self.interactive_tx_signing_session { - // if next_funding_txid matches the latest interactive funding transaction: - let our_next_funding_txid = session.unsigned_tx().compute_txid(); - if our_next_funding_txid == next_funding_txid { - debug_assert_eq!(session.unsigned_tx().compute_txid(), self.maybe_get_next_funding_txid().unwrap()); - - let commitment_update = if !self.context.channel_state.is_their_tx_signatures_sent() && msg.next_local_commitment_number == 0 { - // if it has not received tx_signatures for that funding transaction AND - // if next_commitment_number is zero: - // MUST retransmit its commitment_signed for that funding transaction. 
- let commitment_signed = self.context.get_initial_commitment_signed(&self.funding, logger)?; - Some(msgs::CommitmentUpdate { - commitment_signed: vec![commitment_signed], - update_add_htlcs: vec![], - update_fulfill_htlcs: vec![], - update_fail_htlcs: vec![], - update_fail_malformed_htlcs: vec![], - update_fee: None, - }) - } else { None }; + if !session.has_received_commitment_signed() { + self.context.expecting_peer_commitment_signed = true; + } + + // - if `next_commitment_number` is equal to the commitment number of the + // `commitment_signed` message it sent for this funding transaction: + // - MUST retransmit its `commitment_signed` for that funding transaction. + if msg.next_local_commitment_number == next_counterparty_commitment_number { + // `next_counterparty_commitment_number` is guaranteed to always be the + // commitment number of the `commitment_signed` message we sent for this + // funding transaction. If they set `next_funding_txid`, then they should + // not have processed our `tx_signatures` yet, which implies that our state + // machine is still paused and no updates can happen that would increment + // our `next_counterparty_commitment_number`. + // + // If they did set `next_funding_txid` even after processing our + // `tx_signatures` erroneously, this may end up resulting in a force close. + // // TODO(dual_funding): For async signing support we need to hold back `tx_signatures` until the `commitment_signed` is ready. - let tx_signatures = if ( - // if it has not received tx_signatures for that funding transaction AND - // if it has already received commitment_signed AND it should sign first, as specified in the tx_signatures requirements: - // MUST send its tx_signatures for that funding transaction. - !self.context.channel_state.is_their_tx_signatures_sent() && session.has_received_commitment_signed() && session.holder_sends_tx_signatures_first() - // else if it has already received tx_signatures for that funding transaction: - // MUST send its tx_signatures for that funding transaction. - ) || self.context.channel_state.is_their_tx_signatures_sent() { - if self.context.channel_state.is_monitor_update_in_progress() { - // The `monitor_pending_tx_signatures` field should have already been set in `commitment_signed_initial_v2` - // if we were up first for signing and had a monitor update in progress, but check again just in case. - debug_assert!(self.context.monitor_pending_tx_signatures.is_some(), "monitor_pending_tx_signatures should already be set"); - log_debug!(logger, "Not sending tx_signatures: a monitor update is in progress. Setting monitor_pending_tx_signatures."); - if self.context.monitor_pending_tx_signatures.is_none() { - self.context.monitor_pending_tx_signatures = session.holder_tx_signatures().clone(); - } - None - } else { - // If `holder_tx_signatures` is `None` here, the `tx_signatures` message will be sent - // when the holder provides their witnesses as this will queue a `tx_signatures` if the - // holder must send one. 
- session.holder_tx_signatures().clone() + let commitment_signed = self.context.get_initial_commitment_signed(&self.funding, logger)?; + commitment_update = Some(msgs::CommitmentUpdate { + commitment_signed: vec![commitment_signed], + update_add_htlcs: vec![], + update_fulfill_htlcs: vec![], + update_fail_htlcs: vec![], + update_fail_malformed_htlcs: vec![], + update_fee: None, + }); + } + + // - if it has already received `commitment_signed` and it should sign first, + // as specified in the [`tx_signatures` requirements](#the-tx_signatures-message): + // - MUST send its `tx_signatures` for that funding transaction. + // + // - if it has already received `tx_signatures` for that funding transaction: + // - MUST send its `tx_signatures` for that funding transaction. + if (session.has_received_commitment_signed() && session.holder_sends_tx_signatures_first()) + || self.context.channel_state.is_their_tx_signatures_sent() + { + if self.context.channel_state.is_monitor_update_in_progress() { + // The `monitor_pending_tx_signatures` field should have already been + // set in `commitment_signed_initial_v2` if we were up first for signing + // and had a monitor update in progress. + if session.holder_sends_tx_signatures_first() { + debug_assert!(self.context.monitor_pending_tx_signatures.is_some()); } } else { - None - }; - if !session.has_received_commitment_signed() { - self.context.expecting_peer_commitment_signed = true; + // If `holder_tx_signatures` is `None` here, the `tx_signatures` message + // will be sent when the user provides their witnesses. + tx_signatures = session.holder_tx_signatures().clone() } - (commitment_update, tx_signatures, None) - } else { - // The `next_funding_txid` does not match the latest interactive funding transaction so we - // MUST send tx_abort to let the remote know that they can forget this funding transaction. - (None, None, Some(msgs::TxAbort { - channel_id: self.context.channel_id(), - data: format!( - "next_funding_txid {} does match our latest interactive funding txid {}", - next_funding_txid, our_next_funding_txid, - ).into_bytes() })) } } else { - // We'll just send a `tx_abort` here if we don't have a signing session for this channel - // on reestablish and tell our peer to just forget about it. - // Our peer is doing something strange, but it doesn't warrant closing the channel. - (None, None, Some(msgs::TxAbort { + // The `next_funding_txid` does not match the latest interactive funding + // transaction so we MUST send tx_abort to let the remote know that they can + // forget this funding transaction. + tx_abort = Some(msgs::TxAbort { channel_id: self.context.channel_id(), - data: - "No active signing session. The associated funding transaction may have already been broadcast.".as_bytes().to_vec() })) + data: format!( + "Unexpected next_funding_txid {}", + next_funding_txid, + ).into_bytes() }); } } else { - // Don't send anything related to interactive signing if `next_funding_txid` is not set. - (None, None, None) - }; + // We'll just send a `tx_abort` here if we don't have a signing session for this channel + // on reestablish and tell our peer to just forget about it. + // Our peer is doing something strange, but it doesn't warrant closing the channel. + tx_abort = Some(msgs::TxAbort { + channel_id: self.context.channel_id(), + data: + "No active signing session. 
The associated funding transaction may have already been broadcast.".as_bytes().to_vec() }); + } + } + + if msg.next_local_commitment_number == next_counterparty_commitment_number { + if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { + log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id()); + } else { + log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id()); + } Ok(ReestablishResponses { channel_ready, shutdown_msg, announcement_sigs, @@ -8538,6 +8553,11 @@ where tx_abort, }) } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { + // We've made an update so we must have exchanged `tx_signatures`, implying that + // `commitment_signed` was also exchanged. However, we may still need to retransmit our + // `tx_signatures` if the counterparty sent theirs first but didn't get to process ours. + debug_assert!(commitment_update.is_none()); + if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id()); } else { @@ -8550,8 +8570,8 @@ where channel_ready, shutdown_msg, announcement_sigs, commitment_update: None, raa: None, order: self.context.resend_order.clone(), - tx_signatures: None, - tx_abort: None, + tx_signatures, + tx_abort, }) } else { let commitment_update = if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst @@ -8574,8 +8594,8 @@ where channel_ready, shutdown_msg, announcement_sigs, raa, commitment_update, order: self.context.resend_order.clone(), - tx_signatures: None, - tx_abort: None, + tx_signatures, + tx_abort, }) } } else if msg.next_local_commitment_number < next_counterparty_commitment_number { From 8848c9a1f22114897a8c0bac1a5284c279cd758b Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Mon, 23 Jun 2025 15:28:35 -0500 Subject: [PATCH 06/11] Update next_commitment_number logic for channel_reestablish The splicing spec updates the logic pertaining to next_commitment_number when sending a channel_reestablish message. Specifically: The sending node: - if it has sent `commitment_signed` for an interactive transaction construction but it has not received `tx_signatures`: - MUST set `next_funding_txid` to the txid of that interactive transaction. - if it has not received `commitment_signed` for that interactive transaction: - MUST set `next_commitment_number` to the commitment number of the `commitment_signed` it sent. 
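As a rough standalone sketch of that sender-side rule (the session type is a simplified stand-in for the interactive-tx signing session, and the field names are illustrative):

    /// Simplified stand-in for an in-progress interactive-tx signing session.
    struct SigningSession {
        received_commitment_signed: bool,
        received_tx_signatures: bool,
        // Commitment number of the commitment_signed we already sent for this
        // funding transaction.
        sent_commitment_number: u64,
    }

    /// Pick next_commitment_number for an outbound channel_reestablish.
    fn next_commitment_number_to_send(
        session: Option<&SigningSession>, default_next_commitment_number: u64,
    ) -> u64 {
        if let Some(s) = session {
            if !s.received_tx_signatures && !s.received_commitment_signed {
                // We sent commitment_signed for the interactive transaction but have
                // not received theirs, so re-announce the number we already sent.
                return s.sent_commitment_number;
            }
        }
        default_next_commitment_number
    }

    fn main() {
        let session = SigningSession {
            received_commitment_signed: false,
            received_tx_signatures: false,
            sent_commitment_number: 1,
        };
        assert_eq!(next_commitment_number_to_send(Some(&session), 2), 1);
        assert_eq!(next_commitment_number_to_send(None, 2), 2);
    }

The diff below only threads such a helper into get_channel_reestablish; the interactive-tx case itself is left unimplemented for now.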
--- lightning/src/ln/channel.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index fff7e4d29ee..93a72a34613 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -10168,6 +10168,19 @@ where self.sign_channel_announcement(node_signer, announcement).ok() } + fn get_next_local_commitment_number(&self) -> u64 { + if let Some(session) = &self.interactive_tx_signing_session { + if !self.context.channel_state.is_their_tx_signatures_sent() + && !session.has_received_commitment_signed() + { + // FIXME + return unimplemented!(); + } + } + + INITIAL_COMMITMENT_NUMBER - self.holder_commitment_point.transaction_number() + } + #[rustfmt::skip] fn maybe_get_next_funding_txid(&self) -> Option<Txid> { // If we've sent `commitment_signed` for an interactively constructed transaction @@ -10269,7 +10282,7 @@ where // next_local_commitment_number is the next commitment_signed number we expect to // receive (indicating if they need to resend one that we missed). - next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.holder_commitment_point.transaction_number(), + next_local_commitment_number: self.get_next_local_commitment_number(), // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to // receive, however we track it by the next commitment number for a remote transaction // (which is one further, as they always revoke previous commitment transaction, not From e95b15856886f824d69ff6cb846d8909c0415457 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 19 Jun 2025 19:43:08 -0500 Subject: [PATCH 07/11] Send splice_locked on channel_reestablish The channel_reestablish protocol supports retransmitting splice_locked messages as needed. Add support for doing so when handling channel_reestablish messages. --- lightning/src/ln/channel.rs | 55 ++++++++++++++++++++++++++++++ lightning/src/ln/channelmanager.rs | 16 +++++++-- 2 files changed, 68 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 93a72a34613..6356c8d2e5b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1213,6 +1213,7 @@ pub(super) struct ReestablishResponses { pub shutdown_msg: Option<msgs::Shutdown>, pub tx_signatures: Option<msgs::TxSignatures>, pub tx_abort: Option<msgs::TxAbort>, + pub splice_locked: Option<msgs::SpliceLocked>, } /// The first message we send to our peer after connection @@ -2169,6 +2170,10 @@ impl FundingScope { pub fn get_short_channel_id(&self) -> Option<u64> { self.short_channel_id } + + fn is_splice(&self) -> bool { + self.channel_transaction_parameters.splice_parent_funding_txid.is_some() + } } /// Info about a pending splice, used in the pre-splice channel @@ -8389,6 +8394,7 @@ where shutdown_msg, announcement_sigs, tx_signatures: None, tx_abort: None, + splice_locked: None, }); } @@ -8400,6 +8406,7 @@ where shutdown_msg, announcement_sigs, tx_signatures: None, tx_abort: None, + splice_locked: None, }); } @@ -8445,6 +8452,25 @@ where .and_then(|_| self.get_channel_ready(logger)) } else { None }; + // A receiving node: + // - if `your_last_funding_locked` is set and it does not match the most recent + // `splice_locked` it has sent: + // - MUST retransmit `splice_locked`.
+ let sent_splice_txid = self + .maybe_get_my_current_funding_locked(features) + .filter(|funding| funding.is_splice()) + .map(|funding| { + funding.get_funding_txid().expect("Splice funding_txid should always be set") + }); + let splice_locked = msg.your_last_funding_locked_txid.and_then(|last_funding_txid| { + sent_splice_txid + .filter(|sent_splice_txid| last_funding_txid != *sent_splice_txid) + .map(|splice_txid| msgs::SpliceLocked { + channel_id: self.context.channel_id, + splice_txid, + }) + }); + let mut commitment_update = None; let mut tx_signatures = None; let mut tx_abort = None; @@ -8551,6 +8577,7 @@ where order: self.context.resend_order.clone(), tx_signatures, tx_abort, + splice_locked, }) } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { // We've made an update so we must have exchanged `tx_signatures`, implying that @@ -8572,6 +8599,7 @@ where order: self.context.resend_order.clone(), tx_signatures, tx_abort, + splice_locked, }) } else { let commitment_update = if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst @@ -8596,6 +8624,7 @@ where order: self.context.resend_order.clone(), tx_signatures, tx_abort, + splice_locked, }) } } else if msg.next_local_commitment_number < next_counterparty_commitment_number { @@ -10219,6 +10248,32 @@ where None } + #[cfg(splicing)] + fn maybe_get_my_current_funding_locked( + &self, features: &InitFeatures, + ) -> Option<&FundingScope> { + if !features.supports_splicing() { + return None; + } + + self.pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.sent_funding_txid) + .and_then(|funding_txid| { + self.pending_funding + .iter() + .find(|funding| funding.get_funding_txid() == Some(funding_txid)) + }) + .or_else(|| self.is_our_channel_ready().then(|| &self.funding)) + } + + #[cfg(not(splicing))] + fn maybe_get_my_current_funding_locked( + &self, _features: &InitFeatures, + ) -> Option<&FundingScope> { + None + } + #[cfg(splicing)] fn maybe_get_my_current_funding_locked_txid(&self, features: &InitFeatures) -> Option { if !features.supports_splicing() { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 9dfc3fb5af3..3c67987c8cc 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3406,7 +3406,8 @@ macro_rules! handle_monitor_update_completion { &mut $peer_state.pending_msg_events, $chan, updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds, updates.funding_broadcastable, updates.channel_ready, - updates.announcement_sigs, updates.tx_signatures, None); + updates.announcement_sigs, updates.tx_signatures, None, None, + ); if let Some(upd) = channel_update { $peer_state.pending_msg_events.push(upd); } @@ -8059,9 +8060,10 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ funding_broadcastable: Option, channel_ready: Option, announcement_sigs: Option, tx_signatures: Option, tx_abort: Option, + splice_locked: Option, ) -> (Option<(u64, Option, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec)>) { let logger = WithChannelContext::from(&self.logger, &channel.context, None); - log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", + log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort, {} splice_locked", &channel.context.channel_id(), if raa.is_some() { "an" } else { "no" }, if commitment_update.is_some() { "a" } else { "no" }, @@ -8071,6 +8073,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if announcement_sigs.is_some() { "sending" } else { "without" }, if tx_signatures.is_some() { "sending" } else { "without" }, if tx_abort.is_some() { "sending" } else { "without" }, + if splice_locked.is_some() { "sending" } else { "without" }, ); let counterparty_node_id = channel.context.get_counterparty_node_id(); @@ -8110,6 +8113,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ msg, }); } + if let Some(msg) = splice_locked { + pending_msg_events.push(MessageSendEvent::SendSpliceLocked { + node_id: counterparty_node_id, + msg, + }); + } macro_rules! handle_cs { () => { if let Some(update) = commitment_update { @@ -10054,7 +10063,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order, Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs, - responses.tx_signatures, responses.tx_abort); + responses.tx_signatures, responses.tx_abort, responses.splice_locked, + ); debug_assert!(htlc_forwards.is_none()); debug_assert!(decode_update_add_htlcs.is_none()); if let Some(upd) = channel_update { From 65b650058a8c8756f92655694188c29f77c715b0 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 25 Jun 2025 11:10:34 -0500 Subject: [PATCH 08/11] Retransmit channel_ready / splice_locked awaiting announcement_sigs The splicing spec updates channel_establishment logic to retransmit channel_ready or splice_locked for announced channels. Specifically: - if `my_current_funding_locked` is included: - if `announce_channel` is set for this channel: - if it has not received `announcement_signatures` for that transaction: - MUST retransmit `channel_ready` or `splice_locked` after exchanging `channel_reestablish`. 
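Roughly, as a standalone sketch (the flags and types are illustrative stand-ins for the channel state, not the actual fields):

    /// Which "locked" message, if any, must be retransmitted after channel_reestablish.
    #[derive(Debug, PartialEq)]
    enum LockedRetransmit {
        ChannelReady,
        SpliceLocked,
    }

    fn locked_retransmit_for_announcement(
        // Some(true) if the most recently locked funding transaction is a splice,
        // Some(false) if it is the original funding transaction, None if nothing is locked.
        locked_funding_is_splice: Option<bool>, announce_channel: bool,
        received_announcement_signatures: bool,
    ) -> Option<LockedRetransmit> {
        let is_splice = locked_funding_is_splice?;
        if announce_channel && !received_announcement_signatures {
            Some(if is_splice {
                LockedRetransmit::SpliceLocked
            } else {
                LockedRetransmit::ChannelReady
            })
        } else {
            None
        }
    }

    fn main() {
        let msg = locked_retransmit_for_announcement(Some(true), true, false);
        assert_eq!(msg, Some(LockedRetransmit::SpliceLocked));
        assert_eq!(locked_retransmit_for_announcement(Some(false), false, false), None);
    }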
--- lightning/src/ln/channel.rs | 53 ++++++++++++++++++++++++++++--------- 1 file changed, 41 insertions(+), 12 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 6356c8d2e5b..aaff95f4303 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8449,27 +8449,56 @@ where msg.your_last_funding_locked_txid .is_none() .then(|| ()) + // The sending node: + // - if `my_current_funding_locked` is included: + // - if `announce_channel` is set for this channel: + // - if it has not received `announcement_signatures` for that transaction: + // - MUST retransmit `channel_ready` or `splice_locked` after exchanging `channel_reestablish`. + .or_else(|| { + self.maybe_get_my_current_funding_locked(features) + .filter(|funding| !funding.is_splice()) + .filter(|_| self.context.config.announce_for_forwarding) + .filter(|_| self.context.announcement_sigs.is_none()) + .map(|_| ()) + }) .and_then(|_| self.get_channel_ready(logger)) } else { None }; - // A receiving node: - // - if `your_last_funding_locked` is set and it does not match the most recent - // `splice_locked` it has sent: - // - MUST retransmit `splice_locked`. let sent_splice_txid = self .maybe_get_my_current_funding_locked(features) .filter(|funding| funding.is_splice()) .map(|funding| { funding.get_funding_txid().expect("Splice funding_txid should always be set") }); - let splice_locked = msg.your_last_funding_locked_txid.and_then(|last_funding_txid| { - sent_splice_txid - .filter(|sent_splice_txid| last_funding_txid != *sent_splice_txid) - .map(|splice_txid| msgs::SpliceLocked { - channel_id: self.context.channel_id, - splice_txid, - }) - }); + let splice_locked = msg + // A receiving node: + // - if `your_last_funding_locked` is set and it does not match the most recent + // `splice_locked` it has sent: + // - MUST retransmit `splice_locked`. + .your_last_funding_locked_txid + .and_then(|last_funding_txid| { + sent_splice_txid.filter(|sent_splice_txid| last_funding_txid != *sent_splice_txid) + }) + // The sending node: + // - if `my_current_funding_locked` is included: + // - if `announce_channel` is set for this channel: + // - if it has not received `announcement_signatures` for that transaction: + // - MUST retransmit `channel_ready` or `splice_locked` after exchanging `channel_reestablish`. + .or_else(|| { + sent_splice_txid + .filter(|_| self.context.config.announce_for_forwarding) + .filter(|sent_splice_txid| { + if self.funding.get_funding_txid() == Some(*sent_splice_txid) { + self.context.announcement_sigs.is_none() + } else { + true + } + }) + }) + .map(|splice_txid| msgs::SpliceLocked { + channel_id: self.context.channel_id, + splice_txid, + }); let mut commitment_update = None; let mut tx_signatures = None; From 1718f454f09c64501d726abf3b2e11f12f60806f Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 25 Jun 2025 17:14:17 -0500 Subject: [PATCH 09/11] Clear announcement_sigs on FundingScope promotion When a splice transaction is promoted (i.e., when splice_locked has been exchanged), announcement_signatures must be sent. However, if we try to send a channel_announcement before they are received, then the signatures will be incorrect. To avoid this, clear the counterparty's announcement_signatures upon promoting a FundingScope. 
--- lightning/src/ln/channel.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index aaff95f4303..5da87c8861e 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -5961,6 +5961,7 @@ macro_rules! promote_splice_funding { core::mem::swap(&mut $self.funding, $funding); $self.pending_splice = None; $self.pending_funding.clear(); + $self.context.announcement_sigs = None; $self.context.announcement_sigs_state = AnnouncementSigsState::NotSent; }; } From 0b13b32abd7807ddb3ec414eba36a8b182c0096d Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Fri, 27 Jun 2025 10:45:11 -0500 Subject: [PATCH 10/11] Send channel_ready on channel_reestablish The channel_reestablish protocol supports retransmitting channel_ready messages as needed. Add support for doing so when handling channel_reestablish messages. --- lightning/src/ln/channel.rs | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 5da87c8861e..dabdc20b67f 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8443,24 +8443,42 @@ where // We should never have to worry about MonitorUpdateInProgress resending ChannelReady self.get_channel_ready(logger) } else if splicing_negotiated { + let funding_txid = self + .maybe_get_my_current_funding_locked(features) + .filter(|funding| !funding.is_splice()) + .map(|funding| { + funding.get_funding_txid().expect("funding_txid should always be set") + }); + // A node: // - if `option_splice` was negotiated and `your_last_funding_locked` is not // set in the `channel_reestablish` it received: // - MUST retransmit `channel_ready`. msg.your_last_funding_locked_txid .is_none() - .then(|| ()) + .then(|| funding_txid) + .flatten() // The sending node: // - if `my_current_funding_locked` is included: // - if `announce_channel` is set for this channel: // - if it has not received `announcement_signatures` for that transaction: // - MUST retransmit `channel_ready` or `splice_locked` after exchanging `channel_reestablish`. .or_else(|| { - self.maybe_get_my_current_funding_locked(features) - .filter(|funding| !funding.is_splice()) + funding_txid .filter(|_| self.context.config.announce_for_forwarding) .filter(|_| self.context.announcement_sigs.is_none()) - .map(|_| ()) + }) + // TODO: The language from the spec below should be updated to be in terms of + // `your_last_funding_locked` received and `my_current_funding_locked` sent rather + // than other messages received. + // + // - if it receives `channel_ready` for that transaction after exchanging `channel_reestablish`: + // - MUST retransmit `channel_ready` in response, if not already sent since reconnecting. + .or_else(|| { + msg.your_last_funding_locked_txid + .and_then(|last_funding_txid| { + funding_txid.filter(|funding_txid| last_funding_txid != *funding_txid) + }) }) .and_then(|_| self.get_channel_ready(logger)) } else { None }; From f000b766e1b4d0a961975c814f2e97a86524b2e4 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Fri, 27 Jun 2025 11:40:22 -0500 Subject: [PATCH 11/11] Handle implicit splice_locked during channel_reestablish When handling a counterparty's channel_reestablish, the spec dictates that a splice_locked may be implied by my_current_funding_locked. Compare that against any pending splices and handle an implicit splice_locked message when applicable.
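As a rough standalone sketch of that matching logic (Txid reduced to a byte array; parameter names are illustrative, not the actual channel fields):

    // Txid is simplified to a byte array so the sketch stays self-contained.
    type Txid = [u8; 32];

    /// Treat the peer's my_current_funding_locked as an implicit splice_locked when it
    /// names a pending splice transaction we have not yet seen splice_locked for.
    fn implicit_splice_locked(
        my_current_funding_locked_txid: Option<Txid>, pending_splice_txids: &[Txid],
        received_splice_locked_txid: Option<Txid>,
    ) -> Option<Txid> {
        let txid = my_current_funding_locked_txid?;
        if pending_splice_txids.contains(&txid) && received_splice_locked_txid != Some(txid) {
            Some(txid)
        } else {
            None
        }
    }

    fn main() {
        let splice_txid = [1u8; 32];
        assert_eq!(implicit_splice_locked(Some(splice_txid), &[splice_txid], None), Some(splice_txid));
        assert_eq!(implicit_splice_locked(Some(splice_txid), &[], None), None);
    }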
--- lightning/src/ln/channel.rs | 30 ++++++++++++++++++++++++++++++ lightning/src/ln/channelmanager.rs | 13 +++++++++++-- 2 files changed, 41 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index dabdc20b67f..f8ffedfc8ad 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1214,6 +1214,7 @@ pub(super) struct ReestablishResponses { pub tx_signatures: Option, pub tx_abort: Option, pub splice_locked: Option, + pub implicit_splice_locked: Option, } /// The first message we send to our peer after connection @@ -8396,6 +8397,7 @@ where tx_signatures: None, tx_abort: None, splice_locked: None, + implicit_splice_locked: None, }); } @@ -8408,6 +8410,7 @@ where tx_signatures: None, tx_abort: None, splice_locked: None, + implicit_splice_locked: None, }); } @@ -8519,6 +8522,30 @@ where splice_txid, }); + // A receiving node: + // - if splice transactions are pending and `my_current_funding_locked` matches one of + // those splice transactions, for which it hasn't received `splice_locked` yet: + // - MUST process `my_current_funding_locked` as if it was receiving `splice_locked` + // for this `txid`. + #[cfg(splicing)] + let implicit_splice_locked = msg.my_current_funding_locked_txid.and_then(|funding_txid| { + self.pending_funding + .iter() + .find(|funding| funding.get_funding_txid() == Some(funding_txid)) + .and_then(|_| { + self.pending_splice.as_ref().and_then(|pending_splice| { + (Some(funding_txid) != pending_splice.received_funding_txid) + .then(|| funding_txid) + }) + }) + .map(|splice_txid| msgs::SpliceLocked { + channel_id: self.context.channel_id, + splice_txid, + }) + }); + #[cfg(not(splicing))] + let implicit_splice_locked = None; + let mut commitment_update = None; let mut tx_signatures = None; let mut tx_abort = None; @@ -8626,6 +8653,7 @@ where tx_signatures, tx_abort, splice_locked, + implicit_splice_locked, }) } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { // We've made an update so we must have exchanged `tx_signatures`, implying that @@ -8648,6 +8676,7 @@ where tx_signatures, tx_abort, splice_locked, + implicit_splice_locked, }) } else { let commitment_update = if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst @@ -8673,6 +8702,7 @@ where tx_signatures, tx_abort, splice_locked, + implicit_splice_locked, }) } } else if msg.next_local_commitment_number < next_counterparty_commitment_number { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 3c67987c8cc..74ec0b029b9 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10017,7 +10017,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ #[rustfmt::skip] fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result { - let need_lnd_workaround = { + let (implicit_splice_locked, need_lnd_workaround) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) @@ -10070,7 +10070,8 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ if let Some(upd) = channel_update { peer_state.pending_msg_events.push(upd); } - need_lnd_workaround + + (responses.implicit_splice_locked, need_lnd_workaround) } else { return try_channel_entry!(self, peer_state, Err(ChannelError::close( "Got a channel_reestablish message for an unfunded channel!".into())), chan_entry); @@ -10117,6 +10118,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(channel_ready_msg) = need_lnd_workaround { self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?; } + + #[cfg(not(splicing))] + let _ = implicit_splice_locked; + #[cfg(splicing)] + if let Some(splice_locked) = implicit_splice_locked { + self.internal_splice_locked(counterparty_node_id, &splice_locked)?; + } + Ok(NotifyOption::SkipPersistHandleEvents) }