From bd294b3207b70dfe56557be4c159e4b68b00d367 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 19 Jun 2025 17:16:07 -0500 Subject: [PATCH 01/18] Add features for splicing While splicing is not yet fully supported, checking if the feature has been negotiated is needed for changes to the channel_reestablish logic. --- lightning-types/src/features.rs | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/lightning-types/src/features.rs b/lightning-types/src/features.rs index aca4bb6e5a9..148242e6fce 100644 --- a/lightning-types/src/features.rs +++ b/lightning-types/src/features.rs @@ -80,6 +80,8 @@ //! (see [BOLT-2](https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#channel-quiescence) for more information). //! - `ZeroFeeCommitments` - A channel type which always uses zero transaction fee on commitment transactions. //! (see [BOLT PR #1228](https://github.com/lightning/bolts/pull/1228) for more info). +//! - `Splice` - Allows replacing the currently-locked funding transaction with a new one +//! (see [BOLT PR #1160](https://github.com/lightning/bolts/pull/1160) for more information). //! //! LDK knows about the following features, but does not support them: //! - `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be @@ -163,7 +165,7 @@ mod sealed { // Byte 6 ZeroConf, // Byte 7 - Trampoline | SimpleClose, + Trampoline | SimpleClose | Splice, ] ); define_context!( @@ -184,7 +186,7 @@ mod sealed { // Byte 6 ZeroConf | Keysend, // Byte 7 - Trampoline | SimpleClose, + Trampoline | SimpleClose | Splice, // Byte 8 - 31 ,,,,,,,,,,,,,,,,,,,,,,,, // Byte 32 @@ -673,9 +675,20 @@ mod sealed { supports_simple_close, requires_simple_close ); - // By default, allocate enough bytes to cover up to SimpleClose. 
Update this as new features are + define_feature!( + 63, + Splice, + [InitContext, NodeContext], + "Feature flags for channel splicing.", + set_splicing_optional, + set_splicing_required, + clear_splicing, + supports_splicing, + requires_splicing + ); + // By default, allocate enough bytes to cover up to Splice. Update this as new features are // added which we expect to appear commonly across contexts. - pub(super) const MIN_FEATURES_ALLOCATION_BYTES: usize = (61 + 7) / 8; + pub(super) const MIN_FEATURES_ALLOCATION_BYTES: usize = (63 + 7) / 8; define_feature!( 259, DnsResolver, @@ -1369,6 +1382,7 @@ mod tests { init_features.set_zero_conf_optional(); init_features.set_quiescence_optional(); init_features.set_simple_close_optional(); + init_features.set_splicing_optional(); assert!(init_features.initial_routing_sync()); assert!(!init_features.supports_upfront_shutdown_script()); @@ -1384,7 +1398,7 @@ mod tests { // - onion_messages // - option_channel_type | option_scid_alias // - option_zeroconf - // - option_simple_close + // - option_simple_close | option_splice assert_eq!(node_features.flags.len(), 8); assert_eq!(node_features.flags[0], 0b00000001); assert_eq!(node_features.flags[1], 0b01010001); @@ -1393,7 +1407,7 @@ mod tests { assert_eq!(node_features.flags[4], 0b10001000); assert_eq!(node_features.flags[5], 0b10100000); assert_eq!(node_features.flags[6], 0b00001000); - assert_eq!(node_features.flags[7], 0b00100000); + assert_eq!(node_features.flags[7], 0b10100000); } // Check that cleared flags are kept blank when converting back: From 99affd377bccc84d9aa06810f96f994cd9e9e8c9 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 5 Jun 2025 15:27:36 -0500 Subject: [PATCH 02/18] Add funding_locked_txid TLVs to channel_reestablish The splicing spec extends the channel_reestablish message with two more TLVs indicating which funding txid the sender has sent/received either explicitly via splice_locked or implicitly via channel_ready. 
This allows peers to detect if a splice_locked was lost during disconnection and must be retransmitted. This commit updates channel_reestablish with the TLVs. Subsequent commits will implement the spec requirements. --- lightning/src/ln/channel.rs | 2 + lightning/src/ln/channelmanager.rs | 2 + lightning/src/ln/msgs.rs | 83 ++++++++++++++++++++++++++++++ 3 files changed, 87 insertions(+) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 09c9f5804fc..1530fb1fbfe 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -10179,6 +10179,8 @@ where your_last_per_commitment_secret: remote_last_secret, my_current_per_commitment_point: dummy_pubkey, next_funding_txid: self.maybe_get_next_funding_txid(), + your_last_funding_locked_txid: None, + my_current_funding_locked_txid: None, } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 94bc9972a30..2d94b3e8dfe 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10120,6 +10120,8 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ your_last_per_commitment_secret: [1u8; 32], my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(), next_funding_txid: None, + your_last_funding_locked_txid: None, + my_current_funding_locked_txid: None, }, }); return Err(MsgHandleErrInternal::send_err_msg_no_close( diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 2cf7e109eb1..42ac56e388c 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -928,6 +928,16 @@ pub struct ChannelReestablish { /// * `channel_reestablish`-sending node: https:///github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L2466-L2470 /// * `channel_reestablish`-receiving node: https:///github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L2520-L2531 pub next_funding_txid: Option, + /// The last funding txid received by the sending node, which may be: + /// - the txid of the last `splice_locked` it received, otherwise + /// - the txid of the funding transaction if it received `channel_ready`, or else + /// - `None` if it has never received `channel_ready` or `splice_locked` + pub your_last_funding_locked_txid: Option, + /// The last funding txid sent by the sending node, which may be: + /// - the txid of the last `splice_locked` it sent, otherwise + /// - the txid of the funding transaction if it sent `channel_ready`, or else + /// - `None` if it has never sent `channel_ready` or `splice_locked` + pub my_current_funding_locked_txid: Option, } /// An [`announcement_signatures`] message to be sent to or received from a peer. 
@@ -2805,6 +2815,8 @@ impl_writeable_msg!(ChannelReestablish, { my_current_per_commitment_point, }, { (0, next_funding_txid, option), + (1, your_last_funding_locked_txid, option), + (3, my_current_funding_locked_txid, option), }); impl_writeable_msg!(ClosingSigned, @@ -4275,6 +4287,8 @@ mod tests { your_last_per_commitment_secret: [9; 32], my_current_per_commitment_point: public_key, next_funding_txid: None, + your_last_funding_locked_txid: None, + my_current_funding_locked_txid: None, }; let encoded_value = cr.encode(); @@ -4326,6 +4340,8 @@ mod tests { ]) .unwrap(), )), + your_last_funding_locked_txid: None, + my_current_funding_locked_txid: None, }; let encoded_value = cr.encode(); @@ -4349,6 +4365,73 @@ mod tests { ); } + #[test] + fn encoding_channel_reestablish_with_funding_locked_txid() { + let public_key = { + let secp_ctx = Secp256k1::new(); + PublicKey::from_secret_key( + &secp_ctx, + &SecretKey::from_slice( + &>::from_hex( + "0101010101010101010101010101010101010101010101010101010101010101", + ) + .unwrap()[..], + ) + .unwrap(), + ) + }; + + let cr = msgs::ChannelReestablish { + channel_id: ChannelId::from_bytes([ + 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, + 0, 0, 0, 0, + ]), + next_local_commitment_number: 3, + next_remote_commitment_number: 4, + your_last_per_commitment_secret: [9; 32], + my_current_per_commitment_point: public_key, + next_funding_txid: None, + your_last_funding_locked_txid: Some(Txid::from_raw_hash( + bitcoin::hashes::Hash::from_slice(&[ + 48, 167, 250, 69, 152, 48, 103, 172, 164, 99, 59, 19, 23, 11, 92, 84, 15, 80, + 4, 12, 98, 82, 75, 31, 201, 11, 91, 23, 98, 23, 53, 124, + ]) + .unwrap(), + )), + my_current_funding_locked_txid: Some(Txid::from_raw_hash( + bitcoin::hashes::Hash::from_slice(&[ + 21, 167, 250, 69, 152, 48, 103, 172, 164, 99, 59, 19, 23, 11, 92, 84, 15, 80, + 4, 12, 98, 82, 75, 31, 201, 11, 91, 23, 98, 23, 53, 124, + ]) + .unwrap(), + )), + }; + + let encoded_value = 
cr.encode(); + assert_eq!( + encoded_value, + vec![ + 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, + 0, 0, 0, 0, // channel_id + 0, 0, 0, 0, 0, 0, 0, 3, // next_local_commitment_number + 0, 0, 0, 0, 0, 0, 0, 4, // next_remote_commitment_number + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, // your_last_per_commitment_secret + 3, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, 101, 215, 30, + 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, + 143, // my_current_per_commitment_point + 1, // Type (your_last_funding_locked_txid) + 32, // Length + 48, 167, 250, 69, 152, 48, 103, 172, 164, 99, 59, 19, 23, 11, 92, 84, 15, 80, 4, + 12, 98, 82, 75, 31, 201, 11, 91, 23, 98, 23, 53, 124, // Value + 3, // Type (my_current_funding_locked_txid) + 32, // Length + 21, 167, 250, 69, 152, 48, 103, 172, 164, 99, 59, 19, 23, 11, 92, 84, 15, 80, 4, + 12, 98, 82, 75, 31, 201, 11, 91, 23, 98, 23, 53, 124, // Value + ] + ); + } + macro_rules! get_keys_from { ($slice: expr, $secp_ctx: expr) => {{ let privkey = SecretKey::from_slice(&>::from_hex($slice).unwrap()[..]).unwrap(); From fa4c9c9610e048011976cfb5a375a96f2f19aab4 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Sat, 7 Jun 2025 19:22:48 -0500 Subject: [PATCH 03/18] Set funding_locked_txid TLVs in channel_reestablish The previous commit extended the channel_reestablish message with your_last_funding_locked_txid and my_current_funding_locked_txid for use as described there. This commit sets those fields to the funding txid most recently sent/received accordingly. 
--- lightning/src/ln/channel.rs | 63 ++++++++++++++++++++++++++---- lightning/src/ln/channelmanager.rs | 3 +- 2 files changed, 58 insertions(+), 8 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 1530fb1fbfe..c5a6e67983d 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1665,12 +1665,12 @@ where /// send our peer to begin the channel reconnection process. #[rustfmt::skip] pub fn peer_connected_get_handshake( - &mut self, chain_hash: ChainHash, logger: &L, + &mut self, chain_hash: ChainHash, their_features: &InitFeatures, logger: &L, ) -> ReconnectionMsg where L::Target: Logger { match &mut self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::Funded(chan) => - ReconnectionMsg::Reestablish(chan.get_channel_reestablish(logger)), + ReconnectionMsg::Reestablish(chan.get_channel_reestablish(their_features, logger)), ChannelPhase::UnfundedOutboundV1(chan) => { chan.get_open_channel(chain_hash, logger) .map(|msg| ReconnectionMsg::Open(OpenChannelMessage::V1(msg))) @@ -9381,6 +9381,13 @@ where self.context.latest_inbound_scid_alias.or(self.funding.get_short_channel_id()) } + /// Returns true if their channel_ready has been received + #[cfg(splicing)] + pub fn is_their_channel_ready(&self) -> bool { + matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)) + || matches!(self.context.channel_state, ChannelState::ChannelReady(_)) + } + /// Returns true if our channel_ready has been sent pub fn is_our_channel_ready(&self) -> bool { matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) @@ -10128,10 +10135,52 @@ where } } + #[cfg(splicing)] + fn maybe_get_your_last_funding_locked_txid(&self, features: &InitFeatures) -> Option { + if !features.supports_splicing() { + return None; + } + + self.pending_splice + .as_ref() + 
.and_then(|pending_splice| pending_splice.received_funding_txid) + .or_else(|| { + self.is_their_channel_ready().then(|| self.funding.get_funding_txid()).flatten() + }) + } + #[cfg(not(splicing))] + fn maybe_get_your_last_funding_locked_txid(&self, _features: &InitFeatures) -> Option { + None + } + + #[cfg(splicing)] + fn maybe_get_my_current_funding_locked_txid(&self, features: &InitFeatures) -> Option { + if !features.supports_splicing() { + return None; + } + + self.pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.sent_funding_txid) + .or_else(|| { + self.is_our_channel_ready().then(|| self.funding.get_funding_txid()).flatten() + }) + } + + #[cfg(not(splicing))] + fn maybe_get_my_current_funding_locked_txid(&self, _features: &InitFeatures) -> Option { + None + } + /// May panic if called on a channel that wasn't immediately-previously /// self.remove_uncommitted_htlcs_and_mark_paused()'d #[rustfmt::skip] - fn get_channel_reestablish(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger { + fn get_channel_reestablish( + &mut self, their_features: &InitFeatures, logger: &L, + ) -> msgs::ChannelReestablish + where + L::Target: Logger, + { assert!(self.context.channel_state.is_peer_disconnected()); assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER); // This is generally the first function which gets called on any given channel once we're @@ -10179,8 +10228,8 @@ where your_last_per_commitment_secret: remote_last_secret, my_current_per_commitment_point: dummy_pubkey, next_funding_txid: self.maybe_get_next_funding_txid(), - your_last_funding_locked_txid: None, - my_current_funding_locked_txid: None, + your_last_funding_locked_txid: self.maybe_get_your_last_funding_locked_txid(their_features), + my_current_funding_locked_txid: self.maybe_get_my_current_funding_locked_txid(their_features), } } @@ -13691,7 +13740,7 @@ mod tests { // Now disconnect the two nodes and check that the 
commitment point in // Node B's channel_reestablish message is sane. assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok()); - let msg = node_b_chan.get_channel_reestablish(&&logger); + let msg = node_b_chan.get_channel_reestablish(&channelmanager::provided_init_features(&config), &&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number assert_eq!(msg.your_last_per_commitment_secret, [0; 32]); @@ -13699,7 +13748,7 @@ mod tests { // Check that the commitment point in Node A's channel_reestablish message // is sane. assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok()); - let msg = node_a_chan.get_channel_reestablish(&&logger); + let msg = node_a_chan.get_channel_reestablish(&channelmanager::provided_init_features(&config), &&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number assert_eq!(msg.your_last_per_commitment_secret, [0; 32]); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 2d94b3e8dfe..a2b3e831af1 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -12007,8 +12007,9 @@ where } for (_, chan) in peer_state.channel_by_id.iter_mut() { + let features = &peer_state.latest_features; let logger = WithChannelContext::from(&self.logger, &chan.context(), None); - match chan.peer_connected_get_handshake(self.chain_hash, &&logger) { + match chan.peer_connected_get_handshake(self.chain_hash, features, &&logger) { ReconnectionMsg::Reestablish(msg) => pending_msg_events.push(MessageSendEvent::SendChannelReestablish { node_id: chan.context().get_counterparty_node_id(), From 2c790d0be1cd7943163a39103e4dde022fa19d64 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Fri, 11 Jul 2025 10:40:24 
-0500 Subject: [PATCH 04/18] f - don't check splice feature bit --- lightning/src/ln/channel.rs | 35 ++++++++++-------------------- lightning/src/ln/channelmanager.rs | 3 +-- 2 files changed, 12 insertions(+), 26 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index c5a6e67983d..443bb0d1d0b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1665,12 +1665,12 @@ where /// send our peer to begin the channel reconnection process. #[rustfmt::skip] pub fn peer_connected_get_handshake( - &mut self, chain_hash: ChainHash, their_features: &InitFeatures, logger: &L, + &mut self, chain_hash: ChainHash, logger: &L, ) -> ReconnectionMsg where L::Target: Logger { match &mut self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::Funded(chan) => - ReconnectionMsg::Reestablish(chan.get_channel_reestablish(their_features, logger)), + ReconnectionMsg::Reestablish(chan.get_channel_reestablish(logger)), ChannelPhase::UnfundedOutboundV1(chan) => { chan.get_open_channel(chain_hash, logger) .map(|msg| ReconnectionMsg::Open(OpenChannelMessage::V1(msg))) @@ -10136,11 +10136,7 @@ where } #[cfg(splicing)] - fn maybe_get_your_last_funding_locked_txid(&self, features: &InitFeatures) -> Option { - if !features.supports_splicing() { - return None; - } - + fn maybe_get_your_last_funding_locked_txid(&self) -> Option { self.pending_splice .as_ref() .and_then(|pending_splice| pending_splice.received_funding_txid) @@ -10149,16 +10145,12 @@ where }) } #[cfg(not(splicing))] - fn maybe_get_your_last_funding_locked_txid(&self, _features: &InitFeatures) -> Option { + fn maybe_get_your_last_funding_locked_txid(&self) -> Option { None } #[cfg(splicing)] - fn maybe_get_my_current_funding_locked_txid(&self, features: &InitFeatures) -> Option { - if !features.supports_splicing() { - return None; - } - + fn maybe_get_my_current_funding_locked_txid(&self) -> Option { self.pending_splice .as_ref() .and_then(|pending_splice| 
pending_splice.sent_funding_txid) @@ -10168,19 +10160,14 @@ where } #[cfg(not(splicing))] - fn maybe_get_my_current_funding_locked_txid(&self, _features: &InitFeatures) -> Option { + fn maybe_get_my_current_funding_locked_txid(&self) -> Option { None } /// May panic if called on a channel that wasn't immediately-previously /// self.remove_uncommitted_htlcs_and_mark_paused()'d #[rustfmt::skip] - fn get_channel_reestablish( - &mut self, their_features: &InitFeatures, logger: &L, - ) -> msgs::ChannelReestablish - where - L::Target: Logger, - { + fn get_channel_reestablish(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger { assert!(self.context.channel_state.is_peer_disconnected()); assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER); // This is generally the first function which gets called on any given channel once we're @@ -10228,8 +10215,8 @@ where your_last_per_commitment_secret: remote_last_secret, my_current_per_commitment_point: dummy_pubkey, next_funding_txid: self.maybe_get_next_funding_txid(), - your_last_funding_locked_txid: self.maybe_get_your_last_funding_locked_txid(their_features), - my_current_funding_locked_txid: self.maybe_get_my_current_funding_locked_txid(their_features), + your_last_funding_locked_txid: self.maybe_get_your_last_funding_locked_txid(), + my_current_funding_locked_txid: self.maybe_get_my_current_funding_locked_txid(), } } @@ -13740,7 +13727,7 @@ mod tests { // Now disconnect the two nodes and check that the commitment point in // Node B's channel_reestablish message is sane. 
assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok()); - let msg = node_b_chan.get_channel_reestablish(&channelmanager::provided_init_features(&config), &&logger); + let msg = node_b_chan.get_channel_reestablish(&&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number assert_eq!(msg.your_last_per_commitment_secret, [0; 32]); @@ -13748,7 +13735,7 @@ mod tests { // Check that the commitment point in Node A's channel_reestablish message // is sane. assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok()); - let msg = node_a_chan.get_channel_reestablish(&channelmanager::provided_init_features(&config), &&logger); + let msg = node_a_chan.get_channel_reestablish(&&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number assert_eq!(msg.your_last_per_commitment_secret, [0; 32]); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a2b3e831af1..2d94b3e8dfe 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -12007,9 +12007,8 @@ where } for (_, chan) in peer_state.channel_by_id.iter_mut() { - let features = &peer_state.latest_features; let logger = WithChannelContext::from(&self.logger, &chan.context(), None); - match chan.peer_connected_get_handshake(self.chain_hash, features, &&logger) { + match chan.peer_connected_get_handshake(self.chain_hash, &&logger) { ReconnectionMsg::Reestablish(msg) => pending_msg_events.push(MessageSendEvent::SendChannelReestablish { node_id: chan.context().get_counterparty_node_id(), From 5789c8f7e107889966255387870fe69419864306 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 19 Jun 2025 17:24:59 -0500 Subject: [PATCH 05/18] Update channel_reestablish logic for 
channel_ready When splicing is negotiated, channel_ready must be retransmitted when your_last_funding_locked is not set. Further, the current logic for retransmitting channel_ready is only applicable when splicing is not negotiated. --- lightning/src/ln/channel.rs | 15 +++++++++++++-- lightning/src/ln/channelmanager.rs | 3 ++- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 443bb0d1d0b..a15be071a9b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8277,7 +8277,8 @@ where #[rustfmt::skip] pub fn channel_reestablish( &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS, - chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock + chain_hash: ChainHash, their_features: &InitFeatures, user_config: &UserConfig, + best_block: &BestBlock, ) -> Result where L::Target: Logger, @@ -8401,9 +8402,19 @@ where let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke(); let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 }; - let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.holder_commitment_point.transaction_number() == 1 { + let splicing_negotiated = their_features.supports_splicing(); + let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.holder_commitment_point.transaction_number() == 1 && !splicing_negotiated { // We should never have to worry about MonitorUpdateInProgress resending ChannelReady self.get_channel_ready(logger) + } else if splicing_negotiated { + // A node: + // - if `option_splice` was negotiated and `your_last_funding_locked` is not + // set in the `channel_reestablish` it received: + // - MUST retransmit `channel_ready`. 
+ msg.your_last_funding_locked_txid + .is_none() + .then(|| ()) + .and_then(|_| self.get_channel_ready(logger)) } else { None }; if msg.next_local_commitment_number == next_counterparty_commitment_number { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 2d94b3e8dfe..0088a64e6b3 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10054,12 +10054,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { if let Some(chan) = chan_entry.get_mut().as_funded_mut() { + let features = &peer_state.latest_features; // Currently, we expect all holding cell update_adds to be dropped on peer // disconnect, so Channel's reestablish will never hand us any holding cell // freed HTLCs to fail backwards. If in the future we no longer drop pending // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here. let responses = try_channel_entry!(self, peer_state, chan.channel_reestablish( - msg, &&logger, &self.node_signer, self.chain_hash, + msg, &&logger, &self.node_signer, self.chain_hash, features, &self.default_configuration, &*self.best_block.read().unwrap()), chan_entry); let mut channel_update = None; if let Some(msg) = responses.shutdown_msg { From ab5ccd13addc5d6f58826dd581dbb96642ad5a7d Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 9 Jul 2025 16:12:35 -0500 Subject: [PATCH 06/18] Enter FundingNegotiated state after constructing funding tx The ChannelState::NegotiatingFunding assertion check in ChannelContext::get_initial_commitment_signed will fail when implementing splicing's channel_reestablish logic. In order to support it and channel establishment, enter ChannelState::FundingNegotiated prior to calling the method and update the assertion accordingly. 
--- lightning/src/ln/channel.rs | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index a15be071a9b..45b55d96949 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2960,6 +2960,8 @@ where }; self.funding.channel_transaction_parameters.funding_outpoint = Some(outpoint); + self.context.channel_state = ChannelState::FundingNegotiated(FundingNegotiatedFlags::new()); + self.context.assert_no_commitment_advancement(transaction_number, "initial commitment_signed"); let commitment_signed = self.context.get_initial_commitment_signed(&self.funding, logger); let commitment_signed = match commitment_signed { @@ -3008,13 +3010,10 @@ where ))); }; - let mut channel_state = ChannelState::FundingNegotiated(FundingNegotiatedFlags::new()); - channel_state.set_interactive_signing(); - self.context.channel_state = channel_state; - // Clear the interactive transaction constructor self.interactive_tx_constructor.take(); self.interactive_tx_signing_session = Some(signing_session); + self.context.channel_state.set_interactive_signing(); Ok((commitment_signed, funding_ready_for_sig_event)) } @@ -5530,16 +5529,7 @@ where SP::Target: SignerProvider, L::Target: Logger { - if !matches!( - self.channel_state, ChannelState::NegotiatingFunding(flags) - if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT) - ) { - debug_assert!(false); - return Err(ChannelError::Close(("Tried to get an initial commitment_signed messsage at a time other than \ - immediately after initial handshake completion (or tried to get funding_created twice)".to_string(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) } - ))); - } + assert!(matches!(self.channel_state, ChannelState::FundingNegotiated(_))); let signature = match self.get_initial_counterparty_commitment_signature(funding, logger) { Ok(res) => res, From 
cb31e8970b56c579b416fee5002477fd976985f9 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 9 Jul 2025 16:22:27 -0500 Subject: [PATCH 07/18] Return an Option from ChannelContext::get_initial_commitment_signed When ChannelContext::get_initial_commitment_signed is called for V2 channel establishment, any errors should result in closing the channel. However, in the future, when this is used for splicing it should abort instead of closing the channel. Move the error construction to the call sites in anticipation of this. --- lightning/src/ln/channel.rs | 65 ++++++++++++++++++++----------------- 1 file changed, 35 insertions(+), 30 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 45b55d96949..8df0e559dc9 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2965,10 +2965,15 @@ where self.context.assert_no_commitment_advancement(transaction_number, "initial commitment_signed"); let commitment_signed = self.context.get_initial_commitment_signed(&self.funding, logger); let commitment_signed = match commitment_signed { - Ok(commitment_signed) => commitment_signed, - Err(err) => { + Some(commitment_signed) => commitment_signed, + None => { self.funding.channel_transaction_parameters.funding_outpoint = None; - return Err(ChannelError::Close((err.to_string(), ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }))); + return Err(ChannelError::Close( + ( + "Failed to get signatures for new commitment_signed".to_owned(), + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, + ) + )); }, }; @@ -5494,7 +5499,7 @@ where #[rustfmt::skip] fn get_initial_counterparty_commitment_signature( &self, funding: &FundingScope, logger: &L - ) -> Result + ) -> Option where SP::Target: SignerProvider, L::Target: Logger @@ -5509,11 +5514,7 @@ where let channel_parameters = &funding.channel_transaction_parameters; ecdsa.sign_counterparty_commitment(channel_parameters, 
&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx) .map(|(signature, _)| signature) - .map_err(|_| ChannelError::Close( - ( - "Failed to get signatures for new commitment_signed".to_owned(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - ))) + .ok() }, // TODO (taproot|arik) #[cfg(taproot)] @@ -5524,38 +5525,35 @@ where #[rustfmt::skip] fn get_initial_commitment_signed( &mut self, funding: &FundingScope, logger: &L - ) -> Result + ) -> Option where SP::Target: SignerProvider, L::Target: Logger { assert!(matches!(self.channel_state, ChannelState::FundingNegotiated(_))); - let signature = match self.get_initial_counterparty_commitment_signature(funding, logger) { - Ok(res) => res, - Err(e) => { - log_error!(logger, "Got bad signatures: {:?}!", e); - return Err(e); - } - }; - - log_info!(logger, "Generated commitment_signed for peer for channel {}", &self.channel_id()); - - Ok(msgs::CommitmentSigned { - channel_id: self.channel_id, - htlc_signatures: vec![], - signature, - funding_txid: funding.get_funding_txo().map(|funding_txo| funding_txo.txid), - #[cfg(taproot)] - partial_signature_with_nonce: None, - }) + let signature = self.get_initial_counterparty_commitment_signature(funding, logger); + if let Some(signature) = signature { + log_info!(logger, "Generated commitment_signed for peer for channel {}", &self.channel_id()); + Some(msgs::CommitmentSigned { + channel_id: self.channel_id, + htlc_signatures: vec![], + signature, + funding_txid: funding.get_funding_txo().map(|funding_txo| funding_txo.txid), + #[cfg(taproot)] + partial_signature_with_nonce: None, + }) + } else { + // TODO: Support async signing + None + } } #[cfg(all(test))] pub fn get_initial_counterparty_commitment_signature_for_test( &mut self, funding: &mut FundingScope, logger: &L, counterparty_cur_commitment_point_override: PublicKey, - ) -> Result + ) -> Option where SP::Target: SignerProvider, L::Target: Logger, @@ -8426,7 +8424,14 @@ where 
// if it has not received tx_signatures for that funding transaction AND // if next_commitment_number is zero: // MUST retransmit its commitment_signed for that funding transaction. - let commitment_signed = self.context.get_initial_commitment_signed(&self.funding, logger)?; + let commitment_signed = self.context.get_initial_commitment_signed(&self.funding, logger) + // TODO Support async signing + .ok_or_else(|| ChannelError::Close( + ( + "Failed to get signatures for new commitment_signed".to_owned(), + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, + ) + ))?; Some(msgs::CommitmentUpdate { commitment_signed: vec![commitment_signed], update_add_htlcs: vec![], From cc4152f0b2f58df114b91e2cedebda34e21c7cb8 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Fri, 11 Jul 2025 11:58:12 -0500 Subject: [PATCH 08/18] f - TODO(splicing): Support async signing --- lightning/src/ln/channel.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 8df0e559dc9..4a487985a2c 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2966,6 +2966,7 @@ where let commitment_signed = self.context.get_initial_commitment_signed(&self.funding, logger); let commitment_signed = match commitment_signed { Some(commitment_signed) => commitment_signed, + // TODO(splicing): Support async signing None => { self.funding.channel_transaction_parameters.funding_outpoint = None; return Err(ChannelError::Close( @@ -5544,7 +5545,7 @@ where partial_signature_with_nonce: None, }) } else { - // TODO: Support async signing + // TODO(splicing): Support async signing None } } @@ -8425,7 +8426,7 @@ where // if next_commitment_number is zero: // MUST retransmit its commitment_signed for that funding transaction. 
let commitment_signed = self.context.get_initial_commitment_signed(&self.funding, logger) - // TODO Support async signing + // TODO(splicing): Support async signing .ok_or_else(|| ChannelError::Close( ( "Failed to get signatures for new commitment_signed".to_owned(), From 4a0e3434f6baece27116947fec8fb4ac568b259a Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Mon, 23 Jun 2025 15:16:50 -0500 Subject: [PATCH 09/18] Update next_funding_txid logic for channel_reestablish The splicing spec updates the logic pertaining to next_funding_txid when handling a channel_reestablish message. Specifically: A receiving node: - if `next_funding_txid` is set: - if `next_funding_txid` matches the latest interactive funding transaction or the current channel funding transaction: - if `next_commitment_number` is equal to the commitment number of the `commitment_signed` message it sent for this funding transaction: - MUST retransmit its `commitment_signed` for that funding transaction. - if it has already received `commitment_signed` and it should sign first, as specified in the [`tx_signatures` requirements](#the-tx_signatures-message): - MUST send its `tx_signatures` for that funding transaction. - if it has already received `tx_signatures` for that funding transaction: - MUST send its `tx_signatures` for that funding transaction. - if it also sets `next_funding_txid` in its own `channel_reestablish`, but the values don't match: - MUST send an `error` and fail the channel. - otherwise: - MUST send `tx_abort` to let the sending node know that they can forget this funding transaction. This commit updates FundedChannel::channel_reestablish accordingly. 
Co-authored-by: Wilmer Paulino Co-authored-by: Jeffrey Czyz --- lightning/src/ln/channel.rs | 188 ++++++++++++++++++++---------------- 1 file changed, 104 insertions(+), 84 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 4a487985a2c..0dc87c023de 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8406,96 +8406,111 @@ where .and_then(|_| self.get_channel_ready(logger)) } else { None }; - if msg.next_local_commitment_number == next_counterparty_commitment_number { - if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { - log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id()); - } else { - log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id()); - } + let mut commitment_update = None; + let mut tx_signatures = None; + let mut tx_abort = None; + + // if next_funding_txid is set: + if let Some(next_funding_txid) = msg.next_funding_txid { + // - if `next_funding_txid` matches the latest interactive funding transaction + // or the current channel funding transaction: + if let Some(session) = &self.interactive_tx_signing_session { + let our_next_funding_txid = self.maybe_get_next_funding_txid(); + if let Some(our_next_funding_txid) = our_next_funding_txid { + if our_next_funding_txid != next_funding_txid { + return Err(ChannelError::close(format!( + "Unexpected next_funding_txid: {}; expected: {}", + next_funding_txid, our_next_funding_txid, + ))); + } - // if next_funding_txid is set: - let (commitment_update, tx_signatures, tx_abort) = if let Some(next_funding_txid) = msg.next_funding_txid { - if let Some(session) = &self.interactive_tx_signing_session { - // if next_funding_txid matches the latest interactive funding transaction: - let our_next_funding_txid = session.unsigned_tx().compute_txid(); - if our_next_funding_txid == next_funding_txid { - debug_assert_eq!(session.unsigned_tx().compute_txid(), 
self.maybe_get_next_funding_txid().unwrap()); - - let commitment_update = if !self.context.channel_state.is_their_tx_signatures_sent() && msg.next_local_commitment_number == 0 { - // if it has not received tx_signatures for that funding transaction AND - // if next_commitment_number is zero: - // MUST retransmit its commitment_signed for that funding transaction. - let commitment_signed = self.context.get_initial_commitment_signed(&self.funding, logger) - // TODO(splicing): Support async signing - .ok_or_else(|| ChannelError::Close( - ( - "Failed to get signatures for new commitment_signed".to_owned(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - ) - ))?; - Some(msgs::CommitmentUpdate { - commitment_signed: vec![commitment_signed], - update_add_htlcs: vec![], - update_fulfill_htlcs: vec![], - update_fail_htlcs: vec![], - update_fail_malformed_htlcs: vec![], - update_fee: None, - }) - } else { None }; + if !session.has_received_commitment_signed() { + self.context.expecting_peer_commitment_signed = true; + } + + // - if `next_commitment_number` is equal to the commitment number of the + // `commitment_signed` message it sent for this funding transaction: + // - MUST retransmit its `commitment_signed` for that funding transaction. + if msg.next_local_commitment_number == next_counterparty_commitment_number { + // `next_counterparty_commitment_number` is guaranteed to always be the + // commitment number of the `commitment_signed` message we sent for this + // funding transaction. If they set `next_funding_txid`, then they should + // not have processed our `tx_signatures` yet, which implies that our state + // machine is still paused and no updates can happen that would increment + // our `next_counterparty_commitment_number`. + // + // If they did set `next_funding_txid` even after processing our + // `tx_signatures` erroneously, this may end up resulting in a force close. 
+ // // TODO(dual_funding): For async signing support we need to hold back `tx_signatures` until the `commitment_signed` is ready. - let tx_signatures = if ( - // if it has not received tx_signatures for that funding transaction AND - // if it has already received commitment_signed AND it should sign first, as specified in the tx_signatures requirements: - // MUST send its tx_signatures for that funding transaction. - !self.context.channel_state.is_their_tx_signatures_sent() && session.has_received_commitment_signed() && session.holder_sends_tx_signatures_first() - // else if it has already received tx_signatures for that funding transaction: - // MUST send its tx_signatures for that funding transaction. - ) || self.context.channel_state.is_their_tx_signatures_sent() { - if self.context.channel_state.is_monitor_update_in_progress() { - // The `monitor_pending_tx_signatures` field should have already been set in `commitment_signed_initial_v2` - // if we were up first for signing and had a monitor update in progress, but check again just in case. - debug_assert!(self.context.monitor_pending_tx_signatures.is_some(), "monitor_pending_tx_signatures should already be set"); - log_debug!(logger, "Not sending tx_signatures: a monitor update is in progress. Setting monitor_pending_tx_signatures."); - if self.context.monitor_pending_tx_signatures.is_none() { - self.context.monitor_pending_tx_signatures = session.holder_tx_signatures().clone(); - } - None - } else { - // If `holder_tx_signatures` is `None` here, the `tx_signatures` message will be sent - // when the holder provides their witnesses as this will queue a `tx_signatures` if the - // holder must send one. 
- session.holder_tx_signatures().clone() + let commitment_signed = self.context.get_initial_commitment_signed(&self.funding, logger) + // TODO(splicing): Support async signing + .ok_or_else(|| ChannelError::Close( + ( + "Failed to get signatures for new commitment_signed".to_owned(), + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, + ) + ))?; + commitment_update = Some(msgs::CommitmentUpdate { + commitment_signed: vec![commitment_signed], + update_add_htlcs: vec![], + update_fulfill_htlcs: vec![], + update_fail_htlcs: vec![], + update_fail_malformed_htlcs: vec![], + update_fee: None, + }); + } + + // - if it has already received `commitment_signed` and it should sign first, + // as specified in the [`tx_signatures` requirements](#the-tx_signatures-message): + // - MUST send its `tx_signatures` for that funding transaction. + // + // - if it has already received `tx_signatures` for that funding transaction: + // - MUST send its `tx_signatures` for that funding transaction. + if (session.has_received_commitment_signed() && session.holder_sends_tx_signatures_first()) + || self.context.channel_state.is_their_tx_signatures_sent() + { + if self.context.channel_state.is_monitor_update_in_progress() { + // The `monitor_pending_tx_signatures` field should have already been + // set in `commitment_signed_initial_v2` if we were up first for signing + // and had a monitor update in progress. + if session.holder_sends_tx_signatures_first() { + debug_assert!(self.context.monitor_pending_tx_signatures.is_some()); } } else { - None - }; - if !session.has_received_commitment_signed() { - self.context.expecting_peer_commitment_signed = true; + // If `holder_tx_signatures` is `None` here, the `tx_signatures` message + // will be sent when the user provides their witnesses. 
+ tx_signatures = session.holder_tx_signatures().clone() } - (commitment_update, tx_signatures, None) - } else { - // The `next_funding_txid` does not match the latest interactive funding transaction so we - // MUST send tx_abort to let the remote know that they can forget this funding transaction. - (None, None, Some(msgs::TxAbort { - channel_id: self.context.channel_id(), - data: format!( - "next_funding_txid {} does match our latest interactive funding txid {}", - next_funding_txid, our_next_funding_txid, - ).into_bytes() })) } } else { - // We'll just send a `tx_abort` here if we don't have a signing session for this channel - // on reestablish and tell our peer to just forget about it. - // Our peer is doing something strange, but it doesn't warrant closing the channel. - (None, None, Some(msgs::TxAbort { + // The `next_funding_txid` does not match the latest interactive funding + // transaction so we MUST send tx_abort to let the remote know that they can + // forget this funding transaction. + tx_abort = Some(msgs::TxAbort { channel_id: self.context.channel_id(), - data: - "No active signing session. The associated funding transaction may have already been broadcast.".as_bytes().to_vec() })) + data: format!( + "Unexpected next_funding_txid {}", + next_funding_txid, + ).into_bytes() }); } } else { - // Don't send anything related to interactive signing if `next_funding_txid` is not set. - (None, None, None) - }; + // We'll just send a `tx_abort` here if we don't have a signing session for this channel + // on reestablish and tell our peer to just forget about it. + // Our peer is doing something strange, but it doesn't warrant closing the channel. + tx_abort = Some(msgs::TxAbort { + channel_id: self.context.channel_id(), + data: + "No active signing session. 
The associated funding transaction may have already been broadcast.".as_bytes().to_vec() }); + } + } + + if msg.next_local_commitment_number == next_counterparty_commitment_number { + if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { + log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id()); + } else { + log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id()); + } Ok(ReestablishResponses { channel_ready, shutdown_msg, announcement_sigs, @@ -8506,6 +8521,11 @@ where tx_abort, }) } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { + // We've made an update so we must have exchanged `tx_signatures`, implying that + // `commitment_signed` was also exchanged. However, we may still need to retransmit our + // `tx_signatures` if the counterparty sent theirs first but didn't get to process ours. + debug_assert!(commitment_update.is_none()); + if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id()); } else { @@ -8518,8 +8538,8 @@ where channel_ready, shutdown_msg, announcement_sigs, commitment_update: None, raa: None, order: self.context.resend_order.clone(), - tx_signatures: None, - tx_abort: None, + tx_signatures, + tx_abort, }) } else { let commitment_update = if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst @@ -8542,8 +8562,8 @@ where channel_ready, shutdown_msg, announcement_sigs, raa, commitment_update, order: self.context.resend_order.clone(), - tx_signatures: None, - tx_abort: None, + tx_signatures, + tx_abort, }) } } else if msg.next_local_commitment_number < next_counterparty_commitment_number { From 5123cc8a83c96c513dcbc0a171952b142f603b36 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Fri, 11 Jul 2025 13:20:28 -0500 Subject: [PATCH 10/18] f - clean up spec 
requirement --- lightning/src/ln/channel.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 0dc87c023de..9cc4608fe1a 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8461,8 +8461,7 @@ where }); } - // - if it has already received `commitment_signed` and it should sign first, - // as specified in the [`tx_signatures` requirements](#the-tx_signatures-message): + // - if it has already received `commitment_signed` and it should sign first // - MUST send its `tx_signatures` for that funding transaction. // // - if it has already received `tx_signatures` for that funding transaction: From 5619b37a4eb1cd357c51be1cb5d1979e6eef6d98 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Fri, 11 Jul 2025 12:12:34 -0500 Subject: [PATCH 11/18] Use consistent initial commitment_signed naming --- lightning/src/ln/channel.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 9cc4608fe1a..716d96c7d78 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1814,7 +1814,7 @@ where #[cfg(splicing)] pending_splice: None, }; - let res = funded_channel.commitment_signed_initial_v2(msg, best_block, signer_provider, logger) + let res = funded_channel.initial_commitment_signed_v2(msg, best_block, signer_provider, logger) .map(|monitor| (Some(monitor), None)) // TODO: Change to `inspect_err` when MSRV is high enough. 
.map_err(|err| { @@ -2963,7 +2963,7 @@ where self.context.channel_state = ChannelState::FundingNegotiated(FundingNegotiatedFlags::new()); self.context.assert_no_commitment_advancement(transaction_number, "initial commitment_signed"); - let commitment_signed = self.context.get_initial_commitment_signed(&self.funding, logger); + let commitment_signed = self.context.get_initial_commitment_signed_v2(&self.funding, logger); let commitment_signed = match commitment_signed { Some(commitment_signed) => commitment_signed, // TODO(splicing): Support async signing @@ -5524,7 +5524,7 @@ where } #[rustfmt::skip] - fn get_initial_commitment_signed( + fn get_initial_commitment_signed_v2( &mut self, funding: &FundingScope, logger: &L ) -> Option where @@ -6640,7 +6640,7 @@ where } #[rustfmt::skip] - pub fn commitment_signed_initial_v2( + pub fn initial_commitment_signed_v2( &mut self, msg: &msgs::CommitmentSigned, best_block: BestBlock, signer_provider: &SP, logger: &L ) -> Result::EcdsaSigner>, ChannelError> where L::Target: Logger @@ -8443,7 +8443,7 @@ where // `tx_signatures` erroneously, this may end up resulting in a force close. // // TODO(dual_funding): For async signing support we need to hold back `tx_signatures` until the `commitment_signed` is ready. - let commitment_signed = self.context.get_initial_commitment_signed(&self.funding, logger) + let commitment_signed = self.context.get_initial_commitment_signed_v2(&self.funding, logger) // TODO(splicing): Support async signing .ok_or_else(|| ChannelError::Close( ( @@ -8471,7 +8471,7 @@ where { if self.context.channel_state.is_monitor_update_in_progress() { // The `monitor_pending_tx_signatures` field should have already been - // set in `commitment_signed_initial_v2` if we were up first for signing + // set in `initial_commitment_signed_v2` if we were up first for signing // and had a monitor update in progress. 
if session.holder_sends_tx_signatures_first() { debug_assert!(self.context.monitor_pending_tx_signatures.is_some()); From feffea4356793b6b92ba72c669bd43a1ac67dda0 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Mon, 23 Jun 2025 15:28:35 -0500 Subject: [PATCH 12/18] Update next_commitment_number logic for channel_reestablish The splicing spec updates the logic pertaining to next_commitment_number when sending a channel_reestablish message. Specifically: The sending node: - if it has sent `commitment_signed` for an interactive transaction construction but it has not received `tx_signatures`: - MUST set `next_funding_txid` to the txid of that interactive transaction. - if it has not received `commitment_signed` for that interactive transaction: - MUST set `next_commitment_number` to the commitment number of the `commitment_signed` it sent. --- lightning/src/ln/channel.rs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 716d96c7d78..87a7bc07da9 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -10141,6 +10141,21 @@ where self.sign_channel_announcement(node_signer, announcement).ok() } + fn get_next_local_commitment_number(&self) -> u64 { + let next_local_commitment_number = + INITIAL_COMMITMENT_NUMBER - self.holder_commitment_point.transaction_number(); + + if let Some(session) = &self.interactive_tx_signing_session { + if !self.context.channel_state.is_their_tx_signatures_sent() + && !session.has_received_commitment_signed() + { + return next_local_commitment_number + 1; + } + } + + next_local_commitment_number + } + #[rustfmt::skip] fn maybe_get_next_funding_txid(&self) -> Option { // If we've sent `commtiment_signed` for an interactively constructed transaction @@ -10229,7 +10244,7 @@ where // next_local_commitment_number is the next commitment_signed number we expect to // receive (indicating if they need to resend one that we missed). 
- next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.holder_commitment_point.transaction_number(), + next_local_commitment_number: self.get_next_local_commitment_number(), // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to // receive, however we track it by the next commitment number for a remote transaction // (which is one further, as they always revoke previous commitment transaction, not From 1047dcdfef2448f04f1040b91694ce5b51c59cfa Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Fri, 11 Jul 2025 12:17:07 -0500 Subject: [PATCH 13/18] f - add comment with spec requirement --- lightning/src/ln/channel.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 87a7bc07da9..a397286f881 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -10145,6 +10145,12 @@ where let next_local_commitment_number = INITIAL_COMMITMENT_NUMBER - self.holder_commitment_point.transaction_number(); + // The sending node: + // - if it has sent `commitment_signed` for an interactive transaction construction but + // it has not received `tx_signatures`: + // - MUST set `next_funding_txid` to the txid of that interactive transaction. + // - if it has not received `commitment_signed` for that interactive transaction: + // - MUST set `next_commitment_number` to the commitment number of the `commitment_signed` it sent. if let Some(session) = &self.interactive_tx_signing_session { if !self.context.channel_state.is_their_tx_signatures_sent() && !session.has_received_commitment_signed() From 71ffb50fb98fb1fd74248a74cb5469154102ebcd Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 19 Jun 2025 19:43:08 -0500 Subject: [PATCH 14/18] Send splice_locked on channel_reestablish The channel_reestablish protocol supports retransmitting splice_locked messages as needed. Add support for doing such when handling channel_reestablish messages. 
--- lightning/src/ln/channel.rs | 47 ++++++++++++++++++++++++++++++ lightning/src/ln/channelmanager.rs | 16 ++++++++-- 2 files changed, 60 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index a397286f881..a57e45a7841 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1216,6 +1216,7 @@ pub(super) struct ReestablishResponses { pub shutdown_msg: Option, pub tx_signatures: Option, pub tx_abort: Option, + pub splice_locked: Option, } /// The first message we send to our peer after connection @@ -2172,6 +2173,10 @@ impl FundingScope { pub fn get_short_channel_id(&self) -> Option { self.short_channel_id } + + fn is_splice(&self) -> bool { + self.channel_transaction_parameters.splice_parent_funding_txid.is_some() + } } /// Info about a pending splice, used in the pre-splice channel @@ -8350,6 +8355,7 @@ where shutdown_msg, announcement_sigs, tx_signatures: None, tx_abort: None, + splice_locked: None, }); } @@ -8361,6 +8367,7 @@ where shutdown_msg, announcement_sigs, tx_signatures: None, tx_abort: None, + splice_locked: None, }); } @@ -8406,6 +8413,25 @@ where .and_then(|_| self.get_channel_ready(logger)) } else { None }; + // A receiving node: + // - if `your_last_funding_locked` is set and it does not match the most recent + // `splice_locked` it has sent: + // - MUST retransmit `splice_locked`. 
+ let sent_splice_txid = self + .maybe_get_my_current_funding_locked() + .filter(|funding| funding.is_splice()) + .map(|funding| { + funding.get_funding_txid().expect("Splice funding_txid should always be set") + }); + let splice_locked = msg.your_last_funding_locked_txid.and_then(|last_funding_txid| { + sent_splice_txid + .filter(|sent_splice_txid| last_funding_txid != *sent_splice_txid) + .map(|splice_txid| msgs::SpliceLocked { + channel_id: self.context.channel_id, + splice_txid, + }) + }); + let mut commitment_update = None; let mut tx_signatures = None; let mut tx_abort = None; @@ -8518,6 +8544,7 @@ where order: self.context.resend_order.clone(), tx_signatures, tx_abort, + splice_locked, }) } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { // We've made an update so we must have exchanged `tx_signatures`, implying that @@ -8539,6 +8566,7 @@ where order: self.context.resend_order.clone(), tx_signatures, tx_abort, + splice_locked, }) } else { let commitment_update = if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst @@ -8563,6 +8591,7 @@ where order: self.context.resend_order.clone(), tx_signatures, tx_abort, + splice_locked, }) } } else if msg.next_local_commitment_number < next_counterparty_commitment_number { @@ -10196,6 +10225,24 @@ where None } + #[cfg(splicing)] + fn maybe_get_my_current_funding_locked(&self) -> Option<&FundingScope> { + self.pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.sent_funding_txid) + .and_then(|funding_txid| { + self.pending_funding + .iter() + .find(|funding| funding.get_funding_txid() == Some(funding_txid)) + }) + .or_else(|| self.is_our_channel_ready().then(|| &self.funding)) + } + + #[cfg(not(splicing))] + fn maybe_get_my_current_funding_locked(&self) -> Option<&FundingScope> { + None + } + #[cfg(splicing)] fn maybe_get_my_current_funding_locked_txid(&self) -> Option { self.pending_splice diff --git a/lightning/src/ln/channelmanager.rs 
b/lightning/src/ln/channelmanager.rs index 0088a64e6b3..c0c698c1f6d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3408,7 +3408,8 @@ macro_rules! handle_monitor_update_completion { &mut $peer_state.pending_msg_events, $chan, updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds, updates.funding_broadcastable, updates.channel_ready, - updates.announcement_sigs, updates.tx_signatures, None); + updates.announcement_sigs, updates.tx_signatures, None, None, + ); if let Some(upd) = channel_update { $peer_state.pending_msg_events.push(upd); } @@ -8088,9 +8089,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ funding_broadcastable: Option, channel_ready: Option, announcement_sigs: Option, tx_signatures: Option, tx_abort: Option, + splice_locked: Option, ) -> (Option<(u64, Option, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec)>) { let logger = WithChannelContext::from(&self.logger, &channel.context, None); - log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", + log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort, {} splice_locked", &channel.context.channel_id(), if raa.is_some() { "an" } else { "no" }, if commitment_update.is_some() { "a" } else { "no" }, @@ -8100,6 +8102,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ if announcement_sigs.is_some() { "sending" } else { "without" }, if tx_signatures.is_some() { "sending" } else { "without" }, if tx_abort.is_some() { "sending" } else { "without" }, + if splice_locked.is_some() { "sending" } else { "without" }, ); let counterparty_node_id = channel.context.get_counterparty_node_id(); @@ -8139,6 +8142,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ msg, }); } + if let Some(msg) = splice_locked { + pending_msg_events.push(MessageSendEvent::SendSpliceLocked { + node_id: counterparty_node_id, + msg, + }); + } macro_rules! handle_cs { () => { if let Some(update) = commitment_update { @@ -10083,7 +10092,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order, Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs, - responses.tx_signatures, responses.tx_abort); + responses.tx_signatures, responses.tx_abort, responses.splice_locked, + ); debug_assert!(htlc_forwards.is_none()); debug_assert!(decode_update_add_htlcs.is_none()); if let Some(upd) = channel_update { From b64728078714ed1b60dbf82e33f87327494903d0 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 25 Jun 2025 11:10:34 -0500 Subject: [PATCH 15/18] Retransmit channel_ready / splice_locked awaiting announcement_sigs The splicing spec updates channel_establishment logic to retransmit channel_ready or splice_locked for announced channels. Specifically: - if `my_current_funding_locked` is included: - if `announce_channel` is set for this channel: - if it has not received `announcement_signatures` for that transaction: - MUST retransmit `channel_ready` or `splice_locked` after exchanging `channel_reestablish`. 
--- lightning/src/ln/channel.rs | 53 ++++++++++++++++++++++++++++--------- 1 file changed, 41 insertions(+), 12 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index a57e45a7841..5d3647a5da9 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8410,27 +8410,56 @@ where msg.your_last_funding_locked_txid .is_none() .then(|| ()) + // The sending node: + // - if `my_current_funding_locked` is included: + // - if `announce_channel` is set for this channel: + // - if it has not received `announcement_signatures` for that transaction: + // - MUST retransmit `channel_ready` or `splice_locked` after exchanging `channel_reestablish`. + .or_else(|| { + self.maybe_get_my_current_funding_locked() + .filter(|funding| !funding.is_splice()) + .filter(|_| self.context.config.announce_for_forwarding) + .filter(|_| self.context.announcement_sigs.is_none()) + .map(|_| ()) + }) .and_then(|_| self.get_channel_ready(logger)) } else { None }; - // A receiving node: - // - if `your_last_funding_locked` is set and it does not match the most recent - // `splice_locked` it has sent: - // - MUST retransmit `splice_locked`. let sent_splice_txid = self .maybe_get_my_current_funding_locked() .filter(|funding| funding.is_splice()) .map(|funding| { funding.get_funding_txid().expect("Splice funding_txid should always be set") }); - let splice_locked = msg.your_last_funding_locked_txid.and_then(|last_funding_txid| { - sent_splice_txid - .filter(|sent_splice_txid| last_funding_txid != *sent_splice_txid) - .map(|splice_txid| msgs::SpliceLocked { - channel_id: self.context.channel_id, - splice_txid, - }) - }); + let splice_locked = msg + // A receiving node: + // - if `your_last_funding_locked` is set and it does not match the most recent + // `splice_locked` it has sent: + // - MUST retransmit `splice_locked`. 
+ .your_last_funding_locked_txid + .and_then(|last_funding_txid| { + sent_splice_txid.filter(|sent_splice_txid| last_funding_txid != *sent_splice_txid) + }) + // The sending node: + // - if `my_current_funding_locked` is included: + // - if `announce_channel` is set for this channel: + // - if it has not received `announcement_signatures` for that transaction: + // - MUST retransmit `channel_ready` or `splice_locked` after exchanging `channel_reestablish`. + .or_else(|| { + sent_splice_txid + .filter(|_| self.context.config.announce_for_forwarding) + .filter(|sent_splice_txid| { + if self.funding.get_funding_txid() == Some(*sent_splice_txid) { + self.context.announcement_sigs.is_none() + } else { + true + } + }) + }) + .map(|splice_txid| msgs::SpliceLocked { + channel_id: self.context.channel_id, + splice_txid, + }); let mut commitment_update = None; let mut tx_signatures = None; From a907398bfbf0de173d97fce61651ea0564e237a5 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 25 Jun 2025 17:14:17 -0500 Subject: [PATCH 16/18] Clear announcement_sigs on FundingScope promotion When a splice transaction is promoted (i.e., when splice_locked has been exchanged), announcement_signatures must be sent. However, if we try to send a channel_announcement before they are received, then the signatures will be incorrect. To avoid this, clear the counterparty's announcement_signatures upon promoting a FundingScope. --- lightning/src/ln/channel.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 5d3647a5da9..a119dae2ced 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -5904,6 +5904,7 @@ macro_rules! 
promote_splice_funding { core::mem::swap(&mut $self.funding, $funding); $self.pending_splice = None; $self.pending_funding.clear(); + $self.context.announcement_sigs = None; $self.context.announcement_sigs_state = AnnouncementSigsState::NotSent; }; } From 15eef7bd69d0288c237bc5e9f12c342d44bd2ca2 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Fri, 27 Jun 2025 10:45:11 -0500 Subject: [PATCH 17/18] Send channel_ready on channel_reestablish The channel_reestablish protocol supports retransmitting channel_ready messages as needed. Add support for doing such when handling channel_reestablish messages. --- lightning/src/ln/channel.rs | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index a119dae2ced..cee07feafae 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8404,24 +8404,42 @@ where // We should never have to worry about MonitorUpdateInProgress resending ChannelReady self.get_channel_ready(logger) } else if splicing_negotiated { + let funding_txid = self + .maybe_get_my_current_funding_locked() + .filter(|funding| !funding.is_splice()) + .map(|funding| { + funding.get_funding_txid().expect("funding_txid should always be set") + }); + // A node: // - if `option_splice` was negotiated and `your_last_funding_locked` is not // set in the `channel_reestablish` it received: // - MUST retransmit `channel_ready`. msg.your_last_funding_locked_txid .is_none() - .then(|| ()) + .then(|| funding_txid) + .flatten() // The sending node: // - if `my_current_funding_locked` is included: // - if `announce_channel` is set for this channel: // - if it has not received `announcement_signatures` for that transaction: // - MUST retransmit `channel_ready` or `splice_locked` after exchanging `channel_reestablish`. 
.or_else(|| {
-                        self.maybe_get_my_current_funding_locked()
-                            .filter(|funding| !funding.is_splice())
+                        funding_txid
                             .filter(|_| self.context.config.announce_for_forwarding)
                             .filter(|_| self.context.announcement_sigs.is_none())
-                            .map(|_| ())
+                    })
+                    // TODO: The language from the spec below should be updated to be in terms of
+                    // `your_last_funding_locked` received and `my_current_funding_locked` sent rather
+                    // than other messages received.
+                    //
+                    // - if it receives `channel_ready` for that transaction after exchanging `channel_reestablish`:
+                    //   - MUST retransmit `channel_ready` in response, if not already sent since reconnecting.
+                    .or_else(|| {
+                        msg.your_last_funding_locked_txid
+                            .and_then(|last_funding_txid| {
+                                funding_txid.filter(|funding_txid| last_funding_txid != *funding_txid)
+                            })
                         })
                         .and_then(|_| self.get_channel_ready(logger))
                 } else { None };

From 8004c6f445064b6d456c4ffc761ec5ac2d1fa0bb Mon Sep 17 00:00:00 2001
From: Jeffrey Czyz
Date: Fri, 27 Jun 2025 11:40:22 -0500
Subject: [PATCH 18/18] Handle implicit splice_locked during
 channel_reestablish

When handling a counterparty's channel_reestablish, the spec dictates
that a splice_locked may be implied by my_current_funding_locked.
Compare that against any pending splices and handle an implicit
splice_locked message when applicable.
--- lightning/src/ln/channel.rs | 30 ++++++++++++++++++++++++++++++ lightning/src/ln/channelmanager.rs | 14 ++++++++++++-- 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index cee07feafae..f1cf2ddfe03 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1217,6 +1217,7 @@ pub(super) struct ReestablishResponses { pub tx_signatures: Option, pub tx_abort: Option, pub splice_locked: Option, + pub implicit_splice_locked: Option, } /// The first message we send to our peer after connection @@ -8357,6 +8358,7 @@ where tx_signatures: None, tx_abort: None, splice_locked: None, + implicit_splice_locked: None, }); } @@ -8369,6 +8371,7 @@ where tx_signatures: None, tx_abort: None, splice_locked: None, + implicit_splice_locked: None, }); } @@ -8480,6 +8483,30 @@ where splice_txid, }); + // A receiving node: + // - if splice transactions are pending and `my_current_funding_locked` matches one of + // those splice transactions, for which it hasn't received `splice_locked` yet: + // - MUST process `my_current_funding_locked` as if it was receiving `splice_locked` + // for this `txid`. 
+ #[cfg(splicing)] + let implicit_splice_locked = msg.my_current_funding_locked_txid.and_then(|funding_txid| { + self.pending_funding + .iter() + .find(|funding| funding.get_funding_txid() == Some(funding_txid)) + .and_then(|_| { + self.pending_splice.as_ref().and_then(|pending_splice| { + (Some(funding_txid) != pending_splice.received_funding_txid) + .then(|| funding_txid) + }) + }) + .map(|splice_txid| msgs::SpliceLocked { + channel_id: self.context.channel_id, + splice_txid, + }) + }); + #[cfg(not(splicing))] + let implicit_splice_locked = None; + let mut commitment_update = None; let mut tx_signatures = None; let mut tx_abort = None; @@ -8593,6 +8620,7 @@ where tx_signatures, tx_abort, splice_locked, + implicit_splice_locked, }) } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { // We've made an update so we must have exchanged `tx_signatures`, implying that @@ -8615,6 +8643,7 @@ where tx_signatures, tx_abort, splice_locked, + implicit_splice_locked, }) } else { let commitment_update = if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst @@ -8640,6 +8669,7 @@ where tx_signatures, tx_abort, splice_locked, + implicit_splice_locked, }) } } else if msg.next_local_commitment_number < next_counterparty_commitment_number { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index c0c698c1f6d..48d011f9277 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10046,7 +10046,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ #[rustfmt::skip] fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result { - let need_lnd_workaround = { + let (implicit_splice_locked, need_lnd_workaround) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) @@ -10099,7 +10099,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(upd) = channel_update { peer_state.pending_msg_events.push(upd); } - need_lnd_workaround + + (responses.implicit_splice_locked, need_lnd_workaround) } else { return try_channel_entry!(self, peer_state, Err(ChannelError::close( "Got a channel_reestablish message for an unfunded channel!".into())), chan_entry); @@ -10146,6 +10147,15 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(channel_ready_msg) = need_lnd_workaround { self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?; } + + #[cfg(not(splicing))] + let _ = implicit_splice_locked; + #[cfg(splicing)] + if let Some(splice_locked) = implicit_splice_locked { + self.internal_splice_locked(counterparty_node_id, &splice_locked)?; + return Ok(NotifyOption::DoPersist); + } + Ok(NotifyOption::SkipPersistHandleEvents) }