From 377ca5f2538795953841b74a51014fb7fb147e87 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Fri, 14 Nov 2025 11:36:18 +0100 Subject: [PATCH 1/4] Channel logging improvements Additional trace logs to help with debugging. --- lightning/src/ln/channel.rs | 4 ++-- lightning/src/ln/channelmanager.rs | 13 +++++++++---- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index e55e4144ef2..b2b9520b94f 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8877,8 +8877,8 @@ where ); return_with_htlcs_to_fail!(htlcs_to_fail); } else { - log_debug!(logger, "Received a valid revoke_and_ack with no reply necessary. {} monitor update.", - release_state_str); + log_debug!(logger, "Received a valid revoke_and_ack with no reply necessary. {} monitor update {}.", + release_state_str, monitor_update.update_id); self.monitor_updating_paused( false, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index d84d46e8995..3234f62eef4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9379,6 +9379,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ for action in actions.into_iter() { match action { MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => { + let logger = WithContext::from(&self.logger, None, None, Some(payment_hash)); + log_trace!(logger, "Handling PaymentClaimed monitor update completion action"); + if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim { let per_peer_state = self.per_peer_state.read().unwrap(); per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| { @@ -9454,6 +9457,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // `payment_id` should suffice to ensure we never spuriously drop a second // event for a duplicate payment. if !pending_events.contains(&event_action) { + log_trace!(logger, "Queuing PaymentClaimed event with event completion action {:?}", event_action.1); pending_events.push_back(event_action); } } @@ -16984,10 +16988,6 @@ where let logger = WithChannelMonitor::from(&args.logger, monitor, None); let channel_id = monitor.channel_id(); - log_info!( - logger, - "Queueing monitor update to ensure missing channel is force closed", - ); let monitor_update = ChannelMonitorUpdate { update_id: monitor.get_latest_update_id().saturating_add(1), updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { @@ -16995,6 +16995,11 @@ where }], channel_id: Some(monitor.channel_id()), }; + log_info!( + logger, + "Queueing monitor update {} to ensure missing channel is force closed", + monitor_update.update_id + ); let funding_txo = monitor.get_funding_txo(); let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, From d4d0879e3b8683d3e7aba3f87cc64ed38dabf826 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 11 Nov 2025 15:17:36 +0100 Subject: [PATCH 2/4] Store and check channel data in channel monitors Add a new `safe_channels` feature flag that gates in-development work toward persisting channel monitors and channels atomically, preventing them from desynchronizing and causing force closures. This commit begins that transition by storing both pieces together and adding consistency checks during writes. 
These checks mirror what the channel manager currently validates only on reload, but performing them earlier increases coverage and surfaces inconsistencies sooner. --- ci/ci-tests.sh | 5 + lightning/Cargo.toml | 1 + lightning/src/chain/channelmonitor.rs | 167 +++++++++++++++++++++++++- lightning/src/ln/channel.rs | 118 +++++++++++++++++- lightning/src/ln/channelmanager.rs | 20 +++ 5 files changed, 304 insertions(+), 7 deletions(-) diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index 91ead9903cb..98d8086bfd5 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -46,6 +46,11 @@ cargo test -p lightning --verbose --color always --features dnssec cargo check -p lightning --verbose --color always --features dnssec cargo doc -p lightning --document-private-items --features dnssec +echo -e "\n\nChecking and testing lightning with safe_channels" +cargo test -p lightning --verbose --color always --features safe_channels +cargo check -p lightning --verbose --color always --features safe_channels +cargo doc -p lightning --document-private-items --features safe_channels + echo -e "\n\nChecking and testing Block Sync Clients with features" cargo test -p lightning-block-sync --verbose --color always --features rest-client diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index 9df4a725e54..2d3d2a8d967 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -23,6 +23,7 @@ _externalize_tests = ["inventory", "_test_utils"] # Allow signing of local transactions that may have been revoked or will be revoked, for functional testing (e.g. justice tx handling). # This is unsafe to use in production because it may result in the counterparty publishing taking our funds. unsafe_revoked_tx_signing = [] +safe_channels = [] std = [] diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 10e5049682e..afb247c41d5 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -50,6 +50,8 @@ use crate::ln::chan_utils::{ self, ChannelTransactionParameters, CommitmentTransaction, CounterpartyCommitmentSecrets, HTLCClaim, HTLCOutputInCommitment, HolderCommitmentTransaction, }; +#[cfg(feature = "safe_channels")] +use crate::ln::channel::read_check_data; use crate::ln::channel::INITIAL_COMMITMENT_NUMBER; use crate::ln::channel_keys::{ DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, HtlcKey, RevocationBasepoint, @@ -111,6 +113,10 @@ pub struct ChannelMonitorUpdate { /// Will be `None` for `ChannelMonitorUpdate`s constructed on LDK versions prior to 0.0.121 and /// always `Some` otherwise. pub channel_id: Option, + + /// The encoded channel data associated with this ChannelMonitor, if any. 
+ #[cfg(feature = "safe_channels")] + pub encoded_channel: Option>, } impl ChannelMonitorUpdate { @@ -156,9 +162,16 @@ impl Writeable for ChannelMonitorUpdate { for update_step in self.updates.iter() { update_step.write(w)?; } + #[cfg(not(feature = "safe_channels"))] + write_tlv_fields!(w, { + // 1 was previously used to store `counterparty_node_id` + (3, self.channel_id, option), + }); + #[cfg(feature = "safe_channels")] write_tlv_fields!(w, { // 1 was previously used to store `counterparty_node_id` (3, self.channel_id, option), + (5, self.encoded_channel, option) }); Ok(()) } @@ -176,11 +189,24 @@ impl Readable for ChannelMonitorUpdate { } } let mut channel_id = None; + #[cfg(not(feature = "safe_channels"))] read_tlv_fields!(r, { // 1 was previously used to store `counterparty_node_id` (3, channel_id, option), }); - Ok(Self { update_id, updates, channel_id }) + #[cfg(feature = "safe_channels")] + let mut encoded_channel = None; + #[cfg(feature = "safe_channels")] + read_tlv_fields!(r, { + // 1 was previously used to store `counterparty_node_id` + (3, channel_id, option), + (5, encoded_channel, option) + }); + Ok(Self { + update_id, updates, channel_id, + #[cfg(feature = "safe_channels")] + encoded_channel + }) } } @@ -1402,6 +1428,11 @@ pub(crate) struct ChannelMonitorImpl { /// make deciding whether to do so simple, here we track whether this monitor was last written /// prior to 0.1. written_by_0_1_or_later: bool, + + /// The serialized channel state as provided via the last `ChannelMonitorUpdate` or via a call to + /// [`ChannelMonitor::update_encoded_channel`]. + #[cfg(feature = "safe_channels")] + encoded_channel: Option>, } // Returns a `&FundingScope` for the one we are currently observing/handling commitment transactions @@ -1521,6 +1552,12 @@ const MIN_SERIALIZATION_VERSION: u8 = 1; pub(crate) fn write_chanmon_internal( channel_monitor: &ChannelMonitorImpl, _is_stub: bool, writer: &mut W, ) -> Result<(), Error> { + // Check that the encoded channel (if present) is consistent with the rest of the monitor. This sets an invariant + // for the safe_channels feature. 
+ #[cfg(feature = "safe_channels")] + if let Some(ref encoded_channel) = channel_monitor.encoded_channel { + channel_monitor.check_encoded_channel_consistency(encoded_channel); + } write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); channel_monitor.latest_update_id.write(writer)?; @@ -1733,6 +1770,7 @@ pub(crate) fn write_chanmon_internal( _ => channel_monitor.pending_monitor_events.clone(), }; + #[cfg(not(feature = "safe_channels"))] write_tlv_fields!(writer, { (1, channel_monitor.funding_spend_confirmed, option), (3, channel_monitor.htlcs_resolved_on_chain, required_vec), @@ -1757,6 +1795,32 @@ pub(crate) fn write_chanmon_internal( (37, channel_monitor.funding_seen_onchain, required), }); + #[cfg(feature = "safe_channels")] + write_tlv_fields!(writer, { + (1, channel_monitor.funding_spend_confirmed, option), + (3, channel_monitor.htlcs_resolved_on_chain, required_vec), + (5, pending_monitor_events, required_vec), + (7, channel_monitor.funding_spend_seen, required), + (9, channel_monitor.counterparty_node_id, required), + (11, channel_monitor.confirmed_commitment_tx_counterparty_output, option), + (13, channel_monitor.spendable_txids_confirmed, required_vec), + (15, channel_monitor.counterparty_fulfilled_htlcs, required), + (17, channel_monitor.initial_counterparty_commitment_info, option), + (19, channel_monitor.channel_id, required), + (21, channel_monitor.balances_empty_height, option), + (23, channel_monitor.holder_pays_commitment_tx_fee, option), + (25, channel_monitor.payment_preimages, required), + (27, channel_monitor.first_negotiated_funding_txo, required), + (29, channel_monitor.initial_counterparty_commitment_tx, option), + (31, channel_monitor.funding.channel_parameters, required), + (32, channel_monitor.pending_funding, optional_vec), + (33, channel_monitor.htlcs_resolved_to_user, required), + (34, channel_monitor.alternative_funding_confirmed, option), + (35, channel_monitor.is_manual_broadcast, required), + (37, channel_monitor.funding_seen_onchain, required), + (39, channel_monitor.encoded_channel, option), + }); + Ok(()) } @@ -1994,6 +2058,8 @@ impl ChannelMonitor { alternative_funding_confirmed: None, written_by_0_1_or_later: true, + #[cfg(feature = "safe_channels")] + encoded_channel: None, }) } @@ -2114,6 +2180,19 @@ impl ChannelMonitor { inner.update_monitor(updates, broadcaster, fee_estimator, &logger) } + /// Gets the encoded channel data, if any, associated with this ChannelMonitor. + #[cfg(feature = "safe_channels")] + pub fn get_encoded_channel(&self) -> Option> { + self.inner.lock().unwrap().encoded_channel.clone() + } + + /// Updates the encoded channel data associated with this ChannelMonitor. To clear the encoded channel data (for + /// example after shut down of a channel), pass an empty vector. + #[cfg(feature = "safe_channels")] + pub fn update_encoded_channel(&self, encoded: Vec) { + self.inner.lock().unwrap().update_encoded_channel(encoded); + } + /// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this /// ChannelMonitor. 
/// @@ -2719,6 +2798,55 @@ impl ChannelMonitor { } impl ChannelMonitorImpl { + #[cfg(feature = "safe_channels")] + fn check_encoded_channel_consistency(&self, encoded: &[u8]) { + let encoded_channel_reader = &mut &encoded[..]; + let check_res = read_check_data(encoded_channel_reader); + if let Ok(check_data) = check_res { + debug_assert!( + check_data.cur_holder_commitment_transaction_number + <= self.get_cur_holder_commitment_number(), + "cur_holder_commitment_transaction_number - channel: {} vs monitor: {}", + check_data.cur_holder_commitment_transaction_number, + self.get_cur_holder_commitment_number() + ); + debug_assert!( + check_data.revoked_counterparty_commitment_transaction_number + <= self.get_min_seen_secret(), + "revoked_counterparty_commitment_transaction_number - channel: {} vs monitor: {}", + check_data.revoked_counterparty_commitment_transaction_number, + self.get_min_seen_secret() + ); + debug_assert!( + check_data.cur_counterparty_commitment_transaction_number + <= self.get_cur_counterparty_commitment_number(), + "cur_counterparty_commitment_transaction_number - channel: {} vs monitor: {}", + check_data.cur_counterparty_commitment_transaction_number, + self.get_cur_counterparty_commitment_number() + ); + debug_assert!( + check_data.latest_monitor_update_id >= self.get_latest_update_id(), + "latest_monitor_update_id - channel: {} vs monitor: {}", + check_data.latest_monitor_update_id, + self.get_latest_update_id() + ); + } else { + debug_assert!(false, "Failed to read check data from encoded channel"); + } + } + + #[cfg(feature = "safe_channels")] + fn update_encoded_channel(&mut self, encoded: Vec<u8>) { + if !encoded.is_empty() { + // Check that the encoded channel is consistent with the rest of the monitor. This sets an invariant for the + // safe_channels feature. + self.check_encoded_channel_consistency(&encoded); + self.encoded_channel = Some(encoded); + } else { + self.encoded_channel = None; + } + } + /// Helper for get_claimable_balances which does the work for an individual HTLC, generating up /// to one `Balance` for the HTLC. #[rustfmt::skip] @@ -4405,6 +4533,13 @@ impl ChannelMonitorImpl { } } + // Assume that if the update contains no encoded channel, the channel remained unchanged. We + // therefore do not update the monitor. 
+ #[cfg(feature="safe_channels")] + if let Some(encoded_channel) = updates.encoded_channel.as_ref() { + self.update_encoded_channel(encoded_channel.clone()); + } + if ret.is_ok() && self.no_further_updates_allowed() && is_pre_close_update { log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); Err(()) @@ -6644,6 +6779,33 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let mut alternative_funding_confirmed = None; let mut is_manual_broadcast = RequiredWrapper(None); let mut funding_seen_onchain = RequiredWrapper(None); + #[cfg(not(feature="safe_channels"))] + read_tlv_fields!(reader, { + (1, funding_spend_confirmed, option), + (3, htlcs_resolved_on_chain, optional_vec), + (5, pending_monitor_events, optional_vec), + (7, funding_spend_seen, option), + (9, counterparty_node_id, option), + (11, confirmed_commitment_tx_counterparty_output, option), + (13, spendable_txids_confirmed, optional_vec), + (15, counterparty_fulfilled_htlcs, option), + (17, initial_counterparty_commitment_info, option), + (19, channel_id, option), + (21, balances_empty_height, option), + (23, holder_pays_commitment_tx_fee, option), + (25, payment_preimages_with_info, option), + (27, first_negotiated_funding_txo, (default_value, outpoint)), + (29, initial_counterparty_commitment_tx, option), + (31, channel_parameters, (option: ReadableArgs, None)), + (32, pending_funding, optional_vec), + (33, htlcs_resolved_to_user, option), + (34, alternative_funding_confirmed, option), + (35, is_manual_broadcast, (default_value, false)), + (37, funding_seen_onchain, (default_value, true)), + }); + #[cfg(feature="safe_channels")] + let mut encoded_channel = None; + #[cfg(feature="safe_channels")] read_tlv_fields!(reader, { (1, funding_spend_confirmed, option), (3, htlcs_resolved_on_chain, optional_vec), @@ -6666,6 +6828,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP (34, alternative_funding_confirmed, option), (35, is_manual_broadcast, (default_value, false)), (37, funding_seen_onchain, (default_value, true)), + (39, encoded_channel, option), }); // Note that `payment_preimages_with_info` was added (and is always written) in LDK 0.1, so // we can use it to determine if this monitor was last written by LDK 0.1 or later. @@ -6843,6 +7006,8 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP alternative_funding_confirmed, written_by_0_1_or_later, + #[cfg(feature="safe_channels")] + encoded_channel, }); if counterparty_node_id.is_none() { diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index b2b9520b94f..772671a1d31 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -6073,6 +6073,8 @@ where should_broadcast: broadcast, }], channel_id: Some(self.channel_id()), + #[cfg(feature = "safe_channels")] + encoded_channel: Some(Vec::new()), // Clear channel on shut down. }; Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), update)) } else { @@ -7316,6 +7318,8 @@ where payment_info, }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + encoded_channel: Some(self.encode()), }; if !self.context.channel_state.can_generate_new_commitment() { @@ -7429,6 +7433,10 @@ where // to be strictly increasing by one, so decrement it here. 
self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); + #[cfg(feature = "safe_channels")] + { + monitor_update.encoded_channel = Some(self.encode()); + } } else { let blocked_upd = self.context.blocked_monitor_updates.get(0); let new_mon_id = blocked_upd @@ -7932,7 +7940,8 @@ where ); self.context.latest_monitor_update_id += 1; - let monitor_update = ChannelMonitorUpdate { + #[allow(unused_mut)] + let mut monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, updates: vec![ChannelMonitorUpdateStep::RenegotiatedFunding { channel_parameters: pending_splice_funding.channel_transaction_parameters.clone(), @@ -7940,6 +7949,8 @@ where counterparty_commitment_tx, }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + encoded_channel: None, }; self.context @@ -7949,6 +7960,10 @@ where .received_commitment_signed(); self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + #[cfg(feature = "safe_channels")] + { + monitor_update.encoded_channel = Some(self.encode()); + } Ok(self.push_ret_blockable_mon_update(monitor_update)) } @@ -8205,6 +8220,8 @@ where update_id: self.context.latest_monitor_update_id, updates: vec![update], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + encoded_channel: None, }; self.context.expecting_peer_commitment_signed = false; @@ -8229,6 +8246,10 @@ where } log_debug!(logger, "Received valid commitment_signed from peer, updated HTLC state but awaiting a monitor update resolution to reply.", ); + #[cfg(feature = "safe_channels")] + { + monitor_update.encoded_channel = Some(self.encode()); + } return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -8257,6 +8278,10 @@ where Vec::new(), Vec::new(), ); + #[cfg(feature = "safe_channels")] + { + monitor_update.encoded_channel = Some(self.encode()); + } return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -8309,6 +8334,8 @@ where update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet! updates: Vec::new(), channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + encoded_channel: None, }; let mut htlc_updates = Vec::new(); @@ -8404,6 +8431,11 @@ where unreachable!() }; update_fulfill_count += 1; + + #[cfg(feature = "safe_channels")] + { + additional_monitor_update.encoded_channel = Some(self.encode()); + } monitor_update.updates.append(&mut additional_monitor_update.updates); None }, @@ -8462,6 +8494,11 @@ where update_add_count, update_fulfill_count, update_fail_count); self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); + + #[cfg(feature = "safe_channels")] + { + monitor_update.encoded_channel = Some(self.encode()); + } (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) } else { (None, Vec::new()) @@ -8578,6 +8615,8 @@ where secret: msg.per_commitment_secret, }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + encoded_channel: None, }; // Update state now that we've passed all the can-fail calls... @@ -8799,6 +8838,10 @@ where }; macro_rules! 
return_with_htlcs_to_fail { ($htlcs_to_fail: expr) => { + #[cfg(feature = "safe_channels")] + { + monitor_update.encoded_channel = Some(self.encode()); + } if !release_monitor { self.context .blocked_monitor_updates @@ -10417,6 +10460,8 @@ where scriptpubkey: self.get_closing_scriptpubkey(), }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + encoded_channel: Some(self.encode()), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); self.push_ret_blockable_mon_update(monitor_update) @@ -10871,10 +10916,13 @@ where if self.context.blocked_monitor_updates.is_empty() { return None; } - Some(( - self.context.blocked_monitor_updates.remove(0).update, - !self.context.blocked_monitor_updates.is_empty(), - )) + #[allow(unused_mut)] + let mut update = self.context.blocked_monitor_updates.remove(0).update; + #[cfg(feature = "safe_channels")] + { + update.encoded_channel = Some(self.encode()); + } + Some((update, !self.context.blocked_monitor_updates.is_empty())) } /// Pushes a new monitor update into our monitor update queue, returning it if it should be @@ -11174,6 +11222,8 @@ where funding_txid: funding_txo.txid, }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + encoded_channel: Some(self.encode()), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); let monitor_update = self.push_ret_blockable_mon_update(monitor_update); @@ -12723,12 +12773,14 @@ where } self.context.latest_monitor_update_id += 1; + self.context.channel_state.set_awaiting_remote_revoke(); let monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, updates: vec![update], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + encoded_channel: Some(self.encode()), }; - self.context.channel_state.set_awaiting_remote_revoke(); monitor_update } @@ -12973,6 +13025,8 @@ where scriptpubkey: self.get_closing_scriptpubkey(), }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + encoded_channel: Some(self.encode()), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); self.push_ret_blockable_mon_update(monitor_update) } @@ -15604,6 +15658,58 @@ where } } +#[cfg(feature = "safe_channels")] +pub struct ChannelStateCheckData { + pub cur_holder_commitment_transaction_number: u64, + pub revoked_counterparty_commitment_transaction_number: u64, + pub cur_counterparty_commitment_transaction_number: u64, + pub latest_monitor_update_id: u64, +} + +#[cfg(feature = "safe_channels")] +pub fn read_check_data<R: io::Read>(reader: &mut R) -> Result<ChannelStateCheckData, DecodeError> { + let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); + if ver <= 2 { + return Err(DecodeError::UnknownVersion); + } + + let _user_id_low: u64 = Readable::read(reader)?; + + let mut _config = LegacyChannelConfig::default(); + let mut _val: u64 = Readable::read(reader)?; + + let _channel_id: ChannelId = Readable::read(reader)?; + let channel_state = + ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?; + let _channel_value_satoshis: u64 = Readable::read(reader)?; + + let latest_monitor_update_id = Readable::read(reader)?; + + // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set. 
+ let mut _shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) { + Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)), + Err(_) => None, + }; + let _destination_script: ScriptBuf = Readable::read(reader)?; + + let holder_commitment_next_transaction_number: u64 = Readable::read(reader)?; + let counterparty_next_commitment_transaction_number: u64 = Readable::read(reader)?; + + let cur_holder_commitment_transaction_number = holder_commitment_next_transaction_number + 1; + let revoked_counterparty_commitment_transaction_number = + counterparty_next_commitment_transaction_number + 2; + let cur_counterparty_commitment_transaction_number = + counterparty_next_commitment_transaction_number + + if channel_state.is_awaiting_remote_revoke() { 0 } else { 1 }; + + Ok(ChannelStateCheckData { + cur_holder_commitment_transaction_number, + revoked_counterparty_commitment_transaction_number, + cur_counterparty_commitment_transaction_number, + latest_monitor_update_id, + }) +} + fn duration_since_epoch() -> Option<Duration> { #[cfg(not(feature = "std"))] let now = None; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 3234f62eef4..7b0b247d592 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9087,6 +9087,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ payment_info, }], channel_id: Some(prev_hop.channel_id), + #[cfg(feature = "safe_channels")] + encoded_channel: None, }; // We don't have any idea if this is a duplicate claim without interrogating the @@ -10305,6 +10307,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fail_chan!("Already had channel with the new channel_id"); }, hash_map::Entry::Vacant(e) => { + #[cfg(feature = "safe_channels")] + { + monitor.update_encoded_channel(chan.encode()); + } let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { // There's no problem signing a counterparty's funding transaction if our monitor @@ -10469,6 +10475,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ match chan .funding_signed(&msg, best_block, &self.signer_provider, &self.logger) .and_then(|(funded_chan, monitor)| { + #[cfg(feature = "safe_channels")] + { + monitor.update_encoded_channel(funded_chan.encode()); + } self.chain_monitor .watch_channel(funded_chan.context.channel_id(), monitor) .map_err(|()| { @@ -11186,6 +11196,10 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ if let Some(chan) = chan.as_funded_mut() { if let Some(monitor) = monitor_opt { + #[cfg(feature = "safe_channels")] + { + monitor.update_encoded_channel(chan.encode()); + } let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { handle_initial_monitor!(self, persist_state, peer_state_lock, peer_state, @@ -13608,6 +13622,8 @@ where updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id, }], + #[cfg(feature = "safe_channels")] + encoded_channel: None, }; let during_startup = @@ -16994,6 +17010,8 @@ where should_broadcast: true, }], channel_id: Some(monitor.channel_id()), + #[cfg(feature = "safe_channels")] + encoded_channel: Some(Vec::new()), }; log_info!( logger, @@ -17630,6 +17648,8 @@ where updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id, }], + #[cfg(feature = "safe_channels")] + encoded_channel: None, }, }); } From b096bf74ed5e35828781f580c15e39c5eb2e71b4 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 11 Nov 2025 17:22:04 +0100 Subject: [PATCH 3/4] Load chan mgr from monitors --- ci/ci-tests.sh | 41 +++++++++++++++- lightning/src/chain/channelmonitor.rs | 3 ++ lightning/src/ln/channel.rs | 4 ++ lightning/src/ln/channelmanager.rs | 70 +++++++++++++++++++++++++++ 4 files changed, 117 insertions(+), 1 deletion(-) diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index 98d8086bfd5..8e5c22a16e4 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -47,7 +47,46 @@ cargo check -p lightning --verbose --color always --features dnssec cargo doc -p lightning --document-private-items --features dnssec echo -e "\n\nChecking and testing lightning with safe_channels" -cargo test -p lightning --verbose --color always --features safe_channels +cargo test -p lightning --verbose --color always --features safe_channels -- \ + --skip channel_holding_cell_serialize \ + --skip test_blocked_chan_preimage_release \ + --skip test_durable_preimages_on_closed_channel \ + --skip test_inbound_reload_without_init_mon \ + --skip test_inverted_mon_completion_order \ + --skip test_outbound_reload_without_init_mon \ + --skip test_partial_claim_mon_update_compl_actions \ + --skip test_reload_mon_update_completion_actions \ + --skip test_multi_post_event_actions \ + --skip test_anchors_aggregated_revoked_htlc_tx \ + --skip test_anchors_monitor_fixes_counterparty_payment_script_on_reload \ + --skip test_claim_event_never_handled \ + --skip test_lost_timeout_monitor_events \ + --skip no_double_pay_with_stale_channelmanager \ + --skip test_onion_failure_stale_channel_update \ + --skip no_missing_sent_on_midpoint_reload \ + --skip no_missing_sent_on_reload \ + --skip retry_with_no_persist \ + --skip test_completed_payment_not_retryable_on_reload \ + --skip test_fulfill_restart_failure \ + --skip test_payment_metadata_consistency \ + --skip test_priv_forwarding_rejection \ + --skip test_quiescence_termination_on_disconnect \ + --skip forwarded_payment_no_manager_persistence \ + --skip intercepted_payment_no_manager_persistence \ + --skip removed_payment_no_manager_persistence \ + --skip test_data_loss_protect \ + --skip test_htlc_localremoved_persistence \ + --skip test_manager_serialize_deserialize_events \ + --skip test_manager_serialize_deserialize_inconsistent_monitor \ + --skip test_no_txn_manager_serialize_deserialize \ + --skip test_partial_claim_before_restart \ + --skip test_reload_partial_funding_batch \ + --skip test_unconf_chan \ + --skip 
test_unconf_chan_via_funding_unconfirmed \ --skip test_unconf_chan_via_listen \ --skip test_propose_splice_while_disconnected \ --skip test_splice_reestablish \ --skip test_splice_state_reset_on_disconnect cargo check -p lightning --verbose --color always --features safe_channels cargo doc -p lightning --document-private-items --features safe_channels diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index afb247c41d5..d4339f20de3 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -2389,14 +2389,17 @@ impl ChannelMonitor { self.inner.lock().unwrap().sign_to_local_justice_tx(justice_tx, input_idx, value, commitment_number) } + #[cfg(not(feature = "safe_channels"))] pub(crate) fn get_min_seen_secret(&self) -> u64 { self.inner.lock().unwrap().get_min_seen_secret() } + #[cfg(not(feature = "safe_channels"))] pub(crate) fn get_cur_counterparty_commitment_number(&self) -> u64 { self.inner.lock().unwrap().get_cur_counterparty_commitment_number() } + #[cfg(not(feature = "safe_channels"))] pub(crate) fn get_cur_holder_commitment_number(&self) -> u64 { self.inner.lock().unwrap().get_cur_holder_commitment_number() } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 772671a1d31..810d6e96f0f 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2423,6 +2423,7 @@ impl FundingScope { self.channel_transaction_parameters.channel_value_satoshis } + #[cfg(not(feature = "safe_channels"))] pub(crate) fn get_value_to_self_msat(&self) -> u64 { self.value_to_self_msat } @@ -10853,10 +10854,12 @@ where .try_for_each(|funding| self.context.can_accept_incoming_htlc(funding, dust_exposure_limiting_feerate, &logger)) } + #[cfg(not(feature = "safe_channels"))] pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 { self.holder_commitment_point.current_transaction_number() } + #[cfg(not(feature = "safe_channels"))] pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 { self.context.counterparty_next_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 } @@ -10970,6 +10973,7 @@ where /// transaction. If the channel is inbound, this implies simply that the channel has not /// advanced state. #[rustfmt::skip] + #[cfg(not(feature = "safe_channels"))] pub fn is_awaiting_initial_mon_persist(&self) -> bool { if !self.is_awaiting_monitor_update() { return false; } if matches!( diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 7b0b247d592..8416f126805 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -16769,6 +16769,8 @@ where let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = VecDeque::new(); let mut close_background_events = Vec::new(); + + #[cfg(not(feature = "safe_channels"))] for _ in 0..channel_count { let mut channel: FundedChannel<SP> = FundedChannel::read( reader, @@ -16968,6 +16970,74 @@ where } } + // Suppress unused mutable variable warning when safe_channels is enabled. + #[cfg(feature = "safe_channels")] + let _ = &mut args; + + // Discard channel manager versions of channels. + #[cfg(feature = "safe_channels")] + for _ in 0..channel_count { + _ = FundedChannel::read( + reader, + ( + &args.entropy_source, + &args.signer_provider, + &provided_channel_type_features(&args.config), + ), + )?; + } + + // Decode channels from monitors. 
+ #[cfg(feature = "safe_channels")] + for (_, monitor) in args.channel_monitors.iter() { + let opt_encoded_channel = monitor.get_encoded_channel(); + if opt_encoded_channel.is_none() { + // Monitor still exists, but there is no more channel state. This can happen after channel shut down. + continue; + } + let encoded_channel = opt_encoded_channel.unwrap(); + let encoded_channel_reader = &mut &encoded_channel[..]; + let mut channel: FundedChannel = FundedChannel::read( + encoded_channel_reader, + ( + &args.entropy_source, + &args.signer_provider, + &provided_channel_type_features(&args.config), + ), + )?; + let logger = WithChannelContext::from(&args.logger, &channel.context, None); + let channel_id = channel.context.channel_id(); + channel_id_set.insert(channel_id); + + channel.on_startup_drop_completed_blocked_mon_updates_through( + &logger, + monitor.get_latest_update_id(), + ); + log_info!(logger, "Successfully loaded at update_id {} against monitor at update id {} with {} blocked updates", + channel.context.get_latest_monitor_update_id(), + monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending()); + if let Some(short_channel_id) = channel.funding.get_short_channel_id() { + short_to_chan_info.insert( + short_channel_id, + (channel.context.get_counterparty_node_id(), channel.context.channel_id()), + ); + } + + for short_channel_id in channel.context.historical_scids() { + let cp_id = channel.context.get_counterparty_node_id(); + let chan_id = channel.context.channel_id(); + short_to_chan_info.insert(*short_channel_id, (cp_id, chan_id)); + } + + per_peer_state + .entry(channel.context.get_counterparty_node_id()) + .or_insert_with(|| Mutex::new(empty_peer_state())) + .get_mut() + .unwrap() + .channel_by_id + .insert(channel.context.channel_id(), Channel::from(channel)); + } + for (channel_id, monitor) in args.channel_monitors.iter() { if !channel_id_set.contains(channel_id) { let mut should_queue_fc_update = false; From e87a72706dace8324236270548dc4d4a76159c87 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 19 Nov 2025 17:38:19 +0100 Subject: [PATCH 4/4] don't load any other data from chanmgr --- lightning/src/ln/channelmanager.rs | 487 +---------------------------- 1 file changed, 15 insertions(+), 472 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 8416f126805..a5d9894b194 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -16738,13 +16738,12 @@ where L::Target: Logger, { fn read( - reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, + reader_unused: &mut Reader, + mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, ) -> Result { - let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); - - let chain_hash: ChainHash = Readable::read(reader)?; - let best_block_height: u32 = Readable::read(reader)?; - let best_block_hash: BlockHash = Readable::read(reader)?; + let chain_hash: ChainHash = Readable::read(reader_unused)?; + let best_block_height: u32 = Readable::read(reader_unused)?; + let best_block_hash: BlockHash = Readable::read(reader_unused)?; let empty_peer_state = || PeerState { channel_by_id: new_hash_map(), @@ -16760,233 +16759,16 @@ where }; let mut failed_htlcs = Vec::new(); - let channel_count: u64 = Readable::read(reader)?; - let mut channel_id_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128)); - let mut per_peer_state = hash_map_with_capacity(cmp::min( - channel_count as usize, - 
MAX_ALLOC_SIZE / mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>(), - )); - let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); + let mut channel_id_set = new_hash_set(); + let mut per_peer_state = new_hash_map(); + let mut short_to_chan_info = new_hash_map(); let mut channel_closures = VecDeque::new(); let mut close_background_events = Vec::new(); - #[cfg(not(feature = "safe_channels"))] - for _ in 0..channel_count { - let mut channel: FundedChannel<SP> = FundedChannel::read( - reader, - ( - &args.entropy_source, - &args.signer_provider, - &provided_channel_type_features(&args.config), - ), - )?; - let logger = WithChannelContext::from(&args.logger, &channel.context, None); - let channel_id = channel.context.channel_id(); - channel_id_set.insert(channel_id); - if let Some(ref mut monitor) = args.channel_monitors.get_mut(&channel_id) { - if channel.get_cur_holder_commitment_transaction_number() - > monitor.get_cur_holder_commitment_number() - || channel.get_revoked_counterparty_commitment_transaction_number() - > monitor.get_min_seen_secret() - || channel.get_cur_counterparty_commitment_transaction_number() - > monitor.get_cur_counterparty_commitment_number() - || channel.context.get_latest_monitor_update_id() - < monitor.get_latest_update_id() - { - // But if the channel is behind of the monitor, close the channel: - log_error!( - logger, - "A ChannelManager is stale compared to the current ChannelMonitor!" - ); - log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); - if channel.context.get_latest_monitor_update_id() - < monitor.get_latest_update_id() - { - log_error!(logger, " The ChannelMonitor is at update_id {} but the ChannelManager is at update_id {}.", - monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); - } - if channel.get_cur_holder_commitment_transaction_number() - > monitor.get_cur_holder_commitment_number() - { - log_error!(logger, " The ChannelMonitor is at holder commitment number {} but the ChannelManager is at holder commitment number {}.", - monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number()); - } - if channel.get_revoked_counterparty_commitment_transaction_number() - > monitor.get_min_seen_secret() - { - log_error!(logger, " The ChannelMonitor is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.", - monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number()); - } - if channel.get_cur_counterparty_commitment_transaction_number() - > monitor.get_cur_counterparty_commitment_number() - { - log_error!(logger, " The ChannelMonitor is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.", - monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number()); - } - let shutdown_result = - channel.force_shutdown(ClosureReason::OutdatedChannelManager); - if shutdown_result.unbroadcasted_batch_funding_txid.is_some() { - return Err(DecodeError::InvalidValue); - } - if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = - shutdown_result.monitor_update - { - // Our channel information is out of sync with the `ChannelMonitor`, so - // force the update to use the `ChannelMonitor`'s update_id for the close - // update. 
- let latest_update_id = monitor.get_latest_update_id().saturating_add(1); - update.update_id = latest_update_id; - per_peer_state - .entry(counterparty_node_id) - .or_insert_with(|| Mutex::new(empty_peer_state())) - .lock() - .unwrap() - .closed_channel_monitor_update_ids - .entry(channel_id) - .and_modify(|v| *v = cmp::max(latest_update_id, *v)) - .or_insert(latest_update_id); - - close_background_events.push( - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, - funding_txo, - channel_id, - update, - }, - ); - } - for (source, hash, cp_id, chan_id) in shutdown_result.dropped_outbound_htlcs { - let reason = LocalHTLCFailureReason::ChannelClosed; - failed_htlcs.push((source, hash, cp_id, chan_id, reason, None)); - } - channel_closures.push_back(( - events::Event::ChannelClosed { - channel_id: channel.context.channel_id(), - user_channel_id: channel.context.get_user_id(), - reason: ClosureReason::OutdatedChannelManager, - counterparty_node_id: Some(channel.context.get_counterparty_node_id()), - channel_capacity_sats: Some(channel.funding.get_value_satoshis()), - channel_funding_txo: channel.funding.get_funding_txo(), - last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()), - }, - None, - )); - for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() { - let mut found_htlc = false; - for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() { - if *channel_htlc_source == monitor_htlc_source { - found_htlc = true; - break; - } - } - if !found_htlc { - // If we have some HTLCs in the channel which are not present in the newer - // ChannelMonitor, they have been removed and should be failed back to - // ensure we don't forget them entirely. Note that if the missing HTLC(s) - // were actually claimed we'd have generated and ensured the previous-hop - // claim update ChannelMonitor updates were persisted prior to persising - // the ChannelMonitor update for the forward leg, so attempting to fail the - // backwards leg of the HTLC will simply be rejected. 
- let logger = WithChannelContext::from( - &args.logger, - &channel.context, - Some(*payment_hash), - ); - log_info!(logger, - "Failing HTLC with hash {} as it is missing in the ChannelMonitor but was present in the (stale) ChannelManager", - &payment_hash); - failed_htlcs.push(( - channel_htlc_source.clone(), - *payment_hash, - channel.context.get_counterparty_node_id(), - channel.context.channel_id(), - LocalHTLCFailureReason::ChannelClosed, - None, - )); - } - } - } else { - channel.on_startup_drop_completed_blocked_mon_updates_through( - &logger, - monitor.get_latest_update_id(), - ); - log_info!(logger, "Successfully loaded at update_id {} against monitor at update id {} with {} blocked updates", - channel.context.get_latest_monitor_update_id(), - monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending()); - if let Some(short_channel_id) = channel.funding.get_short_channel_id() { - short_to_chan_info.insert( - short_channel_id, - ( - channel.context.get_counterparty_node_id(), - channel.context.channel_id(), - ), - ); - } - - for short_channel_id in channel.context.historical_scids() { - let cp_id = channel.context.get_counterparty_node_id(); - let chan_id = channel.context.channel_id(); - short_to_chan_info.insert(*short_channel_id, (cp_id, chan_id)); - } - - per_peer_state - .entry(channel.context.get_counterparty_node_id()) - .or_insert_with(|| Mutex::new(empty_peer_state())) - .get_mut() - .unwrap() - .channel_by_id - .insert(channel.context.channel_id(), Channel::from(channel)); - } - } else if channel.is_awaiting_initial_mon_persist() { - // If we were persisted and shut down while the initial ChannelMonitor persistence - // was in-progress, we never broadcasted the funding transaction and can still - // safely discard the channel. - channel_closures.push_back(( - events::Event::ChannelClosed { - channel_id: channel.context.channel_id(), - user_channel_id: channel.context.get_user_id(), - reason: ClosureReason::DisconnectedPeer, - counterparty_node_id: Some(channel.context.get_counterparty_node_id()), - channel_capacity_sats: Some(channel.funding.get_value_satoshis()), - channel_funding_txo: channel.funding.get_funding_txo(), - last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()), - }, - None, - )); - } else { - log_error!( - logger, - "Missing ChannelMonitor for channel {} needed by ChannelManager.", - &channel.context.channel_id() - ); - log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); - log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); - log_error!( - logger, - " Without the ChannelMonitor we cannot continue without risking funds." - ); - log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); - return Err(DecodeError::InvalidValue); - } - } - // Suppress unused mutable variable warning when safe_channels is enabled. #[cfg(feature = "safe_channels")] let _ = &mut args; - // Discard channel manager versions of channels. - #[cfg(feature = "safe_channels")] - for _ in 0..channel_count { - _ = FundedChannel::read( - reader, - ( - &args.entropy_source, - &args.signer_provider, - &provided_channel_type_features(&args.config), - ), - )?; - } - // Decode channels from monitors. 
#[cfg(feature = "safe_channels")] for (_, monitor) in args.channel_monitors.iter() { @@ -17038,167 +16820,15 @@ where .insert(channel.context.channel_id(), Channel::from(channel)); } - for (channel_id, monitor) in args.channel_monitors.iter() { - if !channel_id_set.contains(channel_id) { - let mut should_queue_fc_update = false; - let counterparty_node_id = monitor.get_counterparty_node_id(); - - // If the ChannelMonitor had any updates, we may need to update it further and - // thus track it in `closed_channel_monitor_update_ids`. If the channel never - // had any updates at all, there can't be any HTLCs pending which we need to - // claim. - // Note that a `ChannelMonitor` is created with `update_id` 0 and after we - // provide it with a closure update its `update_id` will be at 1. - if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 { - should_queue_fc_update = !monitor.no_further_updates_allowed(); - let mut latest_update_id = monitor.get_latest_update_id(); - if should_queue_fc_update { - // Note that for channels closed pre-0.1, the latest update_id is - // `u64::MAX`. - latest_update_id = latest_update_id.saturating_add(1); - } - per_peer_state - .entry(counterparty_node_id) - .or_insert_with(|| Mutex::new(empty_peer_state())) - .lock() - .unwrap() - .closed_channel_monitor_update_ids - .entry(monitor.channel_id()) - .and_modify(|v| *v = cmp::max(latest_update_id, *v)) - .or_insert(latest_update_id); - } - - if !should_queue_fc_update { - continue; - } - - let logger = WithChannelMonitor::from(&args.logger, monitor, None); - let channel_id = monitor.channel_id(); - let monitor_update = ChannelMonitorUpdate { - update_id: monitor.get_latest_update_id().saturating_add(1), - updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { - should_broadcast: true, - }], - channel_id: Some(monitor.channel_id()), - #[cfg(feature = "safe_channels")] - encoded_channel: Some(Vec::new()), - }; - log_info!( - logger, - "Queueing monitor update {} to ensure missing channel is force closed", - monitor_update.update_id - ); - let funding_txo = monitor.get_funding_txo(); - let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, - funding_txo, - channel_id, - update: monitor_update, - }; - close_background_events.push(update); - } - } - const MAX_ALLOC_SIZE: usize = 1024 * 64; - let forward_htlcs_count: u64 = Readable::read(reader)?; - let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); - for _ in 0..forward_htlcs_count { - let short_channel_id = Readable::read(reader)?; - let pending_forwards_count: u64 = Readable::read(reader)?; - let mut pending_forwards = Vec::with_capacity(cmp::min( - pending_forwards_count as usize, - MAX_ALLOC_SIZE / mem::size_of::(), - )); - for _ in 0..pending_forwards_count { - pending_forwards.push(Readable::read(reader)?); - } - forward_htlcs.insert(short_channel_id, pending_forwards); - } - - let claimable_htlcs_count: u64 = Readable::read(reader)?; - let mut claimable_htlcs_list = - Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128)); - for _ in 0..claimable_htlcs_count { - let payment_hash = Readable::read(reader)?; - let previous_hops_len: u64 = Readable::read(reader)?; - let mut previous_hops = Vec::with_capacity(cmp::min( - previous_hops_len as usize, - MAX_ALLOC_SIZE / mem::size_of::(), - )); - for _ in 0..previous_hops_len { - previous_hops.push(::read(reader)?); - } - claimable_htlcs_list.push((payment_hash, previous_hops)); - } - - let peer_count: 
u64 = Readable::read(reader)?; - for _ in 0..peer_count { - let peer_pubkey: PublicKey = Readable::read(reader)?; - let latest_features = Readable::read(reader)?; - if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) { - peer_state.get_mut().unwrap().latest_features = latest_features; - } - } + let mut forward_htlcs = new_hash_map(); - let event_count: u64 = Readable::read(reader)?; let mut pending_events_read: VecDeque<(events::Event, Option<EventCompletionAction>)> = - VecDeque::with_capacity(cmp::min( - event_count as usize, - MAX_ALLOC_SIZE / mem::size_of::<(events::Event, Option<EventCompletionAction>)>(), - )); - for _ in 0..event_count { - match MaybeReadable::read(reader)? { - Some(event) => pending_events_read.push_back((event, None)), - None => continue, - } - } + VecDeque::new(); - let background_event_count: u64 = Readable::read(reader)?; - for _ in 0..background_event_count { - match <u8 as Readable>::read(reader)? { - 0 => { - // LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here, - // however we really don't (and never did) need them - we regenerate all - // on-startup monitor updates. - let _: OutPoint = Readable::read(reader)?; - let _: ChannelMonitorUpdate = Readable::read(reader)?; - }, - _ => return Err(DecodeError::InvalidValue), - } - } - - let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111 - let highest_seen_timestamp: u32 = Readable::read(reader)?; - - // The last version where a pending inbound payment may have been added was 0.0.116. - let pending_inbound_payment_count: u64 = Readable::read(reader)?; - for _ in 0..pending_inbound_payment_count { - let payment_hash: PaymentHash = Readable::read(reader)?; - let logger = WithContext::from(&args.logger, None, None, Some(payment_hash)); - let inbound: PendingInboundPayment = Readable::read(reader)?; - log_warn!( - logger, - "Ignoring deprecated pending inbound payment with payment hash {}: {:?}", - payment_hash, - inbound - ); - } + let mut pending_outbound_payments_compat = new_hash_map(); - let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?; - let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> = - hash_map_with_capacity(cmp::min( - pending_outbound_payments_count_compat as usize, - MAX_ALLOC_SIZE / 32, - )); - for _ in 0..pending_outbound_payments_count_compat { - let session_priv = Readable::read(reader)?; - let payment = PendingOutboundPayment::Legacy { - session_privs: hash_set_from_iter([session_priv]), - }; - if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() { - return Err(DecodeError::InvalidValue); - }; - } + let highest_seen_timestamp: u32 = 0; // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. 
let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = @@ -17209,8 +16839,6 @@ where let mut received_network_pubkey: Option<PublicKey> = None; let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; let mut probing_cookie_secret: Option<[u8; 32]> = None; - let mut claimable_htlc_purposes = None; - let mut claimable_htlc_onion_fields = None; let mut pending_claiming_payments = Some(new_hash_map()); let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new()); @@ -17227,25 +16855,6 @@ where let mut inbound_payment_id_secret = None; let mut peer_storage_dir: Option<Vec<(PublicKey, Vec<u8>)>> = None; let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new(); - read_tlv_fields!(reader, { - (1, pending_outbound_payments_no_retry, option), - (2, pending_intercepted_htlcs, option), - (3, pending_outbound_payments, option), - (4, pending_claiming_payments, option), - (5, received_network_pubkey, option), - (6, monitor_update_blocked_actions_per_peer, option), - (7, fake_scid_rand_bytes, option), - (8, events_override, option), - (9, claimable_htlc_purposes, optional_vec), - (10, legacy_in_flight_monitor_updates, option), - (11, probing_cookie_secret, option), - (13, claimable_htlc_onion_fields, optional_vec), - (14, decode_update_add_htlcs, option), - (15, inbound_payment_id_secret, option), - (17, in_flight_monitor_updates, option), - (19, peer_storage_dir, optional_vec), - (21, async_receive_offer_cache, (default_value, async_receive_offer_cache)), - }); let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map()); let peer_storage_dir: Vec<(PublicKey, Vec<u8>)> = peer_storage_dir.unwrap_or_else(Vec::new); if fake_scid_rand_bytes.is_none() { @@ -17631,7 +17240,7 @@ where }); !update_add_htlcs.is_empty() }); - forward_htlcs.retain(|_, forwards| { + forward_htlcs.retain(|_, forwards: &mut Vec<HTLCForwardInfo>| { forwards.retain(|forward| { if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { if pending_forward_matches_htlc(&htlc_info) { @@ -17869,74 +17478,8 @@ where let expanded_inbound_key = args.node_signer.get_expanded_key(); - let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len()); - if let Some(purposes) = claimable_htlc_purposes { - if purposes.len() != claimable_htlcs_list.len() { - return Err(DecodeError::InvalidValue); - } - if let Some(onion_fields) = claimable_htlc_onion_fields { - if onion_fields.len() != claimable_htlcs_list.len() { - return Err(DecodeError::InvalidValue); - } - for (purpose, (onion, (payment_hash, htlcs))) in purposes - .into_iter() - .zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter())) - { - let claimable = ClaimablePayment { purpose, htlcs, onion_fields: onion }; - let existing_payment = claimable_payments.insert(payment_hash, claimable); - if existing_payment.is_some() { - return Err(DecodeError::InvalidValue); - } - } - } else { - for (purpose, (payment_hash, htlcs)) in - purposes.into_iter().zip(claimable_htlcs_list.into_iter()) - { - let claimable = ClaimablePayment { purpose, htlcs, onion_fields: None }; - let existing_payment = claimable_payments.insert(payment_hash, claimable); - if existing_payment.is_some() { - return Err(DecodeError::InvalidValue); - } - } - } - } else { - // LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but do - // include a `_legacy_hop_data` in the `OnionPayload`. - for (payment_hash, htlcs) in claimable_htlcs_list.drain(..) 
{ - if htlcs.is_empty() { - return Err(DecodeError::InvalidValue); - } - let purpose = match &htlcs[0].onion_payload { - OnionPayload::Invoice { _legacy_hop_data } => { - if let Some(hop_data) = _legacy_hop_data { - events::PaymentPurpose::Bolt11InvoicePayment { - payment_preimage: match inbound_payment::verify( - payment_hash, - &hop_data, - 0, - &expanded_inbound_key, - &args.logger, - ) { - Ok((payment_preimage, _)) => payment_preimage, - Err(()) => { - log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash); - return Err(DecodeError::InvalidValue); - }, - }, - payment_secret: hop_data.payment_secret, - } - } else { - return Err(DecodeError::InvalidValue); - } - }, - OnionPayload::Spontaneous(payment_preimage) => { - events::PaymentPurpose::SpontaneousPayment(*payment_preimage) - }, - }; - claimable_payments - .insert(payment_hash, ClaimablePayment { purpose, htlcs, onion_fields: None }); - } - } + let mut claimable_payments: hash_map::HashMap<PaymentHash, ClaimablePayment> = + new_hash_map(); // Similar to the above cases for forwarded payments, if we have any pending inbound HTLCs // which haven't yet been claimed, we may be missing counterparty_node_id info and would
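The invariant this series establishes is that the channel state embedded in a `ChannelMonitor` must always be at least as advanced as the monitor itself. To make the direction of the assertions in `check_encoded_channel_consistency` easier to follow, here is a minimal, hypothetical sketch (not part of the patches) that restates the same checks as a `Result` instead of `debug_assert!`s. It assumes the `safe_channels` feature and crate-internal access, since `read_check_data` lives in the private `ln::channel` module, and it assumes `ChannelMonitorImpl` keeps its `Signer` generic; every function and field it touches is introduced or retained by the patches above.

#[cfg(feature = "safe_channels")]
fn embedded_channel_is_consistent<Signer: EcdsaChannelSigner>(
	monitor: &ChannelMonitorImpl<Signer>,
) -> Result<(), &'static str> {
	let encoded = match monitor.encoded_channel {
		// No embedded channel (e.g. cleared on channel shutdown): nothing to check.
		None => return Ok(()),
		Some(ref bytes) => bytes,
	};
	let check = read_check_data(&mut &encoded[..]).map_err(|_| "unreadable embedded channel")?;
	// Commitment transaction numbers start at INITIAL_COMMITMENT_NUMBER (2^48 - 1)
	// and count down as the channel advances, so a *lower* number means a *more*
	// advanced state: the embedded channel must be at least as advanced as the
	// monitor on every counter...
	if check.cur_holder_commitment_transaction_number > monitor.get_cur_holder_commitment_number() {
		return Err("channel is behind monitor: holder commitment number");
	}
	if check.revoked_counterparty_commitment_transaction_number > monitor.get_min_seen_secret() {
		return Err("channel is behind monitor: revoked counterparty commitment number");
	}
	if check.cur_counterparty_commitment_transaction_number
		> monitor.get_cur_counterparty_commitment_number()
	{
		return Err("channel is behind monitor: counterparty commitment number");
	}
	// ...while monitor update_ids count up, so the channel's recorded id must not lag.
	if check.latest_monitor_update_id < monitor.get_latest_update_id() {
		return Err("channel is behind monitor: update_id");
	}
	Ok(())
}

A channel that is ahead of its monitor is acceptable, since updates are applied to the channel first and may sit in `blocked_monitor_updates` before reaching the monitor. A monitor ahead of its channel, however, is exactly the stale-`ChannelManager` condition that the pre-`safe_channels` reload path force-closes on, which is why the checks here, like the `debug_assert!`s they mirror, all point the same way.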