From 79c8707ecebec430fb867808f6793fc79dd1f3b4 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 11 May 2026 14:02:44 +0200 Subject: [PATCH 01/30] fuzz: model chanmon persistence in harness Replace the chanmon consistency harness' Watch wrapper with a Persist implementation backed by HarnessPersister. Monitor writes now flow through the real ChainMonitor persistence hooks. Track restart candidates separately from monitor completion callbacks. A monitor can stop being a valid reload candidate once a newer baseline is durable, while its callback may still be needed to unblock the live ChainMonitor. On reload, choose the durable baseline, first pending snapshot, or last pending snapshot. Startup monitor registration completes immediately before the configured persistence style is restored. --- fuzz/src/chanmon_consistency.rs | 567 ++++++++++++++++++++------------ 1 file changed, 348 insertions(+), 219 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 4a182c33beb..aadc58ff4ce 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -41,8 +41,7 @@ use lightning::chain; use lightning::chain::chaininterface::{ BroadcasterInterface, ConfirmationTarget, FeeEstimator, TransactionType, }; -use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent}; -use lightning::chain::transaction::OutPoint; +use lightning::chain::channelmonitor::ChannelMonitor; use lightning::chain::{ chainmonitor, channelmonitor, BlockLocator, ChannelMonitorUpdateStatus, Confirm, Watch, }; @@ -87,7 +86,6 @@ use lightning::util::wallet_utils::{WalletSourceSync, WalletSync}; use lightning_invoice::RawBolt11Invoice; use crate::utils::test_logger::{self, Output}; -use crate::utils::test_persister::TestPersister; use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature}; @@ -293,144 +291,302 @@ impl Writer for VecWriter { } } -/// The LDK API requires that any time we tell it we're done persisting a `ChannelMonitor[Update]` -/// we never pass it in as the "latest" `ChannelMonitor` on startup. However, we can pass -/// out-of-date monitors as long as we never told LDK we finished persisting them, which we do by -/// storing both old `ChannelMonitor`s and ones that are "being persisted" here. +fn serialize_monitor(monitor: &ChannelMonitor) -> Vec { + let mut ser = VecWriter(Vec::new()); + monitor.write(&mut ser).unwrap(); + ser.0 +} + +/// LDK requires the `ChannelMonitor` loaded on startup to be at least as current as the +/// `ChannelManager` state, except for monitor updates that `ChannelManager` still records as +/// in-flight and can replay. This harness tracks the monitor blobs that remain valid restart +/// candidates under that rule. /// -/// Note that such "being persisted" `ChannelMonitor`s are stored in `ChannelManager` and will -/// simply be replayed on startup. +/// Separately, we track every `InProgress` persistence operation that still needs a +/// `channel_monitor_updated` call. A newer persisted monitor can make an older monitor invalid for +/// restart while the older update still needs to be completed to unblock the live `ChainMonitor`. +/// +/// Off-chain monitor updates that are still "being persisted" are stored in `ChannelManager` and +/// will be replayed on startup. Full-monitor snapshots from chain sync or archive paths that return +/// `InProgress` are only restart candidates; losing one on restart does not require a +/// `channel_monitor_updated` callback. 
struct LatestMonitorState { /// The latest monitor id which we told LDK we've persisted. /// - /// Note that there may still be earlier pending monitor updates in [`Self::pending_monitors`] - /// which we haven't yet completed. We're allowed to reload with those as well, at least until - /// they're completed. + /// Note that earlier updates may still need a `channel_monitor_updated` callback via + /// [`Self::pending_monitor_completions`]. persisted_monitor_id: u64, /// The latest serialized `ChannelMonitor` that we told LDK we persisted. persisted_monitor: Vec, - /// A set of (monitor id, serialized `ChannelMonitor`)s which we're currently "persisting", - /// from LDK's perspective. + /// An ordered list of (monitor id, serialized `ChannelMonitor`)s which remain safe to use as + /// stale monitors on reload. pending_monitors: Vec<(u64, Vec)>, + /// An ordered list of (monitor id, serialized `ChannelMonitor`)s which still need a + /// `channel_monitor_updated` callback. + pending_monitor_completions: Vec<(u64, Vec)>, } +impl LatestMonitorState { + fn insert_pending_entry( + pending: &mut Vec<(u64, Vec)>, monitor_id: u64, serialized_monitor: Vec, + ) { + // Monitor update ids must arrive in order. Assert at insertion time so duplicates or + // out-of-order updates fail close to the write that caused them instead of being sorted + // into place. + assert!( + pending.last().map_or(true, |(last_id, _)| *last_id < monitor_id), + "pending monitor updates should arrive in order" + ); + pending.push((monitor_id, serialized_monitor)); + } -struct TestChainMonitor { - pub logger: Arc, - pub keys: Arc, - pub persister: Arc, - pub chain_monitor: Arc< - chainmonitor::ChainMonitor< - TestChannelSigner, - Arc, - Arc, - Arc, - Arc, - Arc, - Arc, - >, - >, - pub latest_monitors: Mutex>, -} -impl TestChainMonitor { - pub fn new( - broadcaster: Arc, logger: Arc, feeest: Arc, - persister: Arc, keys: Arc, - ) -> Self { - Self { - chain_monitor: Arc::new(chainmonitor::ChainMonitor::new( - None, - broadcaster, - logger.clone(), - feeest, - Arc::clone(&persister), - Arc::clone(&keys), - keys.get_peer_storage_key(), - false, - )), - logger, - keys, - persister, - latest_monitors: Mutex::new(new_hash_map()), + fn insert_pending_monitor_candidate(&mut self, monitor_id: u64, serialized_monitor: Vec) { + // Full-monitor persists from chain sync or archive paths use the monitor's current + // latest_update_id rather than a fresh ChannelMonitorUpdate id. Keep duplicate ids so + // reload can choose between multiple same-id full snapshots that were in flight together. 
+ if let Some((last_id, _)) = self.pending_monitors.last() { + assert!(*last_id <= monitor_id, "pending monitor updates should arrive in order"); } + self.pending_monitors.push((monitor_id, serialized_monitor)); } -} -impl chain::Watch for TestChainMonitor { - fn watch_channel( - &self, channel_id: ChannelId, monitor: channelmonitor::ChannelMonitor, - ) -> Result { - let mut ser = VecWriter(Vec::new()); - monitor.write(&mut ser).unwrap(); - let monitor_id = monitor.get_latest_update_id(); - let res = self.chain_monitor.watch_channel(channel_id, monitor); - let state = match res { - Ok(chain::ChannelMonitorUpdateStatus::Completed) => LatestMonitorState { - persisted_monitor_id: monitor_id, - persisted_monitor: ser.0, - pending_monitors: Vec::new(), - }, - Ok(chain::ChannelMonitorUpdateStatus::InProgress) => LatestMonitorState { - persisted_monitor_id: monitor_id, - persisted_monitor: Vec::new(), - pending_monitors: vec![(monitor_id, ser.0)], - }, - Ok(chain::ChannelMonitorUpdateStatus::UnrecoverableError) => panic!(), - Err(()) => panic!(), - }; - if self.latest_monitors.lock().unwrap().insert(channel_id, state).is_some() { - panic!("Already had monitor pre-watch_channel"); + + fn mark_persisted(&mut self, monitor_id: u64, serialized_monitor: Vec) { + // Once a monitor is durable, use it as the restart baseline and stop tracking candidates + // at or behind that update id. Completion obligations are tracked separately and are + // deliberately not pruned here. + self.pending_monitors.retain(|(id, _)| *id > monitor_id); + if monitor_id >= self.persisted_monitor_id { + self.persisted_monitor_id = monitor_id; + self.persisted_monitor = serialized_monitor; } - res } - fn update_channel( - &self, channel_id: ChannelId, update: &channelmonitor::ChannelMonitorUpdate, - ) -> chain::ChannelMonitorUpdateStatus { - let mut map_lock = self.latest_monitors.lock().unwrap(); - let map_entry = map_lock.get_mut(&channel_id).expect("Didn't have monitor on update call"); - let latest_monitor_data = map_entry - .pending_monitors - .last() - .as_ref() - .map(|(_, data)| data) - .unwrap_or(&map_entry.persisted_monitor); - let deserialized_monitor = - <(BlockLocator, channelmonitor::ChannelMonitor)>::read( - &mut &latest_monitor_data[..], - (&*self.keys, &*self.keys), - ) - .unwrap() - .1; - deserialized_monitor - .update_monitor( - update, - &&TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }, - &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, - &self.logger, - ) - .unwrap(); - let mut ser = VecWriter(Vec::new()); - deserialized_monitor.write(&mut ser).unwrap(); - let res = self.chain_monitor.update_channel(channel_id, update); - match res { - chain::ChannelMonitorUpdateStatus::Completed => { - map_entry.persisted_monitor_id = update.update_id; - map_entry.persisted_monitor = ser.0; + fn insert_pending( + &mut self, monitor_id: u64, serialized_monitor: Vec, needs_completion: bool, + ) { + if needs_completion { + // persist_new_channel and update_persisted_channel(Some(_)) require a later + // channel_monitor_updated callback if persistence returns InProgress. + Self::insert_pending_entry( + &mut self.pending_monitors, + monitor_id, + serialized_monitor.clone(), + ); + Self::insert_pending_entry( + &mut self.pending_monitor_completions, + monitor_id, + serialized_monitor, + ); + } else { + // This harness treats update_persisted_channel(None, ...) 
as the chain-sync/archive + // case: the full monitor may be used on restart, but ChainMonitor does not wait for a + // channel_monitor_updated callback. + self.insert_pending_monitor_candidate(monitor_id, serialized_monitor); + } + } + + fn mark_completed_update_persisted(&mut self, monitor_id: u64, serialized_monitor: Vec) { + // The selector/drain path should already have removed this entry before + // finish_monitor_update calls channel_monitor_updated. This check catches accidental + // double-completion or pruning of the wrong list. + assert!( + self.pending_monitor_completions.iter().all(|(id, _)| *id != monitor_id), + "completed monitor update should already be removed from the completion queue" + ); + self.mark_persisted(monitor_id, serialized_monitor); + } + + fn drain_pending_completions(&mut self) -> Vec<(u64, Vec)> { + std::mem::take(&mut self.pending_monitor_completions) + } + + fn take_pending_completion( + &mut self, selector: MonitorUpdateSelector, + ) -> Option<(u64, Vec)> { + // The fuzzer chooses which outstanding callback to deliver. These choices apply to + // completion obligations, not to the set of monitors that may be used on restart. + match selector { + MonitorUpdateSelector::First => { + if self.pending_monitor_completions.is_empty() { + None + } else { + Some(self.pending_monitor_completions.remove(0)) + } }, - chain::ChannelMonitorUpdateStatus::InProgress => { - map_entry.pending_monitors.push((update.update_id, ser.0)); + MonitorUpdateSelector::Second => { + if self.pending_monitor_completions.len() > 1 { + Some(self.pending_monitor_completions.remove(1)) + } else { + None + } }, - chain::ChannelMonitorUpdateStatus::UnrecoverableError => panic!(), + MonitorUpdateSelector::Last => self.pending_monitor_completions.pop(), } - res } - fn release_pending_monitor_events( - &self, - ) -> Vec<(OutPoint, ChannelId, Vec, PublicKey)> { - return self.chain_monitor.release_pending_monitor_events(); + fn select_monitor_for_reload(&mut self, selector: MonitorReloadSelector) { + // A restart can load the last monitor we told LDK was persisted, or a monitor snapshot + // whose write was started before the simulated crash. + let old_mon = (self.persisted_monitor_id, std::mem::take(&mut self.persisted_monitor)); + let (monitor_id, serialized_monitor) = match selector { + MonitorReloadSelector::Persisted => old_mon, + MonitorReloadSelector::FirstPending => { + if self.pending_monitors.is_empty() { + old_mon + } else { + self.pending_monitors.remove(0) + } + }, + MonitorReloadSelector::LastPending => self.pending_monitors.pop().unwrap_or(old_mon), + }; + self.persisted_monitor_id = monitor_id; + self.persisted_monitor = serialized_monitor; + // After restart, stop tracking pre-restart in-flight writes. ChannelManager will replay + // off-chain monitor updates that still matter; full-monitor snapshots may simply be absent. + self.pending_monitors.clear(); + self.pending_monitor_completions.clear(); + } +} + +struct HarnessPersister { + pub update_ret: Mutex, + pub latest_monitors: Mutex>, +} +impl HarnessPersister { + fn track_monitor_update( + &self, channel_id: ChannelId, monitor_id: u64, serialized_monitor: Vec, + status: chain::ChannelMonitorUpdateStatus, needs_completion: bool, + ) { + let mut latest_monitors = self.latest_monitors.lock().unwrap(); + if let Some(state) = latest_monitors.get_mut(&channel_id) { + match status { + chain::ChannelMonitorUpdateStatus::Completed => { + // A completed write advances the restart baseline. 
Once LDK can rely on that + // monitor state being durable, the harness stops offering candidates at or + // behind that update id. + state.mark_persisted(monitor_id, serialized_monitor); + }, + chain::ChannelMonitorUpdateStatus::InProgress => { + // InProgress always creates a restart candidate, but only some calls also need + // an explicit channel_monitor_updated completion. + state.insert_pending(monitor_id, serialized_monitor, needs_completion); + }, + chain::ChannelMonitorUpdateStatus::UnrecoverableError => {}, + } + } else { + let state = match status { + chain::ChannelMonitorUpdateStatus::Completed => LatestMonitorState { + persisted_monitor_id: monitor_id, + persisted_monitor: serialized_monitor, + pending_monitors: Vec::new(), + pending_monitor_completions: Vec::new(), + }, + chain::ChannelMonitorUpdateStatus::InProgress => { + // The first persist for a channel is persist_new_channel, which always needs a + // completion callback when it returns InProgress. A full-monitor update without + // existing state would mean the harness missed the channel's initial monitor. + assert!(needs_completion, "missing monitor state for full monitor update"); + LatestMonitorState { + persisted_monitor_id: monitor_id, + persisted_monitor: Vec::new(), + pending_monitors: vec![(monitor_id, serialized_monitor.clone())], + pending_monitor_completions: vec![(monitor_id, serialized_monitor)], + } + }, + chain::ChannelMonitorUpdateStatus::UnrecoverableError => return, + }; + assert!( + latest_monitors.insert(channel_id, state).is_none(), + "Already had monitor state pre-persist" + ); + } + } + + fn mark_update_completed( + &self, channel_id: ChannelId, monitor_id: u64, serialized_monitor: Vec, + ) { + let mut latest_monitors = self.latest_monitors.lock().unwrap(); + let state = latest_monitors + .get_mut(&channel_id) + .expect("missing monitor state for completed update"); + // Once we tell LDK update N is completed, use the completed monitor as the restart + // baseline and drop restart candidates at or behind N. 
+ state.mark_completed_update_persisted(monitor_id, serialized_monitor); + } + + fn drain_pending_updates(&self, channel_id: &ChannelId) -> Vec<(u64, Vec)> { + self.latest_monitors + .lock() + .unwrap() + .get_mut(channel_id) + .map_or_else(Vec::new, |state| state.drain_pending_completions()) + } + + fn drain_all_pending_updates(&self) -> Vec<(ChannelId, u64, Vec)> { + let mut completed_updates = Vec::new(); + for (channel_id, state) in self.latest_monitors.lock().unwrap().iter_mut() { + for (monitor_id, data) in state.drain_pending_completions() { + completed_updates.push((*channel_id, monitor_id, data)); + } + } + completed_updates + } + + fn take_pending_update( + &self, channel_id: &ChannelId, selector: MonitorUpdateSelector, + ) -> Option<(u64, Vec)> { + self.latest_monitors + .lock() + .unwrap() + .get_mut(channel_id) + .and_then(|state| state.take_pending_completion(selector)) + } +} +impl chainmonitor::Persist for HarnessPersister { + fn persist_new_channel( + &self, _monitor_name: lightning::util::persist::MonitorName, + data: &channelmonitor::ChannelMonitor, + ) -> chain::ChannelMonitorUpdateStatus { + let status = self.update_ret.lock().unwrap().clone(); + let monitor_id = data.get_latest_update_id(); + let serialized_monitor = serialize_monitor(data); + self.track_monitor_update(data.channel_id(), monitor_id, serialized_monitor, status, true); + status + } + + fn update_persisted_channel( + &self, _monitor_name: lightning::util::persist::MonitorName, + update: Option<&channelmonitor::ChannelMonitorUpdate>, + data: &channelmonitor::ChannelMonitor, + ) -> chain::ChannelMonitorUpdateStatus { + let status = self.update_ret.lock().unwrap().clone(); + let monitor_id = update.map_or_else(|| data.get_latest_update_id(), |upd| upd.update_id); + let serialized_monitor = serialize_monitor(data); + self.track_monitor_update( + data.channel_id(), + monitor_id, + serialized_monitor, + status, + // `None` normally comes from chain-sync or archive writes, which need no completion + // callback. `update_channel_internal` can also use `None` after `update_monitor` + // fails, but this harness does not model that error-recovery path. 
+ update.is_some(), + ); + status } + + fn archive_persisted_channel(&self, _monitor_name: lightning::util::persist::MonitorName) {} } +type TestChainMonitor = chainmonitor::ChainMonitor< + TestChannelSigner, + Arc, + Arc, + Arc, + Arc, + Arc, + Arc, +>; + struct KeyProvider { node_secret: SecretKey, rand_bytes_id: atomic::AtomicU32, @@ -654,6 +810,7 @@ struct HarnessNode<'a> { node_id: u8, node: ChanMan<'a>, monitor: Arc, + persister: Arc, keys_manager: Arc, logger: Arc, broadcaster: Arc, @@ -674,26 +831,33 @@ impl<'a> std::ops::Deref for HarnessNode<'a> { } impl<'a> HarnessNode<'a> { - fn build_loggers( + fn build_logger( node_id: u8, out: &Out, - ) -> (Arc, Arc) { - let raw_logger = Arc::new(test_logger::TestLogger::new(node_id.to_string(), out.clone())); - let logger_for_monitor: Arc = raw_logger.clone(); - let logger: Arc = raw_logger; - (logger_for_monitor, logger) + ) -> Arc { + Arc::new(test_logger::TestLogger::new(node_id.to_string(), out.clone())) + } + + fn build_persister(persistence_style: ChannelMonitorUpdateStatus) -> Arc { + Arc::new(HarnessPersister { + update_ret: Mutex::new(persistence_style), + latest_monitors: Mutex::new(new_hash_map()), + }) } fn build_chain_monitor( broadcaster: &Arc, fee_estimator: &Arc, - keys_manager: &Arc, logger_for_monitor: Arc, - persistence_style: ChannelMonitorUpdateStatus, + keys_manager: &Arc, logger: Arc, + persister: &Arc, ) -> Arc { - Arc::new(TestChainMonitor::new( + Arc::new(chainmonitor::ChainMonitor::new( + None, Arc::clone(broadcaster), - logger_for_monitor, + logger, Arc::clone(fee_estimator), - Arc::new(TestPersister { update_ret: Mutex::new(persistence_style) }), + Arc::clone(persister), Arc::clone(keys_manager), + keys_manager.get_peer_storage_key(), + false, )) } @@ -702,7 +866,7 @@ impl<'a> HarnessNode<'a> { broadcaster: Arc, persistence_style: ChannelMonitorUpdateStatus, out: &Out, router: &'a FuzzRouter, chan_type: ChanType, ) -> Self { - let (logger_for_monitor, logger) = Self::build_loggers(node_id, out); + let logger = Self::build_logger(node_id, out); let node_secret = SecretKey::from_slice(&[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, node_id, @@ -713,12 +877,13 @@ impl<'a> HarnessNode<'a> { rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(new_hash_map()), }); + let persister = Self::build_persister(persistence_style); let monitor = Self::build_chain_monitor( &broadcaster, &fee_estimator, &keys_manager, - logger_for_monitor, - persistence_style, + Arc::clone(&logger), + &persister, ); let network = Network::Bitcoin; let best_block_timestamp = genesis_block(network).header.time; @@ -741,6 +906,7 @@ impl<'a> HarnessNode<'a> { node_id, node, monitor, + persister, keys_manager, logger, broadcaster, @@ -754,67 +920,31 @@ impl<'a> HarnessNode<'a> { } fn set_persistence_style(&mut self, style: ChannelMonitorUpdateStatus) { + // Store the style for the next reload. The active persister is intentionally not changed + // in place. 
self.persistence_style = style; } + fn finish_monitor_update(&self, chan_id: ChannelId, monitor_id: u64, data: Vec) { + self.monitor.channel_monitor_updated(chan_id, monitor_id).unwrap(); + self.persister.mark_update_completed(chan_id, monitor_id, data); + } + fn complete_all_monitor_updates(&self, chan_id: &ChannelId) { - if let Some(state) = self.monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { - assert!( - state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), - "updates should be sorted by id" - ); - for (id, data) in state.pending_monitors.drain(..) { - self.monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); - if id > state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } - } + for (monitor_id, data) in self.persister.drain_pending_updates(chan_id) { + self.finish_monitor_update(*chan_id, monitor_id, data); } } fn complete_all_pending_monitor_updates(&self) { - for (channel_id, state) in self.monitor.latest_monitors.lock().unwrap().iter_mut() { - for (id, data) in state.pending_monitors.drain(..) { - self.monitor.chain_monitor.channel_monitor_updated(*channel_id, id).unwrap(); - if id >= state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } - } + for (channel_id, monitor_id, data) in self.persister.drain_all_pending_updates() { + self.finish_monitor_update(channel_id, monitor_id, data); } } fn complete_monitor_update(&self, chan_id: &ChannelId, selector: MonitorUpdateSelector) { - if let Some(state) = self.monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { - assert!( - state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), - "updates should be sorted by id" - ); - let update = match selector { - MonitorUpdateSelector::First => { - if state.pending_monitors.is_empty() { - None - } else { - Some(state.pending_monitors.remove(0)) - } - }, - MonitorUpdateSelector::Second => { - if state.pending_monitors.len() > 1 { - Some(state.pending_monitors.remove(1)) - } else { - None - } - }, - MonitorUpdateSelector::Last => state.pending_monitors.pop(), - }; - if let Some((id, data)) = update { - self.monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); - if id > state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } - } + if let Some((monitor_id, data)) = self.persister.take_pending_update(chan_id, selector) { + self.finish_monitor_update(*chan_id, monitor_id, data); } } @@ -942,50 +1072,39 @@ impl<'a> HarnessNode<'a> { fn reload( &mut self, use_old_mons: u8, out: &Out, router: &'a FuzzRouter, chan_type: ChanType, ) { - let (logger_for_monitor, logger) = Self::build_loggers(self.node_id, out); + let logger = Self::build_logger(self.node_id, out); + // Re-registering monitors during reload reflects data that was already selected from + // simulated storage, so these startup watch_channel calls should complete immediately. 
+ let persister = Self::build_persister(ChannelMonitorUpdateStatus::Completed); let chain_monitor = Self::build_chain_monitor( &self.broadcaster, &self.fee_estimator, &self.keys_manager, - logger_for_monitor, - ChannelMonitorUpdateStatus::Completed, + Arc::clone(&logger), + &persister, ); let mut monitors = new_hash_map(); let mut use_old_mons = use_old_mons; { - let mut old_monitors = self.monitor.latest_monitors.lock().unwrap(); + let mut old_monitors = self.persister.latest_monitors.lock().unwrap(); for (channel_id, mut prev_state) in old_monitors.drain() { - let (mon_id, serialized_mon) = if use_old_mons % 3 == 0 { - // Reload with the oldest `ChannelMonitor` (the one that we already told - // `ChannelManager` we finished persisting). - (prev_state.persisted_monitor_id, prev_state.persisted_monitor) - } else if use_old_mons % 3 == 1 { - // Reload with the second-oldest `ChannelMonitor`. - let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); - prev_state.pending_monitors.drain(..).next().unwrap_or(old_mon) - } else { - // Reload with the newest `ChannelMonitor`. - let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); - prev_state.pending_monitors.pop().unwrap_or(old_mon) + let selector = match use_old_mons % 3 { + 0 => MonitorReloadSelector::Persisted, + 1 => MonitorReloadSelector::FirstPending, + _ => MonitorReloadSelector::LastPending, }; - // Use a different value of `use_old_mons` if we have another monitor - // (only for node B) by shifting `use_old_mons` one in base-3. + prev_state.select_monitor_for_reload(selector); + // Use a different trit for each monitor so one restart byte can vary the stale + // monitor depth across multiple monitors for the node. use_old_mons /= 3; let mon = <(BlockLocator, ChannelMonitor)>::read( - &mut &serialized_mon[..], + &mut &prev_state.persisted_monitor[..], (&*self.keys_manager, &*self.keys_manager), ) .expect("Failed to read monitor"); monitors.insert(channel_id, mon.1); - // Update the latest `ChannelMonitor` state to match what we just told LDK. - prev_state.persisted_monitor = serialized_mon; - prev_state.persisted_monitor_id = mon_id; - // Wipe any `ChannelMonitor`s which we never told LDK we finished persisting, - // considering them discarded. LDK should replay these for us as they're stored in - // the `ChannelManager`. - prev_state.pending_monitors.clear(); - chain_monitor.latest_monitors.lock().unwrap().insert(channel_id, prev_state); + persister.latest_monitors.lock().unwrap().insert(channel_id, prev_state); } } let mut monitor_refs = new_hash_map(); @@ -1011,17 +1130,27 @@ impl<'a> HarnessNode<'a> { .expect("Failed to read manager"); for (channel_id, mon) in monitors.drain() { assert_eq!( - chain_monitor.chain_monitor.watch_channel(channel_id, mon), + chain_monitor.watch_channel(channel_id, mon), Ok(ChannelMonitorUpdateStatus::Completed) ); } - *chain_monitor.persister.update_ret.lock().unwrap() = self.persistence_style; + // Future monitor writes should follow the node's configured persistence style; only the + // startup watch_channel registration above is forced to Completed. 
+ *persister.update_ret.lock().unwrap() = self.persistence_style; self.node = manager.1; self.monitor = chain_monitor; + self.persister = persister; self.logger = logger; } } +#[derive(Copy, Clone)] +enum MonitorReloadSelector { + Persisted, + FirstPending, + LastPending, +} + #[derive(Copy, Clone)] enum MonitorUpdateSelector { First, @@ -1921,7 +2050,7 @@ fn make_channel( } }; dest.handle_funding_created(source.get_our_node_id(), &funding_created); - // Complete any pending monitor updates for dest after watch_channel. + // Complete any pending monitor persistence callbacks for dest after watch_channel. dest.complete_all_pending_monitor_updates(); let (funding_signed, channel_id) = { @@ -1942,7 +2071,7 @@ fn make_channel( } source.handle_funding_signed(dest.get_our_node_id(), &funding_signed); - // Complete any pending monitor updates for source after watch_channel. + // Complete any pending monitor persistence callbacks for source after watch_channel. source.complete_all_pending_monitor_updates(); let events = source.get_and_clear_pending_events(); @@ -2620,7 +2749,7 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { "It may take may iterations to settle the state, but it should not take forever" ); } - // Next, make sure no monitor updates are pending. + // Next, make sure no monitor completion callbacks are pending. self.ab_link.complete_all_monitor_updates(&self.nodes); self.bc_link.complete_all_monitor_updates(&self.nodes); // Then, make sure any current forwards make their way to their destination. @@ -3019,18 +3148,18 @@ pub fn do_test(data: &[u8], out: Out) { }, 0xb0 | 0xb1 | 0xb2 => { - // Restart node A, picking among the in-flight `ChannelMonitor`s to use based on - // the value of `v` we're matching. + // Restart node A, picking among persisted and in-flight `ChannelMonitor` + // candidates based on the value of `v` we're matching. harness.restart_node(0, v, &router); }, 0xb3..=0xbb => { - // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on - // the value of `v` we're matching. + // Restart node B, picking among persisted and in-flight `ChannelMonitor` + // candidates based on the value of `v` we're matching. harness.restart_node(1, v, &router); }, 0xbc | 0xbd | 0xbe => { - // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on - // the value of `v` we're matching. + // Restart node C, picking among persisted and in-flight `ChannelMonitor` + // candidates based on the value of `v` we're matching. harness.restart_node(2, v, &router); }, From 64dfdcb4183ebe118471d7e244238a4c986e1cde Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 13 May 2026 13:52:16 +0200 Subject: [PATCH 02/30] fuzz: keep settling after progress-only passes Treat HTLC-forward processing and monitor completion as real progress in the chanmon harness. This keeps the settle loop running after passes that only unblock follow-up work instead of stopping before the next event or message batch. 
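Roughly, the loop shape this aims for is sketched below. The two closures are
placeholders for the harness' per-pass helpers (message delivery, monitor
completion, HTLC forwarding, manager persistence), not the harness API itself:

    fn settle(
        mut deliver_msgs: impl FnMut() -> bool, mut complete_monitors: impl FnMut() -> bool,
    ) {
        loop {
            let mut made_progress = false;
            // `|=` rather than `||` so every pass still runs once progress has been
            // observed; a pass that only unblocks follow-up work (a monitor completion
            // or a queued HTLC forward) forces another iteration on its own.
            made_progress |= deliver_msgs();
            made_progress |= complete_monitors();
            if !made_progress {
                break;
            }
        }
    }

The real settle loop additionally asserts that settling terminates, but the exit
condition is the same: stop only after a full pass in which nothing happened.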
--- fuzz/src/chanmon_consistency.rs | 39 +++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 12 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index aadc58ff4ce..7a003c3bd61 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -930,10 +930,13 @@ impl<'a> HarnessNode<'a> { self.persister.mark_update_completed(chan_id, monitor_id, data); } - fn complete_all_monitor_updates(&self, chan_id: &ChannelId) { - for (monitor_id, data) in self.persister.drain_pending_updates(chan_id) { + fn complete_all_monitor_updates(&self, chan_id: &ChannelId) -> bool { + let completed_updates = self.persister.drain_pending_updates(chan_id); + let completed_any = !completed_updates.is_empty(); + for (monitor_id, data) in completed_updates { self.finish_monitor_update(*chan_id, monitor_id, data); } + completed_any } fn complete_all_pending_monitor_updates(&self) { @@ -966,9 +969,12 @@ impl<'a> HarnessNode<'a> { } } - fn refresh_serialized_manager(&mut self) { + fn refresh_serialized_manager(&mut self) -> bool { if self.node.get_and_clear_needs_persistence() { self.serialized_manager = self.node.encode(); + true + } else { + false } } @@ -1362,11 +1368,13 @@ impl PeerLink { || (self.node_a == node_b && self.node_b == node_a) } - fn complete_all_monitor_updates(&self, nodes: &[HarnessNode<'_>; 3]) { + fn complete_all_monitor_updates(&self, nodes: &[HarnessNode<'_>; 3]) -> bool { + let mut completed_updates = false; for id in &self.channel_ids { - nodes[self.node_a].complete_all_monitor_updates(id); - nodes[self.node_b].complete_all_monitor_updates(id); + completed_updates |= nodes[self.node_a].complete_all_monitor_updates(id); + completed_updates |= nodes[self.node_b].complete_all_monitor_updates(id); } + completed_updates } fn complete_monitor_updates_for_node( @@ -2143,7 +2151,6 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { ChannelMonitorUpdateStatus::Completed }, ]; - let wallet_a = TestWalletSource::new(SecretKey::from_slice(&[1; 32]).unwrap()); let wallet_b = TestWalletSource::new(SecretKey::from_slice(&[2; 32]).unwrap()); let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); @@ -2671,7 +2678,7 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { // claim/fail handling per event batch. let mut claim_set = new_hash_map(); let mut events = nodes[node_idx].get_and_clear_pending_events(); - let had_events = !events.is_empty(); + let mut had_events = !events.is_empty(); for event in events.drain(..) { match event { events::Event::PaymentClaimable { payment_hash, .. } => { @@ -2727,6 +2734,7 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { } while nodes[node_idx].needs_pending_htlc_processing() { nodes[node_idx].process_pending_htlc_forwards(); + had_events = true; } had_events } @@ -2749,9 +2757,10 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { "It may take may iterations to settle the state, but it should not take forever" ); } + let mut made_progress = self.refresh_serialized_managers(); // Next, make sure no monitor completion callbacks are pending. - self.ab_link.complete_all_monitor_updates(&self.nodes); - self.bc_link.complete_all_monitor_updates(&self.nodes); + made_progress |= self.ab_link.complete_all_monitor_updates(&self.nodes); + made_progress |= self.bc_link.complete_all_monitor_updates(&self.nodes); // Then, make sure any current forwards make their way to their destination. 
if self.process_msg_events(0, false, ProcessMessages::AllMessages) { last_pass_no_updates = false; @@ -2778,6 +2787,10 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { last_pass_no_updates = false; continue; } + if made_progress { + last_pass_no_updates = false; + continue; + } if last_pass_no_updates { // In some cases, we may generate a message to send in // `process_msg_events`, but block sending until @@ -2893,10 +2906,12 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { self.nodes[2].record_last_htlc_clear_fee(); } - fn refresh_serialized_managers(&mut self) { + fn refresh_serialized_managers(&mut self) -> bool { + let mut made_progress = false; for node in &mut self.nodes { - node.refresh_serialized_manager(); + made_progress |= node.refresh_serialized_manager(); } + made_progress } } From 2ce533379f04d01d8eafc50d2c2fdeeb319e9b30 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 13 May 2026 13:53:01 +0200 Subject: [PATCH 03/30] fuzz: reload monitors with the configured status Build the replacement persister with the configured monitor update status during reload. This keeps non-deferred restart behavior aligned with the active persistence-style matrix. --- fuzz/src/chanmon_consistency.rs | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 7a003c3bd61..f67a8ab1c31 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1079,9 +1079,7 @@ impl<'a> HarnessNode<'a> { &mut self, use_old_mons: u8, out: &Out, router: &'a FuzzRouter, chan_type: ChanType, ) { let logger = Self::build_logger(self.node_id, out); - // Re-registering monitors during reload reflects data that was already selected from - // simulated storage, so these startup watch_channel calls should complete immediately. - let persister = Self::build_persister(ChannelMonitorUpdateStatus::Completed); + let persister = Self::build_persister(self.persistence_style); let chain_monitor = Self::build_chain_monitor( &self.broadcaster, &self.fee_estimator, @@ -1135,14 +1133,8 @@ impl<'a> HarnessNode<'a> { let manager = <(BlockLocator, ChanMan)>::read(&mut &self.serialized_manager[..], read_args) .expect("Failed to read manager"); for (channel_id, mon) in monitors.drain() { - assert_eq!( - chain_monitor.watch_channel(channel_id, mon), - Ok(ChannelMonitorUpdateStatus::Completed) - ); + assert_eq!(chain_monitor.watch_channel(channel_id, mon), Ok(self.persistence_style)); } - // Future monitor writes should follow the node's configured persistence style; only the - // startup watch_channel registration above is forced to Completed. - *persister.update_ret.lock().unwrap() = self.persistence_style; self.node = manager.1; self.monitor = chain_monitor; self.persister = persister; From bdcdbb68ab359d9ecff3cbbab3ebcf1b7fbda3d0 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 13 May 2026 13:56:10 +0200 Subject: [PATCH 04/30] fuzz: add deferred chanmon checkpoints Track deferred monitor writes in the harness and checkpoint the ChannelManager state before flushing them to the persister. This extends setup, reload, and settle paths to model deferred ChainMonitor persistence ordering. 
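The ordering can be summarized with a stand-alone sketch. The names below are
placeholders rather than the harness or ChainMonitor API: writes buffered by a
deferred ChainMonitor only reach the persister after the ChannelManager snapshot
they belong to has been taken.

    fn checkpoint_then_flush(
        manager_bytes: &[u8], buffered_monitor_writes: &mut Vec<Vec<u8>>,
        persisted_monitor_writes: &mut Vec<Vec<u8>>,
    ) -> Vec<u8> {
        // Count the monitor writes queued while this manager state was built up.
        let queued = buffered_monitor_writes.len();
        // Checkpoint the ChannelManager first...
        let manager_snapshot = manager_bytes.to_vec();
        // ...then flush exactly those queued monitor writes against that snapshot.
        persisted_monitor_writes.extend(buffered_monitor_writes.drain(..queued));
        manager_snapshot
    }

In the harness this shape appears as checkpoint_manager_persistence: count the
pending ChainMonitor operations, re-encode the manager, then flush that many
operations when the node runs in deferred mode.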
--- fuzz/src/chanmon_consistency.rs | 88 +++++++++++++++++++++++++-------- 1 file changed, 67 insertions(+), 21 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index f67a8ab1c31..dfaa7d97387 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -817,6 +817,7 @@ struct HarnessNode<'a> { fee_estimator: Arc, wallet: TestWalletSource, persistence_style: ChannelMonitorUpdateStatus, + deferred: bool, serialized_manager: Vec, height: u32, last_htlc_clear_fee: u32, @@ -847,7 +848,7 @@ impl<'a> HarnessNode<'a> { fn build_chain_monitor( broadcaster: &Arc, fee_estimator: &Arc, keys_manager: &Arc, logger: Arc, - persister: &Arc, + persister: &Arc, deferred: bool, ) -> Arc { Arc::new(chainmonitor::ChainMonitor::new( None, @@ -857,14 +858,14 @@ impl<'a> HarnessNode<'a> { Arc::clone(persister), Arc::clone(keys_manager), keys_manager.get_peer_storage_key(), - false, + deferred, )) } fn new( node_id: u8, wallet: TestWalletSource, fee_estimator: Arc, broadcaster: Arc, persistence_style: ChannelMonitorUpdateStatus, - out: &Out, router: &'a FuzzRouter, chan_type: ChanType, + deferred: bool, out: &Out, router: &'a FuzzRouter, chan_type: ChanType, ) -> Self { let logger = Self::build_logger(node_id, out); let node_secret = SecretKey::from_slice(&[ @@ -884,6 +885,7 @@ impl<'a> HarnessNode<'a> { &keys_manager, Arc::clone(&logger), &persister, + deferred, ); let network = Network::Bitcoin; let best_block_timestamp = genesis_block(network).header.time; @@ -913,6 +915,7 @@ impl<'a> HarnessNode<'a> { fee_estimator, wallet, persistence_style, + deferred, serialized_manager: Vec::new(), height: 0, last_htlc_clear_fee: 253, @@ -969,15 +972,33 @@ impl<'a> HarnessNode<'a> { } } - fn refresh_serialized_manager(&mut self) -> bool { + fn checkpoint_manager_persistence(&mut self) -> bool { if self.node.get_and_clear_needs_persistence() { + let pending_monitor_writes = self.monitor.pending_operation_count(); self.serialized_manager = self.node.encode(); + if self.deferred { + self.monitor.flush(pending_monitor_writes, &self.logger); + } else { + assert_eq!(pending_monitor_writes, 0); + } true } else { + assert_eq!(self.monitor.pending_operation_count(), 0); false } } + fn force_checkpoint_manager_persistence(&mut self) { + let pending_monitor_writes = self.monitor.pending_operation_count(); + self.serialized_manager = self.node.encode(); + self.node.get_and_clear_needs_persistence(); + if self.deferred { + self.monitor.flush(pending_monitor_writes, &self.logger); + } else { + assert_eq!(pending_monitor_writes, 0); + } + } + fn bump_fee_estimate(&mut self, chan_type: ChanType) { let mut max_feerate = self.last_htlc_clear_fee; if matches!(chan_type, ChanType::Legacy) { @@ -1086,6 +1107,7 @@ impl<'a> HarnessNode<'a> { &self.keys_manager, Arc::clone(&logger), &persister, + self.deferred, ); let mut monitors = new_hash_map(); @@ -1132,13 +1154,22 @@ impl<'a> HarnessNode<'a> { let manager = <(BlockLocator, ChanMan)>::read(&mut &self.serialized_manager[..], read_args) .expect("Failed to read manager"); + let expected_status = if self.deferred { + ChannelMonitorUpdateStatus::InProgress + } else { + self.persistence_style + }; for (channel_id, mon) in monitors.drain() { - assert_eq!(chain_monitor.watch_channel(channel_id, mon), Ok(self.persistence_style)); + assert_eq!(chain_monitor.watch_channel(channel_id, mon), Ok(expected_status)); } self.node = manager.1; self.monitor = chain_monitor; self.persister = persister; self.logger = logger; + // In deferred mode, 
the startup watch_channel registrations above queue monitor operations + // even if the reloaded ChannelManager does not need persistence. Always checkpoint here so + // those registrations can be flushed against the manager snapshot they belong to. + self.force_checkpoint_manager_persistence(); } } @@ -1937,9 +1968,12 @@ fn connect_peers(source: &ChanMan<'_>, dest: &ChanMan<'_>) { } fn make_channel( - source: &HarnessNode<'_>, dest: &HarnessNode<'_>, chan_id: i32, trusted_open: bool, - trusted_accept: bool, chain_state: &mut ChainState, + nodes: &mut [HarnessNode<'_>; 3], source_idx: usize, dest_idx: usize, chan_id: i32, + trusted_open: bool, trusted_accept: bool, chain_state: &mut ChainState, ) { + assert!(source_idx < dest_idx); + let (left, right) = nodes.split_at_mut(dest_idx); + let (source, dest) = (&mut left[source_idx], &mut right[0]); if trusted_open { source .create_channel_to_trusted_peer_0reserve( @@ -2050,7 +2084,8 @@ fn make_channel( } }; dest.handle_funding_created(source.get_our_node_id(), &funding_created); - // Complete any pending monitor persistence callbacks for dest after watch_channel. + dest.checkpoint_manager_persistence(); + // Complete any monitor persistence callbacks made available for dest after watch_channel. dest.complete_all_pending_monitor_updates(); let (funding_signed, channel_id) = { @@ -2071,7 +2106,8 @@ fn make_channel( } source.handle_funding_signed(dest.get_our_node_id(), &funding_signed); - // Complete any pending monitor persistence callbacks for source after watch_channel. + source.checkpoint_manager_persistence(); + // Complete any monitor persistence callbacks made available for source after watch_channel. source.complete_all_pending_monitor_updates(); let events = source.get_and_clear_pending_events(); @@ -2143,6 +2179,12 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { ChannelMonitorUpdateStatus::Completed }, ]; + let deferred = [ + config_byte & 0b0010_0000 != 0, + config_byte & 0b0100_0000 != 0, + config_byte & 0b1000_0000 != 0, + ]; + let wallet_a = TestWalletSource::new(SecretKey::from_slice(&[1; 32]).unwrap()); let wallet_b = TestWalletSource::new(SecretKey::from_slice(&[2; 32]).unwrap()); let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); @@ -2179,6 +2221,7 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { Arc::clone(&fee_est_a), Arc::clone(&broadcast_a), persistence_styles[0], + deferred[0], &out, router, chan_type, @@ -2189,6 +2232,7 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { Arc::clone(&fee_est_b), Arc::clone(&broadcast_b), persistence_styles[1], + deferred[1], &out, router, chan_type, @@ -2199,6 +2243,7 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { Arc::clone(&fee_est_c), Arc::clone(&broadcast_c), persistence_styles[2], + deferred[2], &out, router, chan_type, @@ -2216,14 +2261,14 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { // channel gets its own txid and funding outpoint. // A-B: channel 2 A and B have 0-reserve (trusted open + trusted accept), // channel 3 A has 0-reserve (trusted accept). 
- make_channel(&nodes[0], &nodes[1], 1, false, false, &mut chain_state); - make_channel(&nodes[0], &nodes[1], 2, true, true, &mut chain_state); - make_channel(&nodes[0], &nodes[1], 3, false, true, &mut chain_state); + make_channel(&mut nodes, 0, 1, 1, false, false, &mut chain_state); + make_channel(&mut nodes, 0, 1, 2, true, true, &mut chain_state); + make_channel(&mut nodes, 0, 1, 3, false, true, &mut chain_state); // B-C: channel 4 B has 0-reserve (via trusted accept), // channel 5 C has 0-reserve (via trusted open). - make_channel(&nodes[1], &nodes[2], 4, false, true, &mut chain_state); - make_channel(&nodes[1], &nodes[2], 5, true, false, &mut chain_state); - make_channel(&nodes[1], &nodes[2], 6, false, false, &mut chain_state); + make_channel(&mut nodes, 1, 2, 4, false, true, &mut chain_state); + make_channel(&mut nodes, 1, 2, 5, true, false, &mut chain_state); + make_channel(&mut nodes, 1, 2, 6, false, false, &mut chain_state); // Wipe the transactions-broadcasted set to make sure we don't broadcast // any transactions during normal operation after setup. @@ -2250,7 +2295,7 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { }; for node in &mut nodes { - node.serialized_manager = node.encode(); + node.force_checkpoint_manager_persistence(); } Self { @@ -2749,7 +2794,7 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { "It may take may iterations to settle the state, but it should not take forever" ); } - let mut made_progress = self.refresh_serialized_managers(); + let mut made_progress = self.checkpoint_manager_persistences(); // Next, make sure no monitor completion callbacks are pending. made_progress |= self.ab_link.complete_all_monitor_updates(&self.nodes); made_progress |= self.bc_link.complete_all_monitor_updates(&self.nodes); @@ -2898,10 +2943,10 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { self.nodes[2].record_last_htlc_clear_fee(); } - fn refresh_serialized_managers(&mut self) -> bool { + fn checkpoint_manager_persistences(&mut self) -> bool { let mut made_progress = false; for node in &mut self.nodes { - made_progress |= node.refresh_serialized_manager(); + made_progress |= node.checkpoint_manager_persistence(); } made_progress } @@ -2910,9 +2955,10 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { #[inline] pub fn do_test(data: &[u8], out: Out) { let router = FuzzRouter {}; - // Read initial monitor styles and channel type from fuzz input byte 0: + // Read initial monitor styles, channel type, and deferred write mode from fuzz input byte 0: // bits 0-2: monitor styles (1 bit per node) // bits 3-4: channel type (0=Legacy, 1=KeyedAnchors, 2=ZeroFeeCommitments) + // bits 5-7: deferred monitor write mode (1 bit per node) let config_byte = if !data.is_empty() { data[0] } else { 0 }; let mut harness = Harness::new(config_byte, out, &router); let mut read_pos = 1; // First byte was consumed for initial config. @@ -3324,7 +3370,7 @@ pub fn do_test(data: &[u8], out: Out) { _ => break 'fuzz_loop, } - harness.refresh_serialized_managers(); + harness.checkpoint_manager_persistences(); } harness.finish(); } From 8a0f1dd9a8a0c9ac0993a4c553006e3febfb6a94 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 15:33:44 +0200 Subject: [PATCH 05/30] lightning: introduce singular claim requests Have ChannelMonitor hand singular ClaimRequests to OnchainTxHandler. Convert them to PackageTemplates only after duplicate filtering. This makes the single-outpoint invariant explicit at that boundary. 
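A rough stand-alone model of that boundary is sketched below. The types are
simplified stand-ins rather than the crate's ClaimRequest, PackageTemplate, and
claimable_outpoints tracking: each request carries exactly one outpoint,
duplicates are dropped per outpoint, and only the survivors become packages
eligible for aggregation.

    use std::collections::HashSet;

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct Outpoint { txid: [u8; 32], vout: u32 }
    struct Request { outpoint: Outpoint }
    struct Package { outpoints: Vec<Outpoint> }

    fn dedup_then_convert(
        requests: Vec<Request>, already_claiming: &HashSet<Outpoint>,
    ) -> Vec<Package> {
        requests
            .into_iter()
            // The single-outpoint invariant makes dedup a plain per-outpoint lookup
            // rather than an "all outpoints already claiming" scan over a package.
            .filter(|req| !already_claiming.contains(&req.outpoint))
            // Conversion (and any later aggregation) happens only after filtering.
            .map(|req| Package { outpoints: vec![req.outpoint] })
            .collect()
    }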
--- lightning/src/chain/channelmonitor.rs | 53 ++++++++++++++------------- lightning/src/chain/onchaintx.rs | 31 ++++++---------- lightning/src/chain/package.rs | 42 +++++++++++++++++++++ 3 files changed, 81 insertions(+), 45 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 42d04e0f8ce..c61883ea1f5 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -38,8 +38,8 @@ use crate::chain::chaininterface::{ }; use crate::chain::onchaintx::{ClaimEvent, FeerateStrategy, OnchainTxHandler}; use crate::chain::package::{ - CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, - HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedHTLCOutput, RevokedOutput, + ClaimRequest, CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, + HolderFundingOutput, HolderHTLCOutput, PackageSolvingData, RevokedHTLCOutput, RevokedOutput, }; use crate::chain::transaction::{OutPoint, TransactionData}; use crate::chain::{BlockLocator, WatchedOutput}; @@ -3879,7 +3879,7 @@ impl ChannelMonitorImpl { fn generate_claimable_outpoints_and_watch_outputs( &mut self, generate_monitor_event_with_reason: Option, require_funding_seen: bool, - ) -> (Vec, Vec) { + ) -> (Vec, Vec) { let funding = get_confirmed_funding_scope!(self); let holder_commitment_tx = &funding.current_holder_commitment_tx; let funding_outp = HolderFundingOutput::build( @@ -3887,7 +3887,7 @@ impl ChannelMonitorImpl { funding.channel_parameters.clone(), ); let funding_outpoint = funding.funding_outpoint(); - let commitment_package = PackageTemplate::build_package( + let commitment_package = ClaimRequest::new( funding_outpoint.txid.clone(), funding_outpoint.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height, @@ -3926,9 +3926,9 @@ impl ChannelMonitorImpl { let zero_fee_commitments = self.channel_type_features().supports_anchor_zero_fee_commitments(); if !zero_fee_htlcs && !zero_fee_commitments { - // Because we're broadcasting a commitment transaction, we should construct the package - // assuming it gets confirmed in the next block. Sadly, we have code which considers - // "not yet confirmed" things as discardable, so we cannot do that here. + // Because we're broadcasting a commitment transaction, we should construct claim + // requests assuming it gets confirmed in the next block. Sadly, we have code which + // considers "not yet confirmed" things as discardable, so we cannot do that here. let (mut new_outpoints, _) = self.get_broadcasted_holder_claims( funding, holder_commitment_tx, self.best_block.height, ); @@ -4806,11 +4806,11 @@ impl ChannelMonitorImpl { /// height > height + CLTV_SHARED_CLAIM_BUFFER. In any case, will install monitoring for /// HTLC-Success/HTLC-Timeout transactions. /// - /// Returns packages to claim the revoked output(s) and general information about the output that - /// is to the counterparty in the commitment transaction. + /// Returns claim requests for the revoked output(s) and general information about the output + /// that is to the counterparty in the commitment transaction. 
#[rustfmt::skip] fn check_spend_counterparty_transaction(&mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) - -> (Vec, CommitmentTxCounterpartyOutputInfo) + -> (Vec, CommitmentTxCounterpartyOutputInfo) { // Most secp and related errors trying to create keys means we have no hope of constructing // a spend transaction...so we return no transactions to broadcast @@ -4850,7 +4850,7 @@ impl ChannelMonitorImpl { per_commitment_point, per_commitment_key, outp.value, funding_spent.channel_parameters.clone(), height, ); - let justice_package = PackageTemplate::build_package( + let justice_package = ClaimRequest::new( commitment_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, @@ -4879,7 +4879,7 @@ impl ChannelMonitorImpl { } else { height }; - let justice_package = PackageTemplate::build_package( + let justice_package = ClaimRequest::new( commitment_txid, transaction_output_index, PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp), @@ -4968,7 +4968,7 @@ impl ChannelMonitorImpl { commitment_txid: Txid, per_commitment_option: Option<&Vec<(HTLCOutputInCommitment, Option>)>>, confirmation_height: Option, - ) -> Vec { + ) -> Vec { let per_commitment_claimable_data = match per_commitment_option { Some(outputs) => outputs, None => return Vec::new(), @@ -4993,7 +4993,7 @@ impl ChannelMonitorImpl { confirmation_height, ), ); - Some(PackageTemplate::build_package( + Some(ClaimRequest::new( commitment_txid, transaction_output_index, htlc_data, @@ -5009,13 +5009,13 @@ impl ChannelMonitorImpl { .collect() } - /// Returns the HTLC claim package templates and the counterparty output info + /// Returns the HTLC claim requests and the counterparty output info. 
fn get_counterparty_output_claim_info( &self, funding_spent: &FundingScope, commitment_number: u64, commitment_txid: Txid, tx: &Transaction, per_commitment_claimable_data: &[(HTLCOutputInCommitment, Option>)], confirmation_height: Option, - ) -> (Vec, CommitmentTxCounterpartyOutputInfo) { + ) -> (Vec, CommitmentTxCounterpartyOutputInfo) { let mut claimable_outpoints = Vec::new(); let mut to_counterparty_output_info: CommitmentTxCounterpartyOutputInfo = None; @@ -5086,7 +5086,7 @@ impl ChannelMonitorImpl { ), ) }; - let counterparty_package = PackageTemplate::build_package( + let counterparty_package = ClaimRequest::new( commitment_txid, transaction_output_index, counterparty_htlc_outp, @@ -5104,7 +5104,7 @@ impl ChannelMonitorImpl { #[rustfmt::skip] fn check_spend_counterparty_htlc( &mut self, tx: &Transaction, commitment_number: u64, commitment_txid: &Txid, height: u32, logger: &L - ) -> (Vec, Option) { + ) -> (Vec, Option) { let secret = if let Some(secret) = self.get_secret(commitment_number) { secret } else { return (Vec::new(), None); }; let per_commitment_key = match SecretKey::from_slice(&secret) { Ok(key) => key, @@ -5135,7 +5135,7 @@ impl ChannelMonitorImpl { per_commitment_point, per_commitment_key, tx.output[idx].value, self.funding.channel_parameters.clone(), height, ); - let justice_package = PackageTemplate::build_package( + let justice_package = ClaimRequest::new( htlc_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, ); @@ -5187,13 +5187,14 @@ impl ChannelMonitorImpl { htlcs } - // Returns (1) `PackageTemplate`s that can be given to the OnchainTxHandler, so that the handler can - // broadcast transactions claiming holder HTLC commitment outputs and (2) a holder revokable - // script so we can detect whether a holder transaction has been seen on-chain. + // Returns (1) `ClaimRequest`s that can be given to the OnchainTxHandler, so that the + // handler can broadcast transactions claiming holder HTLC commitment outputs and (2) a + // holder revokable script so we can detect whether a holder transaction has been seen + // on-chain. 
#[rustfmt::skip] fn get_broadcasted_holder_claims( &self, funding: &FundingScope, holder_tx: &HolderCommitmentTransaction, conf_height: u32, - ) -> (Vec, Option<(ScriptBuf, PublicKey, RevocationKey)>) { + ) -> (Vec, Option<(ScriptBuf, PublicKey, RevocationKey)>) { let tx = holder_tx.trust(); let keys = tx.keys(); let redeem_script = chan_utils::get_revokeable_redeemscript( @@ -5212,7 +5213,7 @@ impl ChannelMonitorImpl { }; let transaction_output_index = htlc_descriptor.htlc.transaction_output_index .expect("Expected transaction output index for non-dust HTLC"); - PackageTemplate::build_package( + ClaimRequest::new( tx.txid(), transaction_output_index, PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build(htlc_descriptor, conf_height)), counterparty_spendable_height, @@ -5248,7 +5249,7 @@ impl ChannelMonitorImpl { fn check_spend_holder_transaction( &mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L, - ) -> Option<(Vec, TransactionOutputs)> { + ) -> Option<(Vec, TransactionOutputs)> { let funding_spent = get_confirmed_funding_scope!(self); // HTLCs set may differ between last and previous holder commitment txn, in case of one them hitting chain, ensure we cancel all HTLCs backward @@ -5759,7 +5760,7 @@ impl ChannelMonitorImpl { conf_hash: BlockHash, txn_matched: Vec<&Transaction>, mut watch_outputs: Vec, - mut claimable_outpoints: Vec, + mut claimable_outpoints: Vec, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 3eb6d64f3a2..823b81936ce 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -27,7 +27,7 @@ use crate::chain::chaininterface::{ BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator, TransactionType, }; use crate::chain::channelmonitor::ANTI_REORG_DELAY; -use crate::chain::package::{PackageSolvingData, PackageTemplate}; +use crate::chain::package::{ClaimRequest, PackageSolvingData, PackageTemplate}; use crate::chain::transaction::MaybeSignedTransaction; use crate::chain::ClaimId; use crate::ln::chan_utils::{ @@ -791,7 +791,7 @@ impl OnchainTxHandler { /// `cur_height`, however it must never be higher than `cur_height`. #[rustfmt::skip] pub(super) fn update_claims_view_from_requests( - &mut self, mut requests: Vec, conf_height: u32, cur_height: u32, + &mut self, mut requests: Vec, conf_height: u32, cur_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) { @@ -801,33 +801,26 @@ impl OnchainTxHandler { // First drop any duplicate claims. 
requests.retain(|req| { - debug_assert_eq!( - req.outpoints().len(), - 1, - "Claims passed to `update_claims_view_from_requests` should not be aggregated" - ); - let mut all_outpoints_claiming = true; - for outpoint in req.outpoints() { - if self.claimable_outpoints.get(outpoint).is_none() { - all_outpoints_claiming = false; - } - } - if all_outpoints_claiming { + let outpoint = req.outpoint(); + if self.claimable_outpoints.get(outpoint).is_some() { log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", - req.outpoints()[0].txid, req.outpoints()[0].vout); + outpoint.txid, outpoint.vout); false } else { let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten() - .find(|locked_package| locked_package.outpoints() == req.outpoints()); + .find(|locked_package| locked_package.outpoints().len() == 1 && locked_package.contains_outpoint(outpoint)); if let Some(package) = timelocked_equivalent_package { log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.", - req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_locktime(cur_height)); + outpoint.txid, outpoint.vout, package.package_locktime(cur_height)); false } else { true } } }); + let mut requests = requests.into_iter() + .map(ClaimRequest::into_package_template) + .collect::>(); // Then try to maximally aggregate `requests`. for i in (1..requests.len()).rev() { @@ -1290,7 +1283,7 @@ mod tests { use types::features::ChannelTypeFeatures; use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator}; - use crate::chain::package::{HolderHTLCOutput, PackageSolvingData, PackageTemplate}; + use crate::chain::package::{ClaimRequest, HolderHTLCOutput, PackageSolvingData}; use crate::chain::transaction::OutPoint; use crate::ln::chan_utils::{ ChannelPublicKeys, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, @@ -1412,7 +1405,7 @@ mod tests { let holder_commit_txid = holder_commit.trust().txid(); let mut requests = Vec::new(); for (htlc, counterparty_sig) in holder_commit.nondust_htlcs().iter().zip(holder_commit.counterparty_htlc_sigs.iter()) { - requests.push(PackageTemplate::build_package( + requests.push(ClaimRequest::new( holder_commit_txid, htlc.transaction_output_index.unwrap(), PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build(HTLCDescriptor { diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 0ef8855242b..06be5750367 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -1097,6 +1097,19 @@ enum PackageMalleability { Untractable, } +/// A single on-chain output claim generated by [`ChannelMonitor`]. +/// +/// These requests are converted to [`PackageTemplate`]s once [`OnchainTxHandler`] has deduplicated +/// them and is ready to aggregate compatible claims. +/// +/// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor +/// [`OnchainTxHandler`]: crate::chain::onchaintx::OnchainTxHandler +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct ClaimRequest { + input: (BitcoinOutPoint, PackageSolvingData), + counterparty_spendable_height: u32, +} + /// A structure to describe a package content that is generated by ChannelMonitor and /// used by OnchainTxHandler to generate and broadcast transactions settling onchain claims. 
/// @@ -1179,6 +1192,32 @@ impl PartialEq for PackageTemplate { } } +impl ClaimRequest { + pub(crate) fn new( + txid: Txid, vout: u32, input_solving_data: PackageSolvingData, + counterparty_spendable_height: u32, + ) -> Self { + Self { + input: (BitcoinOutPoint { txid, vout }, input_solving_data), + counterparty_spendable_height, + } + } + + pub(crate) fn outpoint(&self) -> &BitcoinOutPoint { + &self.input.0 + } + + pub(crate) fn into_package_template(self) -> PackageTemplate { + let (outpoint, input_solving_data) = self.input; + PackageTemplate::build_package( + outpoint.txid, + outpoint.vout, + input_solving_data, + self.counterparty_spendable_height, + ) + } +} + impl PackageTemplate { #[rustfmt::skip] pub(crate) fn can_merge_with(&self, other: &PackageTemplate, cur_height: u32) -> bool { @@ -1265,6 +1304,9 @@ impl PackageTemplate { pub(crate) fn outpoints(&self) -> Vec<&BitcoinOutPoint> { self.inputs.iter().map(|(o, _)| o).collect() } + pub(crate) fn contains_outpoint(&self, outpoint: &BitcoinOutPoint) -> bool { + self.inputs.iter().any(|(input, _)| input == outpoint) + } pub(crate) fn outpoints_and_creation_heights( &self, ) -> impl Iterator)> { From 27837dee6e6aa230d61c47dc23be02dea8f9072b Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 4 May 2026 10:36:53 +0200 Subject: [PATCH 06/30] lightning: clarify channelmonitor event thresholds Clarify ChannelMonitor comments around on-chain event thresholds. Some events only wait for anti-reorg finality, while CSV-delayed outputs wait until spendable through the same threshold queue. --- lightning/src/chain/channelmonitor.rs | 51 ++++++++++++++------------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index c61883ea1f5..67dff0f3e80 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -473,7 +473,8 @@ impl Readable for CounterpartyCommitmentParameters { /// An entry for an [`OnchainEvent`], stating the block height and hash when the event was /// observed, as well as the transaction causing it. /// -/// Used to determine when the on-chain event can be considered safe from a chain reorganization. +/// Used to determine when the on-chain event can be considered safe from a chain reorganization +/// or, for CSV-delayed outputs, spendable. #[derive(Clone, PartialEq, Eq)] struct OnchainEventEntry { txid: Txid, @@ -491,14 +492,14 @@ impl OnchainEventEntry { OnchainEvent::MaturingOutput { descriptor: SpendableOutputDescriptor::DelayedPaymentOutput(ref descriptor) } => { - // A CSV'd transaction is confirmable in block (input height) + CSV delay, which means - // it's broadcastable when we see the previous block. + // A CSV-delayed output is spendable in block (input height) + CSV delay, which + // means we can hand it upstream when we see the previous block. conf_threshold = cmp::max(conf_threshold, self.height + descriptor.to_self_delay as u32 - 1); }, OnchainEvent::FundingSpendConfirmation { on_local_output_csv: Some(csv), .. } | OnchainEvent::HTLCSpendConfirmation { on_to_local_output_csv: Some(csv), .. } => { - // A CSV'd transaction is confirmable in block (input height) + CSV delay, which means - // it's broadcastable when we see the previous block. + // A CSV-delayed output is spendable in block (input height) + CSV delay, which + // means we can act on the event when we see the previous block. 
conf_threshold = cmp::max(conf_threshold, self.height + csv as u32 - 1); }, _ => {}, @@ -517,7 +518,7 @@ impl OnchainEventEntry { type CommitmentTxCounterpartyOutputInfo = Option<(u32, Amount)>; /// Upon discovering of some classes of onchain tx by ChannelMonitor, we may have to take actions on it -/// once they mature to enough confirmations (ANTI_REORG_DELAY) +/// once they reach anti-reorg finality or, for CSV-delayed outputs, CSV maturity. #[derive(Clone, PartialEq, Eq)] enum OnchainEvent { /// An outbound HTLC failing after a transaction is confirmed. Used @@ -534,8 +535,8 @@ enum OnchainEvent { /// transaction which appeared on chain. commitment_tx_output_idx: Option, }, - /// An output waiting on [`ANTI_REORG_DELAY`] confirmations before we hand the user the - /// [`SpendableOutputDescriptor`]. + /// An output waiting until it is anti-reorg final and, for CSV-delayed outputs, spendable + /// before we hand the user the [`SpendableOutputDescriptor`]. MaturingOutput { descriptor: SpendableOutputDescriptor }, /// A spend of the funding output, either a commitment transaction or a cooperative closing /// transaction. @@ -566,8 +567,8 @@ enum OnchainEvent { /// If the claim was made by either party with a preimage, this is filled in preimage: Option, /// If the claim was made by us on an inbound HTLC against a local commitment transaction, - /// we set this to the output CSV value which we will have to wait until to spend the - /// output (and generate a SpendableOutput event). + /// this records the CSV delay for the delayed output. While present, the event reaches + /// its threshold once the output is spendable. on_to_local_output_csv: Option, }, /// An alternative funding transaction (due to a splice/RBF) has confirmed but can no longer be @@ -1003,7 +1004,7 @@ impl Balance { } } -/// An HTLC which has been irrevocably resolved on-chain, and has reached ANTI_REORG_DELAY. +/// An HTLC whose on-chain outcome has reached the threshold for irrevocable resolution. #[derive(Clone, PartialEq, Eq)] struct IrrevocablyResolvedHTLC { commitment_tx_output_idx: Option, @@ -1301,8 +1302,9 @@ pub(crate) struct ChannelMonitorImpl { pub(super) is_processing_pending_events: bool, // Used to track on-chain events (i.e., transactions part of channels confirmed on chain) on - // which to take actions once they reach enough confirmations. Each entry includes the - // transaction's id and the height when the transaction was confirmed on chain. + // which to take actions once they reach anti-reorg finality or, for CSV-delayed outputs, + // CSV maturity. Each entry includes the transaction's id and the height when the transaction + // was confirmed on chain. onchain_events_awaiting_threshold_conf: Vec, // If we get serialized out and re-read, we need to make sure that the chain monitoring @@ -1339,8 +1341,8 @@ pub(crate) struct ChannelMonitorImpl { /// Added in 0.0.124. holder_pays_commitment_tx_fee: Option, - /// Set to `Some` of the confirmed transaction spending the funding input of the channel after - /// reaching `ANTI_REORG_DELAY` confirmations. + /// Set to `Some` once the confirmed transaction spending the funding input of the channel has + /// reached its event threshold. 
funding_spend_confirmed: Option, confirmed_commitment_tx_counterparty_output: CommitmentTxCounterpartyOutputInfo, @@ -2763,11 +2765,10 @@ impl ChannelMonitorImpl { source: BalanceSource::Htlc, }); } else if htlc_resolved && !htlc_output_spend_pending { - // Funding transaction spends should be fully confirmed by the time any - // HTLC transactions are resolved, unless we're talking about a holder - // commitment tx, whose resolution is delayed until the CSV timeout is - // reached, even though HTLCs may be resolved after only - // ANTI_REORG_DELAY confirmations. + // Funding transaction spends should have reached their event threshold by the time any + // HTLC transactions are irrevocably resolved, unless we're talking about a holder + // commitment tx, whose resolution is delayed until CSV maturity, even though HTLCs + // may be resolved after anti-reorg finality. debug_assert!(holder_commitment || self.funding_spend_confirmed.is_some()); } else if counterparty_revoked_commitment { let htlc_output_claim_pending = self.onchain_events_awaiting_threshold_conf.iter().any(|event| { @@ -2889,7 +2890,7 @@ impl ChannelMonitor { }); if let Some((txid, conf_thresh)) = funding_spend_pending { debug_assert!(us.funding_spend_confirmed.is_none(), - "We have a pending funding spend awaiting anti-reorg confirmation, we can't have confirmed it already!"); + "We have a pending funding spend awaiting its event threshold, it cannot have reached it already!"); confirmed_txid = Some(txid); pending_commitment_tx_conf_thresh = Some(conf_thresh); } @@ -3347,7 +3348,7 @@ macro_rules! fail_unbroadcast_htlcs { commitment_tx_output_idx: None, }, }; - log_trace!($logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of {} commitment transaction {}, waiting for confirmation (at height {})", + log_trace!($logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of {} commitment transaction {}, event reaches threshold at height {}", &htlc.payment_hash, $commitment_tx, $commitment_tx_type, $commitment_txid_confirmed, entry.confirmation_threshold()); $self.onchain_events_awaiting_threshold_conf.push(entry); @@ -4513,7 +4514,7 @@ impl ChannelMonitorImpl { // event for the same source. self.failed_back_htlc_ids.insert(SentHTLCId::from_source(source)); if let Some(confirmed_txid) = self.funding_spend_confirmed { - // Funding spend already confirmed past ANTI_REORG_DELAY: resolve immediately. + // Funding spend already reached its event threshold: resolve immediately. 
log_trace!( logger, "Failing HTLC from late counterparty commitment update immediately \ @@ -4549,7 +4550,7 @@ impl ChannelMonitorImpl { log_trace!( logger, "Failing HTLC from late counterparty commitment update, \ - waiting for confirmation (at height {})", + event reaches threshold at height {}", entry.confirmation_threshold() ); self.onchain_events_awaiting_threshold_conf.push(entry); @@ -6403,7 +6404,7 @@ impl ChannelMonitorImpl { commitment_tx_output_idx: Some(input.previous_output.vout), }, }; - log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", &payment_hash, entry.confirmation_threshold()); + log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, event reaches threshold at height {}", &payment_hash, entry.confirmation_threshold()); self.onchain_events_awaiting_threshold_conf.push(entry); } } From 977852f0256bdc4e707049a7fc9bf8ef2a20d8c7 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 16:02:00 +0200 Subject: [PATCH 07/30] lightning: refactor onchain tx handler tests Move repeated OnchainTxHandler setup into shared test helpers so the claim-replay coverage can focus on the behavior under test. --- lightning/src/chain/onchaintx.rs | 111 +++++++++++++++++-------------- 1 file changed, 62 insertions(+), 49 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 823b81936ce..e559f093922 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -1298,12 +1298,9 @@ mod tests { use super::OnchainTxHandler; - // Test that all claims with locktime equal to or less than the current height are broadcast - // immediately while claims with locktime greater than the current height are only broadcast - // once the locktime is reached. - #[test] - #[rustfmt::skip] - fn test_broadcast_height() { + fn new_test_tx_handler( + channel_type_features: ChannelTypeFeatures, nondust_htlcs: Vec, + ) -> OnchainTxHandler { let secp_ctx = Secp256k1::new(); let signer = InMemorySigner::new( SecretKey::from_slice(&[41; 32]).unwrap(), @@ -1340,9 +1337,6 @@ mod tests { )), }; let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::MAX }; - - // Use non-anchor channels so that HTLC-Timeouts are broadcast immediately instead of sent - // to the user for external funding. let chan_params = ChannelTransactionParameters { holder_pubkeys: signer.pubkeys(&secp_ctx), holder_selected_contest_delay: 66, @@ -1353,66 +1347,45 @@ mod tests { }), funding_outpoint: Some(funding_outpoint), splice_parent_funding_txid: None, - channel_type_features: ChannelTypeFeatures::only_static_remote_key(), + channel_type_features, channel_value_satoshis: 0, }; - - // Create an OnchainTxHandler for a commitment containing HTLCs with CLTV expiries of 0, 1, - // and 2 blocks. 
- let mut nondust_htlcs = Vec::new(); - for i in 0..3 { - let preimage = PaymentPreimage([i; 32]); - let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array()); - nondust_htlcs.push( - HTLCOutputInCommitment { - offered: true, - amount_msat: 10000, - cltv_expiry: i as u32, - payment_hash: hash, - transaction_output_index: Some(i as u32), - } - ); - } - let holder_commit = HolderCommitmentTransaction::dummy(1000000, funding_outpoint, nondust_htlcs); - let destination_script = ScriptBuf::new(); + let holder_commit = + HolderCommitmentTransaction::dummy(1000000, funding_outpoint, nondust_htlcs); let counterparty_node_id = PublicKey::from_slice(&[2; 33]).unwrap(); - let mut tx_handler = OnchainTxHandler::new( + OnchainTxHandler::new( ChannelId::from_bytes([0; 32]), counterparty_node_id, 1000000, [0; 32], - destination_script.clone(), + ScriptBuf::new(), signer, chan_params, holder_commit, secp_ctx, - ); - - // Create a broadcaster with current block height 1. - let broadcaster = TestBroadcaster::new(Network::Testnet); - { - let mut blocks = broadcaster.blocks.lock().unwrap(); - let genesis_hash = blocks[0].0.block_hash(); - blocks.push((create_dummy_block(genesis_hash, 0, Vec::new()), 1)); - } - - let fee_estimator = TestFeeEstimator::new(253); - let fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator); - let logger = TestLogger::new(); + ) + } - // Request claiming of each HTLC on the holder's commitment, with current block height 1. + fn build_offered_holder_htlc_requests( + tx_handler: &OnchainTxHandler, + ) -> Vec { let holder_commit = tx_handler.current_holder_commitment_tx(); let holder_commit_txid = holder_commit.trust().txid(); let mut requests = Vec::new(); - for (htlc, counterparty_sig) in holder_commit.nondust_htlcs().iter().zip(holder_commit.counterparty_htlc_sigs.iter()) { + for (htlc, counterparty_sig) in + holder_commit.nondust_htlcs().iter().zip(holder_commit.counterparty_htlc_sigs.iter()) + { requests.push(ClaimRequest::new( holder_commit_txid, htlc.transaction_output_index.unwrap(), - PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build(HTLCDescriptor { + PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build( + HTLCDescriptor { channel_derivation_parameters: ChannelDerivationParameters { value_satoshis: tx_handler.channel_value_satoshis, keys_id: tx_handler.channel_keys_id, - transaction_parameters: tx_handler.channel_transaction_parameters.clone(), + transaction_parameters: tx_handler + .channel_transaction_parameters + .clone(), }, commitment_txid: holder_commit_txid, per_commitment_number: holder_commit.commitment_number(), @@ -1422,11 +1395,51 @@ mod tests { preimage: None, counterparty_sig: *counterparty_sig, }, - 0 + 0, )), 0, )); } + requests + } + + // Test that all claims with locktime equal to or less than the current height are broadcast + // immediately while claims with locktime greater than the current height are only broadcast + // once the locktime is reached. + #[test] + fn test_broadcast_height() { + // Create an OnchainTxHandler for a commitment containing HTLCs with CLTV expiries of 0, 1, + // and 2 blocks. 
+ let mut nondust_htlcs = Vec::new(); + for i in 0..3 { + let preimage = PaymentPreimage([i; 32]); + let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array()); + nondust_htlcs.push(HTLCOutputInCommitment { + offered: true, + amount_msat: 10000, + cltv_expiry: i as u32, + payment_hash: hash, + transaction_output_index: Some(i as u32), + }); + } + let destination_script = ScriptBuf::new(); + let mut tx_handler = + new_test_tx_handler(ChannelTypeFeatures::only_static_remote_key(), nondust_htlcs); + + // Create a broadcaster with current block height 1. + let broadcaster = TestBroadcaster::new(Network::Testnet); + { + let mut blocks = broadcaster.blocks.lock().unwrap(); + let genesis_hash = blocks[0].0.block_hash(); + blocks.push((create_dummy_block(genesis_hash, 0, Vec::new()), 1)); + } + + let fee_estimator = TestFeeEstimator::new(253); + let fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator); + let logger = TestLogger::new(); + + // Request claiming of each HTLC on the holder's commitment, with current block height 1. + let requests = build_offered_holder_htlc_requests(&tx_handler); tx_handler.update_claims_view_from_requests( requests, 1, From e81d949241b3c879b7cc5507e391cdec42bc8a6a Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 4 May 2026 11:10:15 +0200 Subject: [PATCH 08/30] lightning: cover delayed preimage claim balance Add a monitor test for an inbound HTLC claimed by preimage from a holder commitment. Confirm that the claimable balance remains unchanged after the HTLC-success spend reaches anti-reorg finality but before the CSV-delayed output is spendable. --- lightning/src/ln/monitor_tests.rs | 67 +++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index f52f093917b..4fd40df3a45 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -115,6 +115,73 @@ fn test_spendable_output<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, spendable_t } else { panic!(); } } +#[test] +fn preimage_claim_balance_unchanged_between_anti_reorg_and_csv() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let (_, _, chan_id, funding_tx) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); + // Route an inbound HTLC to node 0 so its preimage claim spends an HTLC output from node 0's + // holder commitment and creates a CSV-delayed output. 
+ let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 12_000_000); + nodes[1].node.send_payment_with_route(route, payment_hash, + RecipientOnionFields::secret_only(payment_secret, 12_000_000), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors(&nodes[1], 1); + let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); + expect_and_process_pending_htlcs(&nodes[0], false); + expect_payment_claimable!(nodes[0], payment_hash, payment_secret, 12_000_000); + + // Confirm node 0's holder commitment before claiming the HTLC so the preimage claim has a + // delayed output that remains tracked as an HTLC balance until it becomes spendable. + let message = "Channel force-closed".to_owned(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); + check_added_monitors(&nodes[0], 1); + check_closed_broadcast(&nodes[0], 1, true); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1000000); + let commitment_txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); + assert_eq!(commitment_txn.len(), 1); + check_spends!(commitment_txn[0], funding_tx); + mine_transaction(&nodes[0], &commitment_txn[0]); + nodes[0].tx_broadcaster.clear(); + + // Claiming the HTLC with the preimage broadcasts the HTLC-Success transaction. Once it + // confirms, the resulting delayed output should be reported as an HTLC balance awaiting + // confirmations. + nodes[0].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[0], 1); + expect_payment_claimed!(nodes[0], payment_hash, 12_000_000); + let htlc_claim_txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); + assert_eq!(htlc_claim_txn.len(), 1); + check_spends!(htlc_claim_txn[0], commitment_txn[0]); + mine_transaction(&nodes[0], &htlc_claim_txn[0]); + + let htlc_claim_balances = sorted_vec(nodes[0].chain_monitor.chain_monitor + .get_monitor(chan_id).unwrap().get_claimable_balances()); + assert!(htlc_claim_balances.iter().any(|balance| matches!(balance, + Balance::ClaimableAwaitingConfirmations { + amount_satoshis: 12_000, + source: BalanceSource::Htlc, + .. + } + ))); + + // Advance only to anti-reorg finality for the HTLC-Success transaction. The CSV-delayed + // output is not spendable yet, so the claimable HTLC balance should remain unchanged. + connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); + assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + assert_eq!(htlc_claim_balances, sorted_vec(nodes[0].chain_monitor.chain_monitor + .get_monitor(chan_id).unwrap().get_claimable_balances())); +} + #[test] fn revoked_output_htlc_resolution_timing() { // Tests that HTLCs which were present in a broadcasted remote revoked commitment transaction From a6213de7fdf832a9c6767ecc5b5fa67fc63979ca Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 4 May 2026 09:45:31 +0200 Subject: [PATCH 09/30] lightning: resolve HTLC spends at anti-reorg finality Treat HTLCSpendConfirmation entries as irrevocably resolved once the commitment HTLC output spend reaches anti-reorg finality. Do not wait for CSV maturity of any delayed output created by that spend. 
Delayed outputs remain tracked separately as MaturingOutput entries, keeping claimable balances alive until they are CSV-mature and can be surfaced as SpendableOutputs. --- lightning/src/chain/channelmonitor.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 67dff0f3e80..09b5466c378 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -496,8 +496,7 @@ impl OnchainEventEntry { // means we can hand it upstream when we see the previous block. conf_threshold = cmp::max(conf_threshold, self.height + descriptor.to_self_delay as u32 - 1); }, - OnchainEvent::FundingSpendConfirmation { on_local_output_csv: Some(csv), .. } | - OnchainEvent::HTLCSpendConfirmation { on_to_local_output_csv: Some(csv), .. } => { + OnchainEvent::FundingSpendConfirmation { on_local_output_csv: Some(csv), .. } => { // A CSV-delayed output is spendable in block (input height) + CSV delay, which // means we can act on the event when we see the previous block. conf_threshold = cmp::max(conf_threshold, self.height + csv as u32 - 1); @@ -567,8 +566,9 @@ enum OnchainEvent { /// If the claim was made by either party with a preimage, this is filled in preimage: Option, /// If the claim was made by us on an inbound HTLC against a local commitment transaction, - /// this records the CSV delay for the delayed output. While present, the event reaches - /// its threshold once the output is spendable. + /// this records the CSV delay for the delayed output. The CSV-mature output remains + /// tracked via the corresponding [`OnchainEvent::MaturingOutput`]; the HTLC spend itself + /// reaches anti-reorg finality. on_to_local_output_csv: Option, }, /// An alternative funding transaction (due to a splice/RBF) has confirmed but can no longer be @@ -1346,9 +1346,10 @@ pub(crate) struct ChannelMonitorImpl { funding_spend_confirmed: Option, confirmed_commitment_tx_counterparty_output: CommitmentTxCounterpartyOutputInfo, - /// The set of HTLCs which have been either claimed or failed on chain and have reached - /// the requisite confirmations on the claim/fail transaction (either ANTI_REORG_DELAY or the - /// spending CSV for revocable outputs). + /// The set of HTLCs whose on-chain claim or fail outcome is irrevocably resolved because the + /// commitment transaction HTLC output spend has reached anti-reorg finality. Any resulting + /// output that is still waiting on CSV maturity is tracked separately as an + /// [`OnchainEvent::MaturingOutput`]. htlcs_resolved_on_chain: Vec, /// When a payment is resolved through an on-chain transaction, we tell the `ChannelManager` @@ -6298,10 +6299,9 @@ impl ChannelMonitorImpl { commitment_tx_output_idx: input.previous_output.vout, preimage: if accepted_preimage_claim || offered_preimage_claim { Some(payment_preimage) } else { None }, - // If this is a payment to us (ie !outbound_htlc), wait for - // the CSV delay before dropping the HTLC from claimable - // balance if the claim was an HTLC-Success transaction (ie - // accepted_preimage_claim). + // If this is a payment to us (ie !outbound_htlc), keep a + // record of the CSV delay. The delayed output is tracked + // separately as a MaturingOutput until it is spendable. 
on_to_local_output_csv: if accepted_preimage_claim && !outbound_htlc { Some(self.on_holder_tx_csv) } else { None }, }, From ffc6fec009291b16a95eb0be8874f86135c3d53b Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 6 May 2026 17:05:12 +0200 Subject: [PATCH 10/30] f: assert delayed output for HTLC spends Check that any HTLCSpendConfirmation carrying a local-output CSV has a matching delayed MaturingOutput. Scan spendable outputs before recording HTLC spend confirmations so the invariant is present when the assertion runs. --- lightning/src/chain/channelmonitor.rs | 32 ++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 09b5466c378..09aa863b51c 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -5727,9 +5727,9 @@ impl ChannelMonitorImpl { break; } } - self.is_resolving_htlc_output(&tx, height, &block_hash, logger); - self.check_tx_and_push_spendable_outputs(&tx, height, &block_hash, logger); + + self.is_resolving_htlc_output(&tx, height, &block_hash, logger); } } @@ -6207,6 +6207,7 @@ impl ChannelMonitorImpl { &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithContext, ) { let funding_spent = get_confirmed_funding_scope!(self); + let txid = tx.compute_txid(); 'outer_loop: for input in &tx.input { let mut payment_data = None; @@ -6293,8 +6294,17 @@ impl ChannelMonitorImpl { if payment_data.is_none() { log_claim!($tx_info, $holder_tx, htlc_output, false); let outbound_htlc = $holder_tx == htlc_output.offered; + let on_to_local_output_csv = if accepted_preimage_claim && !outbound_htlc { + Some(self.on_holder_tx_csv) } else { None }; + #[cfg(debug_assertions)] + if let Some(csv) = on_to_local_output_csv { + debug_assert!( + self.has_delayed_maturing_output_for_tx(txid, csv), + "CSV-delayed HTLC spend confirmation should have a matching MaturingOutput" + ); + } self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry { - txid: tx.compute_txid(), height, block_hash: Some(*block_hash), transaction: Some(tx.clone()), + txid, height, block_hash: Some(*block_hash), transaction: Some(tx.clone()), event: OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx: input.previous_output.vout, preimage: if accepted_preimage_claim || offered_preimage_claim { @@ -6302,8 +6312,7 @@ impl ChannelMonitorImpl { // If this is a payment to us (ie !outbound_htlc), keep a // record of the CSV delay. The delayed output is tracked // separately as a MaturingOutput until it is spendable. - on_to_local_output_csv: if accepted_preimage_claim && !outbound_htlc { - Some(self.on_holder_tx_csv) } else { None }, + on_to_local_output_csv, }, }); continue 'outer_loop; @@ -6456,6 +6465,19 @@ impl ChannelMonitorImpl { spendable_outputs } + #[cfg(debug_assertions)] + fn has_delayed_maturing_output_for_tx(&self, txid: Txid, csv: u16) -> bool { + self.onchain_events_awaiting_threshold_conf.iter().any(|entry| { + entry.txid == txid + && match &entry.event { + OnchainEvent::MaturingOutput { + descriptor: SpendableOutputDescriptor::DelayedPaymentOutput(descriptor), + } => descriptor.to_self_delay == csv, + _ => false, + } + }) + } + /// Checks if the confirmed transaction is paying funds back to some address we can assume to /// own. 
#[rustfmt::skip] From 426f97d1e0511c560a25aa88e4750fbfa7b490bd Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 14:21:04 +0200 Subject: [PATCH 11/30] lightning: dedupe delayed claims by outpoint coverage A replayed holder HTLC claim may arrive as a single-outpoint request after earlier requests were merged into a delayed package. Check whether an existing delayed package already covers the new request instead of requiring exact outpoint-set equality. Add focused OnchainTxHandler coverage and a ChannelMonitor regression through claim_funds for both current anchor variants. --- lightning/src/chain/onchaintx.rs | 95 ++++++++++++++++++++++++- lightning/src/ln/monitor_tests.rs | 113 ++++++++++++++++++++++++++++++ 2 files changed, 205 insertions(+), 3 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index e559f093922..2cdad08de1e 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -807,9 +807,10 @@ impl OnchainTxHandler { outpoint.txid, outpoint.vout); false } else { - let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten() - .find(|locked_package| locked_package.outpoints().len() == 1 && locked_package.contains_outpoint(outpoint)); - if let Some(package) = timelocked_equivalent_package { + let timelocked_covering_package = self.locktimed_packages.values() + .flat_map(|packages| packages.iter()) + .find(|locked_package| locked_package.contains_outpoint(outpoint)); + if let Some(package) = timelocked_covering_package { log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.", outpoint.txid, outpoint.vout, package.package_locktime(cur_height)); false @@ -1480,4 +1481,92 @@ mod tests { assert_eq!(txs_broadcasted.len(), 1); assert_eq!(txs_broadcasted[0].lock_time.to_consensus_u32(), 2); } + + #[test] + fn test_duplicate_pending_claim_request_after_force_close_replay() { + let claim_height = 21; + let locktime = 42; + let mut nondust_htlcs = Vec::new(); + for i in 0..2 { + let preimage = PaymentPreimage([i + 1; 32]); + let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array()); + nondust_htlcs.push(HTLCOutputInCommitment { + offered: true, + amount_msat: 10000, + cltv_expiry: locktime, + payment_hash: hash, + transaction_output_index: Some(i as u32), + }); + } + + let mut tx_handler = new_test_tx_handler( + ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), + nondust_htlcs, + ); + let requests = build_offered_holder_htlc_requests(&tx_handler); + let destination_script = ScriptBuf::new(); + let broadcaster = TestBroadcaster::new(Network::Testnet); + let fee_estimator = TestFeeEstimator::new(253); + let fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator); + let logger = TestLogger::new(); + + // Simulate the force-close path registering the two holder HTLC claims as + // a single delayed package. + tx_handler.update_claims_view_from_requests( + requests.clone(), + claim_height, + claim_height, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + assert_eq!( + tx_handler.locktimed_packages.get(&locktime).map(|packages| packages.len()), + Some(1), + ); + + // Replaying the same per-HTLC claim requests must match by outpoint + // coverage, otherwise each single-outpoint request would be added again. 
+ tx_handler.update_claims_view_from_requests( + requests, + claim_height, + claim_height, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + assert_eq!( + tx_handler.locktimed_packages.get(&locktime).map(|packages| packages.len()), + Some(1), + ); + + // At locktime, the delayed package should still yield one bump event + // covering both HTLCs. + tx_handler.update_claims_view_from_requests( + Vec::new(), + locktime, + locktime, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + + let pending_events = tx_handler.get_and_clear_pending_claim_events(); + assert_eq!(pending_events.len(), 1); + assert_eq!(tx_handler.pending_claim_requests.len(), 1); + assert_eq!(tx_handler.claimable_outpoints.len(), 2); + match &pending_events[0].1 { + super::ClaimEvent::BumpHTLC { htlcs, tx_lock_time, .. } => { + assert_eq!(htlcs.len(), 2); + assert_eq!(tx_lock_time.to_consensus_u32(), locktime); + }, + _ => panic!("expected a single HTLC bump event"), + } + } } diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 4fd40df3a45..436bb01c907 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -2455,6 +2455,119 @@ fn test_restored_packages_retry() { do_test_restored_packages_retry(true); } +fn do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(p2a_anchor: bool) { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut anchors_config = test_default_channel_config(); + anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + anchors_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let coinbase_tx = provide_anchor_reserves(&nodes); + let (_, _, chan_id, funding_tx) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 50_000_000); + + // Seed two unresolved outbound HTLCs that will be aggregated into one + // delayed holder-commitment package after force close. + route_payment(&nodes[0], &[&nodes[1]], 10_000_000); + route_payment(&nodes[0], &[&nodes[1]], 11_000_000); + + // Add a third incoming HTLC which will later be claimed by preimage after + // the commitment transaction confirms, reproducing the replay path. + let (claim_route, claim_hash, claim_preimage, claim_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 12_000_000); + nodes[1] + .node + .send_payment_with_route( + claim_route, + claim_hash, + RecipientOnionFields::secret_only(claim_secret, 12_000_000), + PaymentId(claim_hash.0), + ) + .unwrap(); + check_added_monitors(&nodes[1], 1); + let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + nodes[0] + .node + .handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); + expect_and_process_pending_htlcs(&nodes[0], false); + expect_payment_claimable!(nodes[0], claim_hash, claim_secret, 12_000_000); + + // Force-close node 0 so its holder commitment hits chain and its HTLC + // claims are fed into OnchainTxHandler as delayed requests. 
+ let message = "Channel force-closed".to_owned(); + nodes[0] + .node + .force_close_broadcasting_latest_txn( + &chan_id, + &nodes[1].node.get_our_node_id(), + message.clone(), + ) + .unwrap(); + check_added_monitors(&nodes[0], 1); + check_closed_broadcast(&nodes[0], 1, true); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1_000_000); + handle_bump_close_event(&nodes[0]); + + let (commitment_tx, anchor_tx) = { + let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); + assert_eq!(txn.len(), if p2a_anchor { 2 } else { 1 }); + let anchor_tx = p2a_anchor.then(|| txn.pop().unwrap()); + let commitment_tx = txn.pop().unwrap(); + check_spends!(commitment_tx, funding_tx); + if p2a_anchor { + check_spends!(anchor_tx.as_ref().unwrap(), commitment_tx, coinbase_tx); + } + (commitment_tx, anchor_tx) + }; + + let _ = mine_transaction(&nodes[0], &commitment_tx); + if p2a_anchor { + let _ = mine_transaction(&nodes[0], anchor_tx.as_ref().unwrap()); + } + + // Claim the incoming HTLC after the commitment is confirmed. This + // regenerates a single-outpoint claim request alongside the existing + // delayed package covering the two earlier HTLCs. + nodes[0].node.claim_funds(claim_preimage); + check_added_monitors(&nodes[0], 1); + expect_payment_claimed!(nodes[0], claim_hash, 12_000_000); + + // Once all holder HTLCs reach their timelock, we should see the original two-HTLC + // delayed package plus the replayed single-HTLC claim, not duplicates of + // the delayed package's outpoints. + connect_blocks(&nodes[0], TEST_FINAL_CLTV + 1); + + let mut htlc_event_sizes = nodes[0] + .chain_monitor + .chain_monitor + .get_and_clear_pending_events() + .into_iter() + .filter_map(|event| { + if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { + htlc_descriptors, .. + }) = event + { + Some(htlc_descriptors.len()) + } else { + None + } + }) + .collect::>(); + htlc_event_sizes.sort_unstable(); + assert_eq!(htlc_event_sizes, vec![1, 2]); +} + +#[test] +fn test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay() { + do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(false); + do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(true); +} + fn do_test_monitor_rebroadcast_pending_claims(keyed_anchors: bool, p2a_anchor: bool) { // Test that we will retry broadcasting pending claims for a force-closed channel on every // `ChainMonitor::rebroadcast_pending_claims` call. From 8a41957be9be11751b5c65d946a3c59148a05a07 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 14:23:24 +0200 Subject: [PATCH 12/30] lightning: ignore claims for pending spent outpoints When a transaction spends one outpoint from a delayed package, the split outpoint is tracked as a ContentiousOutpoint until the spend reaches anti-reorg finality. Reject replayed claim requests for those pending-spent outpoints so they are not added back before the spend reaches anti-reorg finality or reorgs out. Add an OnchainTxHandler regression that replays a holder claim during that pending-spent window and verifies reorg resurrection still works. 
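As a rough standalone model of the rule this commit adds (the types and names below are illustrative only, not the LDK API): a claim replayed while a confirmed spend of its outpoint is still inside the anti-reorg window is dropped, and the outpoint only becomes claimable again if that spend reorgs out.

    // Simplified sketch of the pending-spent window; not LDK code.
    use std::collections::HashSet;

    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct Outpoint {
        txid: [u8; 32],
        vout: u32,
    }

    #[derive(Default)]
    struct PendingSpends {
        // Outpoints spent by a confirmed transaction that has not yet reached
        // anti-reorg finality (the "ContentiousOutpoint" window above).
        awaiting_threshold_conf: HashSet<Outpoint>,
    }

    impl PendingSpends {
        fn filter_replayed_claims(&self, requests: Vec<Outpoint>) -> Vec<Outpoint> {
            // Drop any replayed claim whose outpoint is already pending-spent.
            requests
                .into_iter()
                .filter(|op| !self.awaiting_threshold_conf.contains(op))
                .collect()
        }

        fn on_spend_reorged_out(&mut self, op: Outpoint) {
            // Once the spend disconnects, the outpoint may be claimed again.
            self.awaiting_threshold_conf.remove(&op);
        }
    }

    fn main() {
        let op = Outpoint { txid: [0; 32], vout: 1 };
        let mut state = PendingSpends::default();
        state.awaiting_threshold_conf.insert(op);

        // Replaying the claim during the pending-spent window is a no-op.
        assert!(state.filter_replayed_claims(vec![op]).is_empty());

        // After a reorg removes the spend, the replayed claim is accepted again.
        state.on_spend_reorged_out(op);
        assert_eq!(state.filter_replayed_claims(vec![op]), vec![op]);
    }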
--- lightning/src/chain/onchaintx.rs | 140 ++++++++++++++++++++++++++++++- 1 file changed, 137 insertions(+), 3 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 2cdad08de1e..1a5ec61b0c7 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -576,6 +576,16 @@ impl OnchainTxHandler { self.pending_claim_requests.len() != 0 } + fn is_outpoint_spend_waiting_threshold_conf(&self, outpoint: &BitcoinOutPoint) -> bool { + self.onchain_events_awaiting_threshold_conf.iter().any(|entry| { + if let OnchainEvent::ContentiousOutpoint { ref package } = entry.event { + package.contains_outpoint(outpoint) + } else { + false + } + }) + } + /// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty /// onchain) lays on the assumption of claim transactions getting confirmed before timelock /// expiration (CSV or CLTV following cases). In case of high-fee spikes, claim tx may get stuck @@ -802,7 +812,15 @@ impl OnchainTxHandler { // First drop any duplicate claims. requests.retain(|req| { let outpoint = req.outpoint(); - if self.claimable_outpoints.get(outpoint).is_some() { + if self.is_outpoint_spend_waiting_threshold_conf(outpoint) { + // This is a package-layer guard. ChannelMonitor filters regenerated + // HTLC claims using HTLC resolution state, while this keeps outpoints + // split from an existing package from being re-added during the reorg + // window. + log_info!(logger, "Ignoring claim for outpoint {}:{}, it is already spent by a transaction awaiting anti-reorg finality", + outpoint.txid, outpoint.vout); + false + } else if self.claimable_outpoints.get(outpoint).is_some() { log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", outpoint.txid, outpoint.vout); false @@ -1276,11 +1294,14 @@ impl OnchainTxHandler { #[cfg(test)] mod tests { - use bitcoin::hash_types::Txid; + use bitcoin::hash_types::{BlockHash, Txid}; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; + use bitcoin::locktime::absolute::LockTime; + use bitcoin::transaction::{OutPoint as BitcoinOutPoint, Version}; use bitcoin::Network; - use bitcoin::{key::Secp256k1, secp256k1::PublicKey, secp256k1::SecretKey, ScriptBuf}; + use bitcoin::{key::Secp256k1, secp256k1::PublicKey, secp256k1::SecretKey}; + use bitcoin::{Amount, ScriptBuf, Transaction, TxIn, TxOut}; use types::features::ChannelTypeFeatures; use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator}; @@ -1404,6 +1425,18 @@ mod tests { requests } + fn locked_outpoints( + tx_handler: &OnchainTxHandler, locktime: u32, + ) -> Vec { + tx_handler + .locktimed_packages + .get(&locktime) + .into_iter() + .flat_map(|packages| packages.iter()) + .flat_map(|package| package.outpoints().into_iter().map(|outpoint| *outpoint)) + .collect() + } + // Test that all claims with locktime equal to or less than the current height are broadcast // immediately while claims with locktime greater than the current height are only broadcast // once the locktime is reached. 
@@ -1569,4 +1602,105 @@ mod tests { _ => panic!("expected a single HTLC bump event"), } } + + #[test] + fn test_replayed_claim_ignored_for_pending_spent_outpoint() { + let claim_height = 21; + let spend_height = 22; + let locktime = 42; + let mut nondust_htlcs = Vec::new(); + for i in 0..2 { + let preimage = PaymentPreimage([i + 1; 32]); + let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array()); + nondust_htlcs.push(HTLCOutputInCommitment { + offered: true, + amount_msat: 10000, + cltv_expiry: locktime, + payment_hash: hash, + transaction_output_index: Some(i as u32), + }); + } + + let mut tx_handler = new_test_tx_handler( + ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), + nondust_htlcs, + ); + let requests = build_offered_holder_htlc_requests(&tx_handler); + let spent_outpoint = *requests[0].outpoint(); + let still_delayed_outpoint = *requests[1].outpoint(); + let destination_script = ScriptBuf::new(); + let broadcaster = TestBroadcaster::new(Network::Testnet); + let fee_estimator = TestFeeEstimator::new(253); + let fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator); + let logger = TestLogger::new(); + + // Register both holder HTLC claims as one delayed package before any + // individual outpoint spends are observed. + tx_handler.update_claims_view_from_requests( + requests.clone(), + claim_height, + claim_height, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + assert_eq!(locked_outpoints(&tx_handler, locktime).len(), 2); + + // Spend one outpoint before the package reaches its timelock. The handler + // should split it into a ContentiousOutpoint until the spend reaches + // anti-reorg finality. + let spend_tx = Transaction { + version: Version::TWO, + lock_time: LockTime::ZERO, + input: vec![TxIn { previous_output: spent_outpoint, ..Default::default() }], + output: vec![TxOut { value: Amount::from_sat(1000), script_pubkey: ScriptBuf::new() }], + }; + tx_handler.update_claims_view_from_matched_txn( + &[&spend_tx], + spend_height, + BlockHash::all_zeros(), + spend_height, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + let locked = locked_outpoints(&tx_handler, locktime); + assert_eq!(locked, vec![still_delayed_outpoint]); + + // Replaying both original claim requests during that window must not + // re-add the already-spent outpoint to the delayed package. + tx_handler.update_claims_view_from_requests( + requests, + spend_height, + spend_height, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + let locked = locked_outpoints(&tx_handler, locktime); + assert_eq!(locked, vec![still_delayed_outpoint]); + assert!(tx_handler.pending_claim_requests.is_empty()); + assert!(tx_handler.claimable_outpoints.is_empty()); + + // If the spend reorgs out, the contentious outpoint is resurrected into + // the delayed package. 
+ tx_handler.blocks_disconnected( + spend_height - 1, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + let locked = locked_outpoints(&tx_handler, locktime); + assert_eq!(locked.len(), 2); + assert!(locked.contains(&spent_outpoint)); + assert!(locked.contains(&still_delayed_outpoint)); + } } From 8e18e7b91d5744006d7d6670fdaa0509953fb0de Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 6 May 2026 17:57:23 +0200 Subject: [PATCH 13/30] f: fold timelocked outpoint claim check Classify duplicate outpoint state in one helper. Preserve existing filter ordering and timelock logging. --- lightning/src/chain/onchaintx.rs | 73 +++++++++++++++++++++----------- 1 file changed, 48 insertions(+), 25 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 1a5ec61b0c7..e4d23d77e88 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -91,6 +91,12 @@ enum OnchainEvent { ContentiousOutpoint { package: PackageTemplate }, } +enum OutpointClaimState { + WaitingThresholdConf, + ClaimingRequestRegistered, + WaitingTimelock(u32), +} + impl Writeable for OnchainEventEntry { fn write(&self, writer: &mut W) -> Result<(), io::Error> { write_tlv_fields!(writer, { @@ -576,14 +582,30 @@ impl OnchainTxHandler { self.pending_claim_requests.len() != 0 } - fn is_outpoint_spend_waiting_threshold_conf(&self, outpoint: &BitcoinOutPoint) -> bool { - self.onchain_events_awaiting_threshold_conf.iter().any(|entry| { + fn outpoint_claim_state( + &self, outpoint: &BitcoinOutPoint, cur_height: u32, + ) -> Option { + if self.onchain_events_awaiting_threshold_conf.iter().any(|entry| { if let OnchainEvent::ContentiousOutpoint { ref package } = entry.event { package.contains_outpoint(outpoint) } else { false } - }) + }) { + return Some(OutpointClaimState::WaitingThresholdConf); + } + + if self.claimable_outpoints.get(outpoint).is_some() { + return Some(OutpointClaimState::ClaimingRequestRegistered); + } + + self.locktimed_packages + .values() + .flat_map(|packages| packages.iter()) + .find(|locked_package| locked_package.contains_outpoint(outpoint)) + .map(|package| { + OutpointClaimState::WaitingTimelock(package.package_locktime(cur_height)) + }) } /// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty @@ -812,29 +834,30 @@ impl OnchainTxHandler { // First drop any duplicate claims. requests.retain(|req| { let outpoint = req.outpoint(); - if self.is_outpoint_spend_waiting_threshold_conf(outpoint) { - // This is a package-layer guard. ChannelMonitor filters regenerated - // HTLC claims using HTLC resolution state, while this keeps outpoints - // split from an existing package from being re-added during the reorg - // window. 
- log_info!(logger, "Ignoring claim for outpoint {}:{}, it is already spent by a transaction awaiting anti-reorg finality", - outpoint.txid, outpoint.vout); - false - } else if self.claimable_outpoints.get(outpoint).is_some() { - log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", - outpoint.txid, outpoint.vout); - false - } else { - let timelocked_covering_package = self.locktimed_packages.values() - .flat_map(|packages| packages.iter()) - .find(|locked_package| locked_package.contains_outpoint(outpoint)); - if let Some(package) = timelocked_covering_package { - log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.", - outpoint.txid, outpoint.vout, package.package_locktime(cur_height)); - false - } else { - true + if let Some(claim_state) = self.outpoint_claim_state(outpoint, cur_height) { + match claim_state { + OutpointClaimState::WaitingThresholdConf => { + // This is a package-layer guard. ChannelMonitor filters regenerated + // HTLC claims using HTLC resolution state, while this keeps outpoints + // split from an existing package from being re-added during the reorg + // window. + log_info!(logger, "Ignoring claim for outpoint {}:{}, it is already spent by a transaction awaiting anti-reorg finality", + outpoint.txid, outpoint.vout); + false + }, + OutpointClaimState::ClaimingRequestRegistered => { + log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", + outpoint.txid, outpoint.vout); + false + }, + OutpointClaimState::WaitingTimelock(locktime) => { + log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.", + outpoint.txid, outpoint.vout, locktime); + false + }, } + } else { + true } }); let mut requests = requests.into_iter() From 8f1e37bdc3d414f6098eb1a612970a6acaa54016 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 14:29:33 +0200 Subject: [PATCH 14/30] lightning: skip resolved HTLC claim replays Filter regenerated HTLC claim requests once ChannelMonitor has persisted anti-reorg finality for the commitment HTLC output spend. This keeps replayed preimage updates from recreating claims after OnchainTxHandler has cleaned up its active retry state, relying on the monitor's persisted HTLC resolution state. 
--- lightning/src/chain/channelmonitor.rs | 25 +++++++- lightning/src/ln/monitor_tests.rs | 84 ++++++++++++++++++++++++++- 2 files changed, 106 insertions(+), 3 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 09aa863b51c..e0c56b4d537 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -4985,7 +4985,10 @@ impl ChannelMonitorImpl { .iter() .filter_map(|(htlc, _)| { if let Some(transaction_output_index) = htlc.transaction_output_index { - if htlc.offered && htlc.payment_hash == matching_payment_hash { + if htlc.offered + && htlc.payment_hash == matching_payment_hash + && !self.is_htlc_output_resolved_on_chain(htlc) + { let htlc_data = PackageSolvingData::CounterpartyOfferedHTLCOutput( CounterpartyOfferedHTLCOutput::build( per_commitment_point, @@ -5011,6 +5014,20 @@ impl ChannelMonitorImpl { .collect() } + fn is_htlc_output_resolved_on_chain(&self, htlc: &HTLCOutputInCommitment) -> bool { + if let Some(transaction_output_index) = htlc.transaction_output_index { + // Only suppress claims once the commitment HTLC output spend has + // reached anti-reorg finality. Any output created by that spend may + // still be CSV-delayed, but the original HTLC outpoint should not be + // re-claimed. + self.htlcs_resolved_on_chain.iter().any(|resolved_htlc| { + resolved_htlc.commitment_tx_output_idx == Some(transaction_output_index) + }) + } else { + false + } + } + /// Returns the HTLC claim requests and the counterparty output info. fn get_counterparty_output_claim_info( &self, funding_spent: &FundingScope, commitment_number: u64, commitment_txid: Txid, @@ -5058,6 +5075,9 @@ impl ChannelMonitorImpl { // per_commitment_data is corrupt or our commitment signing key leaked! 
return (claimable_outpoints, to_counterparty_output_info); } + if self.is_htlc_output_resolved_on_chain(htlc) { + continue; + } let preimage = if htlc.offered { if let Some((p, _)) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) @@ -5159,6 +5179,9 @@ impl ChannelMonitorImpl { let mut htlcs = Vec::with_capacity(holder_tx.nondust_htlcs().len()); debug_assert_eq!(holder_tx.nondust_htlcs().len(), holder_tx.counterparty_htlc_sigs.len()); for (htlc, counterparty_sig) in holder_tx.nondust_htlcs().iter().zip(holder_tx.counterparty_htlc_sigs.iter()) { + if self.is_htlc_output_resolved_on_chain(htlc) { + continue; + } assert!(htlc.transaction_output_index.is_some(), "Expected transaction output index for non-dust HTLC"); let preimage = if htlc.offered { diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 436bb01c907..111d1fbfd81 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -2458,12 +2458,15 @@ fn test_restored_packages_retry() { fn do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(p2a_anchor: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_deserialized; let mut anchors_config = test_default_channel_config(); anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; anchors_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config)]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let coinbase_tx = provide_anchor_reserves(&nodes); let (_, _, chan_id, funding_tx) = @@ -2542,11 +2545,14 @@ fn do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(p2a_anc // the delayed package's outpoints. connect_blocks(&nodes[0], TEST_FINAL_CLTV + 1); - let mut htlc_event_sizes = nodes[0] + let events = nodes[0] .chain_monitor .chain_monitor .get_and_clear_pending_events() .into_iter() + .collect::>(); + let mut htlc_event_sizes = events + .iter() .filter_map(|event| { if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { htlc_descriptors, .. @@ -2560,6 +2566,80 @@ fn do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(p2a_anc .collect::>(); htlc_event_sizes.sort_unstable(); assert_eq!(htlc_event_sizes, vec![1, 2]); + + // Drive only the replayed single-HTLC event on-chain so we can replay the + // preimage once the spend is anti-reorg final, then again after reload. + for event in events { + if let Event::BumpTransaction(event) = event { + let is_single_htlc = if let BumpTransactionEvent::HTLCResolution { + ref htlc_descriptors, + .. + } = event + { + htlc_descriptors.len() == 1 + } else { + false + }; + if is_single_htlc { + nodes[0].bump_tx_handler.handle_event(&event); + break; + } + } + } + let mut htlc_txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); + assert_eq!(htlc_txn.len(), 1); + let htlc_tx = htlc_txn.pop().unwrap(); + mine_transaction(&nodes[0], &htlc_tx); + connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); + assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + + // The spend has passed anti-reorg finality, but its CSV-delayed output is + // not yet spendable. 
Replaying the preimage in this window must not create + // a new conflicting claim for the already-spent commitment HTLC output. + get_monitor!(nodes[0], chan_id).provide_payment_preimage_unsafe_legacy( + &claim_hash, + &claim_preimage, + &node_cfgs[0].tx_broadcaster, + &LowerBoundedFeeEstimator::new(node_cfgs[0].fee_estimator), + &nodes[0].logger, + ); + assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + let balances = nodes[0] + .chain_monitor + .chain_monitor + .get_monitor(chan_id) + .unwrap() + .get_claimable_balances(); + assert!(balances.iter().any(|balance| matches!( + balance, + Balance::ClaimableAwaitingConfirmations { + amount_satoshis: 12_000, + source: BalanceSource::Htlc, + .. + } + ))); + + connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - ANTI_REORG_DELAY); + let _ = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); + + // Reload before replaying the preimage so the regression covers persisted + // resolution state, not only in-memory filtering. + let serialized_channel_manager = nodes[0].node.encode(); + let serialized_monitor = get_monitor!(nodes[0], chan_id).encode(); + reload_node!( + nodes[0], &serialized_channel_manager, &[&serialized_monitor], persister, + new_chain_monitor, node_deserialized + ); + + // Replaying the preimage update must not regenerate a claim for the HTLC + // whose commitment output has anti-reorg persisted resolution state. + get_monitor!(nodes[0], chan_id).provide_payment_preimage_unsafe_legacy( + &claim_hash, &claim_preimage, &node_cfgs[0].tx_broadcaster, + &LowerBoundedFeeEstimator::new(node_cfgs[0].fee_estimator), &nodes[0].logger, + ); + assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + expect_payment_claimed!(nodes[0], claim_hash, 12_000_000); + check_added_monitors(&nodes[0], 1); } #[test] From c30eafe2044f317e787eeee3ef2789c4cdf68319 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 6 May 2026 17:20:10 +0200 Subject: [PATCH 15/30] f: log resolved HTLC preimage losses Log when a replayed preimage claim is skipped because the HTLC output reached anti-reorg finality without that preimage. --- lightning/src/chain/channelmonitor.rs | 78 ++++++++++++++++++++++----- 1 file changed, 64 insertions(+), 14 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index e0c56b4d537..7ee9df1c30f 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -3813,7 +3813,7 @@ impl ChannelMonitorImpl { // First check if a counterparty commitment transaction has been broadcasted: macro_rules! 
claim_htlcs { ($commitment_number: expr, $txid: expr, $htlcs: expr) => { - let htlc_claim_reqs = self.get_counterparty_output_claims_for_preimage(*payment_preimage, funding_spent, $commitment_number, $txid, $htlcs, confirmed_spend_height); + let htlc_claim_reqs = self.get_counterparty_output_claims_for_preimage(*payment_preimage, funding_spent, $commitment_number, $txid, $htlcs, confirmed_spend_height, logger); let conf_target = self.closure_conf_target(); self.onchain_tx_handler.update_claims_view_from_requests( htlc_claim_reqs, self.best_block.height, self.best_block.height, broadcaster, @@ -3862,6 +3862,9 @@ impl ChannelMonitorImpl { None }; if let Some(holder_commitment_tx) = holder_commitment_tx { + self.log_holder_preimage_claim_after_htlc_resolved_on_chain( + logger, holder_commitment_tx, *payment_preimage, + ); // Assume that the broadcasted commitment transaction confirmed in the current best // block. Even if not, its a reasonable metric for the bump criteria on the HTLC // transactions. @@ -4965,11 +4968,11 @@ impl ChannelMonitorImpl { } } - fn get_counterparty_output_claims_for_preimage( + fn get_counterparty_output_claims_for_preimage( &self, preimage: PaymentPreimage, funding_spent: &FundingScope, commitment_number: u64, commitment_txid: Txid, per_commitment_option: Option<&Vec<(HTLCOutputInCommitment, Option>)>>, - confirmation_height: Option, + confirmation_height: Option, logger: &L, ) -> Vec { let per_commitment_claimable_data = match per_commitment_option { Some(outputs) => outputs, @@ -4985,10 +4988,17 @@ impl ChannelMonitorImpl { .iter() .filter_map(|(htlc, _)| { if let Some(transaction_output_index) = htlc.transaction_output_index { - if htlc.offered - && htlc.payment_hash == matching_payment_hash - && !self.is_htlc_output_resolved_on_chain(htlc) - { + if htlc.offered && htlc.payment_hash == matching_payment_hash { + if let Some(resolved_htlc) = self.htlc_output_resolution_on_chain(htlc) { + self.log_preimage_claim_after_htlc_resolved_on_chain( + logger, + commitment_txid, + htlc, + preimage, + resolved_htlc, + ); + return None; + } let htlc_data = PackageSolvingData::CounterpartyOfferedHTLCOutput( CounterpartyOfferedHTLCOutput::build( per_commitment_point, @@ -5014,17 +5024,57 @@ impl ChannelMonitorImpl { .collect() } - fn is_htlc_output_resolved_on_chain(&self, htlc: &HTLCOutputInCommitment) -> bool { - if let Some(transaction_output_index) = htlc.transaction_output_index { + fn htlc_output_resolution_on_chain( + &self, htlc: &HTLCOutputInCommitment, + ) -> Option<&IrrevocablyResolvedHTLC> { + htlc.transaction_output_index.and_then(|transaction_output_index| { // Only suppress claims once the commitment HTLC output spend has // reached anti-reorg finality. Any output created by that spend may // still be CSV-delayed, but the original HTLC outpoint should not be // re-claimed. 
- self.htlcs_resolved_on_chain.iter().any(|resolved_htlc| { + self.htlcs_resolved_on_chain.iter().find(|resolved_htlc| { resolved_htlc.commitment_tx_output_idx == Some(transaction_output_index) }) - } else { - false + }) + } + + fn log_preimage_claim_after_htlc_resolved_on_chain( + &self, logger: &L, commitment_txid: Txid, htlc: &HTLCOutputInCommitment, + preimage: PaymentPreimage, resolved_htlc: &IrrevocablyResolvedHTLC, + ) { + if resolved_htlc.payment_preimage == Some(preimage) { + return; + } + if let Some(transaction_output_index) = htlc.transaction_output_index { + let logger = WithContext::from(logger, None, None, Some(htlc.payment_hash)); + if let Some(resolving_txid) = resolved_htlc.resolving_txid.as_ref() { + log_error!(logger, "WE HAVE LIKELY LOST FUNDS: HTLC output {}:{} was irrevocably resolved on-chain by transaction {} without the payment preimage we now know; not replaying the claim", + commitment_txid, transaction_output_index, resolving_txid); + } else { + log_error!(logger, "WE HAVE LIKELY LOST FUNDS: HTLC output {}:{} was irrevocably resolved on-chain by an unknown transaction without the payment preimage we now know; not replaying the claim", + commitment_txid, transaction_output_index); + } + } + } + + fn log_holder_preimage_claim_after_htlc_resolved_on_chain( + &self, logger: &L, holder_tx: &HolderCommitmentTransaction, preimage: PaymentPreimage, + ) { + let matching_payment_hash = PaymentHash::from(preimage); + let tx = holder_tx.trust(); + for htlc in holder_tx.nondust_htlcs() { + if htlc.offered || htlc.payment_hash != matching_payment_hash { + continue; + } + if let Some(resolved_htlc) = self.htlc_output_resolution_on_chain(htlc) { + self.log_preimage_claim_after_htlc_resolved_on_chain( + logger, + tx.txid(), + htlc, + preimage, + resolved_htlc, + ); + } } } @@ -5075,7 +5125,7 @@ impl ChannelMonitorImpl { // per_commitment_data is corrupt or our commitment signing key leaked! return (claimable_outpoints, to_counterparty_output_info); } - if self.is_htlc_output_resolved_on_chain(htlc) { + if self.htlc_output_resolution_on_chain(htlc).is_some() { continue; } let preimage = if htlc.offered { @@ -5179,7 +5229,7 @@ impl ChannelMonitorImpl { let mut htlcs = Vec::with_capacity(holder_tx.nondust_htlcs().len()); debug_assert_eq!(holder_tx.nondust_htlcs().len(), holder_tx.counterparty_htlc_sigs.len()); for (htlc, counterparty_sig) in holder_tx.nondust_htlcs().iter().zip(holder_tx.counterparty_htlc_sigs.iter()) { - if self.is_htlc_output_resolved_on_chain(htlc) { + if self.htlc_output_resolution_on_chain(htlc).is_some() { continue; } assert!(htlc.transaction_output_index.is_some(), "Expected transaction output index for non-dust HTLC"); From 119aaa2a6085916ed221517871a0541f208d8160 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 14:33:27 +0200 Subject: [PATCH 16/30] lightning: canonicalize htlc claim ids Hash HTLC claim outpoints in canonical order so the same logical HTLC set produces the same ClaimId regardless of descriptor order. Add a unit test covering reversed descriptor order. 
--- lightning/src/chain/mod.rs | 64 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 3 deletions(-) diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index d72d58b3149..5ff96e46953 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -563,10 +563,18 @@ pub struct ClaimId(pub [u8; 32]); impl ClaimId { pub(crate) fn from_htlcs(htlcs: &[HTLCDescriptor]) -> ClaimId { + let mut htlc_outpoints = htlcs + .iter() + .map(|htlc| { + (htlc.commitment_txid.to_byte_array(), htlc.htlc.transaction_output_index.unwrap()) + }) + .collect::>(); + htlc_outpoints.sort_unstable(); + let mut engine = Sha256::engine(); - for htlc in htlcs { - engine.input(&htlc.commitment_txid.to_byte_array()); - engine.input(&htlc.htlc.transaction_output_index.unwrap().to_be_bytes()); + for (commitment_txid, transaction_output_index) in htlc_outpoints { + engine.input(&commitment_txid); + engine.input(&transaction_output_index.to_be_bytes()); } ClaimId(Sha256::from_engine(engine).to_byte_array()) } @@ -581,8 +589,45 @@ impl ClaimId { #[cfg(test)] mod tests { use super::*; + use crate::ln::chan_utils::{ + ChannelTransactionParameters, HTLCOutputInCommitment, HolderCommitmentTransaction, + }; + use crate::sign::ChannelDerivationParameters; + use crate::types::payment::{PaymentHash, PaymentPreimage}; use bitcoin::hashes::Hash; + fn dummy_htlc_descriptor( + commitment_txid: Txid, transaction_output_index: u32, + ) -> HTLCDescriptor { + let channel_parameters = ChannelTransactionParameters::test_dummy(100_000); + let htlc = HTLCOutputInCommitment { + offered: true, + amount_msat: 1000, + cltv_expiry: 100, + payment_hash: PaymentHash::from(PaymentPreimage([1; 32])), + transaction_output_index: Some(transaction_output_index), + }; + let funding_outpoint = channel_parameters.funding_outpoint.unwrap(); + let commitment_tx = + HolderCommitmentTransaction::dummy(100_000, funding_outpoint, vec![htlc.clone()]); + let trusted_tx = commitment_tx.trust(); + + HTLCDescriptor { + channel_derivation_parameters: ChannelDerivationParameters { + value_satoshis: channel_parameters.channel_value_satoshis, + keys_id: [1; 32], + transaction_parameters: channel_parameters, + }, + commitment_txid, + per_commitment_number: trusted_tx.commitment_number(), + per_commitment_point: trusted_tx.per_commitment_point(), + feerate_per_kw: trusted_tx.negotiated_feerate_per_kw(), + htlc, + preimage: None, + counterparty_sig: commitment_tx.counterparty_htlc_sigs[0], + } + } + #[test] fn test_best_block() { let hash1 = BlockHash::from_slice(&[1; 32]).unwrap(); @@ -618,4 +663,17 @@ mod tests { let chain_c = BlockLocator::new(hash_other, 200); assert_eq!(chain_a.find_common_ancestor(&chain_c), None); } + + #[test] + fn test_htlc_claim_id_is_descriptor_order_independent() { + // Use opposite txid and vout ordering so the assertion would fail if + // ClaimId still hashed descriptors in caller-provided order. + let first = dummy_htlc_descriptor(Txid::from_slice(&[1; 32]).unwrap(), 2); + let second = dummy_htlc_descriptor(Txid::from_slice(&[2; 32]).unwrap(), 1); + + assert_eq!( + ClaimId::from_htlcs(&[first.clone(), second.clone()]), + ClaimId::from_htlcs(&[second, first]) + ); + } } From f919de5f6ccf0bd401c071e453dc366b336419db Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 10:27:34 +0200 Subject: [PATCH 17/30] [dropme] testcases and docs Collect local notes and fuzz inputs used while investigating the force-close chanmon_consistency work. 
These files are marked dropme so they can aid review and reduction without becoming part of the final patch set. --- fc-crashes.md | 162 +++++++++ fuzz/.gitignore | 1 + fuzz/FC-INFO.md | 107 ++++++ fuzz/ONCHAINTX-BUGS.md | 327 ++++++++++++++++++ fuzz/OPEN-ISSUES.md | 39 +++ fuzz/test_cases/base32/smoke | 1 - fuzz/test_cases/bech32_parse/smoke | 1 - ...h-02830a6ff7757f3570924b0c0fd9118a7cdd9770 | Bin 0 -> 24 bytes ...h-0473b0e767d9a98de62538ce5afcbbc2e6ec5af2 | 1 + ...h-05e175d40f60b823f730fa874d98dc10dd2bb6ad | 1 + ...h-07bdc4e56ee67bd2ffa409f76529199d748ab2d8 | 1 + ...h-096cc3008264dccaefb945f5a4b7a2d3c9f8e90c | 1 + ...h-09a17e06913dea74dba796940cec86cb4e2dd597 | 1 + ...h-09f5a41270b07f70a031884cbdfd081e8600923e | Bin 0 -> 22 bytes ...h-0b87d8b430697fe9d1781a38f41a68ebcf7b18c1 | 1 + ...h-0c3334736f5c55e44088d6140580354827026732 | 1 + ...h-0dcddb7aa2b729fa8de829e5ea82c38b5918acfa | 1 + ...h-0f0ca42c8b4c815495919663652db18483d5e846 | 1 + ...h-14a022e3e4d88420a08bc4c2d67193f74e4f8bdd | 1 + ...h-15b45517356c182051c2b334e09c00f4f9368e94 | 1 + ...h-18062bd37528e06c4921e7ef7df2b2c3e676823b | 1 + ...h-22125d8a200205d52723ec232f5aab710856f4b0 | Bin 0 -> 22 bytes ...h-228ea00412a2fab1e866fc6df32ffd00bbfe81ad | Bin 0 -> 24 bytes ...h-242de208110143401fcf4e1ebaa7d9d38fb93611 | 1 + ...h-24f1373b1cf51f95af854d6d8730336b77728007 | 1 + ...h-2923c14608fb259c21862cd71ffeb6ac74b0ba32 | 1 + ...h-2a0852bec1d75334538dacec26831db6995b6e33 | 1 + ...h-2d93541536e19c030d95d236e6be545352d98b80 | 1 + ...h-2e002fcfdc76c5981f5f93c0f842b548fb56c7a7 | 1 + ...h-2fad50c7fd20b250f0349887445af198124900df | Bin 0 -> 23 bytes ...h-2fcd63b2ed709dfcd9c6a08dc673d1f896b6cdad | Bin 0 -> 26 bytes ...h-304db9c93d320420bdef656699ad1f49c37feaf7 | 1 + ...h-315119ea09b9febec156d212fe57020def4b5af4 | Bin 0 -> 25 bytes ...h-32a013d8bd38f3ba39d4a214ba0780edd41ccb85 | Bin 0 -> 23 bytes ...h-33c08a8f15f1c842df5da4fc92228d00606573f9 | 1 + ...h-33e77c2f720493e306bbfea79f151388ca7a04ea | Bin 0 -> 24 bytes ...h-37a18356d608c97415c0a1bef6a0f13fe04c8b97 | Bin 0 -> 22 bytes ...h-380ee6f8c1030828f4d80582154b0418fca58c90 | Bin 0 -> 20 bytes ...h-38192a6cb0500969f301c7a6742949ecd213bfae | 1 + ...h-387c18b4c7235aa1960400de5b0d5798202ec3b1 | 1 + ...h-3bb94b7b4397397caa5eb0e9ba6abb9a18028270 | Bin 0 -> 23 bytes ...h-3be4d9d7a75c8459b3ec349474c7fc206b00fe9c | Bin 0 -> 26 bytes ...h-3cda5b606ce05f4207207e8fd1480fe530a51b13 | 1 + ...h-3f8a6e5b806235b795ebea3d6998943a3ab6ff9d | Bin 0 -> 22 bytes ...h-41ffe016736ddfef0eb1d877b35a0c85bd5cfd5f | 1 + ...h-45240f379a3a24948c4b091fd658a9f0ef4d4963 | 1 + ...h-45872f91e28e4ed1e8814084bbf5ada6fe4963f0 | 1 + ...h-49e1240588c1b4507b24c4f07dae75faef02a639 | 1 + ...h-4da789d875488d8f244bccefaff4295ae801c745 | Bin 0 -> 21 bytes ...h-4e4b47b5a0f4c4689868a3003ae7d62e5ac78484 | 1 + ...h-53d6404dc8dee21adf112f3c909459f67e176301 | Bin 0 -> 24 bytes ...h-544eff2c026e0464aff1a9afaa4acd2912e93267 | 1 + ...h-54a3422e8e1c578813d5cfce1f8b732040fc668e | 1 + ...h-55fd3e4e7c2506a9ce067b0e0a468161db22dec0 | Bin 0 -> 20 bytes ...h-56271abf5206dd39ac1a1035d49d41f61ee0606e | 1 + ...h-5be7542ec7a98b835a2c3dca63e3d89a76050fe6 | 3 + ...h-5d2ca379ca5dabcbfae13c3eca104e48a4bf94c9 | 1 + ...h-63164e99d1a0561c352ea11be619b8505a83ceb4 | Bin 0 -> 20 bytes ...h-6aec66d5104839013b44f977a01915c29f2e6795 | 1 + ...h-6af2409d5c331f44f76e165e735cd2e9104aed9e | 1 + ...h-6b5c5549ee7ed6e7fcf9613d62c295fd65d100ce | 1 + ...h-6bd8c4ea12175b25bb1d239699622ba5485248cf | 1 + ...h-6bda1f46384cf85ae2d9ca8048619963a9416ddc | 1 + 
...h-767cf8ac05cf878f93f55fe21f96a9e76b28c5f9 | 1 + ...h-7776698efb54442fa8170cb39b7c7bf72e515335 | 1 + ...h-78202f87ee8c211227082479a8bd67cd1e7f16e5 | Bin 0 -> 8 bytes ...h-79790f24a47ad8f39398df48800b946cd85fc3fe | 1 + ...h-7ab7fa1fb4303a91c57ec241fefdf5826d2b52aa | Bin 0 -> 26 bytes ...h-7b7826cea32794a2ab2c245cd3dc024355b07c78 | Bin 0 -> 16 bytes ...h-7c72226eeba2eb5192d9b7adfead405d3b93fdf9 | Bin 0 -> 24 bytes ...h-7cb0cf9df154821deb68a78001ce9c0e27f97b0a | 1 + ...h-815718bf6e59d981220f037f7509c9cfe5401485 | 1 + ...h-8453a4a3cf9dd9f60e5aa40fdce440b69f62869d | 1 + ...h-86ee8ae4c13784d3d750f6d4b970ec0852ea2bc3 | 1 + ...h-87093ec5446a84482f5a728fc65a51a15b6de843 | Bin 0 -> 27 bytes ...h-87f98b753291bd37f92795d32e2df4c3597dd6dd | Bin 0 -> 25 bytes ...h-8ab54f3642a60a239a7bb787838f3e5a6b6f4f41 | 1 + ...h-8ec6798103af6cedfdec68373991c0c0a73e3770 | 1 + ...h-8f5cc4f6de42f52dcb571b6c0f21df957eb25462 | Bin 0 -> 27 bytes ...h-8fb6d213ac7d14f6c62c09e7baf392f01e8688d0 | 1 + ...h-90c560825e852e3dfb64e09d6764b85cf9f7689d | 1 + ...h-91d8898837e425d607ef36ed73fa364b0fa58121 | Bin 0 -> 17 bytes ...h-91ebb8583ed7705e2601334e52428ea5eb80a681 | Bin 0 -> 23 bytes ...h-93c44c96a5c5e1d4532370b2c77bb372170bd59b | 1 + ...h-9c69d63a708c0a83d2d1fa60577a1a9270924ff2 | 1 + ...h-9c84f405725b7c171338f776b7ac7f3a3b010f34 | Bin 0 -> 17 bytes ...h-a235e98ab95f66315cef361c49eea5483ce2d91a | 1 + ...h-a6628891c34498ca2cb4122c2ee66fe4ba6cd01d | Bin 0 -> 26 bytes ...h-a8f59ca92bcc53e042fd759493c67a35f308721a | 1 + ...h-ace48b23767637be15eb3763e88170f7aab17cd4 | Bin 0 -> 23 bytes ...h-adf5f907d4bc584e6348b7188532f6fc08cda464 | 1 + ...h-af7499de68300f3346be7b69ff913c8da2394d23 | 1 + ...h-b2e70396bda55d716c022a683df49d72e28b5cae | Bin 0 -> 24 bytes ...h-b5aed7ccaeacb4347cc7599a258e28e1ccf3855b | 1 + ...h-ba5dd0ee55c764b2ae71543e95fd63c496d924bd | 1 + ...h-baa2cd71d1e22c966f3c2ddc44cf5b297da5d671 | 1 + ...h-c1fe932fa21c4382ba71ec745790386f010b939c | 1 + ...h-c29e58a510e698fc8205e4896a938adb92424105 | 1 + ...h-c7b166535d5d3591604aeb239b01592f24fff27b | 1 + ...h-cb39e58d20b35ceb4ecd9fc8dd91272e308f11a1 | 1 + ...h-cc144c9fa2f889e3c3665b1e7c870ddd41cb3e15 | 1 + ...h-cec678efd9c2c03dccf92f62c20e9520566d130f | Bin 0 -> 25 bytes ...h-cedac69cfff63a360470d6f051164b149f74bc18 | Bin 0 -> 25 bytes ...h-cf44c3acf507cae6fd00e0bf331d18536c551ce1 | 1 + ...h-d09e9319d459f21b180f1c730fbf4e89840bd6c5 | Bin 0 -> 20 bytes ...h-d11e5e5259e57e32f120f0d005bc52aead73d099 | 1 + ...h-d3ee0bad80fbd14f1f62903fc6d23f26ed5eb405 | 1 + ...h-d5124444b5e39d9a67c395e6325d340fff97a159 | 1 + ...h-d87da6cc047b35d69808787157394a0ac7c9ff92 | Bin 0 -> 26 bytes ...h-d91352ebdfa46f3734403e7e041bf0faa559e97a | 1 + ...h-d9affe3db851b50c3b1186ff86f97710cfd115b0 | 1 + ...h-db2606af8c9a718bd0da6a6e03c51fd4c84909cd | Bin 0 -> 17 bytes ...h-dbf141642a66403570204baf8a310783885e081d | 1 + ...h-dd67d75d834201769b29d89a5243fdae7f6d8ad1 | 1 + ...h-df426fe2abe15519a7ad994034bd2711f26f80af | 1 + ...h-ea07f1a57bd66e8a0b48347a45f12a4e48fa4b02 | Bin 0 -> 25 bytes ...h-efc04dc2a68b17479ad445cce2b84a91a7d3e9b9 | 1 + ...h-f2e76e1926cc2604f35de1316e48cb7c8e2aee65 | 1 + ...h-f4567ec41df8f30f9c0975e2b9cb3bed9278df8c | Bin 0 -> 26 bytes ...h-f804080d84b3bfc7adfe563ad1ac9013733983f6 | Bin 0 -> 24 bytes ...h-f995b58793f0e17361d409df7ddb99d7c14873cd | 1 + ...h-fd80c35839107ef932a09d1fd63e34d2a6cd6451 | 1 + ...h-fda69e901e92ce81134859dfbd53ceec84393aeb | 1 + .../fc_advance_before_drain | Bin 0 -> 9 bytes .../fc_advance_before_drain_keyed_anchors | 1 + ..._advance_before_drain_zero_fee_commitments 
| 1 + .../fc_after_claim_before_forward | Bin 0 -> 18 bytes ...c_after_claim_before_forward_keyed_anchors | 1 + ..._claim_before_forward_zero_fee_commitments | 1 + .../chanmon_consistency/fc_after_disconnect | Bin 0 -> 10 bytes .../fc_after_disconnect_keyed_anchors | 1 + .../fc_after_disconnect_zero_fee_commitments | 1 + .../chanmon_consistency/fc_after_fee_update | Bin 0 -> 11 bytes .../fc_after_fee_update_keyed_anchors | 1 + .../fc_after_fee_update_zero_fee_commitments | 1 + .../chanmon_consistency/fc_after_timer_ticks | Bin 0 -> 13 bytes .../fc_after_timer_ticks_keyed_anchors | 1 + .../fc_after_timer_ticks_zero_fee_commitments | 1 + .../chanmon_consistency/fc_all_channels | Bin 0 -> 12 bytes .../fc_all_channels_keyed_anchors | 1 + .../fc_all_channels_zero_fee_commitments | 1 + .../fc_async_complete_after | 2 + .../fc_async_complete_after_keyed_anchors | 2 + ..._async_complete_after_zero_fee_commitments | 2 + .../fc_async_hop_middle_closes | 4 + .../fc_async_hop_middle_closes_keyed_anchors | 4 + ...ync_hop_middle_closes_zero_fee_commitments | 4 + .../chanmon_consistency/fc_async_many_pays | 2 + .../fc_async_many_pays_keyed_anchors | 2 + .../fc_async_many_pays_zero_fee_commitments | 2 + .../chanmon_consistency/fc_async_no_complete | 1 + .../fc_async_no_complete_keyed_anchors | 1 + .../fc_async_no_complete_zero_fee_commitments | 1 + .../fc_async_pending_never_complete | 1 + ...async_pending_never_complete_keyed_anchors | 1 + ...ending_never_complete_zero_fee_commitments | 1 + .../chanmon_consistency/fc_async_restart | 1 + .../fc_async_restart_keyed_anchors | 1 + .../fc_async_restart_zero_fee_commitments | 1 + .../chanmon_consistency/fc_b_closes_both | Bin 0 -> 10 bytes .../fc_b_closes_both_hop_inflight | Bin 0 -> 17 bytes ...c_b_closes_both_hop_inflight_keyed_anchors | 1 + ...ses_both_hop_inflight_zero_fee_commitments | 1 + .../fc_b_closes_both_keyed_anchors | 1 + .../fc_b_closes_both_zero_fee_commitments | 1 + .../fc_bc_during_hop_ab_only | Bin 0 -> 12 bytes .../fc_bc_during_hop_ab_only_keyed_anchors | 1 + ...bc_during_hop_ab_only_zero_fee_commitments | 1 + .../chanmon_consistency/fc_bc_while_ab_htlc | Bin 0 -> 15 bytes .../fc_bc_while_ab_htlc_keyed_anchors | 1 + .../fc_bc_while_ab_htlc_zero_fee_commitments | 1 + .../chanmon_consistency/fc_bidir_htlcs | Bin 0 -> 15 bytes .../fc_bidir_htlcs_keyed_anchors | 1 + .../fc_bidir_htlcs_zero_fee_commitments | 1 + .../fc_both_sides_same_chan | Bin 0 -> 10 bytes .../fc_both_sides_same_chan_keyed_anchors | 1 + ..._both_sides_same_chan_zero_fee_commitments | 1 + .../fc_bump_htlc_p2wpkh_fee_estimate | 1 + .../fc_c_initiates_b_restart | Bin 0 -> 10 bytes .../fc_c_initiates_b_restart_keyed_anchors | 1 + ...c_initiates_b_restart_zero_fee_commitments | 1 + .../chanmon_consistency/fc_cascade_c_then_b | Bin 0 -> 14 bytes .../fc_cascade_c_then_b_keyed_anchors | 1 + .../fc_cascade_c_then_b_zero_fee_commitments | 1 + .../fc_claimable_on_close_needs_confirmation | 1 + .../fc_claimed_dust_htlc_sender_fails | Bin 0 -> 8 bytes .../fc_claimed_mpp_dust_path_still_succeeds | 1 + .../fc_claimed_payment_sender_completion | 1 + .../fc_close_then_disconnect_all | Bin 0 -> 12 bytes ...fc_close_then_disconnect_all_keyed_anchors | 1 + ...e_then_disconnect_all_zero_fee_commitments | 1 + ..._retire_old_snapshots_zero_fee_commitments | Bin 0 -> 8 bytes .../fc_completed_update_retires_old_snapshot | Bin 0 -> 8 bytes ...tious_claim_stuck_after_force_close_218996 | 1 + ...tious_claim_stuck_after_force_close_36a22e | 1 + ...tious_claim_stuck_after_force_close_d7793e | 1 + 
.../chanmon_consistency/fc_direct_pay_claimed | Bin 0 -> 16 bytes .../fc_direct_pay_claimed_keyed_anchors | 1 + ...fc_direct_pay_claimed_zero_fee_commitments | 1 + .../chanmon_consistency/fc_disabled_signers | Bin 0 -> 13 bytes .../fc_disabled_signers_keyed_anchors | 1 + .../fc_disabled_signers_zero_fee_commitments | 1 + .../fc_disconnect_close_reconnect | Bin 0 -> 13 bytes ...c_disconnect_close_reconnect_keyed_anchors | 1 + ...nnect_close_reconnect_zero_fee_commitments | 1 + .../fc_disconnect_drain_reconnect | Bin 0 -> 13 bytes ...c_disconnect_drain_reconnect_keyed_anchors | 1 + ...nnect_drain_reconnect_zero_fee_commitments | 1 + .../chanmon_consistency/fc_drain_a_only | Bin 0 -> 7 bytes .../fc_drain_a_only_keyed_anchors | 1 + .../fc_drain_a_only_zero_fee_commitments | 1 + ...cate_pending_claim_event_after_force_close | 1 + ...ent_after_force_close_zero_fee_commitments | 1 + ...ing_claim_request_after_force_close_39b47f | 1 + ...ing_claim_request_after_force_close_ed278d | 1 + .../chanmon_consistency/fc_during_reconnect | Bin 0 -> 13 bytes .../fc_during_reconnect_keyed_anchors | 1 + .../fc_during_reconnect_zero_fee_commitments | 1 + .../chanmon_consistency/fc_dust_htlcs | Bin 0 -> 19 bytes .../fc_dust_htlcs_keyed_anchors | 1 + .../fc_dust_htlcs_zero_fee_commitments | 1 + ...t_path_claim_expected_fail_but_sent_5099d3 | 1 + ...t_path_claim_expected_fail_but_sent_595140 | 1 + ...t_path_claim_expected_fail_but_sent_7a4062 | 1 + ...t_path_claim_expected_fail_but_sent_9d7311 | 1 + ...t_path_claim_expected_fail_but_sent_b1281e | 1 + ...t_path_claim_expected_fail_but_sent_bf210c | 1 + .../fc_events_between_drains | Bin 0 -> 15 bytes .../fc_events_between_drains_keyed_anchors | 1 + ...events_between_drains_zero_fee_commitments | 1 + .../chanmon_consistency/fc_events_only | Bin 0 -> 12 bytes .../fc_events_only_keyed_anchors | 1 + .../fc_events_only_zero_fee_commitments | 1 + .../chanmon_consistency/fc_exact_cltv_height | Bin 0 -> 15 bytes .../fc_exact_cltv_height_keyed_anchors | 1 + .../fc_exact_cltv_height_zero_fee_commitments | 1 + .../chanmon_consistency/fc_hop_b_has_preimage | Bin 0 -> 20 bytes .../fc_hop_b_has_preimage_keyed_anchors | 1 + ...fc_hop_b_has_preimage_zero_fee_commitments | 1 + .../fc_hop_before_bc_commit | Bin 0 -> 13 bytes .../fc_hop_before_bc_commit_keyed_anchors | 1 + ..._hop_before_bc_commit_zero_fee_commitments | 1 + .../chanmon_consistency/fc_hop_mid_flight | Bin 0 -> 16 bytes .../fc_hop_mid_flight_keyed_anchors | 1 + .../fc_hop_mid_flight_zero_fee_commitments | 1 + .../chanmon_consistency/fc_htlc_late_signer | Bin 0 -> 17 bytes .../fc_htlc_late_signer_keyed_anchors | 1 + .../fc_htlc_late_signer_zero_fee_commitments | 1 + .../chanmon_consistency/fc_immediate_settle | Bin 0 -> 3 bytes .../fc_immediate_settle_keyed_anchors | 1 + .../fc_immediate_settle_zero_fee_commitments | 1 + .../fc_inprogress_monitors | Bin 0 -> 11 bytes .../fc_inprogress_monitors_keyed_anchors | Bin 0 -> 11 bytes ...c_inprogress_monitors_zero_fee_commitments | Bin 0 -> 11 bytes .../fc_interleaved_channels | Bin 0 -> 17 bytes .../fc_interleaved_channels_keyed_anchors | 1 + ..._interleaved_channels_zero_fee_commitments | 1 + .../fc_large_payment_resolve | Bin 0 -> 16 bytes .../fc_large_payment_resolve_keyed_anchors | 1 + ...large_payment_resolve_zero_fee_commitments | 1 + .../chanmon_consistency/fc_many_htlcs | Bin 0 -> 15 bytes .../fc_many_htlcs_keyed_anchors | 1 + .../fc_many_htlcs_zero_fee_commitments | 1 + .../fc_mid_fulfill_propagation | Bin 0 -> 21 bytes .../fc_mid_fulfill_propagation_keyed_anchors | 
1 + ...d_fulfill_propagation_zero_fee_commitments | 1 + ..._monitor_update_replay_out_of_order_dcbc86 | 1 + .../chanmon_consistency/fc_msgs_before_drain | Bin 0 -> 13 bytes .../fc_msgs_before_drain_keyed_anchors | 1 + .../fc_msgs_before_drain_zero_fee_commitments | 1 + .../chanmon_consistency/fc_multi_drain_rounds | Bin 0 -> 16 bytes .../fc_multi_drain_rounds_keyed_anchors | 1 + ...fc_multi_drain_rounds_zero_fee_commitments | 1 + .../chanmon_consistency/fc_no_settle | Bin 0 -> 3 bytes .../fc_no_settle_keyed_anchors | 1 + .../fc_no_settle_zero_fee_commitments | 1 + .../chanmon_consistency/fc_node_restart | Bin 0 -> 10 bytes .../fc_node_restart_keyed_anchors | 1 + .../fc_node_restart_zero_fee_commitments | 1 + .../chanmon_consistency/fc_one_msg_at_a_time | Bin 0 -> 13 bytes .../fc_one_msg_at_a_time_keyed_anchors | 1 + .../fc_one_msg_at_a_time_zero_fee_commitments | 1 + .../fc_pay_claim_close_pay | Bin 0 -> 17 bytes .../fc_pay_claim_close_pay_keyed_anchors | 1 + ...c_pay_claim_close_pay_zero_fee_commitments | 1 + ..._observable_lifecycle_zero_fee_commitments | 1 + .../chanmon_consistency/fc_pending_monitor | Bin 0 -> 15 bytes .../fc_pending_monitor_keyed_anchors | Bin 0 -> 15 bytes .../fc_pending_monitor_zero_fee_commitments | Bin 0 -> 15 bytes ...le_probe_skips_zero_outbound_limit_channel | 1 + ...ettle_probe_uses_advertised_sendable_range | 2 + .../chanmon_consistency/fc_rapid_fire | Bin 0 -> 12 bytes .../fc_rapid_fire_keyed_anchors | 1 + .../fc_rapid_fire_zero_fee_commitments | 1 + .../chanmon_consistency/fc_reconnect | Bin 0 -> 13 bytes .../fc_reconnect_broadcast_announcements | 1 + .../fc_reconnect_keyed_anchors | 1 + .../fc_reconnect_zero_fee_commitments | 1 + .../fc_repeated_same_channel | Bin 0 -> 11 bytes .../fc_repeated_same_channel_keyed_anchors | 1 + ...repeated_same_channel_zero_fee_commitments | 1 + ...start_claimed_payment_stale_monitor_replay | 1 + .../fc_restart_in_progress_chain_sync_replay | 1 + .../fc_restart_mid_resolve | Bin 0 -> 10 bytes .../fc_restart_mid_resolve_keyed_anchors | 1 + ...c_restart_mid_resolve_zero_fee_commitments | 1 + .../fc_restart_then_counterparty_closes | Bin 0 -> 10 bytes ...art_then_counterparty_closes_keyed_anchors | 1 + ...n_counterparty_closes_zero_fee_commitments | 1 + .../chanmon_consistency/fc_reverse_hop | Bin 0 -> 16 bytes .../fc_reverse_hop_keyed_anchors | 1 + .../fc_reverse_hop_zero_fee_commitments | 1 + .../fc_signer_disabled_holder | Bin 0 -> 14 bytes .../fc_signer_disabled_holder_keyed_anchors | 1 + ...igner_disabled_holder_zero_fee_commitments | 1 + .../fc_stale_monitor_restart | Bin 0 -> 13 bytes .../fc_stale_monitor_restart_keyed_anchors | 1 + ...stale_monitor_restart_zero_fee_commitments | 1 + .../chanmon_consistency/fc_sync_one_block | Bin 0 -> 14 bytes .../fc_sync_one_block_keyed_anchors | 1 + .../fc_sync_one_block_zero_fee_commitments | 1 + .../chanmon_consistency/fc_sync_to_tip | Bin 0 -> 7 bytes .../fc_sync_to_tip_keyed_anchors | 1 + .../fc_sync_to_tip_zero_fee_commitments | 1 + .../chanmon_consistency/fc_then_send | Bin 0 -> 10 bytes .../fc_then_send_keyed_anchors | 1 + .../fc_then_send_zero_fee_commitments | 1 + .../chanmon_consistency/fc_timer_tick_after | Bin 0 -> 15 bytes .../fc_timer_tick_after_keyed_anchors | 1 + .../fc_timer_tick_after_zero_fee_commitments | 1 + .../fc_unclaimed_mpp_timeout_variant_a | 1 + .../fc_unclaimed_mpp_timeout_variant_b | 1 + .../chanmon_consistency/force_close_basic | Bin 0 -> 9 bytes .../force_close_basic_async | 1 + .../force_close_basic_async_keyed_anchors | 1 + 
...rce_close_basic_async_zero_fee_commitments | 1 + .../force_close_basic_keyed_anchors | 1 + .../force_close_basic_zero_fee_commitments | 1 + .../force_close_both_directions | Bin 0 -> 10 bytes .../force_close_both_directions_async | 1 + ..._close_both_directions_async_keyed_anchors | 1 + ...both_directions_async_zero_fee_commitments | 1 + .../force_close_both_directions_keyed_anchors | 1 + ...close_both_directions_zero_fee_commitments | 1 + .../force_close_htlc_needs_height | Bin 0 -> 11 bytes .../force_close_htlc_needs_height_async | 1 + ...lose_htlc_needs_height_async_keyed_anchors | 1 + ...lc_needs_height_async_zero_fee_commitments | 1 + ...orce_close_htlc_needs_height_keyed_anchors | 1 + ...ose_htlc_needs_height_zero_fee_commitments | 1 + .../force_close_htlc_resolved | Bin 0 -> 11 bytes .../force_close_htlc_resolved_async | 1 + ...ce_close_htlc_resolved_async_keyed_anchors | 1 + ...e_htlc_resolved_async_zero_fee_commitments | 1 + .../force_close_htlc_resolved_keyed_anchors | 1 + ...e_close_htlc_resolved_zero_fee_commitments | 1 + .../force_close_middle_node | Bin 0 -> 9 bytes .../force_close_middle_node_async | 1 + ...orce_close_middle_node_async_keyed_anchors | 1 + ...ose_middle_node_async_zero_fee_commitments | 1 + .../force_close_middle_node_keyed_anchors | 1 + ...rce_close_middle_node_zero_fee_commitments | 1 + .../force_close_no_confirm | Bin 0 -> 2 bytes .../force_close_no_confirm_async | 1 + ...force_close_no_confirm_async_keyed_anchors | 1 + ...lose_no_confirm_async_zero_fee_commitments | 1 + .../force_close_no_confirm_keyed_anchors | 1 + ...orce_close_no_confirm_zero_fee_commitments | 1 + .../force_close_three_node_preimage | Bin 0 -> 14 bytes .../force_close_three_node_preimage_async | 4 + ...se_three_node_preimage_async_keyed_anchors | 4 + ...e_node_preimage_async_zero_fee_commitments | 4 + ...ce_close_three_node_preimage_keyed_anchors | 1 + ...e_three_node_preimage_zero_fee_commitments | 1 + .../ldk_crash_channelmanager_19484 | 1 + .../ldk_crash_channelmanager_9836 | Bin 0 -> 13 bytes .../ldk_crash_channelmonitor_2727 | 1 + .../ldk_crash_onchaintx_1025 | Bin 0 -> 14 bytes .../ldk_crash_onchaintx_913 | 1 + .../chanmon_consistency/ldk_crash_signer_395 | 1 + ...t-0103befb3dc5aa050668752668d04e85bd1fc14e | Bin 0 -> 24 bytes ...t-05fc1bb98f2a3b29e826a4de636474de0b23c895 | Bin 0 -> 25 bytes ...t-2bd72986b31d87f9260cf627e63971b5b8310a60 | Bin 0 -> 22 bytes ...t-3d9c399d0e2d915375da243fb57023df803a5dc6 | 1 + ...t-475eb92f6d72aed80ce7cdaed4181b99b11b2fcd | Bin 0 -> 26 bytes ...t-4c5cc7debdfdf2569e21b13b21c4270a9b558267 | Bin 0 -> 20 bytes ...t-505b331015cbe51169de31e09acc6d8330c8e385 | 2 + ...t-593eb3357e98be529b0ef35f21577ef6eede171b | Bin 0 -> 18 bytes ...t-6ee72dbba68dbca58038d2f9b8525e4d0df25f94 | 1 + ...t-76a82aa89161d0428192e725650324a74a710dca | 1 + ...t-7a7ea04ead9439ad7db2eefb23f6e242d547e459 | Bin 0 -> 25 bytes ...t-8250da1564cda1a1dde38a431859afab8ac2934d | Bin 0 -> 23 bytes ...t-838b7d436a92ae2a68aa9ad9badd88cbf96b407b | Bin 0 -> 24 bytes ...t-87b55c5b37383fe43420089fd3e8ccecbb034b44 | 1 + ...t-885f446335ae279baed408d42af8c398dfdb8c9b | 1 + ...t-8a81e4c066465a2975ef22625c0b91da6332a2c8 | Bin 0 -> 24 bytes ...t-8f2aebf3aeeb70d8edd39a886e30beb770f3b42b | Bin 0 -> 23 bytes ...t-91d97d9eea2bd59f746681ad822488262e832ff1 | Bin 0 -> 26 bytes ...t-95a90908391d3398084b77eb11ff5c9d7fdde008 | Bin 0 -> 24 bytes ...t-a31cdfc423211489c841a6ddd067f9e6cf5bed4b | Bin 0 -> 21 bytes ...t-b1c4840ea1279dd8d6080d79373ae55bbcad3061 | Bin 0 -> 25 bytes 
...t-b6ef84eec94d70bbc385c98c4ab0bac77da00a2f | 1 + ...t-bae8693182b102dfebab143a0f48992dad76245d | Bin 0 -> 23 bytes ...t-bcab049322729e275e3bbdacebc633495da7643f | Bin 0 -> 26 bytes ...t-d5afdff02a253c9f2fbce95cbaf730eb210128fa | 1 + ...t-d6494f068fb2b2d31f1ac8627752692b3c8b7d2f | 1 + ...t-edd3f8168217501dd93f3c24d09c2c095cdf7784 | Bin 0 -> 14 bytes ...t-fcbcc131184e33d5b000820b0972f6197b0801d2 | 1 + 408 files changed, 938 insertions(+), 2 deletions(-) create mode 100644 fc-crashes.md create mode 100644 fuzz/FC-INFO.md create mode 100644 fuzz/ONCHAINTX-BUGS.md create mode 100644 fuzz/OPEN-ISSUES.md delete mode 100644 fuzz/test_cases/base32/smoke delete mode 100644 fuzz/test_cases/bech32_parse/smoke create mode 100644 fuzz/test_cases/chanmon_consistency/crash-02830a6ff7757f3570924b0c0fd9118a7cdd9770 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-0473b0e767d9a98de62538ce5afcbbc2e6ec5af2 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-05e175d40f60b823f730fa874d98dc10dd2bb6ad create mode 100644 fuzz/test_cases/chanmon_consistency/crash-07bdc4e56ee67bd2ffa409f76529199d748ab2d8 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-096cc3008264dccaefb945f5a4b7a2d3c9f8e90c create mode 100644 fuzz/test_cases/chanmon_consistency/crash-09a17e06913dea74dba796940cec86cb4e2dd597 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-09f5a41270b07f70a031884cbdfd081e8600923e create mode 100644 fuzz/test_cases/chanmon_consistency/crash-0b87d8b430697fe9d1781a38f41a68ebcf7b18c1 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-0c3334736f5c55e44088d6140580354827026732 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-0dcddb7aa2b729fa8de829e5ea82c38b5918acfa create mode 100644 fuzz/test_cases/chanmon_consistency/crash-0f0ca42c8b4c815495919663652db18483d5e846 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-14a022e3e4d88420a08bc4c2d67193f74e4f8bdd create mode 100644 fuzz/test_cases/chanmon_consistency/crash-15b45517356c182051c2b334e09c00f4f9368e94 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-18062bd37528e06c4921e7ef7df2b2c3e676823b create mode 100644 fuzz/test_cases/chanmon_consistency/crash-22125d8a200205d52723ec232f5aab710856f4b0 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-228ea00412a2fab1e866fc6df32ffd00bbfe81ad create mode 100644 fuzz/test_cases/chanmon_consistency/crash-242de208110143401fcf4e1ebaa7d9d38fb93611 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-24f1373b1cf51f95af854d6d8730336b77728007 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-2923c14608fb259c21862cd71ffeb6ac74b0ba32 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-2a0852bec1d75334538dacec26831db6995b6e33 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-2d93541536e19c030d95d236e6be545352d98b80 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-2e002fcfdc76c5981f5f93c0f842b548fb56c7a7 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-2fad50c7fd20b250f0349887445af198124900df create mode 100644 fuzz/test_cases/chanmon_consistency/crash-2fcd63b2ed709dfcd9c6a08dc673d1f896b6cdad create mode 100644 fuzz/test_cases/chanmon_consistency/crash-304db9c93d320420bdef656699ad1f49c37feaf7 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-315119ea09b9febec156d212fe57020def4b5af4 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-32a013d8bd38f3ba39d4a214ba0780edd41ccb85 create mode 100644 
fuzz/test_cases/chanmon_consistency/crash-33c08a8f15f1c842df5da4fc92228d00606573f9 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-33e77c2f720493e306bbfea79f151388ca7a04ea create mode 100644 fuzz/test_cases/chanmon_consistency/crash-37a18356d608c97415c0a1bef6a0f13fe04c8b97 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-380ee6f8c1030828f4d80582154b0418fca58c90 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-38192a6cb0500969f301c7a6742949ecd213bfae create mode 100644 fuzz/test_cases/chanmon_consistency/crash-387c18b4c7235aa1960400de5b0d5798202ec3b1 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-3bb94b7b4397397caa5eb0e9ba6abb9a18028270 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-3be4d9d7a75c8459b3ec349474c7fc206b00fe9c create mode 100644 fuzz/test_cases/chanmon_consistency/crash-3cda5b606ce05f4207207e8fd1480fe530a51b13 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-3f8a6e5b806235b795ebea3d6998943a3ab6ff9d create mode 100644 fuzz/test_cases/chanmon_consistency/crash-41ffe016736ddfef0eb1d877b35a0c85bd5cfd5f create mode 100644 fuzz/test_cases/chanmon_consistency/crash-45240f379a3a24948c4b091fd658a9f0ef4d4963 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-45872f91e28e4ed1e8814084bbf5ada6fe4963f0 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-49e1240588c1b4507b24c4f07dae75faef02a639 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-4da789d875488d8f244bccefaff4295ae801c745 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-4e4b47b5a0f4c4689868a3003ae7d62e5ac78484 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-53d6404dc8dee21adf112f3c909459f67e176301 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-544eff2c026e0464aff1a9afaa4acd2912e93267 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-54a3422e8e1c578813d5cfce1f8b732040fc668e create mode 100644 fuzz/test_cases/chanmon_consistency/crash-55fd3e4e7c2506a9ce067b0e0a468161db22dec0 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-56271abf5206dd39ac1a1035d49d41f61ee0606e create mode 100644 fuzz/test_cases/chanmon_consistency/crash-5be7542ec7a98b835a2c3dca63e3d89a76050fe6 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-5d2ca379ca5dabcbfae13c3eca104e48a4bf94c9 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-63164e99d1a0561c352ea11be619b8505a83ceb4 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-6aec66d5104839013b44f977a01915c29f2e6795 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-6af2409d5c331f44f76e165e735cd2e9104aed9e create mode 100644 fuzz/test_cases/chanmon_consistency/crash-6b5c5549ee7ed6e7fcf9613d62c295fd65d100ce create mode 100644 fuzz/test_cases/chanmon_consistency/crash-6bd8c4ea12175b25bb1d239699622ba5485248cf create mode 100644 fuzz/test_cases/chanmon_consistency/crash-6bda1f46384cf85ae2d9ca8048619963a9416ddc create mode 100644 fuzz/test_cases/chanmon_consistency/crash-767cf8ac05cf878f93f55fe21f96a9e76b28c5f9 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-7776698efb54442fa8170cb39b7c7bf72e515335 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-78202f87ee8c211227082479a8bd67cd1e7f16e5 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-79790f24a47ad8f39398df48800b946cd85fc3fe create mode 100644 fuzz/test_cases/chanmon_consistency/crash-7ab7fa1fb4303a91c57ec241fefdf5826d2b52aa create mode 100644 fuzz/test_cases/chanmon_consistency/crash-7b7826cea32794a2ab2c245cd3dc024355b07c78 
create mode 100644 fuzz/test_cases/chanmon_consistency/crash-7c72226eeba2eb5192d9b7adfead405d3b93fdf9 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-7cb0cf9df154821deb68a78001ce9c0e27f97b0a create mode 100644 fuzz/test_cases/chanmon_consistency/crash-815718bf6e59d981220f037f7509c9cfe5401485 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-8453a4a3cf9dd9f60e5aa40fdce440b69f62869d create mode 100644 fuzz/test_cases/chanmon_consistency/crash-86ee8ae4c13784d3d750f6d4b970ec0852ea2bc3 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-87093ec5446a84482f5a728fc65a51a15b6de843 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-87f98b753291bd37f92795d32e2df4c3597dd6dd create mode 100644 fuzz/test_cases/chanmon_consistency/crash-8ab54f3642a60a239a7bb787838f3e5a6b6f4f41 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-8ec6798103af6cedfdec68373991c0c0a73e3770 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-8f5cc4f6de42f52dcb571b6c0f21df957eb25462 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-8fb6d213ac7d14f6c62c09e7baf392f01e8688d0 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-90c560825e852e3dfb64e09d6764b85cf9f7689d create mode 100644 fuzz/test_cases/chanmon_consistency/crash-91d8898837e425d607ef36ed73fa364b0fa58121 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-91ebb8583ed7705e2601334e52428ea5eb80a681 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-93c44c96a5c5e1d4532370b2c77bb372170bd59b create mode 100644 fuzz/test_cases/chanmon_consistency/crash-9c69d63a708c0a83d2d1fa60577a1a9270924ff2 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-9c84f405725b7c171338f776b7ac7f3a3b010f34 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-a235e98ab95f66315cef361c49eea5483ce2d91a create mode 100644 fuzz/test_cases/chanmon_consistency/crash-a6628891c34498ca2cb4122c2ee66fe4ba6cd01d create mode 100644 fuzz/test_cases/chanmon_consistency/crash-a8f59ca92bcc53e042fd759493c67a35f308721a create mode 100644 fuzz/test_cases/chanmon_consistency/crash-ace48b23767637be15eb3763e88170f7aab17cd4 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-adf5f907d4bc584e6348b7188532f6fc08cda464 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-af7499de68300f3346be7b69ff913c8da2394d23 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-b2e70396bda55d716c022a683df49d72e28b5cae create mode 100644 fuzz/test_cases/chanmon_consistency/crash-b5aed7ccaeacb4347cc7599a258e28e1ccf3855b create mode 100644 fuzz/test_cases/chanmon_consistency/crash-ba5dd0ee55c764b2ae71543e95fd63c496d924bd create mode 100644 fuzz/test_cases/chanmon_consistency/crash-baa2cd71d1e22c966f3c2ddc44cf5b297da5d671 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-c1fe932fa21c4382ba71ec745790386f010b939c create mode 100644 fuzz/test_cases/chanmon_consistency/crash-c29e58a510e698fc8205e4896a938adb92424105 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-c7b166535d5d3591604aeb239b01592f24fff27b create mode 100644 fuzz/test_cases/chanmon_consistency/crash-cb39e58d20b35ceb4ecd9fc8dd91272e308f11a1 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-cc144c9fa2f889e3c3665b1e7c870ddd41cb3e15 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-cec678efd9c2c03dccf92f62c20e9520566d130f create mode 100644 fuzz/test_cases/chanmon_consistency/crash-cedac69cfff63a360470d6f051164b149f74bc18 create mode 100644 
fuzz/test_cases/chanmon_consistency/crash-cf44c3acf507cae6fd00e0bf331d18536c551ce1 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-d09e9319d459f21b180f1c730fbf4e89840bd6c5 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-d11e5e5259e57e32f120f0d005bc52aead73d099 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-d3ee0bad80fbd14f1f62903fc6d23f26ed5eb405 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-d5124444b5e39d9a67c395e6325d340fff97a159 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-d87da6cc047b35d69808787157394a0ac7c9ff92 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-d91352ebdfa46f3734403e7e041bf0faa559e97a create mode 100644 fuzz/test_cases/chanmon_consistency/crash-d9affe3db851b50c3b1186ff86f97710cfd115b0 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-db2606af8c9a718bd0da6a6e03c51fd4c84909cd create mode 100644 fuzz/test_cases/chanmon_consistency/crash-dbf141642a66403570204baf8a310783885e081d create mode 100644 fuzz/test_cases/chanmon_consistency/crash-dd67d75d834201769b29d89a5243fdae7f6d8ad1 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-df426fe2abe15519a7ad994034bd2711f26f80af create mode 100644 fuzz/test_cases/chanmon_consistency/crash-ea07f1a57bd66e8a0b48347a45f12a4e48fa4b02 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-efc04dc2a68b17479ad445cce2b84a91a7d3e9b9 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-f2e76e1926cc2604f35de1316e48cb7c8e2aee65 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-f4567ec41df8f30f9c0975e2b9cb3bed9278df8c create mode 100644 fuzz/test_cases/chanmon_consistency/crash-f804080d84b3bfc7adfe563ad1ac9013733983f6 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-f995b58793f0e17361d409df7ddb99d7c14873cd create mode 100644 fuzz/test_cases/chanmon_consistency/crash-fd80c35839107ef932a09d1fd63e34d2a6cd6451 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-fda69e901e92ce81134859dfbd53ceec84393aeb create mode 100644 fuzz/test_cases/chanmon_consistency/fc_advance_before_drain create mode 100644 fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_claim_before_forward create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_claim_before_forward_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_claim_before_forward_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_disconnect create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_disconnect_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_disconnect_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_fee_update create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_fee_update_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_fee_update_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_timer_ticks create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_timer_ticks_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_timer_ticks_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_all_channels create mode 100644 fuzz/test_cases/chanmon_consistency/fc_all_channels_keyed_anchors create mode 100644 
fuzz/test_cases/chanmon_consistency/fc_all_channels_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_complete_after create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_complete_after_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_complete_after_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_hop_middle_closes create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_hop_middle_closes_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_hop_middle_closes_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_many_pays create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_many_pays_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_many_pays_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_no_complete create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_no_complete_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_no_complete_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_pending_never_complete create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_pending_never_complete_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_pending_never_complete_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_restart create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_restart_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_restart_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_b_closes_both create mode 100644 fuzz/test_cases/chanmon_consistency/fc_b_closes_both_hop_inflight create mode 100644 fuzz/test_cases/chanmon_consistency/fc_b_closes_both_hop_inflight_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_b_closes_both_hop_inflight_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_b_closes_both_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_b_closes_both_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bc_during_hop_ab_only create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bc_during_hop_ab_only_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bc_during_hop_ab_only_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bc_while_ab_htlc create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bc_while_ab_htlc_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bc_while_ab_htlc_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bidir_htlcs create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bidir_htlcs_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bidir_htlcs_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_both_sides_same_chan create mode 100644 fuzz/test_cases/chanmon_consistency/fc_both_sides_same_chan_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_both_sides_same_chan_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bump_htlc_p2wpkh_fee_estimate create mode 100644 fuzz/test_cases/chanmon_consistency/fc_c_initiates_b_restart create mode 100644 fuzz/test_cases/chanmon_consistency/fc_c_initiates_b_restart_keyed_anchors create 
mode 100644 fuzz/test_cases/chanmon_consistency/fc_c_initiates_b_restart_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_cascade_c_then_b create mode 100644 fuzz/test_cases/chanmon_consistency/fc_cascade_c_then_b_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_cascade_c_then_b_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_claimable_on_close_needs_confirmation create mode 100644 fuzz/test_cases/chanmon_consistency/fc_claimed_dust_htlc_sender_fails create mode 100644 fuzz/test_cases/chanmon_consistency/fc_claimed_mpp_dust_path_still_succeeds create mode 100644 fuzz/test_cases/chanmon_consistency/fc_claimed_payment_sender_completion create mode 100644 fuzz/test_cases/chanmon_consistency/fc_close_then_disconnect_all create mode 100644 fuzz/test_cases/chanmon_consistency/fc_close_then_disconnect_all_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_close_then_disconnect_all_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_completed_parallel_updates_retire_old_snapshots_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_completed_update_retires_old_snapshot create mode 100644 fuzz/test_cases/chanmon_consistency/fc_contentious_claim_stuck_after_force_close_218996 create mode 100644 fuzz/test_cases/chanmon_consistency/fc_contentious_claim_stuck_after_force_close_36a22e create mode 100644 fuzz/test_cases/chanmon_consistency/fc_contentious_claim_stuck_after_force_close_d7793e create mode 100644 fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed create mode 100644 fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disabled_signers create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disabled_signers_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disabled_signers_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_drain_a_only create mode 100644 fuzz/test_cases/chanmon_consistency/fc_drain_a_only_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_drain_a_only_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close create mode 100644 fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_39b47f create mode 100644 fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_ed278d create mode 100644 fuzz/test_cases/chanmon_consistency/fc_during_reconnect create mode 100644 fuzz/test_cases/chanmon_consistency/fc_during_reconnect_keyed_anchors create mode 
100644 fuzz/test_cases/chanmon_consistency/fc_during_reconnect_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_htlcs create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_htlcs_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_htlcs_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_path_claim_expected_fail_but_sent_5099d3 create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_path_claim_expected_fail_but_sent_595140 create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_path_claim_expected_fail_but_sent_7a4062 create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_path_claim_expected_fail_but_sent_9d7311 create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_path_claim_expected_fail_but_sent_b1281e create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_path_claim_expected_fail_but_sent_bf210c create mode 100644 fuzz/test_cases/chanmon_consistency/fc_events_between_drains create mode 100644 fuzz/test_cases/chanmon_consistency/fc_events_between_drains_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_events_between_drains_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_events_only create mode 100644 fuzz/test_cases/chanmon_consistency/fc_events_only_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_events_only_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_exact_cltv_height create mode 100644 fuzz/test_cases/chanmon_consistency/fc_exact_cltv_height_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_exact_cltv_height_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_b_has_preimage create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_b_has_preimage_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_b_has_preimage_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_before_bc_commit create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_before_bc_commit_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_before_bc_commit_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_mid_flight create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_mid_flight_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_mid_flight_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_htlc_late_signer create mode 100644 fuzz/test_cases/chanmon_consistency/fc_htlc_late_signer_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_htlc_late_signer_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_immediate_settle create mode 100644 fuzz/test_cases/chanmon_consistency/fc_immediate_settle_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_immediate_settle_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_inprogress_monitors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_inprogress_monitors_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_inprogress_monitors_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_interleaved_channels create mode 100644 fuzz/test_cases/chanmon_consistency/fc_interleaved_channels_keyed_anchors create mode 100644 
fuzz/test_cases/chanmon_consistency/fc_interleaved_channels_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_large_payment_resolve create mode 100644 fuzz/test_cases/chanmon_consistency/fc_large_payment_resolve_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_large_payment_resolve_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_many_htlcs create mode 100644 fuzz/test_cases/chanmon_consistency/fc_many_htlcs_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_many_htlcs_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_mid_fulfill_propagation create mode 100644 fuzz/test_cases/chanmon_consistency/fc_mid_fulfill_propagation_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_mid_fulfill_propagation_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_monitor_update_replay_out_of_order_dcbc86 create mode 100644 fuzz/test_cases/chanmon_consistency/fc_msgs_before_drain create mode 100644 fuzz/test_cases/chanmon_consistency/fc_msgs_before_drain_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_msgs_before_drain_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_multi_drain_rounds create mode 100644 fuzz/test_cases/chanmon_consistency/fc_multi_drain_rounds_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_multi_drain_rounds_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_no_settle create mode 100644 fuzz/test_cases/chanmon_consistency/fc_no_settle_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_no_settle_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_node_restart create mode 100644 fuzz/test_cases/chanmon_consistency/fc_node_restart_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_node_restart_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time create mode 100644 fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay create mode 100644 fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_pending_cache_without_observable_lifecycle_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_pending_monitor create mode 100644 fuzz/test_cases/chanmon_consistency/fc_pending_monitor_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_pending_monitor_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_skips_zero_outbound_limit_channel create mode 100644 fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_uses_advertised_sendable_range create mode 100644 fuzz/test_cases/chanmon_consistency/fc_rapid_fire create mode 100644 fuzz/test_cases/chanmon_consistency/fc_rapid_fire_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_rapid_fire_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_reconnect create mode 100644 fuzz/test_cases/chanmon_consistency/fc_reconnect_broadcast_announcements create mode 100644 
fuzz/test_cases/chanmon_consistency/fc_reconnect_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_reconnect_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel create mode 100644 fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_claimed_payment_stale_monitor_replay create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_in_progress_chain_sync_replay create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_mid_resolve create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_mid_resolve_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_mid_resolve_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_then_counterparty_closes create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_then_counterparty_closes_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_then_counterparty_closes_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_reverse_hop create mode 100644 fuzz/test_cases/chanmon_consistency/fc_reverse_hop_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_reverse_hop_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_signer_disabled_holder create mode 100644 fuzz/test_cases/chanmon_consistency/fc_signer_disabled_holder_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_signer_disabled_holder_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_stale_monitor_restart create mode 100644 fuzz/test_cases/chanmon_consistency/fc_stale_monitor_restart_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_stale_monitor_restart_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_sync_one_block create mode 100644 fuzz/test_cases/chanmon_consistency/fc_sync_one_block_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_sync_one_block_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_sync_to_tip create mode 100644 fuzz/test_cases/chanmon_consistency/fc_sync_to_tip_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_sync_to_tip_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_then_send create mode 100644 fuzz/test_cases/chanmon_consistency/fc_then_send_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_then_send_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_timer_tick_after create mode 100644 fuzz/test_cases/chanmon_consistency/fc_timer_tick_after_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_timer_tick_after_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_unclaimed_mpp_timeout_variant_a create mode 100644 fuzz/test_cases/chanmon_consistency/fc_unclaimed_mpp_timeout_variant_b create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_basic create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_basic_async create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_basic_async_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_basic_async_zero_fee_commitments create mode 100644 
fuzz/test_cases/chanmon_consistency/force_close_basic_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_basic_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_both_directions create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_both_directions_async create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_both_directions_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_both_directions_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved_async create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved_async_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved_async_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_middle_node create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_middle_node_async create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_middle_node_async_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_middle_node_async_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_middle_node_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_middle_node_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_no_confirm create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_no_confirm_async create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_no_confirm_async_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_no_confirm_async_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_no_confirm_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_no_confirm_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_keyed_anchors create mode 
100644 fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/ldk_crash_channelmanager_19484 create mode 100644 fuzz/test_cases/chanmon_consistency/ldk_crash_channelmanager_9836 create mode 100644 fuzz/test_cases/chanmon_consistency/ldk_crash_channelmonitor_2727 create mode 100644 fuzz/test_cases/chanmon_consistency/ldk_crash_onchaintx_1025 create mode 100644 fuzz/test_cases/chanmon_consistency/ldk_crash_onchaintx_913 create mode 100644 fuzz/test_cases/chanmon_consistency/ldk_crash_signer_395 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-0103befb3dc5aa050668752668d04e85bd1fc14e create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-05fc1bb98f2a3b29e826a4de636474de0b23c895 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-2bd72986b31d87f9260cf627e63971b5b8310a60 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-3d9c399d0e2d915375da243fb57023df803a5dc6 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-475eb92f6d72aed80ce7cdaed4181b99b11b2fcd create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-4c5cc7debdfdf2569e21b13b21c4270a9b558267 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-505b331015cbe51169de31e09acc6d8330c8e385 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-593eb3357e98be529b0ef35f21577ef6eede171b create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-6ee72dbba68dbca58038d2f9b8525e4d0df25f94 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-76a82aa89161d0428192e725650324a74a710dca create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-7a7ea04ead9439ad7db2eefb23f6e242d547e459 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-8250da1564cda1a1dde38a431859afab8ac2934d create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-838b7d436a92ae2a68aa9ad9badd88cbf96b407b create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-87b55c5b37383fe43420089fd3e8ccecbb034b44 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-885f446335ae279baed408d42af8c398dfdb8c9b create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-8a81e4c066465a2975ef22625c0b91da6332a2c8 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-8f2aebf3aeeb70d8edd39a886e30beb770f3b42b create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-91d97d9eea2bd59f746681ad822488262e832ff1 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-95a90908391d3398084b77eb11ff5c9d7fdde008 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-a31cdfc423211489c841a6ddd067f9e6cf5bed4b create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-b1c4840ea1279dd8d6080d79373ae55bbcad3061 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-b6ef84eec94d70bbc385c98c4ab0bac77da00a2f create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-bae8693182b102dfebab143a0f48992dad76245d create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-bcab049322729e275e3bbdacebc633495da7643f create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-d5afdff02a253c9f2fbce95cbaf730eb210128fa create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-d6494f068fb2b2d31f1ac8627752692b3c8b7d2f create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-edd3f8168217501dd93f3c24d09c2c095cdf7784 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-fcbcc131184e33d5b000820b0972f6197b0801d2 diff --git a/fc-crashes.md b/fc-crashes.md new file 
mode 100644 index 00000000000..a257e7966ba --- /dev/null +++ b/fc-crashes.md @@ -0,0 +1,162 @@ +# Force-close fuzzer LDK crashes + +Minimized crash sequences found by the chanmon_consistency fuzzer with +force-close support. All crashes are `debug_assert` or `panic!` inside +LDK, not in the fuzzer harness. Byte 0 encodes monitor styles (bits +0-2) and channel type (bits 3-4: 0=Legacy, 1=KeyedAnchors). + +## 1. channelmonitor.rs:2727 - HTLC input not found in transaction + +``` +debug_assert!(htlc_input_idx_opt.is_some()); +``` + +When resolving an HTLC spend, the monitor searches for the HTLC +outpoint in the spending transaction's inputs but doesn't find it. +Falls back to index 0 in release mode, which would produce incorrect +tracking. + +Minimized (17 bytes): +``` +0x40 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xdc 0xde 0xff +``` + +Byte 0 = 0x40: Legacy channels, no async monitors. The sequence is +mostly 0xff (settlement) repeated, with height advances (0xdc, 0xde) +near the end. This suggests the crash happens during settlement when +processing on-chain HTLC spends after repeated settlement attempts. + +## 2. onchaintx.rs:913 - Duplicate claim ID in pending requests + +``` +debug_assert!(self.pending_claim_requests.get(&claim_id).is_none()); +``` + +The OnchainTxHandler registers a claim event with a claim_id that +already exists in the pending_claim_requests map. + +Minimized (10 bytes): +``` +0x08 0xd2 0x70 0x70 0x71 0x70 0x10 0x19 0xde 0xff +``` + +Byte 0 = 0x08: KeyedAnchors channels, no async monitors. +- 0xd2: B force-closes the A-B channel +- 0x70/0x71: disconnect/reconnect peers +- 0x10, 0x19: process messages on nodes A and B +- 0xde: advance chain 200 blocks +- 0xff: settle + +B force-closes, peers disconnect and reconnect, messages are exchanged, +then height advances and settlement triggers the duplicate claim. + +## 3. onchaintx.rs:1025 - Inconsistent internal maps + +``` +panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map"); +``` + +The OnchainTxHandler detects that its `pending_claim_requests` and +`claimable_outpoints` maps are out of sync. + +Minimized (14 bytes): +``` +0x00 0x3c 0x11 0x19 0xd0 0xde 0xff 0xff 0x19 0x21 0x19 0xde 0x26 0xff +``` + +Byte 0 = 0x00: Legacy channels, all monitors completed. +- 0x3c: send hop payment A->B->C (1M msat) +- 0x11, 0x19: process messages to commit HTLC on A-B +- 0xd0: A force-closes A-B +- 0xde: advance 200 blocks +- 0xff: settle (first round) +- 0xff: settle again (second round, processes more messages) +- 0x19, 0x21, 0x19: continue processing B and C messages +- 0xde: advance 200 more blocks +- 0x26: process events on node C +- 0xff: settle (third round) + +A hop payment partially committed, then A force-closes. Multiple +settlement rounds with continued message processing in between triggers +the internal map inconsistency. + +## 4. test_channel_signer.rs:395 - Signing revoked commitment + +``` +panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={}") +``` + +The test channel signer is asked to sign an HTLC transaction for a +commitment number that has already been revoked. + +Minimized (18 bytes): +``` +0x22 0x71 0x71 0x71 0x71 0x71 0x71 0x71 0xff 0xff 0xff 0xff 0xff 0xff 0xde 0xde 0xb5 0xff +``` + +Byte 0 = 0x22: Legacy channels, async monitors on node B. 
+- 0x71: disconnect B-C peers (repeated, only first effective) +- 0xff: settle (repeated 6 times) +- 0xde 0xde: advance 400 blocks +- 0xb5: restart node B with alternate monitor state +- 0xff: settle + +Async monitors on B with peer disconnection, repeated settlements, +height advances, and a node restart with a different monitor state. +The stale monitor combined with the restart puts B's signer in a state +where it's asked to sign for an already-revoked commitment. + +## 5. channelmanager.rs:9836 - Payment blocker not found + +``` +debug_assert!(found_blocker); +``` + +During payment processing, the ChannelManager expects to find a +specific blocker entry for an in-flight payment but it's missing. + +Minimized (13 bytes): +``` +0x00 0x3c 0x11 0x19 0x11 0x1f 0x19 0x21 0x19 0x27 0x27 0xde 0xff +``` + +Byte 0 = 0x00: Legacy channels, all monitors completed. +- 0x3c: send hop A->B->C (1M msat) +- 0x11, 0x19, 0x11: commit HTLC on A-B +- 0x1f: B processes events (forwards HTLC to C) +- 0x19, 0x21, 0x19: commit HTLC on B-C +- 0x27, 0x27: C processes events (claims payment) +- 0xde: advance 200 blocks +- 0xff: settle + +A straightforward A->B->C hop payment that completes normally (C +claims), followed by a height advance and settlement. No force-close +in this sequence, so the height advance before settlement may cause +HTLC timeout processing that conflicts with the claim path. + +## 6. channelmanager.rs:19484 - Monitor update ID ordering violation + +``` +debug_assert!(update.update_id >= pending_update.update_id); +``` + +A ChannelMonitorUpdate has an update_id that is less than a pending +update's id, violating the expected monotonic ordering. + +Minimized (10 bytes): +``` +0x84 0x70 0x11 0x19 0x11 0x1f 0xd0 0x11 0x1f 0xba +``` + +Byte 0 = 0x84: Legacy channels, no async monitors, high bits set +(bits 3-4 = 0, bits 7 and 2 set). +- 0x70: disconnect A-B peers +- 0x11, 0x19, 0x11: process messages (likely reestablish after setup) +- 0x1f: process B events +- 0xd0: A force-closes A-B channel +- 0x11: process A messages +- 0x1f: process B events +- 0xba: restart node B with alternate monitor state + +A force-close followed by continued message/event processing and a +node B restart triggers a monitor update with an out-of-order ID. diff --git a/fuzz/.gitignore b/fuzz/.gitignore index e8dc6b6e08b..cc3f5f53040 100644 --- a/fuzz/.gitignore +++ b/fuzz/.gitignore @@ -2,3 +2,4 @@ hfuzz_target target hfuzz_workspace corpus +artifacts \ No newline at end of file diff --git a/fuzz/FC-INFO.md b/fuzz/FC-INFO.md new file mode 100644 index 00000000000..1293fcdcedb --- /dev/null +++ b/fuzz/FC-INFO.md @@ -0,0 +1,107 @@ +# Force-Close Fuzzing Notes + +This file records the current contract for `chanmon_consistency` force-close +coverage. It is intentionally short. Keep branch history and one-off debugging +notes elsewhere. 
+ +## Goal + +Force-close fuzzing here should: + +- exercise realistic off-chain to on-chain transitions +- keep force-close from changing the eventual outcome of claimed payments +- only allow claimed-payment sender failures when force-close dust touched a + used payment path +- allow unclaimed HTLCs to resolve by CLTV timeout +- drive the harness far enough that it observes real terminal outcomes +- avoid manufacturing timeout wins by starving message delivery or claim + propagation + +## Hard-Mode Invariant + +The current hard mode is: + +- once the harness calls `claim_funds`, that HTLC must eventually produce + `PaymentClaimed` at the receiver +- after that claim, the sender must eventually produce a terminal outcome, + `PaymentSent` or `PaymentFailed` +- if the sender produces `PaymentFailed` for a claimed payment, some used + force-close path for that payment must have been dust-trimmed +- force-close dust on a used path is not, by itself, enough to require + `PaymentFailed`; the payment may still end in `PaymentSent` +- if no used force-close path for the claimed payment was dust-trimmed, the + sender must eventually produce `PaymentSent` +- going on-chain does not create any broader exception than that dust case +- unclaimed HTLCs may still fail by CLTV expiry +- CSV waits on force-close outputs are normal and expected; they are not + payment outcome changes +- a payment disappearing from `list_recent_payments()` is not enough, the + harness must observe or drive the terminal outcome directly + +In this mode, the following are harness failures: + +- `HTLCHandlingFailed::Receive` after we already chose to claim the HTLC +- a receiver-side claim without the receiver later getting `PaymentClaimed` +- a claimed HTLC without any sender-side terminal event +- a claimed HTLC getting `PaymentFailed` without any dust-trimmed used + force-close path +- a claimed HTLC that should fulfill resolving by CLTV timeout instead +- cleanup stopping while live balances or other pending work still show that + more progress is possible + +## Timeouts + +Do not conflate CSV and CLTV: + +- CSV is normal force-close settlement latency +- CLTV expiry changes the HTLC outcome + +The harness should keep driving through CSV waits. It should only protect +claimed HTLCs that should still fulfill from CLTV-expiry resolution. 
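+
+As a concrete illustration of that last rule, a minimal sketch of the stop-or-advance
+check (the `ClaimedHtlc` type, its field names, and `safe_to_advance_one_block` are
+hypothetical here, not the harness' actual bookkeeping):
+
+```rust
+/// One claimed HTLC that still owes the sender a terminal event.
+/// Illustrative only; the real harness tracks more state per payment.
+struct ClaimedHtlc {
+    cltv_expiry: u32,
+    has_sender_terminal_event: bool,
+}
+
+/// CSV waits never block progress; only refuse to cross a CLTV boundary
+/// that would let a claimed HTLC expire before the sender produced
+/// PaymentSent or PaymentFailed.
+fn safe_to_advance_one_block(next_height: u32, claimed: &[ClaimedHtlc]) -> bool {
+    claimed
+        .iter()
+        .all(|h| h.has_sender_terminal_event || next_height < h.cltv_expiry)
+}
+```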
+ +## Harness Rules + +The main rules for preserving the invariant are: + +- advance large height jumps one block at a time, with bounded draining before + and after each block +- process queued messages and events before confirming newly broadcast + transactions, so preimages can propagate before timeout paths win +- keep sender-side payment bookkeeping independent of + `list_recent_payments()` +- track which channels each payment actually used, and when force-closing, + snapshot which used payment paths become dust-blocked on the closer's + commitment +- keep driving while `ClaimableOnChannelClose`, HTLC-related claimable balances, + queued messages, pending monitor updates, or pending broadcasts still show + unresolved work +- only stop before a CLTV boundary when crossing it would let a claimed HTLC + that has not yet reached a sender terminal event expire instead +- do not hide pending-payment state behind unrelated auto-driving before an + explicit force-close opcode; a bounded pre-close drain is acceptable when it + is only making already-queued work visible + +## Review Checklist + +When changing this harness, verify: + +- claimed HTLCs still require `PaymentClaimed` +- claimed HTLCs still require a sender-side terminal event +- claimed HTLCs only allow `PaymentFailed` when some used force-close path was + dust-trimmed +- claimed HTLCs without dust-trimmed used force-close paths still require + `PaymentSent` +- unclaimed HTLCs may still time out on-chain +- force-close opcodes still act on the currently pending state +- large synthetic height jumps do not become blind timeout buttons again +- sender-side obligations are not reconciled away through local caches + +## Verification + +The standard check is: + +```bash +~/repo/rl-tools/run_fuzz_runner.sh --timeout-secs 20 +``` + +Re-run the full corpus after any meaningful force-close harness change. diff --git a/fuzz/ONCHAINTX-BUGS.md b/fuzz/ONCHAINTX-BUGS.md new file mode 100644 index 00000000000..0cb1b397bbe --- /dev/null +++ b/fuzz/ONCHAINTX-BUGS.md @@ -0,0 +1,327 @@ +# Recent `OnchainTxHandler` Bugs And Fixes + +This note records the three `OnchainTxHandler` bugs that were fixed while +hardening the `chanmon_consistency` force-close corpus. + +All three bugs lived in `lightning/src/chain/onchaintx.rs`. All were real +logic issues, not harness-only artifacts. All three now pass in targeted +reruns and in the full `chanmon_consistency` corpus sweep. + +Current green reference runs: + +- Targeted duplicate-claim rerun: + `fuzz/artifacts/chanmon_runner/run-1776537725/summary.txt` +- Targeted contentious-claim rerun: + `fuzz/artifacts/chanmon_runner/run-1776538115/summary.txt` +- Targeted duplicate pending-claim-event rerun: + `fuzz/artifacts/chanmon_runner/run-1776586956/summary.txt` +- Full corpus rerun: + `fuzz/artifacts/chanmon_runner/run-1776587008/summary.txt` + +Full-corpus result: + +- `392 ok / 0 failed / 0 timed_out / 0 spawn_errors` + +## 1. Duplicate pending claim request after force-close + +### Repro cases + +- `fc_duplicate_pending_claim_request_after_force_close_39b47f` + - bytes: `0fd37373d0b2ffd3` +- `fc_duplicate_pending_claim_request_after_force_close_ed278d` + - bytes: `08d37373d0b2ffd3` + +### What went wrong + +The failing shape was: + +1. A force-close created two single-outpoint claim requests. +2. Those requests were merged into one delayed package because their + timelock was still in the future. +3.
A later replay of `update_claims_view_from_requests` at the same + logical state recreated the same two single-outpoint requests. +4. The old dedupe logic only rejected a duplicate delayed claim if the + outpoint sets were exactly equal. +5. Because the existing delayed claim had already been merged into a + two-outpoint package, the new single-outpoint requests were not seen + as duplicates. +6. At the timelock height, the same aggregated delayed package was + restored twice and tried to register the same `ClaimId` twice. + +The crash was the debug assertion in `OnchainTxHandler`: + +- `assertion failed: self.pending_claim_requests.get(&claim_id).is_none()` + +Representative evidence from +`fuzz/artifacts/chanmon_runner/run-1776537612/logs/fc_duplicate_pending_claim_request_after_force_close_ed278d.log`: + +- line `1829`: `Updating claims view at height 61 with 2 claim requests` +- line `1830`: delayed until timelock `361` +- line `2077`: the same `2 claim requests` appear again +- line `17163`: delayed package restored at timelock `361` +- lines `17164` and `17167`: the same two-outpoint event is yielded twice +- line `17169`: assertion failure + +The same pattern appears in the sibling repro +`fc_duplicate_pending_claim_request_after_force_close_39b47f`. + +### Why the old logic was wrong + +Before the fix, delayed-claim dedupe effectively asked: + +- "Do I already have a delayed package with exactly the same outpoint + set as this new request?" + +That was too strict. Once two single-outpoint requests had already been +merged into one delayed package, replaying either single-outpoint +request should have been considered duplicate as well. + +The correct question is: + +- "Is every outpoint in this new request already covered by an existing + delayed package?" + +### The fix + +In `OnchainTxHandler::update_claims_view_from_requests`, the delayed +claim dedupe was changed from exact package equality to covering-package +detection. + +Relevant code: + +- `lightning/src/chain/onchaintx.rs`, `timelocked_covering_package` +- log line for this path: + `Ignoring second claim for outpoint ..., we already have one which + we're waiting on a timelock at ...` + +In practical terms: + +- a fresh single-outpoint request is now ignored if a delayed package + already contains that outpoint +- replaying the same logical claim state no longer creates duplicate + delayed packages +- the delayed package is restored only once at the timelock height + +### Why this fix is correct + +This does not suppress any legitimate new claim. It only rejects a +request whose entire outpoint set is already represented in pending +delayed state. If a request introduces a truly new outpoint, it still +passes through. + +### Verification + +Targeted rerun: + +- `fuzz/artifacts/chanmon_runner/run-1776537725/summary.txt` +- result: `2 ok / 0 failed / 0 timed_out` + +## 2. Contentious claim reused an already resolved outpoint + +### Repro cases + +- `fc_contentious_claim_stuck_after_force_close_218996` + - bytes: `89ffde3d3dc0d3ff` +- `fc_contentious_claim_stuck_after_force_close_36a22e` + - bytes: `2cffde3d3dc0d3ff` +- `fc_contentious_claim_stuck_after_force_close_d7793e` + - bytes: `76ffde3d3dc0d1ff` + +### What went wrong + +The failing shape was: + +1. An HTLC output was claimed on-chain by a single-outpoint claim. +2. That claim matured past `ANTI_REORG_DELAY`. +3. `OnchainTxHandler` removed the pending claim tracking for that + outpoint. +4. 
A later preimage update arrived and built a fresh two-outpoint claim + that included the already-resolved outpoint again. +5. That new claim could never confirm, because one of its inputs had + already been definitively spent. +6. The handler kept RBF-bumping that impossible claim forever, leaving a + claimed payment stuck pending in the harness. + +Representative evidence from +`fuzz/artifacts/chanmon_runner/run-1776537816/logs/fc_contentious_claim_stuck_after_force_close_d7793e.log`: + +- line `3173`: `Updating claims view at height 60 with 1 claim requests` +- line `3175`: registers claim for + `cc0e...:2` +- line `3282`: removes tracking for `cc0e...:2` after the claim package + matured +- line `3424`: `Updating claims view at height 66 with 2 claim requests` +- line `3425`: yields a new event spending + `cc0e...:1` and `cc0e...:2` +- lines `3426` and `3427`: registers both outpoints again +- lines `4438`, `5380`, `6322`, and many later lines: keeps yielding + RBF-bumped events for that same impossible two-input claim +- line `21640`: final harness failure, + `Node 2 has 1 stuck pending payments after settling all state` + +The same family reproduced in the other two named cases. + +### Why the old logic was wrong + +Removing an outpoint from `claimable_outpoints` after its claim matured +was not enough. That only said: + +- "we no longer need to actively track this pending request" + +It did not preserve the stronger fact: + +- "this outpoint is definitively spent and must never be re-claimed" + +Without that second fact, a later preimage could cause +`update_claims_view_from_requests` to resurrect an already-resolved +outpoint into a new claim package. + +### The fix + +`OnchainTxHandler` now maintains a restart-safe +`irrevocably_spent_outpoints: HashSet`. + +Relevant code paths: + +- field definition: + `lightning/src/chain/onchaintx.rs` +- serialization and deserialization: + the new optional TLV field in `write` and `read` +- request filtering: + `Ignoring claim for outpoint ..., it was already irrevocably spent by + a confirmed claim transaction` +- maturation handling: + outpoints are inserted into `irrevocably_spent_outpoints` when a claim + or contentious outpoint reaches the anti-reorg threshold + +This matters for restarts as well. The spent-outpoint memory is part of +the serialized `OnchainTxHandler` state, so a monitor reload does not +forget that the output was already definitively resolved. + +### Why this fix is correct + +Once a claim tx for an outpoint has reached `ANTI_REORG_DELAY`, the +handler should never generate a new claim for that same outpoint unless +the chain reorgs deep enough to invalidate the confirmation. That is +exactly the invariant the new set captures. + +The fix is intentionally narrow: + +- it does not suppress still-live outpoints +- it does not interfere with normal package splitting or merging +- it only blocks claim generation for outpoints that were already + irreversibly resolved + +### Verification + +Targeted rerun: + +- `fuzz/artifacts/chanmon_runner/run-1776538115/summary.txt` +- result: `3 ok / 0 failed / 0 timed_out` + +## 3. Duplicate pending claim event after force-close + +### Repro cases + +- `fc_duplicate_pending_claim_event_after_force_close` + - bytes: `2934ff3dc0d1b6ff` +- `fc_duplicate_pending_claim_event_after_force_close_zero_fee_commitments` + - bytes: `3f34ff3dc0d1b6ff` + +### What went wrong + +The failing shape was: + +1. A force-close path yielded an `OnchainClaim::Event`. +2. 
`OnchainTxHandler` inserted that event into `pending_claim_events` + under its `ClaimId`. +3. Before the original pending event was drained, the same logical claim + was rebuilt and yielded again. +4. The initial insertion path still assumed that duplicate `ClaimId` + entries could never happen there. +5. That path pushed a second entry with the same `ClaimId` and hit the + debug assertion that the count had to be zero. + +The crash was the debug assertion in `OnchainTxHandler`: + +- `debug_assert_eq!(self.pending_claim_events.iter().filter(|entry| entry.0 == claim_id).count(), 0);` + +Representative evidence from +`fuzz/artifacts/chanmon_runner/run-1776584834/logs/crash-4b5e6aabf5bc0467bcd2163cced7d60241d24f17.log`: + +- line `3544`: yields an on-chain event spending the commitment output +- line `3545`: registers the associated claim request +- line `3679`: later rebuilds claims view with one fresh claim request +- line `3680`: yields another on-chain event for HTLC output + `513872...:2` +- line `3681`: assertion failure while inserting the second event with + the same `ClaimId` + +The sibling zero-fee-commitments repro follows the same shape in +`crash-a83289388ca2b4f52279218f3a70e0f1f0661a92.log`, with the same +panic at `onchaintx.rs:944`. + +### Why the old logic was wrong + +`pending_claim_events` was already being treated like a keyed queue in +other parts of `OnchainTxHandler`: + +- rebroadcast logic replaced existing entries by `ClaimId` +- bump logic replaced existing entries by `ClaimId` +- reorg logic replaced existing entries by `ClaimId` + +Only the initial insertion path still assumed uniqueness and pushed +blindly. That left the structure with inconsistent semantics depending +on which path happened to enqueue the event. + +The correct invariant is: + +- there is at most one pending event per `ClaimId` +- re-enqueuing the same logical claim should replace the older entry, + not panic + +### The fix + +In `OnchainTxHandler::update_claims_view_from_requests`, the initial +`OnchainClaim::Event` insertion now matches the other paths: + +- under debug builds it asserts the existing count is `0` or `1` +- it removes any existing `pending_claim_events` entry for that + `ClaimId` +- it then pushes the newest event + +This preserves insertion order for distinct claim ids while making +duplicate requeues idempotent. + +### Why this fix is correct + +This does not hide a real conflict between distinct claims. Two +different claim packages should not share a `ClaimId`. If they do, they +represent the same logical event as far as the queue is concerned, and +the newest version should replace the old one. + +This also makes the queue semantics internally consistent. Every path +that mutates `pending_claim_events` now treats it as keyed by +`ClaimId`, rather than having one path act like a multimap. 
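+
+A simplified sketch of the keyed-replacement insertion described above, using
+toy stand-ins (`ClaimId` as a plain byte-array alias and a dummy `PendingEvent`)
+rather than the real `onchaintx.rs` types:
+
+```rust
+type ClaimId = [u8; 32];
+
+#[derive(Clone, Debug)]
+struct PendingEvent(String);
+
+fn enqueue_claim_event(
+    pending: &mut Vec<(ClaimId, PendingEvent)>, claim_id: ClaimId, event: PendingEvent,
+) {
+    // At most one pending event may exist per ClaimId.
+    debug_assert!(pending.iter().filter(|(id, _)| *id == claim_id).count() <= 1);
+    // Re-enqueuing the same logical claim replaces the older entry instead of
+    // panicking, matching the rebroadcast, bump, and reorg paths.
+    pending.retain(|(id, _)| *id != claim_id);
+    pending.push((claim_id, event));
+}
+```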
+ +### Verification + +Targeted rerun: + +- `fuzz/artifacts/chanmon_runner/run-1776586956/summary.txt` +- result: `2 ok / 0 failed / 0 timed_out` + +## Final verification + +After all three fixes landed, the default corpus sweep passed: + +- `fuzz/artifacts/chanmon_runner/run-1776587008/summary.txt` +- result: `392 ok / 0 failed / 0 timed_out / 0 spawn_errors` + +This is the reference run showing that: + +- the duplicate delayed-claim family is fixed +- the contentious reused-outpoint family is fixed +- the duplicate pending-claim-event family is fixed +- none of the three changes regressed the previously fixed dust, restart, or + sender-terminal-event invariants diff --git a/fuzz/OPEN-ISSUES.md b/fuzz/OPEN-ISSUES.md new file mode 100644 index 00000000000..e20c8e82390 --- /dev/null +++ b/fuzz/OPEN-ISSUES.md @@ -0,0 +1,39 @@ +# Open Issues + +There are no currently open `chanmon_consistency` crash families in this +branch. + +Latest green reference run: + +- Full corpus rerun: + [run-1776587008 summary](/Users/joost/repo/rust-lightning-fuzz-force-close/fuzz/artifacts/chanmon_runner/run-1776587008/summary.txt) + with `392 ok / 0 failed / 0 timed_out / 0 spawn_errors` + +Recently resolved: + +- Manager reload failed with `DangerousValue`. + Fixed in + [fuzz/src/chanmon_consistency.rs](/Users/joost/repo/rust-lightning-fuzz-force-close/fuzz/src/chanmon_consistency.rs) + by retiring every pending monitor blob at `<= completed_update_id` + once a later monitor update is acknowledged complete. + This prevents restart selectors from reloading a stale older monitor + after the serialized `ChannelManager` has already dropped the + corresponding blocked updates. + Targeted verification is clean in + [run-1776585235 summary](/Users/joost/repo/rust-lightning-fuzz-force-close/fuzz/artifacts/chanmon_runner/run-1776585235/summary.txt) + with `8 ok / 0 failed / 0 timed_out`. + +- `OnchainTxHandler` could enqueue the same pending claim event twice. + Fixed in + [lightning/src/chain/onchaintx.rs](/Users/joost/repo/rust-lightning-fuzz-force-close/lightning/src/chain/onchaintx.rs) + by making the initial `pending_claim_events` insertion path replace an + existing entry with the same `ClaimId`, matching the keyed behavior + already used in the rebroadcast, bump, and reorg paths. + Representative repro cases are: + [fc_duplicate_pending_claim_event_after_force_close](/Users/joost/repo/rust-lightning-fuzz-force-close/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close) + with bytes `2934ff3dc0d1b6ff`, and + [fc_duplicate_pending_claim_event_after_force_close_zero_fee_commitments](/Users/joost/repo/rust-lightning-fuzz-force-close/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close_zero_fee_commitments) + with bytes `3f34ff3dc0d1b6ff`. + Targeted verification is clean in + [run-1776586956 summary](/Users/joost/repo/rust-lightning-fuzz-force-close/fuzz/artifacts/chanmon_runner/run-1776586956/summary.txt) + with `2 ok / 0 failed / 0 timed_out`.
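+
+As a concrete illustration of the blob retirement described in the first
+resolved item above, a minimal sketch (the function name and the
+`Vec<(u64, Vec<u8>)>` shape are illustrative, not the harness' actual API):
+
+```rust
+/// Drop every pending (update id, serialized monitor) blob that is no longer
+/// a valid restart candidate once `completed_update_id` has been acknowledged
+/// as durably persisted.
+fn retire_stale_monitor_blobs(pending: &mut Vec<(u64, Vec<u8>)>, completed_update_id: u64) {
+    pending.retain(|(update_id, _)| *update_id > completed_update_id);
+}
+```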
diff --git a/fuzz/test_cases/base32/smoke b/fuzz/test_cases/base32/smoke deleted file mode 100644 index 573541ac970..00000000000 --- a/fuzz/test_cases/base32/smoke +++ /dev/null @@ -1 +0,0 @@ -0 diff --git a/fuzz/test_cases/bech32_parse/smoke b/fuzz/test_cases/bech32_parse/smoke deleted file mode 100644 index 573541ac970..00000000000 --- a/fuzz/test_cases/bech32_parse/smoke +++ /dev/null @@ -1 +0,0 @@ -0 diff --git a/fuzz/test_cases/chanmon_consistency/crash-02830a6ff7757f3570924b0c0fd9118a7cdd9770 b/fuzz/test_cases/chanmon_consistency/crash-02830a6ff7757f3570924b0c0fd9118a7cdd9770 new file mode 100644 index 0000000000000000000000000000000000000000..57c626b8597071187a3bc4e90ce05655fd65b429 GIT binary patch literal 24 dcmZQL!C@mPDJUzcD0%V5&0F{G0l7ed{{UQa2#Npz literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/crash-0473b0e767d9a98de62538ce5afcbbc2e6ec5af2 b/fuzz/test_cases/chanmon_consistency/crash-0473b0e767d9a98de62538ce5afcbbc2e6ec5af2 new file mode 100644 index 00000000000..ba413134fbb --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-0473b0e767d9a98de62538ce5afcbbc2e6ec5af2 @@ -0,0 +1 @@ +pppppp0ppp0ÀÞÚÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-05e175d40f60b823f730fa874d98dc10dd2bb6ad b/fuzz/test_cases/chanmon_consistency/crash-05e175d40f60b823f730fa874d98dc10dd2bb6ad new file mode 100644 index 00000000000..cabed892750 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-05e175d40f60b823f730fa874d98dc10dd2bb6ad @@ -0,0 +1 @@ +lls²ÿÿÿÿÝÝÝÝÝÿÿÿÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-07bdc4e56ee67bd2ffa409f76529199d748ab2d8 b/fuzz/test_cases/chanmon_consistency/crash-07bdc4e56ee67bd2ffa409f76529199d748ab2d8 new file mode 100644 index 00000000000..eb3ac3716d2 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-07bdc4e56ee67bd2ffa409f76529199d748ab2d8 @@ -0,0 +1 @@ +pppppp0ÀÐ%ÞÞÏØÙÚÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-096cc3008264dccaefb945f5a4b7a2d3c9f8e90c b/fuzz/test_cases/chanmon_consistency/crash-096cc3008264dccaefb945f5a4b7a2d3c9f8e90c new file mode 100644 index 00000000000..f00662619a1 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-096cc3008264dccaefb945f5a4b7a2d3c9f8e90c @@ -0,0 +1 @@ +<!''ÐØ¥!ÙÚÞºÿ³ÑºÓÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-09a17e06913dea74dba796940cec86cb4e2dd597 b/fuzz/test_cases/chanmon_consistency/crash-09a17e06913dea74dba796940cec86cb4e2dd597 new file mode 100644 index 00000000000..30543451915 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-09a17e06913dea74dba796940cec86cb4e2dd597 @@ -0,0 +1 @@ +<!ÑØÙÚÞÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-09f5a41270b07f70a031884cbdfd081e8600923e b/fuzz/test_cases/chanmon_consistency/crash-09f5a41270b07f70a031884cbdfd081e8600923e new file mode 100644 index 0000000000000000000000000000000000000000..e0ff1832a4fa6f420610b625b63dfc2b521be628 GIT binary patch literal 22 ecmdi1 literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/crash-228ea00412a2fab1e866fc6df32ffd00bbfe81ad b/fuzz/test_cases/chanmon_consistency/crash-228ea00412a2fab1e866fc6df32ffd00bbfe81ad new file mode 100644 index 0000000000000000000000000000000000000000..4a6a76ade6c4327ec5b327290a1d158a6a649f45 GIT binary patch literal 24 fcmd-u6y(3j00f4D|Nr0PziD&p9>>4`|Na92UV95K literal 0 HcmV?d00001 diff --git 
a/fuzz/test_cases/chanmon_consistency/crash-242de208110143401fcf4e1ebaa7d9d38fb93611 b/fuzz/test_cases/chanmon_consistency/crash-242de208110143401fcf4e1ebaa7d9d38fb93611 new file mode 100644 index 00000000000..76da0f6debb --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-242de208110143401fcf4e1ebaa7d9d38fb93611 @@ -0,0 +1 @@ +*ÿ¹¹¹þÿÿÒ¸ÿÞÞÞÞÿ¹¹¹¹¹¹¹ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-24f1373b1cf51f95af854d6d8730336b77728007 b/fuzz/test_cases/chanmon_consistency/crash-24f1373b1cf51f95af854d6d8730336b77728007 new file mode 100644 index 00000000000..0064fa17f19 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-24f1373b1cf51f95af854d6d8730336b77728007 @@ -0,0 +1 @@ +*ÿ¹tÿA2¹¹¹¹ÑØÙÚÞÿÿ¹¹ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-2923c14608fb259c21862cd71ffeb6ac74b0ba32 b/fuzz/test_cases/chanmon_consistency/crash-2923c14608fb259c21862cd71ffeb6ac74b0ba32 new file mode 100644 index 00000000000..ff1549ef79f --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-2923c14608fb259c21862cd71ffeb6ac74b0ba32 @@ -0,0 +1 @@ +p0p0ÀÞÞÏØ°Zÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-2a0852bec1d75334538dacec26831db6995b6e33 b/fuzz/test_cases/chanmon_consistency/crash-2a0852bec1d75334538dacec26831db6995b6e33 new file mode 100644 index 00000000000..f5e273ff51f --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-2a0852bec1d75334538dacec26831db6995b6e33 @@ -0,0 +1 @@ +p0ÀÞÞÏbÿÙÚÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-2d93541536e19c030d95d236e6be545352d98b80 b/fuzz/test_cases/chanmon_consistency/crash-2d93541536e19c030d95d236e6be545352d98b80 new file mode 100644 index 00000000000..0c7432d7c20 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-2d93541536e19c030d95d236e6be545352d98b80 @@ -0,0 +1 @@ +*ÿ¹tÿA¹¹¹¹ÑØÙÚÞÿÿ¹¹ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-2e002fcfdc76c5981f5f93c0f842b548fb56c7a7 b/fuzz/test_cases/chanmon_consistency/crash-2e002fcfdc76c5981f5f93c0f842b548fb56c7a7 new file mode 100644 index 00000000000..bd5c0aab70a --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-2e002fcfdc76c5981f5f93c0f842b548fb56c7a7 @@ -0,0 +1 @@ +p0t0ÀÞÞÏØÙZÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-2fad50c7fd20b250f0349887445af198124900df b/fuzz/test_cases/chanmon_consistency/crash-2fad50c7fd20b250f0349887445af198124900df new file mode 100644 index 0000000000000000000000000000000000000000..44d0be6fc50646d858865e0158f9e4641b1e387d GIT binary patch literal 23 ecmd{{cHF2-W}q literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/crash-33c08a8f15f1c842df5da4fc92228d00606573f9 b/fuzz/test_cases/chanmon_consistency/crash-33c08a8f15f1c842df5da4fc92228d00606573f9 new file mode 100644 index 00000000000..391b9204000 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-33c08a8f15f1c842df5da4fc92228d00606573f9 @@ -0,0 +1 @@ +<ˆ0sslqlqqÚÞÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-33e77c2f720493e306bbfea79f151388ca7a04ea b/fuzz/test_cases/chanmon_consistency/crash-33e77c2f720493e306bbfea79f151388ca7a04ea new file mode 100644 index 0000000000000000000000000000000000000000..2c4a1c6cac69675d19dd6a25823cf46a586de0c6 GIT binary patch literal 24 dcmd-w;QIf+VM*$eB|z%lz5hFbVDtaY3;?vR50(G` literal 0 HcmV?d00001 diff --git 
a/fuzz/test_cases/chanmon_consistency/crash-37a18356d608c97415c0a1bef6a0f13fe04c8b97 b/fuzz/test_cases/chanmon_consistency/crash-37a18356d608c97415c0a1bef6a0f13fe04c8b97 new file mode 100644 index 0000000000000000000000000000000000000000..877a41dd6ae570a9dedab0719c6337030f68c2c7 GIT binary patch literal 22 dcmd07LEu=>Px# literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/crash-8ab54f3642a60a239a7bb787838f3e5a6b6f4f41 b/fuzz/test_cases/chanmon_consistency/crash-8ab54f3642a60a239a7bb787838f3e5a6b6f4f41 new file mode 100644 index 00000000000..50f706994ac --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-8ab54f3642a60a239a7bb787838f3e5a6b6f4f41 @@ -0,0 +1 @@ +ÐÿÞ ØÙÚÌÜÜÿÿÿÿÿÿÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-8ec6798103af6cedfdec68373991c0c0a73e3770 b/fuzz/test_cases/chanmon_consistency/crash-8ec6798103af6cedfdec68373991c0c0a73e3770 new file mode 100644 index 00000000000..877960d1655 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-8ec6798103af6cedfdec68373991c0c0a73e3770 @@ -0,0 +1 @@ +<:!''ÚÞÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-8f5cc4f6de42f52dcb571b6c0f21df957eb25462 b/fuzz/test_cases/chanmon_consistency/crash-8f5cc4f6de42f52dcb571b6c0f21df957eb25462 new file mode 100644 index 0000000000000000000000000000000000000000..e64b0b71a133805d9d4ec6ce851113c8497e3757 GIT binary patch literal 27 Zcmd;(U|{$U2ZbUg}ZnD0{|yc2Q2^q literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/crash-91ebb8583ed7705e2601334e52428ea5eb80a681 b/fuzz/test_cases/chanmon_consistency/crash-91ebb8583ed7705e2601334e52428ea5eb80a681 new file mode 100644 index 0000000000000000000000000000000000000000..ba9c42d7059ce40516c0cc28ac235e92721a4a98 GIT binary patch literal 23 dcmd~25 literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_keyed_anchors new file mode 100644 index 00000000000..a9b7f9e59f4 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_keyed_anchors @@ -0,0 +1 @@ +ÐÜØÙÚÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_zero_fee_commitments new file mode 100644 index 00000000000..5208796d906 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_zero_fee_commitments @@ -0,0 +1 @@ +ÐÜØÙÚÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_after_claim_before_forward b/fuzz/test_cases/chanmon_consistency/fc_after_claim_before_forward new file mode 100644 index 0000000000000000000000000000000000000000..6ed4f13402c34dd77779be072fa58c2cb191520f GIT binary patch literal 18 acmZRu5tI~^msFHgSHE!M=B<18{sRCe_y81=B+z-?)(P;CAaO38!d-whW03k*O=Kufz literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_keyed_anchors new file mode 100644 index 00000000000..a3fb940a20a --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_keyed_anchors @@ -0,0 +1 @@ +0ÐØÙÚÞÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_zero_fee_commitments new file mode 100644 index 00000000000..5363a35b90c --- 
/dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_zero_fee_commitments @@ -0,0 +1 @@ +0ÐØÙÚÞÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_disabled_signers b/fuzz/test_cases/chanmon_consistency/fc_disabled_signers new file mode 100644 index 0000000000000000000000000000000000000000..e178523c474a4b925340f2c79d99e33af2b39852 GIT binary patch literal 13 VcmZQLaPY#Jvo~(ux^w5we*ixR3JCxJ literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_disabled_signers_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_disabled_signers_keyed_anchors new file mode 100644 index 00000000000..c3b358cff6e --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_disabled_signers_keyed_anchors @@ -0,0 +1 @@ +ÀÁÐÌÍØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_disabled_signers_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_disabled_signers_zero_fee_commitments new file mode 100644 index 00000000000..7268091c3a6 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_disabled_signers_zero_fee_commitments @@ -0,0 +1 @@ +ÀÁÐÌÍØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect b/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect new file mode 100644 index 0000000000000000000000000000000000000000..207eb4b58c90a08f743da1baff7708c0e87a125f GIT binary patch literal 13 VcmZSJxxgnNapUH#J9qB<2LK~v2QdHu literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_keyed_anchors new file mode 100644 index 00000000000..519443e6590 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_keyed_anchors @@ -0,0 +1 @@ + ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_zero_fee_commitments new file mode 100644 index 00000000000..b89f4eb6bb1 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_zero_fee_commitments @@ -0,0 +1 @@ + ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect b/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect new file mode 100644 index 0000000000000000000000000000000000000000..93ca334ca2a70442054b3a006617f9e744d02f78 GIT binary patch literal 13 VcmZR$z;ol~Ej|H>J9qB<2LLN42QdHu literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_keyed_anchors new file mode 100644 index 00000000000..977c994b110 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_keyed_anchors @@ -0,0 +1 @@ +Ð ØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_zero_fee_commitments new file mode 100644 index 00000000000..c11308e079e --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_zero_fee_commitments @@ -0,0 +1 @@ +Ð ØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_drain_a_only b/fuzz/test_cases/chanmon_consistency/fc_drain_a_only 
new file mode 100644 index 0000000000000000000000000000000000000000..0b2e2d9f9ec9c46c34d8aca48afb11a0bac1872a GIT binary patch literal 7 OcmZR$aO2LMJO2R@+66oS literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_drain_a_only_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_drain_a_only_keyed_anchors new file mode 100644 index 00000000000..fb230c9d737 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_drain_a_only_keyed_anchors @@ -0,0 +1 @@ +ÐØÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_drain_a_only_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_drain_a_only_zero_fee_commitments new file mode 100644 index 00000000000..745714790dd --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_drain_a_only_zero_fee_commitments @@ -0,0 +1 @@ +ÐØÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close new file mode 100644 index 00000000000..90215e173af --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close @@ -0,0 +1 @@ +)4ÿ=ÀѶÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close_zero_fee_commitments new file mode 100644 index 00000000000..915dda2eb20 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close_zero_fee_commitments @@ -0,0 +1 @@ +?4ÿ=ÀѶÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_39b47f b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_39b47f new file mode 100644 index 00000000000..04b31004a35 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_39b47f @@ -0,0 +1 @@ +ÓssвÿÓ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_ed278d b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_ed278d new file mode 100644 index 00000000000..dad6421e159 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_ed278d @@ -0,0 +1 @@ +ÓssвÿÓ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_during_reconnect b/fuzz/test_cases/chanmon_consistency/fc_during_reconnect new file mode 100644 index 0000000000000000000000000000000000000000..fa91c56dd57c27a7d09026c131bd07d60af03dc5 GIT binary patch literal 13 VcmZSJ;kzIpapUH#J9qB<2LK|X2QdHu literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_during_reconnect_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_during_reconnect_keyed_anchors new file mode 100644 index 00000000000..9d325043b08 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_during_reconnect_keyed_anchors @@ -0,0 +1 @@ + ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_during_reconnect_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_during_reconnect_zero_fee_commitments new file mode 100644 index 00000000000..5ed28383592 --- /dev/null +++ 
b/fuzz/test_cases/chanmon_consistency/fc_during_reconnect_zero_fee_commitments @@ -0,0 +1 @@ + ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_dust_htlcs b/fuzz/test_cases/chanmon_consistency/fc_dust_htlcs new file mode 100644 index 0000000000000000000000000000000000000000..22e65fb3d4d29503573145a0fe55c4a1feb608ae GIT binary patch literal 19 bcmZRGh>VIBkPwhml)P~9#?4#z?)?Vrr(J9qB<2LL4m2S)$^ literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_keyed_anchors new file mode 100644 index 00000000000..44bfd92caef --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_keyed_anchors @@ -0,0 +1 @@ +ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_zero_fee_commitments new file mode 100644 index 00000000000..69689cecd75 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_zero_fee_commitments @@ -0,0 +1 @@ +ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay b/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay new file mode 100644 index 0000000000000000000000000000000000000000..604a284a048544ac8c72956c8fd9b74698c31094 GIT binary patch literal 17 ZcmZQD5RecMlT*K7c;n`+J9qB<2LLL}2jc($ literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_keyed_anchors new file mode 100644 index 00000000000..813d703da99 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_keyed_anchors @@ -0,0 +1 @@ +0'Ð1ØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_zero_fee_commitments new file mode 100644 index 00000000000..91a7822c238 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_zero_fee_commitments @@ -0,0 +1 @@ +0'Ð1ØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_pending_cache_without_observable_lifecycle_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_pending_cache_without_observable_lifecycle_zero_fee_commitments new file mode 100644 index 00000000000..149317919a3 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_pending_cache_without_observable_lifecycle_zero_fee_commitments @@ -0,0 +1 @@ +ÿÿÿÿ·Ïùÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_pending_monitor b/fuzz/test_cases/chanmon_consistency/fc_pending_monitor new file mode 100644 index 0000000000000000000000000000000000000000..ed197c810f12c5699150264790926c8c73c95070 GIT binary patch literal 15 XcmZQzFc6Tqz`=Rr=B+z-?)(P;B=QGX literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_pending_monitor_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_pending_monitor_keyed_anchors new file mode 100644 index 0000000000000000000000000000000000000000..89b4d1eca9f4ba9a9d66aff40dfdbb854c3ad2b5 GIT binary patch literal 15 Xcmd;JFc6Tqz`=Rr=B+z-?)(P;C29v? 
literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_pending_monitor_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_pending_monitor_zero_fee_commitments new file mode 100644 index 0000000000000000000000000000000000000000..b7dfc3c50127d57805f2f588a8291985552c3165 GIT binary patch literal 15 XcmWe&Fc6Tqz`=Rr=B+z-?)(P;CE^EY literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_skips_zero_outbound_limit_channel b/fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_skips_zero_outbound_limit_channel new file mode 100644 index 00000000000..7f191fe194b --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_skips_zero_outbound_limit_channel @@ -0,0 +1 @@ +ÿÿºúÿÿÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_uses_advertised_sendable_range b/fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_uses_advertised_sendable_range new file mode 100644 index 00000000000..0d5352b88ae --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_uses_advertised_sendable_range @@ -0,0 +1,2 @@ + +ÿ´búÿÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_rapid_fire b/fuzz/test_cases/chanmon_consistency/fc_rapid_fire new file mode 100644 index 0000000000000000000000000000000000000000..cf6df985d74d27e4fc3f6b1de87cb6f6a1d54b69 GIT binary patch literal 12 UcmZQDxL|bg#?4!I?%eqg04jk9A^-pY literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_rapid_fire_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_rapid_fire_keyed_anchors new file mode 100644 index 00000000000..01035d5c146 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_rapid_fire_keyed_anchors @@ -0,0 +1 @@ +0Ð2ÑØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_rapid_fire_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_rapid_fire_zero_fee_commitments new file mode 100644 index 00000000000..a5d2a725f74 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_rapid_fire_zero_fee_commitments @@ -0,0 +1 @@ +0Ð2ÑØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_reconnect b/fuzz/test_cases/chanmon_consistency/fc_reconnect new file mode 100644 index 0000000000000000000000000000000000000000..d0a2f5dc15227c19183b0de7b4b7cfe8b0d099f6 GIT binary patch literal 13 VcmZR$z{4jXapUH#J9qB<2LL1}2QdHu literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_reconnect_broadcast_announcements b/fuzz/test_cases/chanmon_consistency/fc_reconnect_broadcast_announcements new file mode 100644 index 00000000000..9547b711ba8 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_reconnect_broadcast_announcements @@ -0,0 +1 @@ +µ¸ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_reconnect_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_reconnect_keyed_anchors new file mode 100644 index 00000000000..77ac690a59a --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_reconnect_keyed_anchors @@ -0,0 +1 @@ +Ð ØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_reconnect_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_reconnect_zero_fee_commitments new file mode 100644 index 00000000000..a76076c83c3 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_reconnect_zero_fee_commitments @@ -0,0 +1 @@ +Ð ØÙÚÜÜÜÿ \ No newline at end of file diff --git 
a/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel b/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel new file mode 100644 index 0000000000000000000000000000000000000000..2cecd41dd26ead926c526969e139f66f8969e77b GIT binary patch literal 11 TcmZR$aN)v@o44-Vx$_?YE%peH literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_keyed_anchors new file mode 100644 index 00000000000..9d1ae851c44 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_keyed_anchors @@ -0,0 +1 @@ +ÐÐÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_zero_fee_commitments new file mode 100644 index 00000000000..6fde55ccdb7 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_zero_fee_commitments @@ -0,0 +1 @@ +ÐÐÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_restart_claimed_payment_stale_monitor_replay b/fuzz/test_cases/chanmon_consistency/fc_restart_claimed_payment_stale_monitor_replay new file mode 100644 index 00000000000..9af74b8d826 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_restart_claimed_payment_stale_monitor_replay @@ -0,0 +1 @@ +4Â4аÜaÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_restart_in_progress_chain_sync_replay b/fuzz/test_cases/chanmon_consistency/fc_restart_in_progress_chain_sync_replay new file mode 100644 index 00000000000..38181bf293f --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_restart_in_progress_chain_sync_replay @@ -0,0 +1 @@ +14Â4аÓÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_restart_mid_resolve b/fuzz/test_cases/chanmon_consistency/fc_restart_mid_resolve new file mode 100644 index 0000000000000000000000000000000000000000..0517320bc3380cd876cedb26e79a836721db66e8 GIT binary patch literal 10 ScmZR$aO38!I~(rY`40dkd literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/force_close_basic_async b/fuzz/test_cases/chanmon_consistency/force_close_basic_async new file mode 100644 index 00000000000..086ce5b53fe --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_basic_async @@ -0,0 +1 @@ +ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_basic_async_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_basic_async_keyed_anchors new file mode 100644 index 00000000000..55d8227b650 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_basic_async_keyed_anchors @@ -0,0 +1 @@ +ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_basic_async_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_basic_async_zero_fee_commitments new file mode 100644 index 00000000000..4f375bcbcc8 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_basic_async_zero_fee_commitments @@ -0,0 +1 @@ +ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_basic_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_basic_keyed_anchors new file mode 100644 index 00000000000..87788e516d1 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_basic_keyed_anchors @@ -0,0 +1 @@ +ÐØÙÚÜÜÜÿ \ No newline at end of file diff 
--git a/fuzz/test_cases/chanmon_consistency/force_close_basic_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_basic_zero_fee_commitments new file mode 100644 index 00000000000..686c55e6e8d --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_basic_zero_fee_commitments @@ -0,0 +1 @@ +ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_both_directions b/fuzz/test_cases/chanmon_consistency/force_close_both_directions new file mode 100644 index 0000000000000000000000000000000000000000..c55d73896f8652ff99bccefb8c203b6aca911c2a GIT binary patch literal 10 ScmZR$aPh{?TX*i<`40dk-3Pz` literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async new file mode 100644 index 00000000000..4937e12b5e2 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async @@ -0,0 +1 @@ +ÐÑØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_keyed_anchors new file mode 100644 index 00000000000..868c75adb90 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_keyed_anchors @@ -0,0 +1 @@ +ÐÑØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_zero_fee_commitments new file mode 100644 index 00000000000..0f3c204b38e --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_zero_fee_commitments @@ -0,0 +1 @@ +ÐÑØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_both_directions_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_keyed_anchors new file mode 100644 index 00000000000..f5fd80c55fb --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_keyed_anchors @@ -0,0 +1 @@ +ÐÑØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_both_directions_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_zero_fee_commitments new file mode 100644 index 00000000000..d22536577df --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_zero_fee_commitments @@ -0,0 +1 @@ +ÐÑØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height new file mode 100644 index 0000000000000000000000000000000000000000..9936534a4758c909218399db60fdfb75b12faeaa GIT binary patch literal 11 ScmZQD5RecMle=)^<}CmX83XtL literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async new file mode 100644 index 00000000000..11a097db35c --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async @@ -0,0 +1 @@ +0  ÐØÙÚ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_keyed_anchors new file mode 100644 index 00000000000..846b036955d --- /dev/null 
+++ b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_keyed_anchors @@ -0,0 +1 @@ +0  ÐØÙÚ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_zero_fee_commitments new file mode 100644 index 00000000000..2ba86884cb3 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_zero_fee_commitments @@ -0,0 +1 @@ +0  ÐØÙÚ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_keyed_anchors new file mode 100644 index 00000000000..7e59be8feaa --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_keyed_anchors @@ -0,0 +1 @@ +0ÐØÙÚ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_zero_fee_commitments new file mode 100644 index 00000000000..d852e18e121 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_zero_fee_commitments @@ -0,0 +1 @@ +0ÐØÙÚ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved b/fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved new file mode 100644 index 0000000000000000000000000000000000000000..73b498f3c647ce882aa42003e39a1c5e1633509a GIT binary patch literal 11 ScmZQD5RecMle=)^-aP;fCj$t11JCh literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async new file mode 100644 index 00000000000..f2961702e79 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async @@ -0,0 +1,4 @@ +<   + ! + '' ! + Ðÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_keyed_anchors new file mode 100644 index 00000000000..babb7953068 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_keyed_anchors @@ -0,0 +1,4 @@ +<   + ! + '' ! + Ðÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_zero_fee_commitments new file mode 100644 index 00000000000..10fdc572a11 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_zero_fee_commitments @@ -0,0 +1,4 @@ +<   + ! + '' ! 
+ Ðÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_keyed_anchors new file mode 100644 index 00000000000..d0d5126f551 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_keyed_anchors @@ -0,0 +1 @@ +<!''!Ðÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_zero_fee_commitments new file mode 100644 index 00000000000..50ba2ea9183 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_zero_fee_commitments @@ -0,0 +1 @@ +<!''!Ðÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmanager_19484 b/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmanager_19484 new file mode 100644 index 00000000000..577f5f71cd0 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmanager_19484 @@ -0,0 +1 @@ +„pк \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmanager_9836 b/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmanager_9836 new file mode 100644 index 0000000000000000000000000000000000000000..14487ba70cd7971f641f8961d0d9e4dd2362284f GIT binary patch literal 13 UcmZRu5tI~^msFHgSHJfk01M#*6#xJL literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmonitor_2727 b/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmonitor_2727 new file mode 100644 index 00000000000..a1852414d9c --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmonitor_2727 @@ -0,0 +1 @@ +@ÿÿÿÿÿÿÿÿÿÿÿÿÿÜÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/ldk_crash_onchaintx_1025 b/fuzz/test_cases/chanmon_consistency/ldk_crash_onchaintx_1025 new file mode 100644 index 0000000000000000000000000000000000000000..982ac748a974a353683dce6ccbfd5bc4b1b935f3 GIT binary patch literal 14 VcmZRu5tO`e@Be>EMag?={{bmn25A5Q literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/ldk_crash_onchaintx_913 b/fuzz/test_cases/chanmon_consistency/ldk_crash_onchaintx_913 new file mode 100644 index 00000000000..6c32f6a71aa --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/ldk_crash_onchaintx_913 @@ -0,0 +1 @@ +ÒppqpÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/ldk_crash_signer_395 b/fuzz/test_cases/chanmon_consistency/ldk_crash_signer_395 new file mode 100644 index 00000000000..2fb6ec95740 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/ldk_crash_signer_395 @@ -0,0 +1 @@ +"qqqqqqqÿÿÿÿÿÿÞÞµÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/timeout-0103befb3dc5aa050668752668d04e85bd1fc14e b/fuzz/test_cases/chanmon_consistency/timeout-0103befb3dc5aa050668752668d04e85bd1fc14e new file mode 100644 index 0000000000000000000000000000000000000000..f5c015c80af9ac48a7b07e6750ceb9a37f6bf2b2 GIT binary patch literal 24 gcmZRu5R_!F5tI~^msFHYxTnVOf6d0N_x}F}06Yx{od5s; literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-05fc1bb98f2a3b29e826a4de636474de0b23c895 b/fuzz/test_cases/chanmon_consistency/timeout-05fc1bb98f2a3b29e826a4de636474de0b23c895 new file mode 100644 index 0000000000000000000000000000000000000000..e63b29d1bd588160c7f213a43693cce2daca74dd GIT binary patch literal 25 
YcmdXK2aVHS}C$T2~(EtCl|D*q-v9$l8g%2YD literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-87b55c5b37383fe43420089fd3e8ccecbb034b44 b/fuzz/test_cases/chanmon_consistency/timeout-87b55c5b37383fe43420089fd3e8ccecbb034b44 new file mode 100644 index 00000000000..a9600d0cbf4 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/timeout-87b55c5b37383fe43420089fd3e8ccecbb034b44 @@ -0,0 +1 @@ +@: !<:: !' !''Ú Þÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/timeout-885f446335ae279baed408d42af8c398dfdb8c9b b/fuzz/test_cases/chanmon_consistency/timeout-885f446335ae279baed408d42af8c398dfdb8c9b new file mode 100644 index 00000000000..b9ee3e25f13 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/timeout-885f446335ae279baed408d42af8c398dfdb8c9b @@ -0,0 +1 @@ +88888£'Ðÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/timeout-8a81e4c066465a2975ef22625c0b91da6332a2c8 b/fuzz/test_cases/chanmon_consistency/timeout-8a81e4c066465a2975ef22625c0b91da6332a2c8 new file mode 100644 index 0000000000000000000000000000000000000000..735c8da14eb48e8954d925975ba2a408f04c127e GIT binary patch literal 24 fcmdm@7{j^QF#cm literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-8f2aebf3aeeb70d8edd39a886e30beb770f3b42b b/fuzz/test_cases/chanmon_consistency/timeout-8f2aebf3aeeb70d8edd39a886e30beb770f3b42b new file mode 100644 index 0000000000000000000000000000000000000000..d20f7e163f8421195cbda7574a6d90d16792e144 GIT binary patch literal 23 dcmZRu5tI~^{|^L`it3W;>KAU@ymjy1e*kHe3vmDd literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-91d97d9eea2bd59f746681ad822488262e832ff1 b/fuzz/test_cases/chanmon_consistency/timeout-91d97d9eea2bd59f746681ad822488262e832ff1 new file mode 100644 index 0000000000000000000000000000000000000000..77a897d89de4a5e40c5c3a1035890a603fc64c3b GIT binary patch literal 26 acmZRu5tI~^{|^U}ijo&^+`O}5#eV?7*$|BY literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-95a90908391d3398084b77eb11ff5c9d7fdde008 b/fuzz/test_cases/chanmon_consistency/timeout-95a90908391d3398084b77eb11ff5c9d7fdde008 new file mode 100644 index 0000000000000000000000000000000000000000..d85e723aaa1295d624ed77e1a22fcfb8a6042a74 GIT binary patch literal 24 gcmZRu5R_y%At)&*FR3V*a8Hfl|C)_k@BRM|07h{MDF6Tf literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-a31cdfc423211489c841a6ddd067f9e6cf5bed4b b/fuzz/test_cases/chanmon_consistency/timeout-a31cdfc423211489c841a6ddd067f9e6cf5bed4b new file mode 100644 index 0000000000000000000000000000000000000000..d0e80d1f6497081170669bf2b7504fd06ab7f028 GIT binary patch literal 21 dcmZSJ6SR_-RFqU#zxD5)*Z=LCkM8ex2LMDL2;Bex literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-b1c4840ea1279dd8d6080d79373ae55bbcad3061 b/fuzz/test_cases/chanmon_consistency/timeout-b1c4840ea1279dd8d6080d79373ae55bbcad3061 new file mode 100644 index 0000000000000000000000000000000000000000..50a625cb0ed0e5bea991996d2115a46175e284f4 GIT binary patch literal 25 ccmZRu`TzgFiJ+vQyriO}`Yn-r|Jfh_0CJB9Hvj+t literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-b6ef84eec94d70bbc385c98c4ab0bac77da00a2f b/fuzz/test_cases/chanmon_consistency/timeout-b6ef84eec94d70bbc385c98c4ab0bac77da00a2f new file mode 100644 index 00000000000..59319574a0e --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/timeout-b6ef84eec94d70bbc385c98c4ab0bac77da00a2f @@ -0,0 +1 @@ +<: !''<:Ý !''ÚÞÞÿ \ No newline at end of 
file diff --git a/fuzz/test_cases/chanmon_consistency/timeout-bae8693182b102dfebab143a0f48992dad76245d b/fuzz/test_cases/chanmon_consistency/timeout-bae8693182b102dfebab143a0f48992dad76245d new file mode 100644 index 0000000000000000000000000000000000000000..db79f209c2272dcb30fa1f220e3371a6dde9c440 GIT binary patch literal 23 dcmZRu5tI~^{|^L`it3W;>KAUP-@142KLBPL3cmmV literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-bcab049322729e275e3bbdacebc633495da7643f b/fuzz/test_cases/chanmon_consistency/timeout-bcab049322729e275e3bbdacebc633495da7643f new file mode 100644 index 0000000000000000000000000000000000000000..d774ccb3b2d39456f76b9e4ffa3e96ed2bd46038 GIT binary patch literal 26 icmZRu5tI}Zw33%plvG#0bx+Z&;QyBGk{55>`wswBRtg^g literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-d5afdff02a253c9f2fbce95cbaf730eb210128fa b/fuzz/test_cases/chanmon_consistency/timeout-d5afdff02a253c9f2fbce95cbaf730eb210128fa new file mode 100644 index 00000000000..7b8cb7b07ab --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/timeout-d5afdff02a253c9f2fbce95cbaf730eb210128fa @@ -0,0 +1 @@ +888888'Ðÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/timeout-d6494f068fb2b2d31f1ac8627752692b3c8b7d2f b/fuzz/test_cases/chanmon_consistency/timeout-d6494f068fb2b2d31f1ac8627752692b3c8b7d2f new file mode 100644 index 00000000000..ee690802eff --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/timeout-d6494f068fb2b2d31f1ac8627752692b3c8b7d2f @@ -0,0 +1 @@ +<<!!RÑØÙÚÞÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/timeout-edd3f8168217501dd93f3c24d09c2c095cdf7784 b/fuzz/test_cases/chanmon_consistency/timeout-edd3f8168217501dd93f3c24d09c2c095cdf7784 new file mode 100644 index 0000000000000000000000000000000000000000..9d78147299487bab8c60a29b319b80926ca57ae8 GIT binary patch literal 14 VcmZRu5tI~^msFHgza?_-KL8LR1Ka=r literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-fcbcc131184e33d5b000820b0972f6197b0801d2 b/fuzz/test_cases/chanmon_consistency/timeout-fcbcc131184e33d5b000820b0972f6197b0801d2 new file mode 100644 index 00000000000..df65f08fe80 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/timeout-fcbcc131184e33d5b000820b0972f6197b0801d2 @@ -0,0 +1 @@ +<: !''<: !''ÚÞÞÿ \ No newline at end of file From 89335dce3c6b5cda35e2ba5d58743daf0df6d23b Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 11:35:48 +0200 Subject: [PATCH 18/30] lightning: relax fuzz signing expectations Allow fuzz builds to use cheaper ECDSA signatures and skip assertions that rely on production signature sizes. The fuzz signer intentionally trades realistic DER lengths for speed, so weight lower-bound checks must not assume low-R signatures there. 
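As a rough standalone sketch of the two gating styles relied on here -- a runtime cfg!(fuzzing) branch for the signing path and a compile-time #[cfg(not(fuzzing))] attribute for the weight assertion -- with hypothetical helpers standing in for the real secp256k1 calls and weight math:

    // Runtime gate: fuzz builds take a single-pass signer, other builds grind for low-R.
    fn sign(msg: &[u8]) -> Vec<u8> {
        if cfg!(fuzzing) {
            fast_sign(msg)
        } else {
            grind_low_r(msg)
        }
    }

    // Compile-time gate: the lower-bound weight check simply does not exist in fuzz builds.
    fn check_signed_weight(expected: u64, actual: u64) {
        #[cfg(not(fuzzing))]
        assert!(expected * 99 / 100 <= actual, "signed tx lighter than estimated");
    }

    // Stand-ins so the sketch compiles on its own; not the real signing logic.
    fn fast_sign(msg: &[u8]) -> Vec<u8> {
        msg.to_vec()
    }
    fn grind_low_r(msg: &[u8]) -> Vec<u8> {
        fast_sign(msg)
    }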
--- lightning/src/crypto/utils.rs | 16 +++++++---- lightning/src/events/bump_transaction/mod.rs | 28 ++++++++++++-------- lightning/src/sign/mod.rs | 2 ++ 3 files changed, 30 insertions(+), 16 deletions(-) diff --git a/lightning/src/crypto/utils.rs b/lightning/src/crypto/utils.rs index 88911b0baf8..8b2737fa8e9 100644 --- a/lightning/src/crypto/utils.rs +++ b/lightning/src/crypto/utils.rs @@ -67,7 +67,7 @@ pub fn hkdf_extract_expand_7x( #[inline] pub fn sign(ctx: &Secp256k1, msg: &Message, sk: &SecretKey) -> Signature { #[cfg(feature = "grind_signatures")] - let sig = ctx.sign_ecdsa_low_r(msg, sk); + let sig = if cfg!(fuzzing) { ctx.sign_ecdsa(msg, sk) } else { ctx.sign_ecdsa_low_r(msg, sk) }; #[cfg(not(feature = "grind_signatures"))] let sig = ctx.sign_ecdsa(msg, sk); sig @@ -79,10 +79,16 @@ pub fn sign_with_aux_rand( ctx: &Secp256k1, msg: &Message, sk: &SecretKey, entropy_source: &ES, ) -> Signature { #[cfg(feature = "grind_signatures")] - let sig = loop { - let sig = ctx.sign_ecdsa_with_noncedata(msg, sk, &entropy_source.get_secure_random_bytes()); - if sig.serialize_compact()[0] < 0x80 { - break sig; + let sig = { + if cfg!(fuzzing) { + return sign(ctx, msg, sk); + } + loop { + let sig = + ctx.sign_ecdsa_with_noncedata(msg, sk, &entropy_source.get_secure_random_bytes()); + if sig.serialize_compact()[0] < 0x80 { + break sig; + } } }; #[cfg(all(not(feature = "grind_signatures"), not(ldk_test_vectors)))] diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 6a5e9948653..22c70fd4d61 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -480,11 +480,15 @@ impl= signed_tx_weight); + // When fuzzing, signatures are trivially small so the actual weight can be + // significantly less than estimated. Skip the lower-bound check. + #[cfg(not(fuzzing))] assert!(expected_signed_tx_weight * 99 / 100 <= signed_tx_weight); let expected_package_fee = Amount::from_sat(fee_for_weight( @@ -629,10 +633,10 @@ impl(); - #[cfg(debug_assertions)] + #[cfg(all(debug_assertions, not(fuzzing)))] let must_spend_amount = must_spend.iter().map(|input| input.previous_utxo.value.to_sat()).sum::(); @@ -663,13 +667,13 @@ impl= signed_tx_weight); + // When fuzzing, signatures are trivially small so the actual weight can be + // significantly less than estimated. Skip the lower-bound check. assert!(expected_signed_tx_weight * 98 / 100 <= signed_tx_weight); let expected_signed_tx_fee = diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index a3dc72042cc..817e219d53b 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -75,6 +75,8 @@ pub mod tx_builder; pub(crate) const COMPRESSED_PUBLIC_KEY_SIZE: usize = bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE; +// Standard low-S ECDSA signatures fit in the secp256k1 DER bound; the appended sighash byte +// replaces the extra DER padding byte that a high-S signature could require. pub(crate) const MAX_STANDARD_SIGNATURE_SIZE: usize = bitcoin::secp256k1::constants::MAX_SIGNATURE_SIZE; From fdfe81d7ef10f910cc2593efd63485f0259b8d7d Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 5 May 2026 09:50:37 +0200 Subject: [PATCH 19/30] fuzz: seed chanmon_consistency wallets on chain Seed each harness wallet with confirmed coinbase outputs before building channels. The modeled chain now includes the wallet funding, so later chain checks can require transactions to spend existing outputs. 
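The shape of the seeding step, restated as a toy sketch (the Wallet/Chain types and values below are stand-ins, not the harness types): each wallet gets one coinbase with many small outputs, and the same transaction is confirmed in the modeled chain so later spend checks can resolve those outpoints.

    struct Wallet {
        utxos: Vec<(u32, u64)>, // (vout, value in sats)
    }

    struct Chain {
        confirmed_outputs: Vec<Vec<(u32, u64)>>, // one entry per confirmed coinbase
    }

    fn seed_wallets(wallets: &mut [Wallet], chain: &mut Chain, utxos_per_wallet: u32) {
        for wallet in wallets.iter_mut() {
            let outputs: Vec<(u32, u64)> =
                (0..utxos_per_wallet).map(|vout| (vout, 100_000)).collect();
            // The wallet learns about every coinbase output it can spend...
            wallet.utxos.extend(outputs.iter().copied());
            // ...and the modeled chain records the same outputs as confirmed, so a
            // later channel-funding transaction has real outpoints to spend.
            chain.confirmed_outputs.push(outputs);
        }
    }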
--- fuzz/src/chanmon_consistency.rs | 47 ++++++++++++++++----------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index dfaa7d97387..a7ce7849970 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -2189,20 +2189,24 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { let wallet_b = TestWalletSource::new(SecretKey::from_slice(&[2; 32]).unwrap()); let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); let wallets = [&wallet_a, &wallet_b, &wallet_c]; - let coinbase_tx = bitcoin::Transaction { - version: bitcoin::transaction::Version::TWO, - lock_time: bitcoin::absolute::LockTime::ZERO, - input: vec![bitcoin::TxIn { ..Default::default() }], - output: wallets - .iter() - .map(|wallet| TxOut { - value: Amount::from_sat(100_000), - script_pubkey: wallet.get_change_script().unwrap(), - }) - .collect(), - }; + let mut chain_state = ChainState::new(); + let num_wallet_utxos = 50; for (idx, wallet) in wallets.iter().enumerate() { - wallet.add_utxo(coinbase_tx.clone(), idx as u32); + let coinbase_tx = bitcoin::Transaction { + version: bitcoin::transaction::Version(idx as i32 + 100), + lock_time: bitcoin::absolute::LockTime::ZERO, + input: vec![bitcoin::TxIn { ..Default::default() }], + output: (0..num_wallet_utxos) + .map(|_| TxOut { + value: Amount::from_sat(100_000), + script_pubkey: wallet.get_change_script().unwrap(), + }) + .collect(), + }; + for vout in 0..num_wallet_utxos { + wallet.add_utxo(coinbase_tx.clone(), vout); + } + chain_state.confirm_tx(coinbase_tx); } let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); @@ -2249,7 +2253,6 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { chan_type, ), ]; - let mut chain_state = ChainState::new(); // Connect peers first, then create channels. connect_peers(&nodes[0], &nodes[1]); @@ -2270,16 +2273,12 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { make_channel(&mut nodes, 1, 2, 5, true, false, &mut chain_state); make_channel(&mut nodes, 1, 2, 6, false, false, &mut chain_state); - // Wipe the transactions-broadcasted set to make sure we don't broadcast - // any transactions during normal operation after setup. - nodes[0].broadcaster.txn_broadcasted.borrow_mut().clear(); - nodes[1].broadcaster.txn_broadcasted.borrow_mut().clear(); - nodes[2].broadcaster.txn_broadcasted.borrow_mut().clear(); - - // Sync all nodes to tip to lock the funding. - nodes[0].sync_with_chain_state(&chain_state, None); - nodes[1].sync_with_chain_state(&chain_state, None); - nodes[2].sync_with_chain_state(&chain_state, None); + for node in &nodes { + node.broadcaster.txn_broadcasted.borrow_mut().clear(); + } + for node in &mut nodes { + node.sync_with_chain_state(&chain_state, None); + } lock_fundings(&nodes); From 37ef991f75aa7228aac8fb9c0ac0d290bfdb52b8 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 5 May 2026 09:52:15 +0200 Subject: [PATCH 20/30] fuzz: validate chanmon_consistency chain spends Track confirmed UTXOs in the harness chain state and require transactions to spend outputs that exist and remain unspent. Also reject absolute-height timelocks before they mature, while preserving the obscured commitment number encoding used by commitment transactions. 
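The confirmation gate added here reduces to two rules; a minimal standalone restatement with plain (txid, vout) tuples standing in for outpoints (coinbase special-casing omitted):

    use std::collections::HashSet;

    // A height-based lock_time must have matured, except when bit 29 is set, which
    // marks the obscured commitment number encoding rather than a real timelock.
    fn lock_time_ok(lock_time: u32, tip_height: u32) -> bool {
        lock_time == 0
            || lock_time >= 500_000_000 // seconds-based locks are not height-gated here
            || lock_time & (1 << 29) != 0
            || tip_height >= lock_time
    }

    // Every input must spend an output that exists and is still unspent.
    fn can_confirm(
        inputs: &[(u64, u32)], lock_time: u32, tip_height: u32, utxos: &HashSet<(u64, u32)>,
    ) -> bool {
        lock_time_ok(lock_time, tip_height) && inputs.iter().all(|op| utxos.contains(op))
    }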
--- fuzz/src/chanmon_consistency.rs | 74 ++++++++++++++++++++++++--------- 1 file changed, 55 insertions(+), 19 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index a7ce7849970..9e9ec4c4a01 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -27,6 +27,7 @@ use bitcoin::script::{Builder, ScriptBuf}; use bitcoin::transaction::Version; use bitcoin::transaction::{Transaction, TxOut}; use bitcoin::FeeRate; +use bitcoin::OutPoint as BitcoinOutPoint; use bitcoin::block::Header; use bitcoin::hash_types::Txid; @@ -186,6 +187,11 @@ struct ChainState { /// Unconfirmed transactions (e.g., splice txs). Conflicting RBF candidates may coexist; /// `confirm_pending_txs` determines which one confirms. pending_txs: Vec<(Txid, Transaction)>, + /// Tracks unspent outputs created by confirmed transactions. Only + /// transactions that spend existing UTXOs can be confirmed, which + /// prevents fuzz hash collisions from creating phantom spends of + /// outputs that were never actually created. + utxos: HashSet, } impl ChainState { @@ -196,6 +202,7 @@ impl ChainState { blocks: vec![(genesis_header, Vec::new())], confirmed_txids: HashSet::new(), pending_txs: Vec::new(), + utxos: HashSet::new(), } } @@ -203,21 +210,57 @@ impl ChainState { (self.blocks.len() - 1) as u32 } - fn is_outpoint_spent(&self, outpoint: &bitcoin::OutPoint) -> bool { - self.blocks.iter().any(|(_, txs)| { - txs.iter().any(|tx| tx.input.iter().any(|input| input.previous_output == *outpoint)) - }) + fn can_confirm_tx( + &self, tx: &Transaction, txid: Txid, utxos: &HashSet, + ) -> bool { + if self.confirmed_txids.contains(&txid) { + return false; + } + // Reject timelocked transactions before their lock_time, matching + // consensus rules. Commitment txs encode an obscured commitment + // number with bit 29 set, which is not a real timelock. + let lock_time = tx.lock_time.to_consensus_u32(); + if lock_time > 0 + && lock_time < 500_000_000 + && lock_time & (1 << 29) == 0 + && self.tip_height() < lock_time + { + return false; + } + // Validate that all inputs spend existing, unspent outputs. This + // rejects both double-spends and spends of outputs that were never + // created (e.g. due to fuzz txid hash collisions where a different + // transaction was confirmed under the same txid). 
+ let is_coinbase = tx.is_coinbase(); + if !is_coinbase { + for input in &tx.input { + if !utxos.contains(&input.previous_output) { + return false; + } + } + } + true + } + + fn apply_tx_to_utxos(txid: Txid, tx: &Transaction, utxos: &mut HashSet) { + let is_coinbase = tx.is_coinbase(); + if !is_coinbase { + for input in &tx.input { + utxos.remove(&input.previous_output); + } + } + for idx in 0..tx.output.len() { + utxos.insert(BitcoinOutPoint { txid, vout: idx as u32 }); + } } fn confirm_tx(&mut self, tx: Transaction) -> bool { let txid = tx.compute_txid(); - if self.confirmed_txids.contains(&txid) { - return false; - } - if tx.input.iter().any(|input| self.is_outpoint_spent(&input.previous_output)) { + if !self.can_confirm_tx(&tx, txid, &self.utxos) { return false; } self.confirmed_txids.insert(txid); + Self::apply_tx_to_utxos(txid, &tx, &mut self.utxos); let prev_hash = self.blocks.last().unwrap().0.block_hash(); let header = create_dummy_header(prev_hash, 42); @@ -245,21 +288,13 @@ impl ChainState { txs.sort_by_key(|(txid, _)| *txid); let mut confirmed = Vec::new(); - let mut spent_outpoints = Vec::new(); + let mut next_utxos = self.utxos.clone(); for (txid, tx) in txs { - if self.confirmed_txids.contains(&txid) { - continue; - } - if tx.input.iter().any(|input| { - self.is_outpoint_spent(&input.previous_output) - || spent_outpoints.contains(&input.previous_output) - }) { + if !self.can_confirm_tx(&tx, txid, &next_utxos) { continue; } self.confirmed_txids.insert(txid); - for input in &tx.input { - spent_outpoints.push(input.previous_output); - } + Self::apply_tx_to_utxos(txid, &tx, &mut next_utxos); confirmed.push(tx); } @@ -270,6 +305,7 @@ impl ChainState { let prev_hash = self.blocks.last().unwrap().0.block_hash(); let header = create_dummy_header(prev_hash, 42); self.blocks.push((header, confirmed)); + self.utxos = next_utxos; for _ in 0..5 { let prev_hash = self.blocks.last().unwrap().0.block_hash(); From 2927a193ad6f6d555693cdfcb82194437f4d40b7 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 5 May 2026 09:53:06 +0200 Subject: [PATCH 21/30] fuzz: notify chanmon_consistency monitors on sync Notify channel monitors about confirmed transactions and best-block updates while harness nodes catch up to the modeled chain. When advancing across empty blocks, jump directly to the next block that contains transactions while still reporting the intervening best block. 
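The catch-up loop's skipping behaviour, restated standalone with callbacks in place of the monitor and manager notifications (txs_per_block[h] here is just that height's transaction count):

    fn catch_up(
        mut height: u32, target: u32, txs_per_block: &[u32],
        mut best_block: impl FnMut(u32), mut txs_confirmed: impl FnMut(u32),
    ) {
        while height < target {
            // Find the next block that actually contains transactions.
            let mut next = height + 1;
            while next <= target && txs_per_block[next as usize] == 0 {
                next += 1;
            }
            if next > target {
                // Only empty blocks remain: report the tip and stop.
                best_block(target);
                break;
            }
            if next > height + 1 {
                // Acknowledge the intervening empty stretch before the jump.
                best_block(next - 1);
            }
            height = next;
            txs_confirmed(height);
            best_block(height);
        }
    }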
--- fuzz/src/chanmon_consistency.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 9e9ec4c4a01..f04c8a44701 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -998,12 +998,31 @@ impl<'a> HarnessNode<'a> { }; while self.height < target_height { - self.height += 1; + let mut next_height = self.height + 1; + while next_height <= target_height && chain_state.block_at(next_height).1.is_empty() { + next_height += 1; + } + if next_height > target_height { + self.height = target_height; + let (header, _) = chain_state.block_at(self.height); + self.monitor.best_block_updated(header, self.height); + self.node.best_block_updated(header, self.height); + break; + } + if next_height > self.height + 1 { + self.height = next_height - 1; + let (header, _) = chain_state.block_at(self.height); + self.monitor.best_block_updated(header, self.height); + self.node.best_block_updated(header, self.height); + } + self.height = next_height; let (header, txn) = chain_state.block_at(self.height); let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); if !txdata.is_empty() { + self.monitor.transactions_confirmed(header, &txdata, self.height); self.node.transactions_confirmed(header, &txdata, self.height); } + self.monitor.best_block_updated(header, self.height); self.node.best_block_updated(header, self.height); } } From cd80726655722a6265d326b9589143a6a7e6ca7c Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 29 Apr 2026 10:27:33 +0200 Subject: [PATCH 22/30] fuzz: handle chanmon_consistency control messages Teach the harness to deliver additional control and announcement messages emitted during reconnects and timer-driven state changes. This keeps delayed message handling from panicking on valid events that can be produced by the channel manager. --- fuzz/src/chanmon_consistency.rs | 81 +++++++++++++++++++++++---------- 1 file changed, 57 insertions(+), 24 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index f04c8a44701..16c5d66739d 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -806,18 +806,17 @@ type ChanMan<'a> = ChannelManager< #[inline] fn assert_action_timeout_awaiting_response(action: &msgs::ErrorAction) { - // Since sending/receiving messages may be delayed, `timer_tick_occurred` may cause a node to - // disconnect their counterparty if they're expecting a timely response. - assert!( - matches!( - action, - msgs::ErrorAction::DisconnectPeerWithWarning { msg } + // Since sending or receiving messages may be delayed, `timer_tick_occurred` may cause a node + // to disconnect their counterparty if they're expecting a timely response. We may also deliver + // the paired `error` message when one was generated alongside the disconnect. + match action { + msgs::ErrorAction::DisconnectPeerWithWarning { msg } if msg.data.contains("Disconnecting due to timeout awaiting response") - || msg.data.contains("already sent splice_locked, cannot RBF") - ), - "Expected timeout disconnect, got: {:?}", - action, - ); + || msg.data.contains("already sent splice_locked, cannot RBF") => {}, + msgs::ErrorAction::DisconnectPeer { .. } => {}, + msgs::ErrorAction::SendErrorMessage { .. } => {}, + _ => panic!("Unexpected HandleError action {:?}", action), + } } #[derive(Copy, Clone)] @@ -1351,7 +1350,9 @@ impl EventQueues { }, MessageSendEvent::SendChannelReady { .. 
} | MessageSendEvent::SendAnnouncementSignatures { .. } - | MessageSendEvent::BroadcastChannelUpdate { .. } => continue, + | MessageSendEvent::BroadcastChannelUpdate { .. } + | MessageSendEvent::BroadcastChannelAnnouncement { .. } + | MessageSendEvent::BroadcastNodeAnnouncement { .. } => continue, _ => panic!("Unhandled message event {:?}", event), }; if push_a { @@ -1388,6 +1389,8 @@ impl EventQueues { MessageSendEvent::SendChannelReady { .. } => {}, MessageSendEvent::SendAnnouncementSignatures { .. } => {}, MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + MessageSendEvent::BroadcastChannelAnnouncement { .. } => {}, + MessageSendEvent::BroadcastNodeAnnouncement { .. } => {}, MessageSendEvent::SendChannelUpdate { .. } => {}, MessageSendEvent::HandleError { ref action, .. } => { assert_action_timeout_awaiting_response(action); @@ -1407,6 +1410,8 @@ impl EventQueues { MessageSendEvent::SendChannelReady { .. } => {}, MessageSendEvent::SendAnnouncementSignatures { .. } => {}, MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + MessageSendEvent::BroadcastChannelAnnouncement { .. } => {}, + MessageSendEvent::BroadcastNodeAnnouncement { .. } => {}, MessageSendEvent::SendChannelUpdate { .. } => {}, MessageSendEvent::HandleError { ref action, .. } => { assert_action_timeout_awaiting_response(action); @@ -1984,6 +1989,7 @@ fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; config.channel_handshake_config.announce_for_forwarding = true; + config.channel_handshake_limits.force_announced_channel_preference = false; config.reject_inbound_splices = false; match chan_type { ChanType::Legacy => { @@ -2194,7 +2200,7 @@ fn lock_fundings(nodes: &[HarnessNode<'_>; 3]) { } } } else { - panic!("Wrong event type"); + panic!("Wrong event type in first lock_fundings pass: {:?}", event); } } } @@ -2202,9 +2208,18 @@ fn lock_fundings(nodes: &[HarnessNode<'_>; 3]) { for node in nodes.iter() { let events = node.get_and_clear_pending_msg_events(); for event in events { - if let MessageSendEvent::SendAnnouncementSignatures { .. } = event { - } else { - panic!("Wrong event type"); + match event { + MessageSendEvent::SendAnnouncementSignatures { .. } => {}, + MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { + for dest_node in nodes.iter() { + if dest_node.get_our_node_id() == *node_id { + dest_node.handle_channel_update(node.get_our_node_id(), msg); + } + } + }, + _ => { + panic!("Wrong event type in second lock_fundings pass: {:?}", event); + }, } } } @@ -2626,6 +2641,11 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { None }, MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { + if msg.next_local_commitment_number == 0 + && msg.next_remote_commitment_number == 0 + { + return None; + } let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "channel_reestablish"); nodes[dest_idx].handle_channel_reestablish(source_node_id, msg); @@ -2698,21 +2718,34 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { nodes[dest_idx].handle_splice_locked(source_node_id, msg); None }, - MessageSendEvent::HandleError { ref action, .. 
} => { + MessageSendEvent::HandleError { ref action, ref node_id } => { assert_action_timeout_awaiting_response(action); + if let msgs::ErrorAction::SendErrorMessage { ref msg } = action { + let dest_idx = find_destination_node(nodes, node_id); + nodes[dest_idx].handle_error(source_node_id, msg); + } None }, - MessageSendEvent::SendChannelReady { .. } - | MessageSendEvent::SendAnnouncementSignatures { .. } - | MessageSendEvent::SendChannelUpdate { .. } => { - // Can be generated as a reestablish response. + MessageSendEvent::SendChannelReady { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "channel_ready"); + nodes[dest_idx].handle_channel_ready(source_node_id, msg); None }, - MessageSendEvent::BroadcastChannelUpdate { .. } => { - // Can be generated as a result of calling `timer_tick_occurred` enough - // times while peers are disconnected. + MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => { + let dest_idx = + log_peer_message(node_idx, node_id, nodes, out, "announcement_signatures"); + nodes[dest_idx].handle_announcement_signatures(source_node_id, msg); + None + }, + MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { + let dest_idx = + log_peer_message(node_idx, node_id, nodes, out, "channel_update"); + nodes[dest_idx].handle_channel_update(source_node_id, msg); None }, + MessageSendEvent::BroadcastChannelUpdate { .. } => None, + MessageSendEvent::BroadcastChannelAnnouncement { .. } => None, + MessageSendEvent::BroadcastNodeAnnouncement { .. } => None, _ => panic!("Unhandled message event {:?}", event), } } From 1bb99a4dac360a101967a29856e00b4a1911e5be Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 29 Apr 2026 10:28:51 +0200 Subject: [PATCH 23/30] fuzz: relax chanmon_consistency closure assumptions Stop treating every channel close or broadcast transaction as an immediate invariant failure. Later commits add explicit force-close coverage, so the baseline harness must allow channels and broadcaster queues to reflect closure progress. --- fuzz/src/chanmon_consistency.rs | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 16c5d66739d..4e3831f4961 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -15,8 +15,7 @@ //! actions such as sending payments, handling events, or changing monitor update return values on //! a per-node basis. This should allow it to find any cases where the ordering of actions results //! in us getting out of sync with ourselves, and, assuming at least one of our recieve- or -//! send-side handling is correct, other peers. We consider it a failure if any action results in a -//! channel being force-closed. +//! send-side handling is correct, other peers. use bitcoin::amount::Amount; use bitcoin::constants::genesis_block; @@ -754,8 +753,6 @@ impl SignerProvider for KeyProvider { } } -// Since this fuzzer is only concerned with live-channel operations, we don't need to worry about -// any signer operations that come after a force close. 
const SUPPORTED_SIGNER_OPS: [SignerOp; 3] = [ SignerOp::SignCounterpartyCommitment, SignerOp::GetPerCommitmentPoint, @@ -2009,14 +2006,12 @@ fn build_node_config(chan_type: ChanType) -> UserConfig { } fn assert_test_invariants(nodes: &[HarnessNode<'_>; 3]) { - assert_eq!(nodes[0].list_channels().len(), 3); - assert_eq!(nodes[1].list_channels().len(), 6); - assert_eq!(nodes[2].list_channels().len(), 3); - - // All broadcasters should be empty. Broadcast transactions are handled explicitly. - assert!(nodes[0].broadcaster.txn_broadcasted.borrow().is_empty()); - assert!(nodes[1].broadcaster.txn_broadcasted.borrow().is_empty()); - assert!(nodes[2].broadcaster.txn_broadcasted.borrow().is_empty()); + assert!(nodes[0].list_channels().len() <= 3); + assert!(nodes[1].list_channels().len() <= 6); + assert!(nodes[2].list_channels().len() <= 3); + for node in nodes { + node.broadcaster.txn_broadcasted.borrow_mut().clear(); + } } fn connect_peers(source: &ChanMan<'_>, dest: &ChanMan<'_>) { From 2214cfd9d4c045b209c6d445129a5da4982210ee Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 29 Apr 2026 10:41:14 +0200 Subject: [PATCH 24/30] fuzz: sync chanmon_consistency pending tx wallets Return the transactions confirmed from the pending pool and apply their effects to the harness wallets. This keeps wallet UTXO state aligned with the fake chain when splice or other pending transactions are mined by fuzz input. --- fuzz/src/chanmon_consistency.rs | 43 +++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 4e3831f4961..0b26c8ced4e 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -282,7 +282,7 @@ impl ChainState { /// Confirm pending transactions in a single block, selecting deterministically among /// conflicting RBF candidates. Sorting by txid ensures the winner is determined by fuzz input /// content. Transactions that double-spend an already-confirmed outpoint are skipped. 
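The wallet-side rule applied per confirmed transaction, sketched with toy types (a u64 txid stand-in and a raw script byte vector) rather than the harness wallet:

    use std::collections::HashSet;

    struct ToyWallet {
        change_script: Vec<u8>,
        utxos: HashSet<(u64, u32)>, // (txid stand-in, vout)
    }

    fn apply_confirmed_tx(
        wallet: &mut ToyWallet, txid: u64, inputs: &[(u64, u32)], outputs: &[(Vec<u8>, u64)],
    ) {
        // Inputs consume wallet UTXOs regardless of who built the transaction.
        for spent in inputs {
            wallet.utxos.remove(spent);
        }
        // Outputs paying the wallet's change script become spendable again.
        for (vout, (script, _value)) in outputs.iter().enumerate() {
            if *script == wallet.change_script {
                wallet.utxos.insert((txid, vout as u32));
            }
        }
    }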
- fn confirm_pending_txs(&mut self) { + fn confirm_pending_txs(&mut self) -> Vec { let mut txs = std::mem::take(&mut self.pending_txs); txs.sort_by_key(|(txid, _)| *txid); @@ -298,11 +298,12 @@ impl ChainState { } if confirmed.is_empty() { - return; + return Vec::new(); } let prev_hash = self.blocks.last().unwrap().0.block_hash(); let header = create_dummy_header(prev_hash, 42); + let confirmed_txs = confirmed.clone(); self.blocks.push((header, confirmed)); self.utxos = next_utxos; @@ -311,6 +312,7 @@ impl ChainState { let header = create_dummy_header(prev_hash, 42); self.blocks.push((header, Vec::new())); } + confirmed_txs } fn block_at(&self, height: u32) -> &(Header, Vec) { @@ -3032,6 +3034,31 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { } made_progress } + + fn confirm_pending_txs_and_sync_wallets(&mut self) -> bool { + let confirmed_txs = self.chain_state.confirm_pending_txs(); + for tx in &confirmed_txs { + sync_wallets_with_confirmed_tx( + [&self.nodes[0].wallet, &self.nodes[1].wallet, &self.nodes[2].wallet].as_slice(), + tx, + ); + } + !confirmed_txs.is_empty() + } +} + +fn sync_wallets_with_confirmed_tx(wallets: &[&TestWalletSource], tx: &Transaction) { + for wallet in wallets { + let change_script = wallet.get_change_script().unwrap(); + for input in &tx.input { + wallet.remove_utxo(input.previous_output); + } + for (vout, output) in tx.output.iter().enumerate() { + if output.script_pubkey == change_script { + wallet.add_utxo(tx.clone(), vout as u32); + } + } + } } #[inline] @@ -3257,28 +3284,28 @@ pub fn do_test(data: &[u8], out: Out) { // Sync node by 1 block to cover confirmation of a transaction. 0xa8 => { - harness.chain_state.confirm_pending_txs(); + harness.confirm_pending_txs_and_sync_wallets(); harness.nodes[0].sync_with_chain_state(&harness.chain_state, Some(1)); }, 0xa9 => { - harness.chain_state.confirm_pending_txs(); + harness.confirm_pending_txs_and_sync_wallets(); harness.nodes[1].sync_with_chain_state(&harness.chain_state, Some(1)); }, 0xaa => { - harness.chain_state.confirm_pending_txs(); + harness.confirm_pending_txs_and_sync_wallets(); harness.nodes[2].sync_with_chain_state(&harness.chain_state, Some(1)); }, // Sync node to chain tip to cover confirmation of a transaction post-reorg-risk. 0xab => { - harness.chain_state.confirm_pending_txs(); + harness.confirm_pending_txs_and_sync_wallets(); harness.nodes[0].sync_with_chain_state(&harness.chain_state, None); }, 0xac => { - harness.chain_state.confirm_pending_txs(); + harness.confirm_pending_txs_and_sync_wallets(); harness.nodes[1].sync_with_chain_state(&harness.chain_state, None); }, 0xad => { - harness.chain_state.confirm_pending_txs(); + harness.confirm_pending_txs_and_sync_wallets(); harness.nodes[2].sync_with_chain_state(&harness.chain_state, None); }, From aa80e5e9609b4fbce9b428b071c9ea117253c409 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 29 Apr 2026 10:41:43 +0200 Subject: [PATCH 25/30] fuzz: regularize chanmon_consistency signer controls Map the primary signer-unblock opcodes so nodes 0, 1, and 2 each get distinct controls for counterparty commitment signing, per-commitment points, and commitment secret release. Make node B's primary signer-unblock controls retry all pending channels once an operation is available, while keeping the older channel-specific release-secret controls for now. This avoids duplicated node coverage and makes the byte controls easier to reason about in reduced test cases. 
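When reading reduced test cases, the remapped bytes in this range line up as a simple (node, operation) table. The sketch below lists exactly the opcodes touched in this diff, with Op standing in for SignerOp:

    enum Op {
        SignCounterpartyCommitment,
        GetPerCommitmentPoint,
        ReleaseCommitmentSecret,
    }

    // Each opcode now enables one operation on one node's signers and then calls
    // signer_unblocked(None) on that node, with no per-channel filter.
    fn decode_signer_unblock(byte: u8) -> Option<(usize, Op)> {
        match byte {
            0xc4 => Some((1, Op::SignCounterpartyCommitment)),
            0xc5 => Some((2, Op::SignCounterpartyCommitment)),
            0xc6 => Some((0, Op::GetPerCommitmentPoint)),
            0xc7 => Some((1, Op::GetPerCommitmentPoint)),
            0xc8 => Some((2, Op::GetPerCommitmentPoint)),
            0xc9 => Some((0, Op::ReleaseCommitmentSecret)),
            0xca => Some((1, Op::ReleaseCommitmentSecret)),
            0xcb => Some((2, Op::ReleaseCommitmentSecret)),
            _ => None,
        }
    }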
--- fuzz/src/chanmon_consistency.rs | 40 +++++++++++++++------------------ 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 0b26c8ced4e..7eaef64b315 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -3338,54 +3338,50 @@ pub fn do_test(data: &[u8], out: Out) { harness.nodes[1] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - let filter = Some((harness.nodes[0].get_our_node_id(), harness.chan_a_id())); - harness.nodes[1].signer_unblocked(filter); + harness.nodes[1].signer_unblocked(None); }, 0xc5 => { - harness.nodes[1] - .keys_manager - .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - let filter = Some((harness.nodes[2].get_our_node_id(), harness.chan_b_id())); - harness.nodes[1].signer_unblocked(filter); - }, - 0xc6 => { harness.nodes[2] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); harness.nodes[2].signer_unblocked(None); }, - 0xc7 => { + 0xc6 => { harness.nodes[0] .keys_manager .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); harness.nodes[0].signer_unblocked(None); }, - 0xc8 => { - harness.nodes[1] - .keys_manager - .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - let filter = Some((harness.nodes[0].get_our_node_id(), harness.chan_a_id())); - harness.nodes[1].signer_unblocked(filter); - }, - 0xc9 => { + 0xc7 => { harness.nodes[1] .keys_manager .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - let filter = Some((harness.nodes[2].get_our_node_id(), harness.chan_b_id())); - harness.nodes[1].signer_unblocked(filter); + harness.nodes[1].signer_unblocked(None); }, - 0xca => { + 0xc8 => { harness.nodes[2] .keys_manager .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); harness.nodes[2].signer_unblocked(None); }, - 0xcb => { + 0xc9 => { harness.nodes[0] .keys_manager .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); harness.nodes[0].signer_unblocked(None); }, + 0xca => { + harness.nodes[1] + .keys_manager + .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + harness.nodes[1].signer_unblocked(None); + }, + 0xcb => { + harness.nodes[2] + .keys_manager + .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + harness.nodes[2].signer_unblocked(None); + }, 0xcc => { harness.nodes[1] .keys_manager From 2192e1619870d3229049b62c3f65d8f8063bb416 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 29 Apr 2026 10:42:35 +0200 Subject: [PATCH 26/30] fuzz: tolerate chanmon_consistency on-chain events Make event processing robust to splice, close, spendable-output, and bump-transaction events that can arise during on-chain cleanup. Splice pending handling now finds the matching broadcast transaction by txid instead of assuming queue order. --- fuzz/src/chanmon_consistency.rs | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 7eaef64b315..98d5b87d078 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -2838,18 +2838,21 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { .unwrap(); }, events::Event::SpliceNegotiated { new_funding_txo, .. 
} => { - let mut txs = nodes[node_idx].broadcaster.txn_broadcasted.borrow_mut(); - assert!(txs.len() >= 1); - let splice_tx = txs.remove(0); - assert_eq!(new_funding_txo.txid, splice_tx.compute_txid()); - chain_state.add_pending_tx(splice_tx); + if !chain_state.confirmed_txids.contains(&new_funding_txo.txid) { + let mut txs = nodes[node_idx].broadcaster.txn_broadcasted.borrow_mut(); + if let Some(pos) = + txs.iter().position(|tx| new_funding_txo.txid == tx.compute_txid()) + { + let splice_tx = txs.remove(pos); + chain_state.add_pending_tx(splice_tx); + } + } }, events::Event::SpliceNegotiationFailed { .. } => {}, - events::Event::DiscardFunding { - funding_info: - events::FundingInfo::Contribution { .. } | events::FundingInfo::Tx { .. }, - .. - } => {}, + events::Event::ChannelClosed { .. } => {}, + events::Event::DiscardFunding { .. } => {}, + events::Event::SpendableOutputs { .. } => {}, + events::Event::BumpTransaction(..) => {}, _ => panic!("Unhandled event: {:?}", event), } } From 3382a0469b29b5d5ca462bd6c20df620008a3767 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 29 Apr 2026 10:44:40 +0200 Subject: [PATCH 27/30] fuzz: drive chanmon_consistency broadcast cleanup Add cleanup helpers and fuzz opcodes for monitor bump events, broadcast confirmation, careful chain advancement, and node resyncs. The all-events loop now advances messages, node events, monitor events, pending transactions, and broadcasts until the harness quiesces. --- fuzz/src/chanmon_consistency.rs | 513 +++++++++++++++++++++++++++++--- 1 file changed, 468 insertions(+), 45 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 98d5b87d078..5d5d9eab004 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -41,11 +41,11 @@ use lightning::chain; use lightning::chain::chaininterface::{ BroadcasterInterface, ConfirmationTarget, FeeEstimator, TransactionType, }; -use lightning::chain::channelmonitor::ChannelMonitor; +use lightning::chain::channelmonitor::{Balance, ChannelMonitor}; use lightning::chain::{ chainmonitor, channelmonitor, BlockLocator, ChannelMonitorUpdateStatus, Confirm, Watch, }; -use lightning::events; +use lightning::events::{self, EventsProvider}; use lightning::ln::channel::{ FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS, }; @@ -83,6 +83,8 @@ use lightning::util::test_channel_signer::{EnforcementState, SignerOp, TestChann use lightning::util::test_utils::TestWalletSource; use lightning::util::wallet_utils::{WalletSourceSync, WalletSync}; +use lightning::events::bump_transaction::sync::BumpTransactionEventHandlerSync; + use lightning_invoice::RawBolt11Invoice; use crate::utils::test_logger::{self, Output}; @@ -315,6 +317,14 @@ impl ChainState { confirmed_txs } + fn advance_height(&mut self, num_blocks: u32) { + for _ in 0..num_blocks { + let prev_hash = self.blocks.last().unwrap().0.block_hash(); + let header = create_dummy_header(prev_hash, 42); + self.blocks.push((header, Vec::new())); + } + } + fn block_at(&self, height: u32) -> &(Header, Vec) { &self.blocks[height as usize] } @@ -2874,59 +2884,47 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { } fn process_all_events(&mut self) { + let mut settled = false; let mut last_pass_no_updates = false; - for i in 0..std::usize::MAX { - if i == 100 { - panic!( - "It may take may iterations to settle the state, but it should not take forever" - ); - } - let mut made_progress = self.checkpoint_manager_persistences(); - // Next, make sure 
no monitor completion callbacks are pending. - made_progress |= self.ab_link.complete_all_monitor_updates(&self.nodes); - made_progress |= self.bc_link.complete_all_monitor_updates(&self.nodes); - // Then, make sure any current forwards make their way to their destination. - if self.process_msg_events(0, false, ProcessMessages::AllMessages) { - last_pass_no_updates = false; - continue; - } - if self.process_msg_events(1, false, ProcessMessages::AllMessages) { - last_pass_no_updates = false; - continue; - } - if self.process_msg_events(2, false, ProcessMessages::AllMessages) { - last_pass_no_updates = false; - continue; - } - // ...making sure any payments are claimed. - if self.process_events(0, false) { - last_pass_no_updates = false; - continue; - } - if self.process_events(1, false) { - last_pass_no_updates = false; - continue; + for settle_iter in 0..100 { + let made_progress = self.checkpoint_manager_persistences(); + let completed_monitor_update = self.complete_pending_monitor_updates(); + let mut had_msg_or_ev = false; + for node_idx in 0..3 { + if self.process_msg_events(node_idx, false, ProcessMessages::AllMessages) { + had_msg_or_ev = true; + } } - if self.process_events(2, false) { - last_pass_no_updates = false; - continue; + for node_idx in 0..3 { + if self.process_events(node_idx, false) { + had_msg_or_ev = true; + } } - if made_progress { + let had_pending_txs = self.confirm_pending_txs_and_sync_wallets(); + self.sync_all_nodes_with_chain_state(); + self.process_monitor_pending_events(); + let had_new_txs = self + .drain_and_confirm_broadcast_transactions("process_all_events", Some(settle_iter)); + if made_progress + || completed_monitor_update + || had_new_txs + || had_msg_or_ev + || had_pending_txs + { last_pass_no_updates = false; continue; } if last_pass_no_updates { - // In some cases, we may generate a message to send in - // `process_msg_events`, but block sending until - // `complete_all_monitor_updates` gets called on the next - // iteration. - // - // Thus, we only exit if we manage two iterations with no messages - // or events to process. + settled = true; break; } last_pass_no_updates = true; } + assert!( + settled, + "process_all_events exceeded settle budget: {}", + self.pending_work_summary(), + ); } fn disconnect_ab(&mut self) { @@ -3038,6 +3036,26 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { made_progress } + fn confirm_broadcasts_for_node(&mut self, node_idx: usize) { + let txs = self.nodes[node_idx] + .broadcaster + .txn_broadcasted + .borrow_mut() + .drain(..) 
+ .collect::>(); + for tx in txs { + self.confirm_tx_and_sync_wallets(tx); + } + } + + fn confirm_tx_and_sync_wallets(&mut self, tx: Transaction) -> bool { + confirm_tx_and_sync_wallets( + &mut self.chain_state, + [&self.nodes[0].wallet, &self.nodes[1].wallet, &self.nodes[2].wallet].as_slice(), + tx, + ) + } + fn confirm_pending_txs_and_sync_wallets(&mut self) -> bool { let confirmed_txs = self.chain_state.confirm_pending_txs(); for tx in &confirmed_txs { @@ -3048,6 +3066,363 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { } !confirmed_txs.is_empty() } + + fn open_channels(&self) -> Vec { + self.nodes[0] + .node + .list_channels() + .iter() + .chain(self.nodes[1].node.list_channels().iter()) + .chain(self.nodes[2].node.list_channels().iter()) + .cloned() + .collect::>() + } + + fn has_pending_monitor_updates(&self) -> bool { + self.nodes.iter().any(|node| { + node.persister + .latest_monitors + .lock() + .unwrap() + .values() + .any(|state| !state.pending_monitor_completions.is_empty()) + }) + } + + fn has_time_dependent_work(&self) -> bool { + let open_channels = self.open_channels(); + let open_refs: Vec<_> = open_channels.iter().collect(); + self.nodes.iter().any(|node| { + node.monitor.get_claimable_balances(&open_refs).iter().any(|balance| { + matches!( + balance, + Balance::ClaimableOnChannelClose { .. } + | Balance::ClaimableAwaitingConfirmations { .. } + | Balance::ContentiousClaimable { .. } + | Balance::MaybeTimeoutClaimableHTLC { .. } + | Balance::MaybePreimageClaimableHTLC { .. } + | Balance::CounterpartyRevokedOutputClaimable { .. } + ) + }) + }) + } + + fn has_pending_work(&self) -> bool { + !self.queues.ab.is_empty() + || !self.queues.ba.is_empty() + || !self.queues.bc.is_empty() + || !self.queues.cb.is_empty() + || !self.chain_state.pending_txs.is_empty() + || self.nodes.iter().any(|node| !node.broadcaster.txn_broadcasted.borrow().is_empty()) + || self.has_pending_monitor_updates() + || self.has_time_dependent_work() + } + + fn pending_work_summary(&self) -> String { + let open_channels = self.open_channels(); + let open_refs: Vec<_> = open_channels.iter().collect(); + let balances_a = self.nodes[0].monitor.get_claimable_balances(&open_refs); + let balances_b = self.nodes[1].monitor.get_claimable_balances(&open_refs); + let balances_c = self.nodes[2].monitor.get_claimable_balances(&open_refs); + format!( + "queues ab={} ba={} bc={} cb={} pending_txs={} bcast=({},{},{}) pending=({},{},{}) monitor_updates={} timed_work={} heights=({},{},{}) tip={} balances_a=[{}] balances_b=[{}] balances_c=[{}]", + self.queues.ab.len(), + self.queues.ba.len(), + self.queues.bc.len(), + self.queues.cb.len(), + self.chain_state.pending_txs.len(), + self.nodes[0].broadcaster.txn_broadcasted.borrow().len(), + self.nodes[1].broadcaster.txn_broadcasted.borrow().len(), + self.nodes[2].broadcaster.txn_broadcasted.borrow().len(), + self.payments.nodes[0].pending.len(), + self.payments.nodes[1].pending.len(), + self.payments.nodes[2].pending.len(), + self.has_pending_monitor_updates(), + self.has_time_dependent_work(), + self.nodes[0].height, + self.nodes[1].height, + self.nodes[2].height, + self.chain_state.tip_height(), + summarize_balances(&balances_a), + summarize_balances(&balances_b), + summarize_balances(&balances_c), + ) + } + + fn complete_pending_monitor_updates(&self) -> bool { + let mut completed_monitor_update = false; + for id in self.ab_link.channel_ids() { + completed_monitor_update |= self.nodes[0].complete_all_monitor_updates(id); + completed_monitor_update |= 
self.nodes[1].complete_all_monitor_updates(id); + } + for id in self.bc_link.channel_ids() { + completed_monitor_update |= self.nodes[1].complete_all_monitor_updates(id); + completed_monitor_update |= self.nodes[2].complete_all_monitor_updates(id); + } + completed_monitor_update + } + + fn sync_all_nodes_with_chain_state(&mut self) { + let chain_state = &self.chain_state; + for node in &mut self.nodes { + node.sync_with_chain_state(chain_state, None); + } + } + + fn process_monitor_pending_events(&self) { + for node in &self.nodes { + let logger = Arc::clone(&node.logger); + let wallet = WalletSync::new(&node.wallet, Arc::clone(&logger)); + let handler = BumpTransactionEventHandlerSync::new( + node.broadcaster.as_ref(), + &wallet, + node.keys_manager.as_ref(), + Arc::clone(&logger), + ); + let broadcaster = &node.broadcaster; + node.monitor.process_pending_events(&|event: events::Event| { + if let events::Event::BumpTransaction(ref bump) = event { + match bump { + events::bump_transaction::BumpTransactionEvent::ChannelClose { + commitment_tx, + channel_id, + counterparty_node_id, + .. + } => { + broadcaster.broadcast_transactions(&[( + commitment_tx, + lightning::chain::chaininterface::TransactionType::UnilateralClose { + counterparty_node_id: *counterparty_node_id, + channel_id: *channel_id, + }, + )]); + }, + events::bump_transaction::BumpTransactionEvent::HTLCResolution { + .. + } => { + handler.handle_event(bump); + }, + } + } + Ok(()) + }); + } + } + + fn drain_and_confirm_broadcast_transactions( + &mut self, context: &str, settle_iter: Option, + ) -> bool { + let mut had_new_txs = false; + for confirm_iter in 0..32 { + let mut found = false; + let mut pending_txs = Vec::new(); + for node in &self.nodes { + for tx in node.broadcaster.txn_broadcasted.borrow_mut().drain(..) 
{ + pending_txs.push(tx); + } + } + pending_txs.sort_by_key(|tx| tx.lock_time.to_consensus_u32()); + let mut deferred_txs = pending_txs; + loop { + let mut next_deferred_txs = Vec::new(); + let mut progressed = false; + for tx in deferred_txs { + if self.confirm_tx_and_sync_wallets(tx.clone()) { + found = true; + progressed = true; + } else { + next_deferred_txs.push(tx); + } + } + if !progressed { + deferred_txs = next_deferred_txs + .into_iter() + .filter(|tx| should_retry_confirm_later(&self.chain_state, tx)) + .collect(); + break; + } + deferred_txs = next_deferred_txs; + } + if !deferred_txs.is_empty() { + self.nodes[0].broadcaster.txn_broadcasted.borrow_mut().extend(deferred_txs); + } + if !found { + break; + } + let quiesce_context = match settle_iter { + Some(iter) => format!( + "{context} tx confirmation loop failed to quiesce at settle iter {iter}: {}", + self.pending_work_summary(), + ), + None => format!( + "{context} tx confirmation loop failed to quiesce: {}", + self.pending_work_summary(), + ), + }; + assert!(confirm_iter < 31, "{quiesce_context}"); + had_new_txs = true; + self.sync_all_nodes_with_chain_state(); + } + had_new_txs + } + + fn progress_round(&mut self) -> bool { + let made_progress = self.checkpoint_manager_persistences(); + let completed_monitor_update = self.complete_pending_monitor_updates(); + let mut had_msg_or_ev = false; + for node_idx in 0..3 { + if self.process_msg_events(node_idx, false, ProcessMessages::AllMessages) { + had_msg_or_ev = true; + } + } + for node_idx in 0..3 { + if self.process_events(node_idx, false) { + had_msg_or_ev = true; + } + } + let had_pending_txs = self.confirm_pending_txs_and_sync_wallets(); + self.sync_all_nodes_with_chain_state(); + self.process_monitor_pending_events(); + let had_new_txs = self.drain_and_confirm_broadcast_transactions("flush_progress", None); + made_progress || completed_monitor_update || had_new_txs || had_msg_or_ev || had_pending_txs + } + + fn flush_progress(&mut self, max_iters: usize) { + let mut last_pass_no_updates = false; + for _ in 0..max_iters { + if self.progress_round() { + last_pass_no_updates = false; + continue; + } + if last_pass_no_updates { + break; + } + last_pass_no_updates = true; + } + let pending_work = self.has_pending_work(); + let summary = self.pending_work_summary(); + assert!( + !pending_work || last_pass_no_updates, + "flush_progress exhausted {max_iters} iterations without quiescing: {summary}", + ); + assert!( + !pending_work || !last_pass_no_updates || max_iters > 0, + "flush_progress made no progress: {summary}", + ); + } + + fn advance_chain_carefully(&mut self, num_blocks: u32) { + for _ in 0..num_blocks { + self.flush_progress(32); + if !self.has_pending_work() { + break; + } + self.chain_state.advance_height(1); + self.flush_progress(32); + if !self.has_pending_work() { + break; + } + } + } + + fn catch_up_raw_monitors(&self) { + for node in &self.nodes { + let mut min_monitor_height = node.height; + for chan_id in node.monitor.list_monitors() { + if let Ok(mon) = node.monitor.get_monitor(chan_id) { + min_monitor_height = + std::cmp::min(min_monitor_height, mon.current_best_block().height); + } + } + let mut h = min_monitor_height; + while h < node.height { + let mut next_height = h + 1; + while next_height <= node.height + && self.chain_state.block_at(next_height).1.is_empty() + { + next_height += 1; + } + if next_height > node.height { + h = node.height; + let (header, _) = self.chain_state.block_at(h); + node.monitor.best_block_updated(header, h); + break; + } + if 
next_height > h + 1 { + h = next_height - 1; + let (header, _) = self.chain_state.block_at(h); + node.monitor.best_block_updated(header, h); + } + h = next_height; + let (header, txn) = self.chain_state.block_at(h); + let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); + if !txdata.is_empty() { + node.monitor.transactions_confirmed(header, &txdata, h); + } + node.monitor.best_block_updated(header, h); + } + } + } + + fn process_messages_and_events_only(&mut self) { + let mut settled = false; + let mut last_pass_no_updates = false; + for _ in 0..100 { + let made_progress = self.checkpoint_manager_persistences(); + let completed_monitor_update = self.complete_pending_monitor_updates(); + let mut had_msg_or_ev = false; + for node_idx in 0..3 { + if self.process_msg_events(node_idx, false, ProcessMessages::AllMessages) { + had_msg_or_ev = true; + } + } + for node_idx in 0..3 { + if self.process_events(node_idx, false) { + had_msg_or_ev = true; + } + } + if made_progress || completed_monitor_update || had_msg_or_ev { + last_pass_no_updates = false; + continue; + } + if last_pass_no_updates { + settled = true; + break; + } + last_pass_no_updates = true; + } + assert!(settled, "message-only settle exceeded budget: {}", self.pending_work_summary(),); + } + + fn probe_amount_for_direction( + &self, source_idx: usize, dest_chan_id: ChannelId, + ) -> Option { + self.nodes[source_idx] + .node + .list_usable_channels() + .iter() + .find(|chan| chan.channel_id == dest_chan_id) + .and_then(|chan| { + let probe_amt = cmp::max( + cmp::min(10_000_000, chan.next_outbound_htlc_limit_msat), + chan.next_outbound_htlc_minimum_msat, + ); + if probe_amt == 0 || probe_amt > chan.next_outbound_htlc_limit_msat { + None + } else { + Some(probe_amt) + } + }) + } + + fn can_send_after_settle( + &mut self, source_idx: usize, dest_idx: usize, dest_chan_id: ChannelId, + ) -> bool { + let Some(amt) = self.probe_amount_for_direction(source_idx, dest_chan_id) else { + return false; + }; + self.send_on_channel(source_idx, dest_idx, dest_chan_id, amt) + } } fn sync_wallets_with_confirmed_tx(wallets: &[&TestWalletSource], tx: &Transaction) { @@ -3064,6 +3439,47 @@ fn sync_wallets_with_confirmed_tx(wallets: &[&TestWalletSource], tx: &Transactio } } +fn confirm_tx_and_sync_wallets( + chain_state: &mut ChainState, wallets: &[&TestWalletSource], tx: Transaction, +) -> bool { + if chain_state.confirm_tx(tx.clone()) { + sync_wallets_with_confirmed_tx(wallets, &tx); + true + } else { + false + } +} + +fn summarize_balances(balances: &[Balance]) -> String { + let mut on_close = 0; + let mut awaiting = 0; + let mut contentious = 0; + let mut maybe_timeout = 0; + let mut maybe_preimage = 0; + let mut revoked = 0; + for balance in balances { + match balance { + Balance::ClaimableOnChannelClose { .. } => on_close += 1, + Balance::ClaimableAwaitingConfirmations { .. } => awaiting += 1, + Balance::ContentiousClaimable { .. } => contentious += 1, + Balance::MaybeTimeoutClaimableHTLC { .. } => maybe_timeout += 1, + Balance::MaybePreimageClaimableHTLC { .. } => maybe_preimage += 1, + Balance::CounterpartyRevokedOutputClaimable { .. 
} => revoked += 1, + } + } + format!( + "on_close={on_close} awaiting={awaiting} contentious={contentious} maybe_timeout={maybe_timeout} maybe_preimage={maybe_preimage} revoked={revoked}" + ) +} + +fn should_retry_confirm_later(chain_state: &ChainState, tx: &Transaction) -> bool { + let lock_time = tx.lock_time.to_consensus_u32(); + lock_time > 0 + && lock_time < 500_000_000 + && lock_time & (1 << 29) == 0 + && chain_state.tip_height() < lock_time +} + #[inline] pub fn do_test(data: &[u8], out: Out) { let router = FuzzRouter {}; @@ -3406,6 +3822,14 @@ pub fn do_test(data: &[u8], out: Out) { harness.nodes[2].signer_unblocked(None); }, + 0xd8 => harness.confirm_broadcasts_for_node(0), + 0xd9 => harness.confirm_broadcasts_for_node(1), + 0xda => harness.confirm_broadcasts_for_node(2), + + 0xdc => harness.advance_chain_carefully(50), + 0xdd => harness.advance_chain_carefully(100), + 0xde => harness.advance_chain_carefully(200), + 0xf0 => harness.ab_link.complete_monitor_updates_for_node( 0, &harness.nodes, @@ -3469,7 +3893,6 @@ pub fn do_test(data: &[u8], out: Out) { &harness.nodes, MonitorUpdateSelector::Last, ), - 0xff => { // Test that no channel is in a stuck state where neither party can send funds even // after we resolve all pending events. From 203746e3824520bd0d375e2110b9bec14fd1cde8 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 29 Apr 2026 10:45:17 +0200 Subject: [PATCH 28/30] fuzz: settle chanmon_consistency cleanup work Before final assertions, catch raw monitors up to node height and drive timer ticks plus block advancement until pending work clears. The final liveness probe now uses each channel's advertised sendable range instead of a fixed amount that may be outside its limits. --- fuzz/src/chanmon_consistency.rs | 60 +++++++++++++++++++++++++-------- 1 file changed, 46 insertions(+), 14 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 5d5d9eab004..970b9bf6c65 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -2974,6 +2974,18 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { self.nodes[1].signer_unblocked(None); self.nodes[2].signer_unblocked(None); + let has_stale_raw_monitors = self.nodes.iter().any(|node| { + node.monitor.list_monitors().into_iter().any(|chan_id| { + node.monitor + .get_monitor(chan_id) + .map(|mon| mon.current_best_block().height < node.height) + .unwrap_or(false) + }) + }); + if has_stale_raw_monitors { + self.process_messages_and_events_only(); + self.catch_up_raw_monitors(); + } self.process_all_events(); // Since MPP payments are supported, we wait until we fully settle the state of all @@ -2984,7 +2996,25 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { } self.process_all_events(); - // Verify no payments are stuck - all should have resolved + for _ in 0..4096 { + self.flush_progress(32); + if !self.has_pending_work() { + break; + } + for node in self.nodes.iter() { + node.timer_tick_occurred(); + } + self.flush_progress(32); + if !self.has_pending_work() { + break; + } + self.chain_state.advance_height(1); + self.flush_progress(32); + if !self.has_pending_work() { + break; + } + } + self.payments.assert_all_resolved(); // Verify that every payment claimed by a receiver resulted in a // PaymentSent event at the sender. @@ -3007,20 +3037,22 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { } } - // Finally, make sure that at least one end of each channel can make a substantial payment. 
- let chan_ab_ids = self.ab_link.channel_ids().clone(); - let chan_bc_ids = self.bc_link.channel_ids().clone(); - for chan_id in chan_ab_ids { - assert!( - self.send_on_channel(0, 1, chan_id, 10_000_000) - || self.send_on_channel(1, 0, chan_id, 10_000_000) - ); + self.ab_link.complete_all_monitor_updates(&self.nodes); + self.bc_link.complete_all_monitor_updates(&self.nodes); + + for chan_id in *self.ab_link.channel_ids() { + if self.probe_amount_for_direction(0, chan_id).is_some() { + assert!(self.can_send_after_settle(0, 1, chan_id)); + } else if self.probe_amount_for_direction(1, chan_id).is_some() { + assert!(self.can_send_after_settle(1, 0, chan_id)); + } } - for chan_id in chan_bc_ids { - assert!( - self.send_on_channel(1, 2, chan_id, 10_000_000) - || self.send_on_channel(2, 1, chan_id, 10_000_000) - ); + for chan_id in *self.bc_link.channel_ids() { + if self.probe_amount_for_direction(1, chan_id).is_some() { + assert!(self.can_send_after_settle(1, 2, chan_id)); + } else if self.probe_amount_for_direction(2, chan_id).is_some() { + assert!(self.can_send_after_settle(2, 1, chan_id)); + } } self.nodes[0].record_last_htlc_clear_fee(); From 19b14a14c2aa39325beee1812f8132aed5b5c48e Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 12:34:14 +0200 Subject: [PATCH 29/30] fuzz: track chanmon_consistency payment resolution Track payment hashes, paths, claims, sender outcomes, and closed channels so settle-all can distinguish unresolved work from valid force-close outcomes. Keep payments pending until the sender observes PaymentSent or PaymentFailed, including abandoned sends with committed HTLCs still in flight. When reload selects an older raw monitor, catch it up to the harness node height immediately so it observes historical funding spends before later sync starts from the manager height. This lets the harness accept sender failure for claimed dust paths while still asserting that observable payment lifecycles complete. 
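
Conceptually, the sender-side bookkeeping works like the rough standalone sketch
below. It is only an illustration of the rule "a payment stays pending until its
hash reaches a terminal sender outcome, and a requested claim may end in sender
failure only when a dust-blocked path is recorded for that hash"; the type and
field names are invented stand-ins (u64 for PaymentId, [u8; 32] for PaymentHash),
not the harness' actual types.

    use std::collections::{HashMap, HashSet};

    // Simplified stand-ins: u64 for PaymentId, [u8; 32] for PaymentHash.
    #[derive(Default)]
    struct SenderOutcomes {
        pending: Vec<u64>,                  // payment ids awaiting a sender-side outcome
        hash_by_id: HashMap<u64, [u8; 32]>, // payment id -> payment hash
        sent: HashSet<[u8; 32]>,            // hashes the sender saw PaymentSent for
        failed: HashSet<[u8; 32]>,          // hashes the sender saw PaymentFailed for
        claim_requested: HashSet<[u8; 32]>, // hashes the receiver was told to claim
        dust_blocked: HashSet<[u8; 32]>,    // hashes with at least one dust-blocked path
    }

    impl SenderOutcomes {
        fn register(&mut self, id: u64, hash: [u8; 32]) {
            self.hash_by_id.insert(id, hash);
            self.pending.push(id);
        }

        // Retire pending ids whose hash reached a terminal sender-side outcome.
        fn retire_resolved(&mut self) {
            let (hashes, sent, failed) = (&self.hash_by_id, &self.sent, &self.failed);
            self.pending.retain(|id| match hashes.get(id) {
                Some(hash) => !sent.contains(hash) && !failed.contains(hash),
                None => false,
            });
        }

        // A requested claim may only end in sender-side failure when a dust path
        // blocked the preimage from propagating back after a force close.
        fn claim_outcome_ok(&self, hash: &[u8; 32]) -> bool {
            if !self.claim_requested.contains(hash) {
                return true;
            }
            self.sent.contains(hash)
                || (self.failed.contains(hash) && self.dust_blocked.contains(hash))
        }
    }

    fn main() {
        let mut outcomes = SenderOutcomes::default();
        let hash = [1u8; 32];
        outcomes.register(7, hash);
        outcomes.claim_requested.insert(hash);
        outcomes.dust_blocked.insert(hash);
        outcomes.failed.insert(hash); // sender saw PaymentFailed on a claimed, dust-blocked hash
        outcomes.retire_resolved();
        assert!(outcomes.pending.is_empty());
        assert!(outcomes.claim_outcome_ok(&hash));
    }

The real tracker additionally records per-path amounts and closed channels and is
keyed per node; the sketch omits those details.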
--- fuzz/src/chanmon_consistency.rs | 620 ++++++++++++++++++++++---------- 1 file changed, 438 insertions(+), 182 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 970b9bf6c65..cf7a8f7e2fd 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1545,35 +1545,105 @@ impl PeerLink { } } -struct NodePayments { - pending: Vec, - resolved: HashMap>, -} - -impl NodePayments { - fn new() -> Self { - Self { pending: Vec::new(), resolved: new_hash_map() } - } -} - struct PaymentTracker { - nodes: [NodePayments; 3], - claimed_payment_hashes: HashSet, - payment_preimages: HashMap, payment_ctr: u64, + pending_payments: RefCell<[Vec; 3]>, + resolved_payment_ids: RefCell<[HashSet; 3]>, + claimed_payment_hashes: RefCell>, + receiver_claimed_payment_hashes: RefCell>, + sender_sent_payment_hashes: RefCell>, + sender_failed_payment_hashes: RefCell>, + payment_hashes_by_id: RefCell>, + payment_paths_by_hash: RefCell>>>, + blocked_dust_paths_by_hash: RefCell>>, + payment_preimages: RefCell>, + closed_channels: RefCell>, } impl PaymentTracker { fn new() -> Self { Self { - nodes: [NodePayments::new(), NodePayments::new(), NodePayments::new()], - claimed_payment_hashes: HashSet::new(), - payment_preimages: new_hash_map(), payment_ctr: 0, + pending_payments: RefCell::new([Vec::new(), Vec::new(), Vec::new()]), + resolved_payment_ids: RefCell::new([HashSet::new(), HashSet::new(), HashSet::new()]), + claimed_payment_hashes: RefCell::new(HashSet::new()), + receiver_claimed_payment_hashes: RefCell::new(HashSet::new()), + sender_sent_payment_hashes: RefCell::new(HashSet::new()), + sender_failed_payment_hashes: RefCell::new(HashSet::new()), + payment_hashes_by_id: RefCell::new(new_hash_map()), + payment_paths_by_hash: RefCell::new(new_hash_map()), + blocked_dust_paths_by_hash: RefCell::new(new_hash_map()), + payment_preimages: RefCell::new(new_hash_map()), + closed_channels: RefCell::new(HashSet::new()), } } - // Returns a bool indicating whether the payment failed. 
+ fn register_payment( + &self, source_idx: usize, payment_id: PaymentId, payment_hash: PaymentHash, + payment_paths: Vec>, + ) { + assert!( + self.payment_hashes_by_id.borrow_mut().insert(payment_id, payment_hash).is_none(), + "duplicate payment_id {:?}", + payment_id + ); + assert!( + self.payment_paths_by_hash.borrow_mut().insert(payment_hash, payment_paths).is_none(), + "duplicate payment_hash {:?}", + payment_hash + ); + self.pending_payments.borrow_mut()[source_idx].push(payment_id); + } + + fn claim_allows_sender_failure(&self, hash: &PaymentHash) -> bool { + self.blocked_dust_paths_by_hash + .borrow() + .get(hash) + .is_some_and(|blocked_paths| !blocked_paths.is_empty()) + } + + fn summarize_claim_tracking(&self) -> String { + let claim_requested = self.claimed_payment_hashes.borrow(); + let receiver_claimed = self.receiver_claimed_payment_hashes.borrow(); + let sender_sent = self.sender_sent_payment_hashes.borrow(); + let sender_failed = self.sender_failed_payment_hashes.borrow(); + let failure_allowed_count = + claim_requested.iter().filter(|hash| self.claim_allows_sender_failure(hash)).count(); + let missing_receiver = + claim_requested.iter().filter(|hash| !receiver_claimed.contains(*hash)).count(); + let missing_sender = claim_requested + .iter() + .filter(|hash| !sender_sent.contains(*hash) && !sender_failed.contains(*hash)) + .count(); + format!( + "claims requested={} receiver_claimed={} sender_sent={} sender_failed={} failure_allowed={} missing_receiver={} missing_sender={}", + claim_requested.len(), + receiver_claimed.len(), + sender_sent.len(), + sender_failed.len(), + failure_allowed_count, + missing_receiver, + missing_sender, + ) + } + + fn has_unfinished_claims(&self) -> bool { + let claim_requested = self.claimed_payment_hashes.borrow(); + let receiver_claimed = self.receiver_claimed_payment_hashes.borrow(); + let sender_sent = self.sender_sent_payment_hashes.borrow(); + let sender_failed = self.sender_failed_payment_hashes.borrow(); + claim_requested.iter().any(|hash| { + !receiver_claimed.contains(hash) + || (!sender_sent.contains(hash) && !sender_failed.contains(hash)) + }) + } + + fn has_live_payment_work(&self) -> bool { + self.pending_payments.borrow().iter().any(|payments| !payments.is_empty()) + || self.has_unfinished_claims() + } + + // Returns whether the payment entered a tracked outbound state. fn check_payment_send_events(source: &ChanMan, sent_payment_id: PaymentId) -> bool { for payment in source.list_recent_payments() { match payment { @@ -1585,7 +1655,9 @@ impl PaymentTracker { RecentPaymentDetails::Abandoned { payment_id, .. } if payment_id == sent_payment_id => { - return false; + // Retries may already be exhausted even though committed HTLCs are still + // in flight and will later resolve with PaymentFailed. 
+ return true; }, _ => {}, } @@ -1601,7 +1673,7 @@ impl PaymentTracker { let secret = dest .create_inbound_payment_for_hash(hash, None, 3600, None, None) .expect("create_inbound_payment_for_hash failed"); - assert!(self.payment_preimages.insert(hash, payment_preimage).is_none()); + assert!(self.payment_preimages.borrow_mut().insert(hash, payment_preimage).is_none()); let mut id = PaymentId([0; 32]); id.0[0..8].copy_from_slice(&self.payment_ctr.to_ne_bytes()); (secret, hash, id) @@ -1611,6 +1683,9 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, dest_idx: usize, dest_chan_id: ChannelId, amt: u64, ) -> bool { + if self.closed_channels.borrow().contains(&dest_chan_id) { + return false; + } let source = &nodes[source_idx]; let dest = &nodes[dest_idx]; let (secret, hash, id) = self.next_payment(dest); @@ -1659,7 +1734,7 @@ impl PaymentTracker { }, }; if succeeded { - self.nodes[source_idx].pending.push(id); + self.register_payment(source_idx, id, hash, vec![vec![(dest_chan_id, amt)]]); } succeeded } @@ -1668,6 +1743,11 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, middle_idx: usize, middle_chan_id: ChannelId, dest_idx: usize, dest_chan_id: ChannelId, amt: u64, ) { + let closed_channels = self.closed_channels.borrow(); + if closed_channels.contains(&middle_chan_id) || closed_channels.contains(&dest_chan_id) { + return; + } + drop(closed_channels); let source = &nodes[source_idx]; let middle = &nodes[middle_idx]; let dest = &nodes[dest_idx]; @@ -1684,12 +1764,14 @@ impl PaymentTracker { ) }) .unwrap_or((0, 0, 0)); - let dest_scid = dest + let Some(dest_scid) = dest .list_channels() .iter() .find(|chan| chan.channel_id == dest_chan_id) .and_then(|chan| chan.short_channel_id) - .unwrap_or(0); + else { + return; + }; let first_hop_fee = 50_000; let route_params = RouteParameters::from_payment_params_and_value( PaymentParameters::from_node_id(source.get_our_node_id(), TEST_FINAL_CLTV), @@ -1736,7 +1818,12 @@ impl PaymentTracker { }, }; if succeeded { - self.nodes[source_idx].pending.push(id); + self.register_payment( + source_idx, + id, + hash, + vec![vec![(middle_chan_id, amt + first_hop_fee), (dest_chan_id, amt)]], + ); } } @@ -1752,38 +1839,48 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, dest_idx: usize, dest_chan_ids: &[ChannelId], amt: u64, ) { + let live_dest_chan_ids = { + let closed_channels = self.closed_channels.borrow(); + dest_chan_ids + .iter() + .copied() + .filter(|chan_id| !closed_channels.contains(chan_id)) + .collect::>() + }; + if live_dest_chan_ids.is_empty() { + return; + } let source = &nodes[source_idx]; let dest = &nodes[dest_idx]; let (secret, hash, id) = self.next_payment(dest); - let num_paths = dest_chan_ids.len(); + let mut paths = Vec::new(); + let dest_chans = dest.list_channels(); + let dest_scids: Vec<_> = live_dest_chan_ids + .iter() + .filter_map(|chan_id| { + dest_chans + .iter() + .find(|chan| chan.channel_id == *chan_id) + .and_then(|chan| chan.short_channel_id) + .map(|scid| (*chan_id, scid)) + }) + .collect(); + let num_paths = dest_scids.len(); if num_paths == 0 { return; } - let amt_per_path = amt / num_paths as u64; - let mut paths = Vec::with_capacity(num_paths); - - let dest_chans = dest.list_channels(); - let dest_scids = dest_chan_ids.iter().map(|chan_id| { - dest_chans - .iter() - .find(|chan| chan.channel_id == *chan_id) - .and_then(|chan| chan.short_channel_id) - .unwrap() - }); - - for (i, dest_scid) in dest_scids.enumerate() { + for (i, 
(_, dest_scid)) in dest_scids.iter().enumerate() { let path_amt = if i == num_paths - 1 { amt - amt_per_path * (num_paths as u64 - 1) } else { amt_per_path }; - paths.push(Path { hops: vec![RouteHop { pubkey: dest.get_our_node_id(), node_features: dest.node_features(), - short_channel_id: dest_scid, + short_channel_id: *dest_scid, channel_features: dest.channel_features(), fee_msat: path_amt, cltv_expiry_delta: 200, @@ -1792,7 +1889,6 @@ impl PaymentTracker { blinded_tail: None, }); } - let route_params = RouteParameters::from_payment_params_and_value( PaymentParameters::from_node_id(dest.get_our_node_id(), TEST_FINAL_CLTV), amt, @@ -1805,7 +1901,19 @@ impl PaymentTracker { Ok(()) => Self::check_payment_send_events(source, id), }; if succeeded { - self.nodes[source_idx].pending.push(id); + let payment_paths = dest_scids + .iter() + .enumerate() + .map(|(i, (chan_id, _))| { + let path_amt = if i == num_paths - 1 { + amt - amt_per_path * (num_paths as u64 - 1) + } else { + amt_per_path + }; + vec![(*chan_id, path_amt)] + }) + .collect(); + self.register_payment(source_idx, id, hash, payment_paths); } } @@ -1814,49 +1922,61 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, middle_idx: usize, middle_chan_ids: &[ChannelId], dest_idx: usize, dest_chan_ids: &[ChannelId], amt: u64, ) { + let (live_middle_chan_ids, live_dest_chan_ids) = { + let closed_channels = self.closed_channels.borrow(); + ( + middle_chan_ids + .iter() + .copied() + .filter(|chan_id| !closed_channels.contains(chan_id)) + .collect::>(), + dest_chan_ids + .iter() + .copied() + .filter(|chan_id| !closed_channels.contains(chan_id)) + .collect::>(), + ) + }; + if live_middle_chan_ids.is_empty() || live_dest_chan_ids.is_empty() { + return; + } let source = &nodes[source_idx]; let middle = &nodes[middle_idx]; let dest = &nodes[dest_idx]; let (secret, hash, id) = self.next_payment(dest); - // Create paths by pairing middle_scids with dest_scids. 
- let num_paths = middle_chan_ids.len().max(dest_chan_ids.len()); - if num_paths == 0 { - return; - } - - let first_hop_fee = 50_000; - let amt_per_path = amt / num_paths as u64; - let fee_per_path = first_hop_fee / num_paths as u64; - let mut paths = Vec::with_capacity(num_paths); - let middle_chans = middle.list_channels(); - let middle_scids: Vec<_> = middle_chan_ids + let middle_scids: Vec<_> = live_middle_chan_ids .iter() - .map(|chan_id| { + .filter_map(|chan_id| { middle_chans .iter() .find(|chan| chan.channel_id == *chan_id) .and_then(|chan| chan.short_channel_id) - .unwrap() + .map(|scid| (*chan_id, scid)) }) .collect(); - let dest_chans = dest.list_channels(); - let dest_scids: Vec<_> = dest_chan_ids + let dest_scids: Vec<_> = live_dest_chan_ids .iter() - .map(|chan_id| { + .filter_map(|chan_id| { dest_chans .iter() .find(|chan| chan.channel_id == *chan_id) .and_then(|chan| chan.short_channel_id) - .unwrap() + .map(|scid| (*chan_id, scid)) }) .collect(); - + let num_paths = middle_scids.len().max(dest_scids.len()); + if middle_scids.is_empty() || dest_scids.is_empty() { + return; + } + let first_hop_fee = 50_000; + let amt_per_path = amt / num_paths as u64; + let fee_per_path = first_hop_fee / num_paths as u64; + let mut paths = Vec::with_capacity(num_paths); for i in 0..num_paths { - let middle_scid = middle_scids[i % middle_scids.len()]; - let dest_scid = dest_scids[i % dest_scids.len()]; - + let (_, middle_scid) = middle_scids[i % middle_scids.len()]; + let (_, dest_scid) = dest_scids[i % dest_scids.len()]; let path_amt = if i == num_paths - 1 { amt - amt_per_path * (num_paths as u64 - 1) } else { @@ -1867,7 +1987,6 @@ impl PaymentTracker { } else { fee_per_path }; - paths.push(Path { hops: vec![ RouteHop { @@ -1892,7 +2011,6 @@ impl PaymentTracker { blinded_tail: None, }); } - let route_params = RouteParameters::from_payment_params_and_value( PaymentParameters::from_node_id(dest.get_our_node_id(), TEST_FINAL_CLTV), amt, @@ -1905,7 +2023,24 @@ impl PaymentTracker { Ok(()) => Self::check_payment_send_events(source, id), }; if succeeded { - self.nodes[source_idx].pending.push(id); + let payment_paths = (0..num_paths) + .map(|i| { + let (middle_chan_id, _) = middle_scids[i % middle_scids.len()]; + let (dest_chan_id, _) = dest_scids[i % dest_scids.len()]; + let path_amt = if i == num_paths - 1 { + amt - amt_per_path * (num_paths as u64 - 1) + } else { + amt_per_path + }; + let path_fee = if i == num_paths - 1 { + first_hop_fee - fee_per_path * (num_paths as u64 - 1) + } else { + fee_per_path + }; + vec![(middle_chan_id, path_amt + path_fee), (dest_chan_id, path_amt)] + }) + .collect(); + self.register_payment(source_idx, id, hash, payment_paths); } } @@ -1915,70 +2050,55 @@ impl PaymentTracker { } else { let payment_preimage = *self .payment_preimages + .borrow() .get(&payment_hash) .expect("PaymentClaimable for unknown payment hash"); node.claim_funds(payment_preimage); - self.claimed_payment_hashes.insert(payment_hash); + self.claimed_payment_hashes.borrow_mut().insert(payment_hash); } } fn mark_sent(&mut self, node_idx: usize, sent_id: PaymentId, payment_hash: PaymentHash) { - let node = &mut self.nodes[node_idx]; - let idx_opt = node.pending.iter().position(|id| *id == sent_id); - if let Some(idx) = idx_opt { - node.pending.remove(idx); - node.resolved.insert(sent_id, Some(payment_hash)); - } else { - assert!(node.resolved.contains_key(&sent_id)); + self.sender_sent_payment_hashes.borrow_mut().insert(payment_hash); + self.mark_resolved_payment(node_idx, sent_id, true); + } + + 
fn mark_failed( + &mut self, node_idx: usize, payment_id: PaymentId, payment_hash: Option, + ) { + let payment_hash = + payment_hash.or_else(|| self.payment_hashes_by_id.borrow().get(&payment_id).copied()); + if let Some(payment_hash) = payment_hash { + self.sender_failed_payment_hashes.borrow_mut().insert(payment_hash); } + self.mark_resolved_payment(node_idx, payment_id, false); } fn mark_resolved_without_hash(&mut self, node_idx: usize, payment_id: PaymentId) { - let node = &mut self.nodes[node_idx]; - let idx_opt = node.pending.iter().position(|id| *id == payment_id); - if let Some(idx) = idx_opt { - node.pending.remove(idx); - node.resolved.insert(payment_id, None); - } else if !node.resolved.contains_key(&payment_id) { - // Some resolutions can arrive immediately, before the send helper records - // the payment as pending. Track them so later duplicate events are accepted. - node.resolved.insert(payment_id, None); - } + self.mark_resolved_payment(node_idx, payment_id, false); } - fn mark_successful_probe(&mut self, node_idx: usize, payment_id: PaymentId) { - let node = &mut self.nodes[node_idx]; - let idx_opt = node.pending.iter().position(|id| *id == payment_id); - if let Some(idx) = idx_opt { - node.pending.remove(idx); - node.resolved.insert(payment_id, None); - } else { - assert!(node.resolved.contains_key(&payment_id)); - } + fn mark_receiver_claimed(&mut self, payment_hash: PaymentHash) { + self.receiver_claimed_payment_hashes.borrow_mut().insert(payment_hash); } - fn assert_all_resolved(&self) { - for (idx, node) in self.nodes.iter().enumerate() { - assert!( - node.pending.is_empty(), - "Node {} has {} stuck pending payments after settling all state", - idx, - node.pending.len() - ); - } + fn mark_channel_closed(&mut self, channel_id: ChannelId) { + self.closed_channels.borrow_mut().insert(channel_id); } - fn assert_claims_reported(&self) { - for hash in self.claimed_payment_hashes.iter() { - let found = self - .nodes - .iter() - .any(|node| node.resolved.values().any(|h| h.as_ref() == Some(hash))); - assert!( - found, - "Payment {:?} was claimed by receiver but sender never got PaymentSent", - hash - ); + fn mark_resolved_payment( + &self, node_idx: usize, payment_id: PaymentId, assert_already_resolved: bool, + ) { + let mut pending_payments = self.pending_payments.borrow_mut(); + let mut resolved_payment_ids = self.resolved_payment_ids.borrow_mut(); + let idx_opt = pending_payments[node_idx].iter().position(|id| *id == payment_id); + if let Some(idx) = idx_opt { + pending_payments[node_idx].remove(idx); + resolved_payment_ids[node_idx].insert(payment_id); + } else if assert_already_resolved { + assert!(resolved_payment_ids[node_idx].contains(&payment_id)); + } else if !resolved_payment_ids[node_idx].contains(&payment_id) { + resolved_payment_ids[node_idx].insert(payment_id); } } } @@ -2824,17 +2944,31 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { // hashing the payment hash+preimage, it is rather trivial for the fuzzer to build // payments that accidentally end up looking like probes. events::Event::ProbeSuccessful { payment_id, .. } => { - payments.mark_successful_probe(node_idx, payment_id); + payments.mark_resolved_without_hash(node_idx, payment_id); + }, + events::Event::PaymentFailed { payment_id, payment_hash, .. } => { + payments.mark_failed(node_idx, payment_id, payment_hash); }, - events::Event::PaymentFailed { payment_id, .. } - | events::Event::ProbeFailed { payment_id, .. } => { + events::Event::ProbeFailed { payment_id, .. 
} => { payments.mark_resolved_without_hash(node_idx, payment_id); }, - events::Event::PaymentClaimed { .. } => {}, + events::Event::PaymentClaimed { payment_hash, .. } => { + payments.mark_receiver_claimed(payment_hash); + }, events::Event::PaymentPathSuccessful { .. } => {}, events::Event::PaymentPathFailed { .. } => {}, events::Event::PaymentForwarded { .. } if node_idx == 1 => {}, events::Event::ChannelReady { .. } => {}, + events::Event::HTLCHandlingFailed { + failure_type: events::HTLCHandlingFailureType::Receive { payment_hash }, + .. + } => { + assert!( + !payments.claimed_payment_hashes.borrow().contains(&payment_hash), + "Payment {:?} hit HTLCHandlingFailed::Receive after claim_funds", + payment_hash, + ); + }, events::Event::HTLCHandlingFailed { .. } => {}, events::Event::FundingTransactionReadyForSigning { channel_id, @@ -2859,7 +2993,9 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { } }, events::Event::SpliceNegotiationFailed { .. } => {}, - events::Event::ChannelClosed { .. } => {}, + events::Event::ChannelClosed { channel_id, .. } => { + payments.mark_channel_closed(channel_id); + }, events::Event::DiscardFunding { .. } => {}, events::Event::SpendableOutputs { .. } => {}, events::Event::BumpTransaction(..) => {}, @@ -2958,6 +3094,7 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { _ => panic!("invalid node index"), } self.nodes[node_idx].reload(v, &self.out, router, self.chan_type); + self.catch_up_raw_monitors_for_node(node_idx); } fn settle_all(&mut self) { @@ -2996,29 +3133,129 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { } self.process_all_events(); - for _ in 0..4096 { - self.flush_progress(32); - if !self.has_pending_work() { - break; - } - for node in self.nodes.iter() { - node.timer_tick_occurred(); - } - self.flush_progress(32); - if !self.has_pending_work() { - break; + if !self.payments.closed_channels.borrow().is_empty() { + for _ in 0..4096 { + self.flush_progress(32); + for node in self.nodes.iter() { + node.timer_tick_occurred(); + } + self.flush_progress(32); + let open_channels = self.open_channels(); + let open_refs: Vec<_> = open_channels.iter().collect(); + let balances_a = self.nodes[0].monitor.get_claimable_balances(&open_refs); + let balances_b = self.nodes[1].monitor.get_claimable_balances(&open_refs); + let balances_c = self.nodes[2].monitor.get_claimable_balances(&open_refs); + let needs_payment_completion = self.payments.has_live_payment_work(); + let has_cleanup_balances = + !balances_a.is_empty() || !balances_b.is_empty() || !balances_c.is_empty(); + let can_drive_more_cleanup = has_cleanup_balances || self.has_pending_work(); + let next_claimed_htlc_boundary = { + let claimed_hashes = self.payments.claimed_payment_hashes.borrow(); + let sender_sent = self.payments.sender_sent_payment_hashes.borrow(); + let sender_failed = self.payments.sender_failed_payment_hashes.borrow(); + balances_a + .iter() + .chain(balances_b.iter()) + .chain(balances_c.iter()) + .filter_map(|balance| match balance { + Balance::ContentiousClaimable { + timeout_height, payment_hash, .. + } if claimed_hashes.contains(payment_hash) + && !sender_sent.contains(payment_hash) + && !sender_failed.contains(payment_hash) => + { + Some(*timeout_height) + }, + Balance::MaybeTimeoutClaimableHTLC { + claimable_height, + payment_hash, + .. 
+ } if claimed_hashes.contains(payment_hash) + && !sender_sent.contains(payment_hash) + && !sender_failed.contains(payment_hash) => + { + Some(*claimable_height) + }, + Balance::MaybePreimageClaimableHTLC { + expiry_height, + payment_hash, + .. + } if claimed_hashes.contains(payment_hash) + && !sender_sent.contains(payment_hash) + && !sender_failed.contains(payment_hash) => + { + Some(*expiry_height) + }, + _ => None, + }) + .min() + }; + let can_advance_without_claimed_expiry = next_claimed_htlc_boundary + .map_or(true, |boundary| { + self.chain_state.tip_height().saturating_add(1) < boundary + }); + if !needs_payment_completion || !can_drive_more_cleanup { + break; + } + if self.payments.has_unfinished_claims() && !can_advance_without_claimed_expiry { + break; + } + self.chain_state.advance_height(1); + self.flush_progress(32); } - self.chain_state.advance_height(1); - self.flush_progress(32); - if !self.has_pending_work() { - break; + } + + { + let payment_hashes = self.payments.payment_hashes_by_id.borrow(); + let sender_sent = self.payments.sender_sent_payment_hashes.borrow(); + let sender_failed = self.payments.sender_failed_payment_hashes.borrow(); + let mut pending = self.payments.pending_payments.borrow_mut(); + let mut resolved = self.payments.resolved_payment_ids.borrow_mut(); + for (node_idx, payment_ids) in pending.iter_mut().enumerate() { + payment_ids.retain(|payment_id| { + let payment_hash = *payment_hashes + .get(payment_id) + .expect("pending payment missing payment hash"); + let sender_resolved = sender_sent.contains(&payment_hash) + || sender_failed.contains(&payment_hash); + if sender_resolved { + resolved[node_idx].insert(*payment_id); + } + !sender_resolved + }); } } - self.payments.assert_all_resolved(); - // Verify that every payment claimed by a receiver resulted in a - // PaymentSent event at the sender. - self.payments.assert_claims_reported(); + for (idx, pending) in self.payments.pending_payments.borrow().iter().enumerate() { + assert!( + pending.is_empty(), + "Node {} has {} stuck pending payments after settling all state: ids={:?}; {}", + idx, + pending.len(), + pending, + self.pending_work_summary(), + ); + } + + let claimed_hashes = + self.payments.claimed_payment_hashes.borrow().iter().copied().collect::>(); + for hash in claimed_hashes { + let receiver_saw_claim = + self.payments.receiver_claimed_payment_hashes.borrow().contains(&hash); + assert!( + receiver_saw_claim, + "Payment {:?} was claimed with claim_funds but receiver never got PaymentClaimed", + hash, + ); + let sender_saw_sent = self.payments.sender_sent_payment_hashes.borrow().contains(&hash); + let sender_saw_failed = + self.payments.sender_failed_payment_hashes.borrow().contains(&hash); + assert!(!(sender_saw_sent && sender_saw_failed)); + assert!(sender_saw_sent || sender_saw_failed); + if sender_saw_failed { + assert!(self.payments.claim_allows_sender_failure(&hash)); + } + } // All HTLCs should have been claimed or failed once we reach quiescence. 
for (idx, node) in self.nodes.iter().enumerate() { @@ -3041,6 +3278,9 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { self.bc_link.complete_all_monitor_updates(&self.nodes); for chan_id in *self.ab_link.channel_ids() { + if self.payments.closed_channels.borrow().contains(&chan_id) { + continue; + } if self.probe_amount_for_direction(0, chan_id).is_some() { assert!(self.can_send_after_settle(0, 1, chan_id)); } else if self.probe_amount_for_direction(1, chan_id).is_some() { @@ -3048,6 +3288,9 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { } } for chan_id in *self.bc_link.channel_ids() { + if self.payments.closed_channels.borrow().contains(&chan_id) { + continue; + } if self.probe_amount_for_direction(1, chan_id).is_some() { assert!(self.can_send_after_settle(1, 2, chan_id)); } else if self.probe_amount_for_direction(2, chan_id).is_some() { @@ -3156,8 +3399,9 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { let balances_a = self.nodes[0].monitor.get_claimable_balances(&open_refs); let balances_b = self.nodes[1].monitor.get_claimable_balances(&open_refs); let balances_c = self.nodes[2].monitor.get_claimable_balances(&open_refs); + let pending_payments = self.payments.pending_payments.borrow(); format!( - "queues ab={} ba={} bc={} cb={} pending_txs={} bcast=({},{},{}) pending=({},{},{}) monitor_updates={} timed_work={} heights=({},{},{}) tip={} balances_a=[{}] balances_b=[{}] balances_c=[{}]", + "queues ab={} ba={} bc={} cb={} pending_txs={} bcast=({},{},{}) pending=({},{},{}) monitor_updates={} timed_work={} heights=({},{},{}) tip={} {} balances_a=[{}] balances_b=[{}] balances_c=[{}]", self.queues.ab.len(), self.queues.ba.len(), self.queues.bc.len(), @@ -3166,15 +3410,16 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { self.nodes[0].broadcaster.txn_broadcasted.borrow().len(), self.nodes[1].broadcaster.txn_broadcasted.borrow().len(), self.nodes[2].broadcaster.txn_broadcasted.borrow().len(), - self.payments.nodes[0].pending.len(), - self.payments.nodes[1].pending.len(), - self.payments.nodes[2].pending.len(), + pending_payments[0].len(), + pending_payments[1].len(), + pending_payments[2].len(), self.has_pending_monitor_updates(), self.has_time_dependent_work(), self.nodes[0].height, self.nodes[1].height, self.nodes[2].height, self.chain_state.tip_height(), + self.payments.summarize_claim_tracking(), summarize_balances(&balances_a), summarize_balances(&balances_b), summarize_balances(&balances_c), @@ -3344,55 +3589,63 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { } fn advance_chain_carefully(&mut self, num_blocks: u32) { - for _ in 0..num_blocks { - self.flush_progress(32); - if !self.has_pending_work() { - break; - } - self.chain_state.advance_height(1); + if self.payments.has_live_payment_work() { self.flush_progress(32); - if !self.has_pending_work() { - break; + } else { + for _ in 0..num_blocks { + self.flush_progress(32); + if !self.has_pending_work() { + break; + } + self.chain_state.advance_height(1); + self.flush_progress(32); + if !self.has_pending_work() { + break; + } } } } fn catch_up_raw_monitors(&self) { - for node in &self.nodes { - let mut min_monitor_height = node.height; - for chan_id in node.monitor.list_monitors() { - if let Ok(mon) = node.monitor.get_monitor(chan_id) { - min_monitor_height = - std::cmp::min(min_monitor_height, mon.current_best_block().height); - } + for node_idx in 0..self.nodes.len() { + self.catch_up_raw_monitors_for_node(node_idx); + } + } + + fn 
catch_up_raw_monitors_for_node(&self, node_idx: usize) { + let node = &self.nodes[node_idx]; + let mut min_monitor_height = node.height; + for chan_id in node.monitor.list_monitors() { + if let Ok(mon) = node.monitor.get_monitor(chan_id) { + min_monitor_height = + std::cmp::min(min_monitor_height, mon.current_best_block().height); } - let mut h = min_monitor_height; - while h < node.height { - let mut next_height = h + 1; - while next_height <= node.height - && self.chain_state.block_at(next_height).1.is_empty() - { - next_height += 1; - } - if next_height > node.height { - h = node.height; - let (header, _) = self.chain_state.block_at(h); - node.monitor.best_block_updated(header, h); - break; - } - if next_height > h + 1 { - h = next_height - 1; - let (header, _) = self.chain_state.block_at(h); - node.monitor.best_block_updated(header, h); - } - h = next_height; - let (header, txn) = self.chain_state.block_at(h); - let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); - if !txdata.is_empty() { - node.monitor.transactions_confirmed(header, &txdata, h); - } + } + let mut h = min_monitor_height; + while h < node.height { + let mut next_height = h + 1; + while next_height <= node.height && self.chain_state.block_at(next_height).1.is_empty() + { + next_height += 1; + } + if next_height > node.height { + h = node.height; + let (header, _) = self.chain_state.block_at(h); + node.monitor.best_block_updated(header, h); + break; + } + if next_height > h + 1 { + h = next_height - 1; + let (header, _) = self.chain_state.block_at(h); node.monitor.best_block_updated(header, h); } + h = next_height; + let (header, txn) = self.chain_state.block_at(h); + let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); + if !txdata.is_empty() { + node.monitor.transactions_confirmed(header, &txdata, h); + } + node.monitor.best_block_updated(header, h); } } @@ -3450,6 +3703,9 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { fn can_send_after_settle( &mut self, source_idx: usize, dest_idx: usize, dest_chan_id: ChannelId, ) -> bool { + if self.payments.closed_channels.borrow().contains(&dest_chan_id) { + return false; + } let Some(amt) = self.probe_amount_for_direction(source_idx, dest_chan_id) else { return false; }; From 5b5597fe33c40f72d451ac3ee303eeae25cfa060 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 12:34:33 +0200 Subject: [PATCH 30/30] fuzz: add force close actions to chanmon_consistency Add explicit force-close fuzz actions for the A-B and B-C channels. Enable holder commitment and holder HTLC signing together so on-chain cleanup retries do not split the paired monitor-side signer operations. The all-node holder-signing byte remains as a compatibility alias for existing fuzz inputs. The harness records dust HTLC paths before closing so later payment resolution checks can account for claims blocked by dust outputs. --- fuzz/src/chanmon_consistency.rs | 115 ++++++++++++++++++++++++++------ 1 file changed, 93 insertions(+), 22 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index cf7a8f7e2fd..666412533e4 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -15,7 +15,8 @@ //! actions such as sending payments, handling events, or changing monitor update return values on //! a per-node basis. This should allow it to find any cases where the ordering of actions results //! 
in us getting out of sync with ourselves, and, assuming at least one of our recieve- or -//! send-side handling is correct, other peers. +//! send-side handling is correct, other peers. The fuzzer also exercises user-initiated +//! force-closes with on-chain commitment transaction confirmation. use bitcoin::amount::Amount; use bitcoin::constants::genesis_block; @@ -49,7 +50,7 @@ use lightning::events::{self, EventsProvider}; use lightning::ln::channel::{ FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS, }; -use lightning::ln::channel_state::ChannelDetails; +use lightning::ln::channel_state::{ChannelDetails, InboundHTLCDetails, OutboundHTLCDetails}; use lightning::ln::channelmanager::{ ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, RecentPaymentDetails, TrustedChannelFeatures, @@ -765,10 +766,12 @@ impl SignerProvider for KeyProvider { } } -const SUPPORTED_SIGNER_OPS: [SignerOp; 3] = [ +const SUPPORTED_SIGNER_OPS: [SignerOp; 5] = [ SignerOp::SignCounterpartyCommitment, SignerOp::GetPerCommitmentPoint, SignerOp::ReleaseCommitmentSecret, + SignerOp::SignHolderCommitment, + SignerOp::SignHolderHtlcTransaction, ]; impl KeyProvider { @@ -1078,6 +1081,12 @@ impl<'a> HarnessNode<'a> { self.node.timer_tick_occurred(); } + fn enable_holder_signer_ops(&self) { + self.keys_manager.enable_op_for_all_signers(SignerOp::SignHolderCommitment); + self.keys_manager.enable_op_for_all_signers(SignerOp::SignHolderHtlcTransaction); + self.node.signer_unblocked(None); + } + fn current_feerate_sat_per_kw(&self) -> FeeRate { self.fee_estimator.feerate_sat_per_kw() } @@ -1236,6 +1245,16 @@ impl<'a> HarnessNode<'a> { } } +#[inline] +fn inbound_dust_blocks_path(htlc: &InboundHTLCDetails) -> bool { + htlc.is_dust +} + +#[inline] +fn outbound_dust_blocks_path(htlc: &OutboundHTLCDetails) -> bool { + htlc.is_dust +} + #[derive(Copy, Clone)] enum MonitorReloadSelector { Persisted, @@ -3679,6 +3698,65 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { assert!(settled, "message-only settle exceeded budget: {}", self.pending_work_summary(),); } + fn record_force_close_dust(&self, closer_idx: usize, channel_id: ChannelId) { + if let Some(channel) = self.nodes[closer_idx] + .node + .list_channels() + .into_iter() + .find(|chan| chan.channel_id == channel_id) + { + let mut dust_parts = channel + .pending_inbound_htlcs + .iter() + .filter(|htlc| inbound_dust_blocks_path(htlc)) + .map(|htlc| (htlc.payment_hash, htlc.amount_msat)) + .chain( + channel + .pending_outbound_htlcs + .iter() + .filter(|htlc| outbound_dust_blocks_path(htlc)) + .map(|htlc| (htlc.payment_hash, htlc.amount_msat)), + ) + .collect::>(); + let payment_paths = self.payments.payment_paths_by_hash.borrow(); + let mut blocked_paths = self.payments.blocked_dust_paths_by_hash.borrow_mut(); + for (payment_hash, amount_msat) in dust_parts.drain(..) 
{ + let Some(paths) = payment_paths.get(&payment_hash) else { + continue; + }; + let blocked_for_hash = + blocked_paths.entry(payment_hash).or_insert_with(HashSet::new); + if let Some((path_idx, _)) = paths.iter().enumerate().find(|(path_idx, path)| { + !blocked_for_hash.contains(path_idx) + && path.iter().any(|(chan_id, part_amt)| { + *chan_id == channel_id && *part_amt == amount_msat + }) + }) { + blocked_for_hash.insert(path_idx); + } + } + } + } + + fn force_close( + &mut self, closer_idx: usize, channel_id: ChannelId, counterparty_idx: usize, reason: &str, + ) { + self.flush_progress(32); + self.record_force_close_dust(closer_idx, channel_id); + if self.nodes[closer_idx] + .node + .force_close_broadcasting_latest_txn( + &channel_id, + &self.nodes[counterparty_idx].get_our_node_id(), + reason.to_string(), + ) + .is_ok() + { + self.payments.closed_channels.borrow_mut().insert(channel_id); + self.flush_progress(32); + } + } + fn probe_amount_for_direction( &self, source_idx: usize, dest_chan_id: ChannelId, ) -> Option { @@ -4089,27 +4167,20 @@ pub fn do_test(data: &[u8], out: Out) { .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); harness.nodes[2].signer_unblocked(None); }, - 0xcc => { - harness.nodes[1] - .keys_manager - .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - let filter = Some((harness.nodes[0].get_our_node_id(), harness.chan_a_id())); - harness.nodes[1].signer_unblocked(filter); - }, - 0xcd => { - harness.nodes[1] - .keys_manager - .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - let filter = Some((harness.nodes[2].get_our_node_id(), harness.chan_b_id())); - harness.nodes[1].signer_unblocked(filter); - }, - 0xce => { - harness.nodes[2] - .keys_manager - .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - harness.nodes[2].signer_unblocked(None); + 0xcc => harness.nodes[0].enable_holder_signer_ops(), + 0xcd => harness.nodes[1].enable_holder_signer_ops(), + 0xce => harness.nodes[2].enable_holder_signer_ops(), + 0xcf => { + harness.nodes[0].enable_holder_signer_ops(); + harness.nodes[1].enable_holder_signer_ops(); + harness.nodes[2].enable_holder_signer_ops(); }, + 0xd0 => harness.force_close(0, harness.chan_a_id(), 1, "]]]]]]]]]"), + 0xd1 => harness.force_close(1, harness.chan_b_id(), 2, "]]]]]]]]"), + 0xd2 => harness.force_close(1, harness.chan_a_id(), 0, "]]]]]]]"), + 0xd3 => harness.force_close(2, harness.chan_b_id(), 1, "]]]]]"), + 0xd8 => harness.confirm_broadcasts_for_node(0), 0xd9 => harness.confirm_broadcasts_for_node(1), 0xda => harness.confirm_broadcasts_for_node(2),