From a36c85a92d41ad32b67761d2ff8270b6948362fa Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 5 Jan 2026 16:20:21 -0500 Subject: [PATCH 01/23] Trivial: document some fields on MonitorRestoreUpdates --- lightning/src/ln/channel.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 2762ab63fc1..8e694303942 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1137,11 +1137,14 @@ pub enum UpdateFulfillCommitFetch { /// The return value of `monitor_updating_restored` pub(super) struct MonitorRestoreUpdates { pub raa: Option, + /// A `CommitmentUpdate` to be sent to our channel peer. pub commitment_update: Option, pub commitment_order: RAACommitmentOrder, pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pub finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, + /// Inbound update_adds that are now irrevocably committed to this channel and are ready for the + /// onion to be processed in order to forward or receive the HTLC. pub pending_update_adds: Vec, pub funding_broadcastable: Option, pub channel_ready: Option, From 56b9f417d47297613dfb2fccd87e2fcc43a17f7a Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 16 Jan 2026 18:03:40 -0500 Subject: [PATCH 02/23] De-dup decode_htlcs from monitor only if channel is closed We recently added support for reconstructing ChannelManager::decode_update_add_htlcs on startup, using data present in the Channels. However, we failed to prune HTLCs from this rebuilt map if a given inbound HTLC was already forwarded to the outbound edge and in the outbound holding cell (this bug could've caused us to double-forward HTLCs, fortunately it never shipped). As part of fixing this bug, we clean up the overall pruning approach by: 1. If the Channel is open, then it is the source of truth for what HTLCs are outbound+pending (including pending in the holding cell) 2. 
If the Channel is closed, then the corresponding ChannelMonitor is the source of truth for what HTLCs are outbound+pending Previously, we would only consider the monitor's pending HTLCs, which ignored holding cell HTLCs. Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channel.rs | 39 +++++++- lightning/src/ln/channelmanager.rs | 31 +++++- lightning/src/ln/functional_test_utils.rs | 20 +++- lightning/src/ln/reload_tests.rs | 111 ++++++++++++++++++++++ 4 files changed, 194 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 8e694303942..3678ccba9a8 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -50,8 +50,8 @@ use crate::ln::channel_state::{ OutboundHTLCDetails, OutboundHTLCStateDetails, }; use crate::ln::channelmanager::{ - self, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, HTLCSource, - OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, PendingHTLCStatus, + self, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, HTLCPreviousHopData, + HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, PendingHTLCStatus, RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; @@ -7852,6 +7852,41 @@ where .collect() } + /// Useful when reconstructing the set of pending HTLC forwards when deserializing the + /// `ChannelManager`. We don't want to cache an HTLC as needing to be forwarded if it's already + /// present in the outbound edge, or else we'll double-forward. + pub(super) fn outbound_htlc_forwards(&self) -> impl Iterator + '_ { + let holding_cell_outbounds = + self.context.holding_cell_htlc_updates.iter().filter_map(|htlc| match htlc { + HTLCUpdateAwaitingACK::AddHTLC { source, .. 
} => match source { + HTLCSource::PreviousHopData(prev_hop_data) => Some(prev_hop_data.clone()), + _ => None, + }, + _ => None, + }); + let committed_outbounds = + self.context.pending_outbound_htlcs.iter().filter_map(|htlc| match &htlc.source { + HTLCSource::PreviousHopData(prev_hop_data) => Some(prev_hop_data.clone()), + _ => None, + }); + holding_cell_outbounds.chain(committed_outbounds) + } + + #[cfg(test)] + pub(super) fn test_holding_cell_outbound_htlc_forwards_count(&self) -> usize { + self.context + .holding_cell_htlc_updates + .iter() + .filter_map(|htlc| match htlc { + HTLCUpdateAwaitingACK::AddHTLC { source, .. } => match source { + HTLCSource::PreviousHopData(prev_hop_data) => Some(prev_hop_data.clone()), + _ => None, + }, + _ => None, + }) + .count() + } + /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed #[inline] fn mark_outbound_htlc_removed( diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 13197ea44ec..9e522820610 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10134,6 +10134,16 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } + #[cfg(test)] + pub(crate) fn test_holding_cell_outbound_htlc_forwards_count( + &self, cp_id: PublicKey, chan_id: ChannelId, + ) -> usize { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); + let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap(); + chan.test_holding_cell_outbound_htlc_forwards_count() + } + /// Completes channel resumption after locks have been released. 
/// /// Processes the [`PostMonitorUpdateChanResume`] returned by @@ -18600,6 +18610,20 @@ impl< let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); + if reconstruct_manager_from_monitors && !is_channel_closed { + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(funded_chan) = chan.as_funded() { + for prev_hop in funded_chan.outbound_htlc_forwards() { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop, + "HTLC already forwarded to the outbound edge", + &args.logger, + ); + } + } + } + } } for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() @@ -18613,6 +18637,10 @@ impl< info.prev_funding_outpoint == prev_hop_data.outpoint && info.prev_htlc_id == prev_hop_data.htlc_id }; + if !is_channel_closed { + continue; + } + // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from // those added HTLCs if they were already forwarded to the outbound edge. Otherwise, @@ -18626,9 +18654,6 @@ impl< ); } - if !is_channel_closed || reconstruct_manager_from_monitors { - continue; - } // The ChannelMonitor is now responsible for this HTLC's // failure/success and will let us know what its outcome is. 
If we // still have an entry for this HTLC in `forward_htlcs_legacy`, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 218779123f6..6800078fd6f 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1270,6 +1270,13 @@ pub fn check_added_monitors>(node: & } } +pub fn get_latest_mon_update_id<'a, 'b, 'c>( + node: &Node<'a, 'b, 'c>, channel_id: ChannelId, +) -> (u64, u64) { + let monitor_id_state = node.chain_monitor.latest_monitor_update_id.lock().unwrap(); + monitor_id_state.get(&channel_id).unwrap().clone() +} + fn claimed_htlc_matches_path<'a, 'b, 'c>( origin_node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], htlc: &ClaimedHTLC, ) -> bool { @@ -5172,6 +5179,9 @@ pub struct ReconnectArgs<'a, 'b, 'c, 'd> { pub pending_cell_htlc_claims: (usize, usize), pub pending_cell_htlc_fails: (usize, usize), pub pending_raa: (bool, bool), + /// If true, don't assert that pending messages are empty after the commitment dance completes. + /// Useful when holding cell HTLCs will be released and need to be handled by the caller. 
+ pub allow_post_commitment_dance_msgs: (bool, bool), } impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> { @@ -5194,6 +5204,7 @@ impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> { pending_cell_htlc_claims: (0, 0), pending_cell_htlc_fails: (0, 0), pending_raa: (false, false), + allow_post_commitment_dance_msgs: (false, false), } } } @@ -5219,6 +5230,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { pending_raa, pending_responding_commitment_signed, pending_responding_commitment_signed_dup_monitor, + allow_post_commitment_dance_msgs, } = args; connect_nodes(node_a, node_b); let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b); @@ -5402,11 +5414,13 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); - assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors( &node_b, if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 }, ); + if !allow_post_commitment_dance_msgs.0 { + assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); + } } } else { assert!(chan_msgs.2.is_none()); @@ -5516,11 +5530,13 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes node_a.node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); - assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors( &node_a, if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 }, ); + if !allow_post_commitment_dance_msgs.1 { + assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); + } } } else { assert!(chan_msgs.2.is_none()); diff --git 
a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index c0432051a62..e6061cc3106 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1319,6 +1319,117 @@ fn test_manager_persisted_post_outbound_edge_forward() { expect_payment_sent(&nodes[0], payment_preimage, None, true, true); } +#[test] +fn test_manager_persisted_post_outbound_edge_holding_cell() { + // Test that we will not double-forward an HTLC after restart if it is already in the outbound + // edge's holding cell, which was previously broken. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000); + + // Lock in the HTLC from node_a <> node_b. + let amt_msat = 1000; + let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + + // Send a 2nd HTLC node_c -> node_b, to force the first HTLC into the holding cell. 
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[1], amt_msat); + nodes[2].node.send_payment_with_route(route_2, payment_hash_2, RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + let send_event = + SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &send_event.commitment_msg); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors(&nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Add the HTLC to the outbound edge, node_b <> node_c. Force the outbound HTLC into the b<>c + // holding cell. + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 0); + assert_eq!( + nodes[1].node.test_holding_cell_outbound_htlc_forwards_count(nodes[2].node.get_our_node_id(), chan_id_2), + 1 + ); + + // Disconnect peers and reload the forwarding node_b. 
+ nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + + let node_b_encoded = nodes[1].node.encode(); + let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); + let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode(); + reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); + + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_id_2); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id_2, latest_update); + + reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0])); + + // Reconnect b<>c. Node_b has pending RAA + commitment_signed from the incomplete c->b + // commitment dance, plus an HTLC in the holding cell that will be released after the dance. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_args.pending_raa = (false, true); + reconnect_args.pending_responding_commitment_signed = (false, true); + // Node_c needs a monitor update to catch up after processing node_b's reestablish. + reconnect_args.expect_renegotiated_funding_locked_monitor_update = (false, true); + // The holding cell HTLC will be released after the commitment dance - handle it below. + reconnect_args.allow_post_commitment_dance_msgs = (false, true); + reconnect_nodes(reconnect_args); + + // The holding cell HTLC was released during the reconnect. Complete its commitment dance. + let holding_cell_htlc_msgs = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(holding_cell_htlc_msgs.len(), 1); + match &holding_cell_htlc_msgs[0] { + MessageSendEvent::UpdateHTLCs { node_id, updates, .. 
} => { + assert_eq!(*node_id, nodes[2].node.get_our_node_id()); + assert_eq!(updates.update_add_htlcs.len(), 1); + nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[2], &nodes[1], &updates.commitment_signed, false, false); + } + _ => panic!("Unexpected message: {:?}", holding_cell_htlc_msgs[0]), + } + + // Ensure node_b won't double-forward the outbound HTLC (this was previously broken). + nodes[1].node.process_pending_htlc_forwards(); + let msgs = nodes[1].node.get_and_clear_pending_msg_events(); + assert!(msgs.is_empty(), "Expected 0 messages, got {:?}", msgs); + + // The a->b->c HTLC is now committed on node_c. The c->b HTLC is committed on node_b. + // Both payments should now be claimable. + expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id()); + expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, amt_msat, None, nodes[1].node.get_our_node_id()); + + // Claim the a->b->c payment on node_c. + let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage)); + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); + + // Claim the c->b payment on node_b. 
+ nodes[1].node.claim_funds(payment_preimage_2); + expect_payment_claimed!(nodes[1], payment_hash_2, amt_msat); + check_added_monitors(&nodes[1], 1); + let mut update = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); + nodes[2].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), update.update_fulfill_htlcs.remove(0)); + do_commitment_signed_dance(&nodes[2], &nodes[1], &update.commitment_signed, false, false); + expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true); +} + #[test] fn test_reload_partial_funding_batch() { let chanmon_cfgs = create_chanmon_cfgs(3); From 7304cc9bebbd27bc7a766944d683bb3e02392dcb Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 2 Feb 2026 16:23:51 -0500 Subject: [PATCH 03/23] Simplify channel_closed check on manager read This cleanup falls out of the changes made in the previous commit. Separated out here for reviewability. --- lightning/src/ln/channelmanager.rs | 248 ++++++++++++++--------------- 1 file changed, 124 insertions(+), 124 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 9e522820610..a0fb1369cf2 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18626,47 +18626,49 @@ impl< } } - for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() - { - let logger = - WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash)); - let htlc_id = SentHTLCId::from_source(&htlc_source); - match htlc_source { - HTLCSource::PreviousHopData(prev_hop_data) => { - let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { - info.prev_funding_outpoint == prev_hop_data.outpoint - && info.prev_htlc_id == prev_hop_data.htlc_id - }; - if !is_channel_closed { - continue; - } + if is_channel_closed { + for (htlc_source, (htlc, preimage_opt)) in + monitor.get_all_current_outbound_htlcs() + { + let logger = WithChannelMonitor::from( + &args.logger, + monitor, + 
Some(htlc.payment_hash), + ); + let htlc_id = SentHTLCId::from_source(&htlc_source); + match htlc_source { + HTLCSource::PreviousHopData(prev_hop_data) => { + let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { + info.prev_funding_outpoint == prev_hop_data.outpoint + && info.prev_htlc_id == prev_hop_data.htlc_id + }; + + // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed + // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from + // those added HTLCs if they were already forwarded to the outbound edge. Otherwise, + // we'll double-forward. + if reconstruct_manager_from_monitors { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop_data, + "HTLC already forwarded to the outbound edge", + &&logger, + ); + } - // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed - // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from - // those added HTLCs if they were already forwarded to the outbound edge. Otherwise, - // we'll double-forward. - if reconstruct_manager_from_monitors { + // The ChannelMonitor is now responsible for this HTLC's + // failure/success and will let us know what its outcome is. If we + // still have an entry for this HTLC in `forward_htlcs_legacy`, + // `pending_intercepted_htlcs_legacy`, or + // `decode_update_add_htlcs_legacy`, we were apparently not persisted + // after the monitor was when forwarding the payment. dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, + &mut decode_update_add_htlcs_legacy, &prev_hop_data, - "HTLC already forwarded to the outbound edge", + "HTLC was forwarded to the closed channel", &&logger, ); - } - - // The ChannelMonitor is now responsible for this HTLC's - // failure/success and will let us know what its outcome is. 
If we - // still have an entry for this HTLC in `forward_htlcs_legacy`, - // `pending_intercepted_htlcs_legacy`, or - // `decode_update_add_htlcs_legacy`, we were apparently not persisted - // after the monitor was when forwarding the payment. - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs_legacy, - &prev_hop_data, - "HTLC was forwarded to the closed channel", - &&logger, - ); - forward_htlcs_legacy.retain(|_, forwards| { + forward_htlcs_legacy.retain(|_, forwards| { forwards.retain(|forward| { if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { if pending_forward_matches_htlc(&htlc_info) { @@ -18678,7 +18680,7 @@ impl< }); !forwards.is_empty() }); - pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { + pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { if pending_forward_matches_htlc(&htlc_info) { log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", &htlc.payment_hash, &monitor.channel_id()); @@ -18690,113 +18692,111 @@ impl< false } else { true } }); - }, - HTLCSource::OutboundRoute { - payment_id, - session_priv, - path, - bolt12_invoice, - .. - } => { - if !is_channel_closed { - continue; - } - if let Some(preimage) = preimage_opt { - let pending_events = Mutex::new(pending_events_read); - let update = PaymentCompleteUpdate { - counterparty_node_id: monitor.get_counterparty_node_id(), - channel_funding_outpoint: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - htlc_id, - }; - let mut compl_action = Some( + }, + HTLCSource::OutboundRoute { + payment_id, + session_priv, + path, + bolt12_invoice, + .. 
+ } => { + if let Some(preimage) = preimage_opt { + let pending_events = Mutex::new(pending_events_read); + let update = PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id, + }; + let mut compl_action = Some( EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update) ); - pending_outbounds.claim_htlc( - payment_id, - preimage, - bolt12_invoice, - session_priv, - path, - true, - &mut compl_action, - &pending_events, - &logger, - ); - // If the completion action was not consumed, then there was no - // payment to claim, and we need to tell the `ChannelMonitor` - // we don't need to hear about the HTLC again, at least as long - // as the PaymentSent event isn't still sitting around in our - // event queue. - let have_action = if compl_action.is_some() { - let pending_events = pending_events.lock().unwrap(); - pending_events.iter().any(|(_, act)| *act == compl_action) - } else { - false - }; - if !have_action && compl_action.is_some() { - let mut peer_state = per_peer_state + pending_outbounds.claim_htlc( + payment_id, + preimage, + bolt12_invoice, + session_priv, + path, + true, + &mut compl_action, + &pending_events, + &logger, + ); + // If the completion action was not consumed, then there was no + // payment to claim, and we need to tell the `ChannelMonitor` + // we don't need to hear about the HTLC again, at least as long + // as the PaymentSent event isn't still sitting around in our + // event queue. 
+ let have_action = if compl_action.is_some() { + let pending_events = pending_events.lock().unwrap(); + pending_events.iter().any(|(_, act)| *act == compl_action) + } else { + false + }; + if !have_action && compl_action.is_some() { + let mut peer_state = per_peer_state .get(&counterparty_node_id) .map(|state| state.lock().unwrap()) .expect( "Channels originating a preimage must have peer state", ); - let update_id = peer_state + let update_id = peer_state .closed_channel_monitor_update_ids .get_mut(channel_id) .expect( "Channels originating a preimage must have a monitor", ); - // Note that for channels closed pre-0.1, the latest - // update_id is `u64::MAX`. - *update_id = update_id.saturating_add(1); - - pending_background_events.push( - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id: monitor - .get_counterparty_node_id(), - funding_txo: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - update: ChannelMonitorUpdate { - update_id: *update_id, - channel_id: Some(monitor.channel_id()), - updates: vec![ + // Note that for channels closed pre-0.1, the latest + // update_id is `u64::MAX`. 
+ *update_id = update_id.saturating_add(1); + + pending_background_events.push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id: monitor + .get_counterparty_node_id(), + funding_txo: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + update: ChannelMonitorUpdate { + update_id: *update_id, + channel_id: Some(monitor.channel_id()), + updates: vec![ ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id, }, ], + }, }, - }, - ); + ); + } + pending_events_read = pending_events.into_inner().unwrap(); } - pending_events_read = pending_events.into_inner().unwrap(); - } - }, + }, + } } - } - for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { - let logger = - WithChannelMonitor::from(&args.logger, monitor, Some(payment_hash)); - log_info!( - logger, - "Failing HTLC with payment hash {} as it was resolved on-chain.", - payment_hash - ); - let completion_action = Some(PaymentCompleteUpdate { - counterparty_node_id: monitor.get_counterparty_node_id(), - channel_funding_outpoint: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - htlc_id: SentHTLCId::from_source(&htlc_source), - }); + for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { + let logger = + WithChannelMonitor::from(&args.logger, monitor, Some(payment_hash)); + log_info!( + logger, + "Failing HTLC with payment hash {} as it was resolved on-chain.", + payment_hash + ); + let completion_action = Some(PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id: SentHTLCId::from_source(&htlc_source), + }); - failed_htlcs.push(( - htlc_source, - payment_hash, - monitor.get_counterparty_node_id(), - monitor.channel_id(), - LocalHTLCFailureReason::OnChainTimeout, - completion_action, - )); + failed_htlcs.push(( + htlc_source, + payment_hash, + 
monitor.get_counterparty_node_id(), + monitor.channel_id(), + LocalHTLCFailureReason::OnChainTimeout, + completion_action, + )); + } } // Whether the downstream channel was closed or not, try to re-apply any payment From 37375ca386298c86417d3c7ff750fa64a9a920d3 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 28 Jan 2026 19:03:34 -0500 Subject: [PATCH 04/23] Don't double-forward inbounds resolved in holding cell We recently added support for reconstructing ChannelManager::decode_update_add_htlcs on startup, using data present in the Channels. However, we failed to prune HTLCs from this rebuilt map if a given HTLC was already forwarded+removed from the outbound edge and resolved in the inbound edge's holding cell. Here we fix this bug that would have caused us to double-forward inbound HTLC forwards, which fortunately was not shipped. Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channel.rs | 18 ++++++- lightning/src/ln/channelmanager.rs | 2 +- lightning/src/ln/reload_tests.rs | 81 ++++++++++++++++++++++++++++++ 3 files changed, 99 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 3678ccba9a8..622227b2210 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7839,12 +7839,28 @@ where } /// Useful for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. - pub(super) fn get_inbound_committed_update_adds(&self) -> Vec { + pub(super) fn inbound_committed_unresolved_htlcs(&self) -> Vec { + // We don't want to return an HTLC as needing processing if it already has a resolution that's + // pending in the holding cell. + let htlc_resolution_in_holding_cell = |id: u64| -> bool { + self.context.holding_cell_htlc_updates.iter().any(|holding_cell_htlc| { + match holding_cell_htlc { + HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => *htlc_id == id, + HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. 
} => *htlc_id == id, + HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => *htlc_id == id, + HTLCUpdateAwaitingACK::AddHTLC { .. } => false, + } + }) + }; + self.context .pending_inbound_htlcs .iter() .filter_map(|htlc| match htlc.state { InboundHTLCState::Committed { ref update_add_htlc_opt } => { + if htlc_resolution_in_holding_cell(htlc.htlc_id) { + return None; + } update_add_htlc_opt.clone() }, _ => None, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a0fb1369cf2..2665bf11740 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18558,7 +18558,7 @@ impl< if let Some(chan) = peer_state.channel_by_id.get(channel_id) { if let Some(funded_chan) = chan.as_funded() { let inbound_committed_update_adds = - funded_chan.get_inbound_committed_update_adds(); + funded_chan.inbound_committed_unresolved_htlcs(); if !inbound_committed_update_adds.is_empty() { // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized // `Channel`, as part of removing the requirement to regularly persist the diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index e6061cc3106..826fdbf9b0c 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1758,3 +1758,84 @@ fn test_hold_completed_inflight_monitor_updates_upon_manager_reload() { reconnect_nodes(reconnect_args); } +#[test] +fn outbound_removed_holding_cell_resolved_no_double_forward() { + // Test that if a forwarding node has an HTLC that is fully removed on the outbound edge + // but where the inbound edge resolution is in the holding cell, and we reload the node in this + // state, that node will not double-forward the HTLC. 
+ + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2 = chan_1_2.2; + + // Send a payment from nodes[0] to nodes[2] via nodes[1]. + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); + send_along_route_with_secret( + &nodes[0], route, &[&[&nodes[1], &nodes[2]]], 1_000_000, payment_hash, payment_secret, + ); + + // Claim the payment on nodes[2]. + nodes[2].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); + + // Disconnect nodes[0] from nodes[1] BEFORE processing the fulfill. + // This forces the inbound fulfill resolution go to into nodes[1]'s holding cell for the inbound + // channel. + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + // Process the fulfill from nodes[2] to nodes[1]. 
+ let updates_2_1 = get_htlc_update_msgs(&nodes[2], &node_1_id); + nodes[1].node.handle_update_fulfill_htlc(node_2_id, updates_2_1.update_fulfill_htlcs[0].clone()); + check_added_monitors(&nodes[1], 1); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, false, false); + expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); + + // At this point: + // - The outbound HTLC nodes[1]->nodes[2] is resolved and removed + // - The inbound HTLC nodes[0]->nodes[1] is still in a Committed state, with the fulfill + // resolution in nodes[1]'s chan_0_1 holding cell + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_serialized = get_monitor!(nodes[1], chan_id_1_2).encode(); + + // Reload nodes[1]. + // During deserialization, we previously would have not noticed that the nodes[0]<>nodes[1] HTLC + // had a resolution pending in the holding cell, and reconstructed the ChannelManager's pending + // HTLC state indicating that the HTLC still needed to be forwarded to the outbound edge. + reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized + ); + + // Check that nodes[1] doesn't double-forward the HTLC. + nodes[1].node.process_pending_htlc_forwards(); + + // Reconnect nodes[1] to nodes[0]. The claim should be in nodes[1]'s holding cell. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_claims = (0, 1); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received the fulfill and generate PaymentSent. 
+ expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} From a5c8bceefbd3765506f6d50590d91083221952ed Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 21 Jan 2026 15:52:42 -0500 Subject: [PATCH 05/23] Mark legacy pre-0.3 inbound htlcs on persist In 0.3+, we are taking steps to remove the requirement of regularly persisting the ChannelManager and instead rebuild the set of HTLC forwards (and the manager generally) from Channel{Monitor} data. We previously merged support for reconstructing the ChannelManager::decode_update_add_htlcs map from channel data, using a new HTLC onion field that will be present for inbound HTLCs received on 0.3+ only. However, we now want to add support for pruning this field once it's no longer needed so it doesn't get persisted every time the manager gets persisted. At the same time, in a future LDK version we need to detect whether the field was ever present to begin with to prevent upgrading with legacy HTLCs present. We accomplish both by converting the plain update_add option that was previously serialized to an enum that can indicate whether the HTLC is from 0.2- versus 0.3+-with-onion-pruned (a variant for the latter is added in the next commit). Actual pruning of the new update_add field is added in the next commit. 
--- lightning/src/ln/channel.rs | 98 +++++++++++++++++++++++++------------ lightning/src/util/ser.rs | 20 +++++--- 2 files changed, 80 insertions(+), 38 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 622227b2210..8a4ef19fe37 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -85,6 +85,7 @@ use crate::util::errors::APIError; use crate::util::logger::{Logger, Record, WithContext}; use crate::util::scid_utils::{block_from_scid, scid_from_parts}; use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, Writeable, Writer}; +use crate::{impl_readable_for_vec, impl_writeable_for_vec}; use alloc::collections::{btree_map, BTreeMap}; @@ -216,7 +217,7 @@ enum InboundHTLCState { /// Used to rebuild `ChannelManager` HTLC state on restart. Previously the manager would track /// and persist all HTLC forwards and receives itself, but newer LDK versions avoid relying on /// its persistence and instead reconstruct state based on `Channel` and `ChannelMonitor` data. - update_add_htlc_opt: Option, + update_add_htlc: InboundUpdateAdd, }, /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we /// created it we would have put it in the holding cell instead). When they next revoke_and_ack @@ -307,6 +308,31 @@ impl InboundHTLCState { } } +/// A field of `InboundHTLCState::Committed` containing the HTLC's `update_add_htlc` message. If +/// the HTLC is a forward and gets irrevocably committed to the outbound edge, we convert to +/// `InboundUpdateAdd::Forwarded`, thus pruning the onion and not persisting it on every +/// `ChannelManager` persist. +/// +/// Useful for reconstructing the pending HTLC set on startup. +#[derive(Debug)] +enum InboundUpdateAdd { + /// The inbound committed HTLC's update_add_htlc message. 
+ WithOnion { update_add_htlc: msgs::UpdateAddHTLC }, + /// This HTLC was received pre-LDK 0.3, before we started persisting the onion for inbound + /// committed HTLCs. + Legacy, +} + +impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, + (0, WithOnion) => { + (0, update_add_htlc, required), + }, + (2, Legacy) => {}, +); + +impl_writeable_for_vec!(&InboundUpdateAdd); +impl_readable_for_vec!(InboundUpdateAdd); + #[cfg_attr(test, derive(Debug))] struct InboundHTLCOutput { htlc_id: u64, @@ -7856,12 +7882,14 @@ where self.context .pending_inbound_htlcs .iter() - .filter_map(|htlc| match htlc.state { - InboundHTLCState::Committed { ref update_add_htlc_opt } => { + .filter_map(|htlc| match &htlc.state { + InboundHTLCState::Committed { + update_add_htlc: InboundUpdateAdd::WithOnion { update_add_htlc }, + } => { if htlc_resolution_in_holding_cell(htlc.htlc_id) { return None; } - update_add_htlc_opt.clone() + Some(update_add_htlc.clone()) }, _ => None, }) @@ -8863,7 +8891,8 @@ where false }; if swap { - let mut state = InboundHTLCState::Committed { update_add_htlc_opt: None }; + let mut state = + InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }; mem::swap(&mut state, &mut htlc.state); if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state { @@ -8904,9 +8933,8 @@ where to_forward_infos.push((forward_info, htlc.htlc_id)); htlc.state = InboundHTLCState::Committed { // HTLCs will only be in state `InboundHTLCResolution::Resolved` if they were - // received on an old pre-0.0.123 version of LDK. In this case, the HTLC is - // required to be resolved prior to upgrading to 0.1+ per CHANGELOG.md. - update_add_htlc_opt: None, + // received on LDK 0.1-. 
+ update_add_htlc: InboundUpdateAdd::Legacy, }; }, } @@ -8915,7 +8943,9 @@ where log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash); pending_update_adds.push(update_add_htlc.clone()); htlc.state = InboundHTLCState::Committed { - update_add_htlc_opt: Some(update_add_htlc), + update_add_htlc: InboundUpdateAdd::WithOnion { + update_add_htlc, + }, }; }, } @@ -14602,7 +14632,7 @@ impl Writeable for FundedChannel { } } let mut removed_htlc_attribution_data: Vec<&Option> = Vec::new(); - let mut inbound_committed_update_adds: Vec<&Option> = Vec::new(); + let mut inbound_committed_update_adds: Vec<&InboundUpdateAdd> = Vec::new(); (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?; for htlc in self.context.pending_inbound_htlcs.iter() { if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state { @@ -14622,9 +14652,9 @@ impl Writeable for FundedChannel { 2u8.write(writer)?; htlc_resolution.write(writer)?; }, - &InboundHTLCState::Committed { ref update_add_htlc_opt } => { + &InboundHTLCState::Committed { ref update_add_htlc } => { 3u8.write(writer)?; - inbound_committed_update_adds.push(update_add_htlc_opt); + inbound_committed_update_adds.push(update_add_htlc); }, &InboundHTLCState::LocalRemoved(ref removal_reason) => { 4u8.write(writer)?; @@ -15093,7 +15123,7 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> }; InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) }, - 3 => InboundHTLCState::Committed { update_add_htlc_opt: None }, + 3 => InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }, 4 => { let reason = match ::read(reader)? 
{ 0 => InboundHTLCRemovalReason::FailRelay(msgs::OnionErrorPacket { @@ -15399,7 +15429,7 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> let mut pending_outbound_held_htlc_flags_opt: Option>> = None; let mut holding_cell_held_htlc_flags_opt: Option>> = None; - let mut inbound_committed_update_adds_opt: Option>> = None; + let mut inbound_committed_update_adds_opt: Option> = None; let mut holding_cell_accountable: Option> = None; let mut pending_outbound_accountable: Option> = None; @@ -15583,8 +15613,8 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> if let Some(update_adds) = inbound_committed_update_adds_opt { let mut iter = update_adds.into_iter(); for htlc in pending_inbound_htlcs.iter_mut() { - if let InboundHTLCState::Committed { ref mut update_add_htlc_opt } = htlc.state { - *update_add_htlc_opt = iter.next().ok_or(DecodeError::InvalidValue)?; + if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { + *update_add_htlc = iter.next().ok_or(DecodeError::InvalidValue)?; } } if iter.next().is_some() { @@ -15952,8 +15982,8 @@ mod tests { use crate::ln::chan_utils::{self, commit_tx_fee_sat, ChannelTransactionParameters}; use crate::ln::channel::{ AwaitingChannelReadyFlags, ChannelState, FundedChannel, HTLCCandidate, HTLCInitiator, - HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, InboundV1Channel, - OutboundHTLCOutput, OutboundHTLCState, OutboundV1Channel, + HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, InboundUpdateAdd, + InboundV1Channel, OutboundHTLCOutput, OutboundHTLCState, OutboundV1Channel, }; use crate::ln::channel::{ MAX_FUNDING_SATOSHIS_NO_WUMBO, MIN_THEIR_CHAN_RESERVE_SATOSHIS, @@ -15996,6 +16026,10 @@ mod tests { use bitcoin::{ScriptBuf, WPubkeyHash, WitnessProgram, WitnessVersion}; use std::cmp; + fn dummy_inbound_update_add() -> InboundUpdateAdd { + InboundUpdateAdd::Legacy + } + #[test] #[rustfmt::skip] fn test_channel_state_order() { @@ -16198,7 +16232,7 @@ mod tests { amount_msat: 
htlc_amount_msat, payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()), cltv_expiry: 300000000, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput { @@ -17047,7 +17081,7 @@ mod tests { amount_msat: 1000000, cltv_expiry: 500, payment_hash: PaymentHash::from(payment_preimage_0), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); let payment_preimage_1 = @@ -17057,7 +17091,7 @@ mod tests { amount_msat: 2000000, cltv_expiry: 501, payment_hash: PaymentHash::from(payment_preimage_1), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); let payment_preimage_2 = @@ -17099,7 +17133,7 @@ mod tests { amount_msat: 4000000, cltv_expiry: 504, payment_hash: PaymentHash::from(payment_preimage_4), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); // commitment tx with all five HTLCs untrimmed (minimum feerate) @@ -17488,7 +17522,7 @@ mod tests { amount_msat: 2000000, cltv_expiry: 501, payment_hash: PaymentHash::from(payment_preimage_1), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); chan.context.pending_outbound_htlcs.clear(); @@ -17741,7 +17775,7 @@ mod tests { amount_msat: 5000000, cltv_expiry: 920150, payment_hash: PaymentHash::from(htlc_in_preimage), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, })); chan.context.pending_outbound_htlcs.extend( @@ -17805,7 +17839,7 
@@ mod tests { amount_msat, cltv_expiry: 920150, payment_hash: PaymentHash::from(htlc_in_preimage), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -17872,7 +17906,7 @@ mod tests { amount_msat: 100000, cltv_expiry: 920125, payment_hash: htlc_0_in_hash, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); let htlc_1_in_preimage = @@ -17890,7 +17924,7 @@ mod tests { amount_msat: 49900000, cltv_expiry: 920125, payment_hash: htlc_1_in_hash, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); chan.context.pending_outbound_htlcs.extend( @@ -17943,7 +17977,7 @@ mod tests { amount_msat: 30000, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -17985,7 +18019,7 @@ mod tests { amount_msat: 29525, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -18023,7 +18057,7 @@ mod tests { amount_msat: 29525, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -18061,7 +18095,7 @@ mod tests { amount_msat: 29753, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -18114,7 +18148,7 @@ mod tests { amount_msat, cltv_expiry, payment_hash, - state: InboundHTLCState::Committed { 
update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }), ); diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index f821aa5afc0..6579c0353a3 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -979,13 +979,15 @@ where } } -// Vectors +/// Write number of items in a vec followed by each element, without writing a length-prefix for +/// each element. +#[macro_export] macro_rules! impl_writeable_for_vec { ($ty: ty $(, $name: ident)*) => { impl<$($name : Writeable),*> Writeable for Vec<$ty> { #[inline] fn write(&self, w: &mut W) -> Result<(), io::Error> { - CollectionLength(self.len() as u64).write(w)?; + $crate::util::ser::CollectionLength(self.len() as u64).write(w)?; for elem in self.iter() { elem.write(w)?; } @@ -994,15 +996,21 @@ macro_rules! impl_writeable_for_vec { } } } +/// Read the number of items in a vec followed by each element, without reading a length prefix for +/// each element. +/// +/// Each element is read with `MaybeReadable`, meaning if an element cannot be read then it is +/// skipped without returning `DecodeError::InvalidValue`. +#[macro_export] macro_rules! impl_readable_for_vec { ($ty: ty $(, $name: ident)*) => { impl<$($name : Readable),*> Readable for Vec<$ty> { #[inline] - fn read(r: &mut R) -> Result { - let len: CollectionLength = Readable::read(r)?; - let mut ret = Vec::with_capacity(cmp::min(len.0 as usize, MAX_BUF_SIZE / core::mem::size_of::<$ty>())); + fn read(r: &mut R) -> Result { + let len: $crate::util::ser::CollectionLength = Readable::read(r)?; + let mut ret = Vec::with_capacity(cmp::min(len.0 as usize, $crate::util::ser::MAX_BUF_SIZE / core::mem::size_of::<$ty>())); for _ in 0..len.0 { - if let Some(val) = MaybeReadable::read(r)? { + if let Some(val) = $crate::util::ser::MaybeReadable::read(r)? 
{ ret.push(val); } } From 3cc64f042fd13e16f37c5e2f665f40f2680bb430 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 5 Jan 2026 16:28:00 -0500 Subject: [PATCH 06/23] Prune inbound HTLC onions once forwarded We store inbound committed HTLCs' onions in Channels for use in reconstructing the pending HTLC set on ChannelManager read. If an HTLC has been forwarded to the outbound edge, we no longer need to persist the inbound edge's onion and can prune it here. --- lightning/src/ln/channel.rs | 48 ++++++++++++++++++++++++++++-- lightning/src/ln/channelmanager.rs | 44 +++++++++++++++++++++++++++ lightning/src/ln/reload_tests.rs | 20 +++++++++++++ 3 files changed, 110 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 8a4ef19fe37..8a2bc30af91 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -318,6 +318,18 @@ impl InboundHTLCState { enum InboundUpdateAdd { /// The inbound committed HTLC's update_add_htlc message. WithOnion { update_add_htlc: msgs::UpdateAddHTLC }, + /// This inbound HTLC is a forward that was irrevocably committed to the outbound edge, allowing + /// its onion to be pruned and no longer persisted. + Forwarded { + /// Useful if we need to fail or claim this HTLC backwards after restart, if it's missing in the + /// outbound edge. + hop_data: HTLCPreviousHopData, + /// Useful if we need to claim this HTLC backwards after a restart and it's missing in the + /// outbound edge, to generate an accurate [`Event::PaymentForwarded`]. + /// + /// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded + outbound_amt_msat: u64, + }, /// This HTLC was received pre-LDK 0.3, before we started persisting the onion for inbound /// committed HTLCs. 
Legacy, @@ -328,6 +340,10 @@ impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, (0, update_add_htlc, required), }, (2, Legacy) => {}, + (4, Forwarded) => { + (0, hop_data, required), + (2, outbound_amt_msat, required), + }, ); impl_writeable_for_vec!(&InboundUpdateAdd); @@ -1177,6 +1193,10 @@ pub(super) struct MonitorRestoreUpdates { pub channel_ready_order: ChannelReadyOrder, pub announcement_sigs: Option, pub tx_signatures: Option, + /// The sources of outbound HTLCs that were forwarded and irrevocably committed on this channel + /// (the outbound edge), along with their outbound amounts. Useful to store in the inbound HTLC + /// to ensure it gets resolved. + pub committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, } /// The return value of `signer_maybe_unblocked` @@ -7931,6 +7951,22 @@ where .count() } + /// This inbound HTLC was irrevocably forwarded to the outbound edge, so we no longer need to + /// persist its onion. + pub(super) fn prune_inbound_htlc_onion( + &mut self, htlc_id: u64, hop_data: HTLCPreviousHopData, outbound_amt_msat: u64, + ) { + for htlc in self.context.pending_inbound_htlcs.iter_mut() { + if htlc.htlc_id == htlc_id { + if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { + *update_add_htlc = InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat }; + return; + } + } + } + debug_assert!(false, "If we go to prune an inbound HTLC it should be present") + } + /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed #[inline] fn mark_outbound_htlc_removed( @@ -9532,6 +9568,14 @@ where mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills); let mut pending_update_adds = Vec::new(); mem::swap(&mut pending_update_adds, &mut self.context.monitor_pending_update_adds); + let committed_outbound_htlc_sources = self.context.pending_outbound_htlcs.iter().filter_map(|htlc| { + if let &OutboundHTLCState::LocalAnnounced(_) = &htlc.state { + if 
let HTLCSource::PreviousHopData(prev_hop_data) = &htlc.source { + return Some((prev_hop_data.clone(), htlc.amount_msat)) + } + } + None + }).collect(); if self.context.channel_state.is_peer_disconnected() { self.context.monitor_pending_revoke_and_ack = false; @@ -9540,7 +9584,7 @@ where raa: None, commitment_update: None, commitment_order: RAACommitmentOrder::RevokeAndACKFirst, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None, - channel_ready_order, + channel_ready_order, committed_outbound_htlc_sources }; } @@ -9571,7 +9615,7 @@ where MonitorRestoreUpdates { raa, commitment_update, commitment_order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures, - channel_ready_order, + channel_ready_order, committed_outbound_htlc_sources } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 2665bf11740..e50a9b8a2c3 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1408,6 +1408,7 @@ enum PostMonitorUpdateChanResume { decode_update_add_htlcs: Option<(u64, Vec)>, finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, }, } @@ -9586,6 +9587,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ decode_update_add_htlcs: Option<(u64, Vec)>, finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, ) { // If the channel belongs to a batch funding transaction, the progress of the batch // should be updated as we have received funding_signed and persisted the monitor. @@ -9656,6 +9658,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ }; self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None); } + self.prune_persisted_inbound_htlc_onions(committed_outbound_htlc_sources); } fn handle_monitor_update_completion_actions< @@ -10130,6 +10133,33 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ decode_update_add_htlcs, finalized_claimed_htlcs: updates.finalized_claimed_htlcs, failed_htlcs: updates.failed_htlcs, + committed_outbound_htlc_sources: updates.committed_outbound_htlc_sources, + } + } + } + + /// We store inbound committed HTLCs' onions in `Channel`s for use in reconstructing the pending + /// HTLC set on `ChannelManager` read. If an HTLC has been irrevocably forwarded to the outbound + /// edge, we no longer need to persist the inbound edge's onion and can prune it here. + fn prune_persisted_inbound_htlc_onions( + &self, committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, + ) { + let per_peer_state = self.per_peer_state.read().unwrap(); + for (source, outbound_amt_msat) in committed_outbound_htlc_sources { + let counterparty_node_id = match source.counterparty_node_id.as_ref() { + Some(id) => id, + None => continue, + }; + let mut peer_state = + match per_peer_state.get(counterparty_node_id).map(|state| state.lock().unwrap()) { + Some(peer_state) => peer_state, + None => continue, + }; + + if let Some(chan) = + peer_state.channel_by_id.get_mut(&source.channel_id).and_then(|c| c.as_funded_mut()) + { + chan.prune_inbound_htlc_onion(source.htlc_id, source, outbound_amt_msat); } } } @@ -10144,6 +10174,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ chan.test_holding_cell_outbound_htlc_forwards_count() } + #[cfg(test)] + /// Useful to check that we prune inbound HTLC onions once they are irrevocably forwarded to the + /// outbound edge, see [`Self::prune_persisted_inbound_htlc_onions`]. 
+ pub(crate) fn test_get_inbound_committed_htlcs_with_onion( + &self, cp_id: PublicKey, chan_id: ChannelId, + ) -> usize { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); + let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap(); + chan.inbound_committed_unresolved_htlcs().len() + } + /// Completes channel resumption after locks have been released. /// /// Processes the [`PostMonitorUpdateChanResume`] returned by @@ -10169,6 +10211,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ decode_update_add_htlcs, finalized_claimed_htlcs, failed_htlcs, + committed_outbound_htlc_sources, } => { self.post_monitor_update_unlock( channel_id, @@ -10179,6 +10222,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ decode_update_add_htlcs, finalized_claimed_htlcs, failed_htlcs, + committed_outbound_htlc_sources, ); }, } diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 826fdbf9b0c..360ffe2dc1f 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1211,6 +1211,13 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + // While an inbound HTLC is committed in a channel but not yet forwarded, we store its onion in + // the `Channel` in case we need to remember it on restart. Once it's irrevocably forwarded to the + // outbound edge, we can prune it on the inbound edge. 
+ assert_eq!( + nodes[1].node.test_get_inbound_committed_htlcs_with_onion(nodes[0].node.get_our_node_id(), chan_id_1), + 1 + ); // Decode the HTLC onion but don't forward it to the next hop, such that the HTLC ends up in // `ChannelManager::forward_htlcs` or `ChannelManager::pending_intercepted_htlcs`. @@ -1232,6 +1239,13 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { args_b_c.send_announcement_sigs = (true, true); reconnect_nodes(args_b_c); + // Before an inbound HTLC is irrevocably forwarded, its onion should still be persisted within the + // inbound edge channel. + assert_eq!( + nodes[1].node.test_get_inbound_committed_htlcs_with_onion(nodes[0].node.get_our_node_id(), chan_id_1), + 1 + ); + // Forward the HTLC and ensure we can claim it post-reload. nodes[1].node.process_pending_htlc_forwards(); @@ -1254,6 +1268,12 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates.commitment_signed, false, false); expect_and_process_pending_htlcs(&nodes[2], false); + // After an inbound HTLC is irrevocably forwarded, its onion should be pruned within the inbound + // edge channel. + assert_eq!( + nodes[1].node.test_get_inbound_committed_htlcs_with_onion(nodes[0].node.get_our_node_id(), chan_id_1), + 0 + ); expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id()); let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; From d435e5bb46bf281ab5ec0d1d3be8ba809a743588 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 2 Feb 2026 17:16:04 -0500 Subject: [PATCH 07/23] Deterministic reconstruct_manager option in tests We recently merged (test-only, for now) support for the ChannelManager reconstructing its set of pending HTLCs from Channel{Monitor} data, rather than using its own persisted maps. 
But because we want test coverage of both the new reconstruction codepaths as well as the old persisted map codepaths, in tests we would decide between those two sets of codepaths randomly. We now want to add some tests that require using the new codepaths, so here we add an option to explicitly set whether to reconstruct or not rather than choosing randomly. --- lightning/src/ln/channelmanager.rs | 55 ++++++++++++++--------- lightning/src/ln/functional_test_utils.rs | 8 +++- lightning/src/ln/reload_tests.rs | 2 + 3 files changed, 43 insertions(+), 22 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e50a9b8a2c3..569bb371459 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17819,6 +17819,15 @@ pub struct ChannelManagerReadArgs< /// /// This is not exported to bindings users because we have no HashMap bindings pub channel_monitors: HashMap>, + + /// Whether the `ChannelManager` should attempt to reconstruct its set of pending HTLCs from + /// `Channel{Monitor}` data rather than its own persisted maps, which is planned to become + /// the default behavior in upcoming versions. + /// + /// If `None`, whether we reconstruct or use the legacy maps will be decided randomly during + /// `ChannelManager::from_channel_manager_data`. 
+ #[cfg(test)] + pub reconstruct_manager_from_monitors: Option, } impl< @@ -17856,6 +17865,8 @@ impl< channel_monitors: hash_map_from_iter( channel_monitors.drain(..).map(|monitor| (monitor.channel_id(), monitor)), ), + #[cfg(test)] + reconstruct_manager_from_monitors: None, } } } @@ -18553,26 +18564,30 @@ impl< #[cfg(not(test))] let reconstruct_manager_from_monitors = false; #[cfg(test)] - let reconstruct_manager_from_monitors = { - use core::hash::{BuildHasher, Hasher}; - - match std::env::var("LDK_TEST_REBUILD_MGR_FROM_MONITORS") { - Ok(val) => match val.as_str() { - "1" => true, - "0" => false, - _ => panic!("LDK_TEST_REBUILD_MGR_FROM_MONITORS must be 0 or 1, got: {}", val), - }, - Err(_) => { - let rand_val = - std::collections::hash_map::RandomState::new().build_hasher().finish(); - if rand_val % 2 == 0 { - true - } else { - false - } - }, - } - }; + let reconstruct_manager_from_monitors = + args.reconstruct_manager_from_monitors.unwrap_or_else(|| { + use core::hash::{BuildHasher, Hasher}; + + match std::env::var("LDK_TEST_REBUILD_MGR_FROM_MONITORS") { + Ok(val) => match val.as_str() { + "1" => true, + "0" => false, + _ => panic!( + "LDK_TEST_REBUILD_MGR_FROM_MONITORS must be 0 or 1, got: {}", + val + ), + }, + Err(_) => { + let rand_val = + std::collections::hash_map::RandomState::new().build_hasher().finish(); + if rand_val % 2 == 0 { + true + } else { + false + } + }, + } + }); // If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we // should ensure we try them again on the inbound edge. 
We put them here and do so after we diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 6800078fd6f..25b54087e93 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -911,6 +911,8 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { tx_broadcaster: &broadcaster, logger: &self.logger, channel_monitors, + #[cfg(test)] + reconstruct_manager_from_monitors: None, }, ) .unwrap(); @@ -1309,7 +1311,7 @@ fn check_claimed_htlcs_match_route<'a, 'b, 'c>( pub fn _reload_node<'a, 'b, 'c>( node: &'a Node<'a, 'b, 'c>, config: UserConfig, chanman_encoded: &[u8], - monitors_encoded: &[&[u8]], + monitors_encoded: &[&[u8]], _reconstruct_manager_from_monitors: Option, ) -> TestChannelManager<'b, 'c> { let mut monitors_read = Vec::with_capacity(monitors_encoded.len()); for encoded in monitors_encoded { @@ -1343,6 +1345,8 @@ pub fn _reload_node<'a, 'b, 'c>( tx_broadcaster: node.tx_broadcaster, logger: node.logger, channel_monitors, + #[cfg(test)] + reconstruct_manager_from_monitors: _reconstruct_manager_from_monitors, }, ) .unwrap() @@ -1378,7 +1382,7 @@ macro_rules! 
reload_node { $node.chain_monitor = &$new_chain_monitor; $new_channelmanager = - _reload_node(&$node, $new_config, &chanman_encoded, $monitors_encoded); + _reload_node(&$node, $new_config, &chanman_encoded, $monitors_encoded, None); $node.node = &$new_channelmanager; $node.onion_messenger.set_offers_handler(&$new_channelmanager); $node.onion_messenger.set_async_payments_handler(&$new_channelmanager); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 360ffe2dc1f..cac187174cf 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -438,6 +438,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { tx_broadcaster: nodes[0].tx_broadcaster, logger: &logger, channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(), + reconstruct_manager_from_monitors: None, }) { } else { panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return"); }; @@ -456,6 +457,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { tx_broadcaster: nodes[0].tx_broadcaster, logger: &logger, channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(), + reconstruct_manager_from_monitors: None, }).unwrap(); nodes_0_deserialized = nodes_0_deserialized_tmp; assert!(nodes_0_read.is_empty()); From 30dbf40e6fa26c44a1660dd42f5c9148171d856e Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 3 Feb 2026 12:37:45 -0500 Subject: [PATCH 08/23] Trivially refactor reload_node macro Cleans it up a bit in preparation for adding a new variant in the next commit. 
--- lightning/src/ln/functional_test_utils.rs | 38 +++++++++++++++++++---- 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 25b54087e93..07f11ed72e8 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1366,8 +1366,10 @@ pub fn _reload_node<'a, 'b, 'c>( } #[macro_export] -macro_rules! reload_node { - ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { +macro_rules! _reload_node_inner { + ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: + ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr + ) => { let chanman_encoded = $chanman_encoded; $persister = $crate::util::test_utils::TestPersister::new(); @@ -1381,22 +1383,46 @@ macro_rules! reload_node { ); $node.chain_monitor = &$new_chain_monitor; - $new_channelmanager = - _reload_node(&$node, $new_config, &chanman_encoded, $monitors_encoded, None); + $new_channelmanager = _reload_node( + &$node, + $new_config, + &chanman_encoded, + $monitors_encoded, + $reconstruct_pending_htlcs, + ); $node.node = &$new_channelmanager; $node.onion_messenger.set_offers_handler(&$new_channelmanager); $node.onion_messenger.set_async_payments_handler(&$new_channelmanager); }; +} + +#[macro_export] +macro_rules! 
reload_node { + // Reload the node using the node's current config ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { let config = $node.node.get_current_config(); - reload_node!( + _reload_node_inner!( $node, config, $chanman_encoded, $monitors_encoded, $persister, $new_chain_monitor, - $new_channelmanager + $new_channelmanager, + None + ); + }; + // Reload the node with the new provided config + ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { + _reload_node_inner!( + $node, + $new_config, + $chanman_encoded, + $monitors_encoded, + $persister, + $new_chain_monitor, + $new_channelmanager, + None ); }; } From 979d5648764da2a71b1f67c10ec329efa80ec5e7 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 23 Jan 2026 15:50:10 -0500 Subject: [PATCH 09/23] Check pruned HTLCs were resolved on startup In a recent commit, we added support for pruning an inbound HTLC's persisted onion once the HTLC has been irrevocably forwarded to the outbound edge. Here, we add a check on startup that those inbound HTLCs were actually handled. Specifically, we check that the inbound HTLC is either (a) currently present in the outbound edge or (b) was removed via claim. If neither of those are true, we infer that the HTLC was removed from the outbound edge via fail and fail the inbound HTLC backwards. 
--- lightning/src/ln/channel.rs | 34 ++-- lightning/src/ln/channelmanager.rs | 150 +++++++++++++-- lightning/src/ln/functional_test_utils.rs | 17 ++ lightning/src/ln/reload_tests.rs | 222 ++++++++++++++++++++++ 4 files changed, 396 insertions(+), 27 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 8a2bc30af91..e783f483063 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -314,8 +314,8 @@ impl InboundHTLCState { /// `ChannelManager` persist. /// /// Useful for reconstructing the pending HTLC set on startup. -#[derive(Debug)] -enum InboundUpdateAdd { +#[derive(Debug, Clone)] +pub(super) enum InboundUpdateAdd { /// The inbound committed HTLC's update_add_htlc message. WithOnion { update_add_htlc: msgs::UpdateAddHTLC }, /// This inbound HTLC is a forward that was irrevocably committed to the outbound edge, allowing @@ -7885,7 +7885,9 @@ where } /// Useful for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. - pub(super) fn inbound_committed_unresolved_htlcs(&self) -> Vec { + pub(super) fn inbound_committed_unresolved_htlcs( + &self, + ) -> Vec<(PaymentHash, InboundUpdateAdd)> { // We don't want to return an HTLC as needing processing if it already has a resolution that's // pending in the holding cell. let htlc_resolution_in_holding_cell = |id: u64| -> bool { @@ -7903,13 +7905,11 @@ where .pending_inbound_htlcs .iter() .filter_map(|htlc| match &htlc.state { - InboundHTLCState::Committed { - update_add_htlc: InboundUpdateAdd::WithOnion { update_add_htlc }, - } => { + InboundHTLCState::Committed { update_add_htlc } => { if htlc_resolution_in_holding_cell(htlc.htlc_id) { return None; } - Some(update_add_htlc.clone()) + Some((htlc.payment_hash, update_add_htlc.clone())) }, _ => None, }) @@ -7919,18 +7919,24 @@ where /// Useful when reconstructing the set of pending HTLC forwards when deserializing the /// `ChannelManager`. 
We don't want to cache an HTLC as needing to be forwarded if it's already /// present in the outbound edge, or else we'll double-forward. - pub(super) fn outbound_htlc_forwards(&self) -> impl Iterator + '_ { + pub(super) fn outbound_htlc_forwards( + &self, + ) -> impl Iterator + '_ { let holding_cell_outbounds = self.context.holding_cell_htlc_updates.iter().filter_map(|htlc| match htlc { - HTLCUpdateAwaitingACK::AddHTLC { source, .. } => match source { - HTLCSource::PreviousHopData(prev_hop_data) => Some(prev_hop_data.clone()), + HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => match source { + HTLCSource::PreviousHopData(prev_hop_data) => { + Some((*payment_hash, prev_hop_data.clone())) + }, _ => None, }, _ => None, }); let committed_outbounds = self.context.pending_outbound_htlcs.iter().filter_map(|htlc| match &htlc.source { - HTLCSource::PreviousHopData(prev_hop_data) => Some(prev_hop_data.clone()), + HTLCSource::PreviousHopData(prev_hop_data) => { + Some((htlc.payment_hash, prev_hop_data.clone())) + }, _ => None, }); holding_cell_outbounds.chain(committed_outbounds) @@ -7967,6 +7973,12 @@ where debug_assert!(false, "If we go to prune an inbound HTLC it should be present") } + /// Useful for testing crash scenarios where the holding cell is not persisted. 
+ #[cfg(test)] + pub(super) fn test_clear_holding_cell(&mut self) { + self.context.holding_cell_htlc_updates.clear() + } + /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed #[inline] fn mark_outbound_htlc_removed( diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 569bb371459..f42d2947153 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -59,9 +59,9 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight; use crate::ln::channel::QuiescentAction; use crate::ln::channel::{ self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult, - FundedChannel, FundingTxSigned, InboundV1Channel, OutboundV1Channel, PendingV2Channel, - ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch, - WithChannelContext, + FundedChannel, FundingTxSigned, InboundUpdateAdd, InboundV1Channel, OutboundV1Channel, + PendingV2Channel, ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, + UpdateFulfillCommitFetch, WithChannelContext, }; use crate::ln::channel_state::ChannelDetails; use crate::ln::funding::SpliceContribution; @@ -10183,7 +10183,20 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap(); - chan.inbound_committed_unresolved_htlcs().len() + chan.inbound_committed_unresolved_htlcs() + .iter() + .filter(|(_, htlc)| matches!(htlc, InboundUpdateAdd::WithOnion { .. })) + .count() + } + + #[cfg(test)] + /// Useful for testing crash scenarios where the holding cell of a channel is not persisted. 
+ pub(crate) fn test_clear_channel_holding_cell(&self, cp_id: PublicKey, chan_id: ChannelId) { + let per_peer_state = self.per_peer_state.read().unwrap(); + let mut peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); + let chan = + peer_state.channel_by_id.get_mut(&chan_id).and_then(|c| c.as_funded_mut()).unwrap(); + chan.test_clear_holding_cell(); } /// Completes channel resumption after locks have been released. @@ -18293,7 +18306,7 @@ impl< } // Post-deserialization processing - let mut decode_update_add_htlcs = new_hash_map(); + let mut decode_update_add_htlcs: HashMap> = new_hash_map(); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); } @@ -18594,6 +18607,30 @@ impl< // have a fully-constructed `ChannelManager` at the end. let mut pending_claims_to_replay = Vec::new(); + // If we find an inbound HTLC that claims to already be forwarded to the outbound edge, we + // store an identifier for it here and verify that it is either (a) present in the outbound + // edge or (b) removed from the outbound edge via claim. If it's in neither of these states, we + // infer that it was removed from the outbound edge via fail, and fail it backwards to ensure + // that it is handled. 
+ let mut already_forwarded_htlcs: HashMap< + (ChannelId, PaymentHash), + Vec<(HTLCPreviousHopData, u64)>, + > = new_hash_map(); + let prune_forwarded_htlc = |already_forwarded_htlcs: &mut HashMap< + (ChannelId, PaymentHash), + Vec<(HTLCPreviousHopData, u64)>, + >, + prev_hop: &HTLCPreviousHopData, + payment_hash: &PaymentHash| { + if let hash_map::Entry::Occupied(mut entry) = + already_forwarded_htlcs.entry((prev_hop.channel_id, *payment_hash)) + { + entry.get_mut().retain(|(htlc, _)| prev_hop.htlc_id != htlc.htlc_id); + if entry.get().is_empty() { + entry.remove(); + } + } + }; { // If we're tracking pending payments, ensure we haven't lost any by looking at the // ChannelMonitor data for any channels for which we do not have authorative state @@ -18616,16 +18653,33 @@ impl< if reconstruct_manager_from_monitors { if let Some(chan) = peer_state.channel_by_id.get(channel_id) { if let Some(funded_chan) = chan.as_funded() { + let scid_alias = funded_chan.context.outbound_scid_alias(); let inbound_committed_update_adds = funded_chan.inbound_committed_unresolved_htlcs(); - if !inbound_committed_update_adds.is_empty() { - // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized - // `Channel`, as part of removing the requirement to regularly persist the - // `ChannelManager`. - decode_update_add_htlcs.insert( - funded_chan.context.outbound_scid_alias(), - inbound_committed_update_adds, - ); + for (payment_hash, htlc) in inbound_committed_update_adds { + match htlc { + InboundUpdateAdd::WithOnion { update_add_htlc } => { + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel` as part of removing the requirement to regularly persist the + // `ChannelManager`. 
+ decode_update_add_htlcs + .entry(scid_alias) + .or_insert_with(Vec::new) + .push(update_add_htlc); + }, + InboundUpdateAdd::Forwarded { + hop_data, + outbound_amt_msat, + } => { + already_forwarded_htlcs + .entry((hop_data.channel_id, payment_hash)) + .or_insert_with(Vec::new) + .push((hop_data, outbound_amt_msat)); + }, + InboundUpdateAdd::Legacy => { + return Err(DecodeError::InvalidValue) + }, + } } } } @@ -18672,13 +18726,19 @@ impl< if reconstruct_manager_from_monitors && !is_channel_closed { if let Some(chan) = peer_state.channel_by_id.get(channel_id) { if let Some(funded_chan) = chan.as_funded() { - for prev_hop in funded_chan.outbound_htlc_forwards() { + for (payment_hash, prev_hop) in funded_chan.outbound_htlc_forwards() + { dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs, &prev_hop, "HTLC already forwarded to the outbound edge", &args.logger, ); + prune_forwarded_htlc( + &mut already_forwarded_htlcs, + &prev_hop, + &payment_hash, + ); } } } @@ -18713,6 +18773,11 @@ impl< "HTLC already forwarded to the outbound edge", &&logger, ); + prune_forwarded_htlc( + &mut already_forwarded_htlcs, + &prev_hop_data, + &htlc.payment_hash, + ); } // The ChannelMonitor is now responsible for this HTLC's @@ -19160,7 +19225,7 @@ impl< if reconstruct_manager_from_monitors { // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. 
- for (src, _, _, _, _, _) in failed_htlcs.iter() { + for (src, payment_hash, _, _, _, _) in failed_htlcs.iter() { if let HTLCSource::PreviousHopData(prev_hop_data) = src { dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs, @@ -19168,6 +19233,7 @@ impl< "HTLC was failed backwards during manager read", &args.logger, ); + prune_forwarded_htlc(&mut already_forwarded_htlcs, prev_hop_data, payment_hash); } } @@ -19313,9 +19379,46 @@ impl< }; let mut processed_claims: HashSet> = new_hash_set(); - for (_, monitor) in args.channel_monitors.iter() { + for (channel_id, monitor) in args.channel_monitors.iter() { for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() { + // If we have unresolved inbound committed HTLCs that were already forwarded to the + // outbound edge and removed via claim, we need to make sure to claim them backwards via + // adding them to `pending_claims_to_replay`. + if let Some(forwarded_htlcs) = + already_forwarded_htlcs.remove(&(*channel_id, payment_hash)) + { + for (hop_data, outbound_amt_msat) in forwarded_htlcs { + let new_pending_claim = + !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _)| { + matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == hop_data.htlc_id && hop.channel_id == hop_data.channel_id) + }); + if new_pending_claim { + let counterparty_node_id = monitor.get_counterparty_node_id(); + let is_channel_closed = channel_manager + .per_peer_state + .read() + .unwrap() + .get(&counterparty_node_id) + .map_or(true, |peer_state_mtx| { + !peer_state_mtx + .lock() + .unwrap() + .channel_by_id + .contains_key(channel_id) + }); + pending_claims_to_replay.push(( + HTLCSource::PreviousHopData(hop_data), + payment_preimage, + outbound_amt_msat, + is_channel_closed, + counterparty_node_id, + monitor.get_funding_txo(), + *channel_id, + )); + } + } + } if !payment_claims.is_empty() { for payment_claim in payment_claims { if processed_claims.contains(&payment_claim.mpp_parts) { @@ 
-19557,6 +19660,21 @@ impl< channel_manager .fail_htlc_backwards_internal(&source, &hash, &reason, receiver, ev_action); } + for ((_, hash), htlcs) in already_forwarded_htlcs.into_iter() { + for (htlc, _) in htlcs { + let channel_id = htlc.channel_id; + let node_id = htlc.counterparty_node_id; + let source = HTLCSource::PreviousHopData(htlc); + let failure_reason = LocalHTLCFailureReason::TemporaryChannelFailure; + let failure_data = channel_manager.get_htlc_inbound_temp_fail_data(failure_reason); + let reason = HTLCFailReason::reason(failure_reason, failure_data); + let receiver = HTLCHandlingFailureType::Forward { node_id, channel_id }; + // The event completion action is only relevant for HTLCs that originate from our node, not + // forwarded HTLCs. + channel_manager + .fail_htlc_backwards_internal(&source, &hash, &reason, receiver, None); + } + } for ( source, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 07f11ed72e8..07f7c0bd8f3 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1425,6 +1425,23 @@ macro_rules! reload_node { None ); }; + // Reload the node and have the `ChannelManager` use new codepaths that reconstruct its set of + // pending HTLCs from `Channel{Monitor}` data. 
+ ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: + ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr + ) => { + let config = $node.node.get_current_config(); + _reload_node_inner!( + $node, + config, + $chanman_encoded, + $monitors_encoded, + $persister, + $new_chain_monitor, + $new_channelmanager, + $reconstruct_pending_htlcs + ); + }; } pub fn create_funding_transaction<'a, 'b, 'c>( diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index cac187174cf..fa0c77bc9d4 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1861,3 +1861,225 @@ fn outbound_removed_holding_cell_resolved_no_double_forward() { // nodes[0] should now have received the fulfill and generate PaymentSent. expect_payment_sent(&nodes[0], payment_preimage, None, true, true); } + +#[test] +fn test_reload_node_with_preimage_in_monitor_claims_htlc() { + // Test that if a forwarding node has an HTLC that was irrevocably removed on the outbound edge + // via claim but is still forwarded-and-unresolved in the inbound edge, that HTLC will not be + // failed back on the inbound edge on reload. + // + // For context, the ChannelManager is moving towards reconstructing the pending inbound HTLC set + // from Channel data on startup. If we find an inbound HTLC that is flagged as already-forwarded, + // we then check that the HTLC is either (a) still present in the outbound edge or (b) removed + // from the outbound edge but with a preimage present in the corresponding ChannelMonitor, + // indicating that it was removed from the outbound edge via claim. If neither of those are the + // case, we infer that the HTLC was removed from the outbound edge via failure and fail the HTLC + // backwards. + // + // Here we ensure that inbound HTLCs in case (b) above will not be failed backwards on manager + // reload. 
+ + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2 = chan_1_2.2; + + // Send a payment from nodes[0] to nodes[2] via nodes[1]. + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); + send_along_route_with_secret( + &nodes[0], route, &[&[&nodes[1], &nodes[2]]], 1_000_000, payment_hash, payment_secret, + ); + + // Claim the payment on nodes[2]. + nodes[2].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); + + // Disconnect nodes[0] from nodes[1] BEFORE processing the fulfill. + // This prevents the claim from propagating back, leaving the inbound HTLC in ::Forwarded state. + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + // Process the fulfill from nodes[2] to nodes[1]. + // This stores the preimage in nodes[1]'s monitor for chan_1_2. 
+ let updates_2_1 = get_htlc_update_msgs(&nodes[2], &node_1_id); + nodes[1].node.handle_update_fulfill_htlc(node_2_id, updates_2_1.update_fulfill_htlcs[0].clone()); + check_added_monitors(&nodes[1], 1); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, false, false); + expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); + + // Clear the holding cell's claim entry on chan_0_1 before serialization. + // This simulates a crash where the HTLC was fully removed from the outbound edge but is still + // present on the inbound edge without a resolution. + nodes[1].node.test_clear_channel_holding_cell(node_0_id, chan_id_0_1); + + // At this point: + // - The inbound HTLC on nodes[1] (from nodes[0]) is in ::Forwarded state + // - The preimage IS in nodes[1]'s monitor for chan_1_2 + // - The outbound HTLC to nodes[2] is resolved + // + // Serialize nodes[1] state and monitors before reloading. + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_serialized = get_monitor!(nodes[1], chan_id_1_2).encode(); + + // Reload nodes[1]. + // During deserialization, we track inbound HTLCs that purport to already be forwarded on the + // outbound edge. If any are entirely missing from the outbound edge with no preimage available, + // they will be failed backwards. Otherwise, as in this case where a preimage is available, the + // payment should be claimed backwards. + reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized, + Some(true) + ); + + // When the claim is reconstructed during reload, a PaymentForwarded event is generated. + // This event has next_user_channel_id as None since the outbound HTLC was already removed. + // Fetching events triggers the pending monitor update (adding preimage) to be applied. 
+ let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match &events[0] { + Event::PaymentForwarded { total_fee_earned_msat: Some(1000), .. } => {}, + _ => panic!("Expected PaymentForwarded event"), + } + check_added_monitors(&nodes[1], 1); + + // Reconnect nodes[1] to nodes[0]. The claim should be in nodes[1]'s holding cell. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_claims = (0, 1); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received the fulfill and generate PaymentSent. + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} + +#[test] +fn test_reload_node_without_preimage_fails_htlc() { + // Test that if a forwarding node has an HTLC that was removed on the outbound edge via failure + // but is still forwarded-and-unresolved in the inbound edge, that HTLC will be correctly + // failed back on reload via the already_forwarded_htlcs mechanism. + // + // For context, the ChannelManager reconstructs the pending inbound HTLC set from Channel data + // on startup. If an inbound HTLC is present but flagged as already-forwarded, we check that + // the HTLC is either (a) still present in the outbound edge or (b) removed from the outbound + // edge but with a preimage present in the corresponding ChannelMonitor, indicating it was + // removed via claim. If neither, we infer the HTLC was removed via failure and fail it back. + // + // Here we test the failure case: no preimage is present, so the HTLC should be failed back. 
+ let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2 = chan_1_2.2; + + // Send a payment from nodes[0] to nodes[2] via nodes[1]. + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); + send_along_route_with_secret( + &nodes[0], route, &[&[&nodes[1], &nodes[2]]], 1_000_000, payment_hash, payment_secret, + ); + + // Disconnect nodes[0] from nodes[1] BEFORE processing the failure. + // This prevents the fail from propagating back, leaving the inbound HTLC in ::Forwarded state. + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + // Fail the payment on nodes[2] and process the failure to nodes[1]. + // This removes the outbound HTLC and queues a fail in the holding cell. 
+ nodes[2].node.fail_htlc_backwards(&payment_hash); + expect_and_process_pending_htlcs_and_htlc_handling_failed( + &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash }] + ); + check_added_monitors(&nodes[2], 1); + + let updates_2_1 = get_htlc_update_msgs(&nodes[2], &node_1_id); + nodes[1].node.handle_update_fail_htlc(node_2_id, &updates_2_1.update_fail_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, false, false); + expect_and_process_pending_htlcs_and_htlc_handling_failed( + &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_2_id), channel_id: chan_id_1_2 }] + ); + + // Clear the holding cell's fail entry on chan_0_1 before serialization. + // This simulates a crash where the HTLC was fully removed from the outbound edge but is still + // present on the inbound edge without a resolution. Otherwise, we would not be able to exercise + // the desired failure paths due to the holding cell failure resolution being present. + nodes[1].node.test_clear_channel_holding_cell(node_0_id, chan_id_0_1); + + // Now serialize. The state has: + // - Inbound HTLC on chan_0_1 in ::Forwarded state + // - Outbound HTLC on chan_1_2 resolved (not present) + // - No preimage in monitors (it was a failure) + // - No holding cell entry for the fail (we cleared it) + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_serialized = get_monitor!(nodes[1], chan_id_1_2).encode(); + + // Reload nodes[1]. + // The already_forwarded_htlcs mechanism should detect: + // - Inbound HTLC is in ::Forwarded state + // - Outbound HTLC is not present in outbound channel + // - No preimage in monitors + // Therefore it should fail the HTLC backwards. 
+ reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized, + Some(true) + ); + + // After reload, nodes[1] should have generated an HTLCHandlingFailed event. + let events = nodes[1].node.get_and_clear_pending_events(); + assert!(!events.is_empty(), "Expected HTLCHandlingFailed event"); + for event in events { + match event { + Event::HTLCHandlingFailed { .. } => {}, + _ => panic!("Unexpected event {:?}", event), + } + } + + // Process the failure so it goes back into chan_0_1's holding cell. + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 0); // No monitor update yet (peer disconnected) + + // Reconnect nodes[1] to nodes[0]. The fail should be in nodes[1]'s holding cell. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_fails = (0, 1); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received the failure and generate PaymentFailed. + expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new()); +} From 0ca79af5c3871e80f67d418e04468f7028881b99 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 16 Jan 2026 16:53:55 -0500 Subject: [PATCH 10/23] Support deleting legacy forward map persistence in 0.5 In 0.3+, we are taking steps to remove the requirement of regularly persisting the ChannelManager and instead rebuild the set of HTLC forwards (and the manager generally) from Channel{Monitor} data. We previously merged support for reconstructing the ChannelManager::decode_update_add_htlcs map from channel data, using a new HTLC onion field that will be present for inbound HTLCs received on 0.3+ only. The plan is that in upcoming LDK versions, the manager will reconstruct this map and the other forward/claimable/pending HTLC maps will automatically repopulate themselves on the next call to process_pending_htlc_forwards. 
As such, once we're in a future version that reconstructs the pending HTLC set, we can stop persisting the legacy ChannelManager maps such as forward_htlcs, pending_intercepted_htlcs since they will never be used. For 0.3 to be compatible with this future version, in this commit we detect that the manager was last written on a version of LDK that doesn't persist the legacy maps. In that case, we don't try to read the old forwards map and run the new reconstruction logic only. --- lightning/src/ln/channelmanager.rs | 58 ++++++++++++++++++++---------- 1 file changed, 39 insertions(+), 19 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f42d2947153..808e776fed6 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -16631,6 +16631,17 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures { const SERIALIZATION_VERSION: u8 = 1; const MIN_SERIALIZATION_VERSION: u8 = 1; +// We plan to start writing this version in 0.5. +// +// LDK 0.5+ will reconstruct the set of pending HTLCs from `Channel{Monitor}` data that started +// being written in 0.3, ignoring legacy `ChannelManager` HTLC maps on read and not writing them. +// LDK 0.5+ will automatically fail to read if the pending HTLC set cannot be reconstructed, i.e. +// if we were last written with pending HTLCs on 0.2- or if the new 0.3+ fields are missing. +// +// If 0.3 or 0.4 reads this manager version, it knows that the legacy maps were not written and +// acts accordingly. +const RECONSTRUCT_HTLCS_FROM_CHANS_VERSION: u8 = 5; + impl_writeable_tlv_based!(PhantomRouteHints, { (2, channels, required_vec), (4, phantom_scid, required), @@ -17382,6 +17393,8 @@ pub(super) struct ChannelManagerData { forward_htlcs_legacy: HashMap>, pending_intercepted_htlcs_legacy: HashMap, decode_update_add_htlcs_legacy: HashMap>, + // The `ChannelManager` version that was written. 
+ version: u8, } /// Arguments for deserializing [`ChannelManagerData`]. @@ -17405,7 +17418,7 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> fn read( reader: &mut R, args: ChannelManagerDataReadArgs<'a, ES, NS, SP, L>, ) -> Result { - let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); + let version = read_ver_prefix!(reader, SERIALIZATION_VERSION); let chain_hash: ChainHash = Readable::read(reader)?; let best_block_height: u32 = Readable::read(reader)?; @@ -17427,21 +17440,26 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> channels.push(channel); } - let forward_htlcs_count: u64 = Readable::read(reader)?; - let mut forward_htlcs_legacy: HashMap> = - hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); - for _ in 0..forward_htlcs_count { - let short_channel_id = Readable::read(reader)?; - let pending_forwards_count: u64 = Readable::read(reader)?; - let mut pending_forwards = Vec::with_capacity(cmp::min( - pending_forwards_count as usize, - MAX_ALLOC_SIZE / mem::size_of::(), - )); - for _ in 0..pending_forwards_count { - pending_forwards.push(Readable::read(reader)?); - } - forward_htlcs_legacy.insert(short_channel_id, pending_forwards); - } + let forward_htlcs_legacy: HashMap> = + if version < RECONSTRUCT_HTLCS_FROM_CHANS_VERSION { + let forward_htlcs_count: u64 = Readable::read(reader)?; + let mut fwds = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); + for _ in 0..forward_htlcs_count { + let short_channel_id = Readable::read(reader)?; + let pending_forwards_count: u64 = Readable::read(reader)?; + let mut pending_forwards = Vec::with_capacity(cmp::min( + pending_forwards_count as usize, + MAX_ALLOC_SIZE / mem::size_of::(), + )); + for _ in 0..pending_forwards_count { + pending_forwards.push(Readable::read(reader)?); + } + fwds.insert(short_channel_id, pending_forwards); + } + fwds + } else { + new_hash_map() + }; let claimable_htlcs_count: u64 = 
Readable::read(reader)?; let mut claimable_htlcs_list = @@ -17721,6 +17739,7 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> in_flight_monitor_updates: in_flight_monitor_updates.unwrap_or_default(), peer_storage_dir: peer_storage_dir.unwrap_or_default(), async_receive_offer_cache, + version, }) } } @@ -18023,6 +18042,7 @@ impl< mut in_flight_monitor_updates, peer_storage_dir, async_receive_offer_cache, + version: _version, } = data; let empty_peer_state = || PeerState { @@ -18572,10 +18592,10 @@ impl< // persist that state, relying on it being up-to-date on restart. Newer versions are moving // towards reducing this reliance on regular persistence of the `ChannelManager`, and instead // reconstruct HTLC/payment state based on `Channel{Monitor}` data if - // `reconstruct_manager_from_monitors` is set below. Currently it is only set in tests, randomly - // to ensure the legacy codepaths also have test coverage. + // `reconstruct_manager_from_monitors` is set below. Currently we set it in tests randomly to + // ensure the legacy codepaths also have test coverage. 
#[cfg(not(test))] - let reconstruct_manager_from_monitors = false; + let reconstruct_manager_from_monitors = _version >= RECONSTRUCT_HTLCS_FROM_CHANS_VERSION; #[cfg(test)] let reconstruct_manager_from_monitors = args.reconstruct_manager_from_monitors.unwrap_or_else(|| { From 3b75eee99c9af8a515ff3203f4f0df135e229c27 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 6 Feb 2026 14:11:41 -0500 Subject: [PATCH 11/23] Fix docs on ChannelMonitor::payment_preimages --- lightning/src/chain/channelmonitor.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index a537ff55874..3c94b919d97 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1256,18 +1256,19 @@ pub(crate) struct ChannelMonitorImpl { // deserialization current_holder_commitment_number: u64, - /// The set of payment hashes from inbound payments for which we know the preimage. Payment - /// preimages that are not included in any unrevoked local commitment transaction or unrevoked - /// remote commitment transactions are automatically removed when commitment transactions are - /// revoked. Note that this happens one revocation after it theoretically could, leaving - /// preimages present here for the previous state even when the channel is "at rest". This is a - /// good safety buffer, but also is important as it ensures we retain payment preimages for the - /// previous local commitment transaction, which may have been broadcast already when we see - /// the revocation (in setups with redundant monitors). + /// The set of payment hashes from inbound payments and forwards for which we know the preimage. + /// Payment preimages that are not included in any unrevoked local commitment transaction or + /// unrevoked remote commitment transactions are automatically removed when commitment + /// transactions are revoked. 
Note that this happens one revocation after it theoretically could, + /// leaving preimages present here for the previous state even when the channel is "at rest". + /// This is a good safety buffer, but also is important as it ensures we retain payment preimages + /// for the previous local commitment transaction, which may have been broadcast already when we + /// see the revocation (in setups with redundant monitors). /// /// We also store [`PaymentClaimDetails`] here, tracking the payment information(s) for this /// preimage for inbound payments. This allows us to rebuild the inbound payment information on - /// startup even if we lost our `ChannelManager`. + /// startup even if we lost our `ChannelManager`. For forwarded HTLCs, the list of + /// [`PaymentClaimDetails`] is empty. payment_preimages: HashMap<PaymentHash, (PaymentPreimage, Vec<PaymentClaimDetails>)>, // Note that `MonitorEvent`s MUST NOT be generated during update processing, only generated From ee4fda3706f91bfe34dbf8ec1b6b1a513f2876a4 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 6 Feb 2026 15:12:45 -0500 Subject: [PATCH 12/23] Trivial: full path in reload_node macro --- lightning/src/ln/functional_test_utils.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 07f7c0bd8f3..e5603bfddc6 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1383,7 +1383,7 @@ macro_rules! _reload_node_inner { ); $node.chain_monitor = &$new_chain_monitor; - $new_channelmanager = _reload_node( + $new_channelmanager = $crate::ln::functional_test_utils::_reload_node( &$node, $new_config, &chanman_encoded, @@ -1401,7 +1401,7 @@ macro_rules!
reload_node { // Reload the node using the node's current config ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { let config = $node.node.get_current_config(); - _reload_node_inner!( + $crate::_reload_node_inner!( $node, config, $chanman_encoded, @@ -1414,7 +1414,7 @@ macro_rules! reload_node { }; // Reload the node with the new provided config ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { - _reload_node_inner!( + $crate::_reload_node_inner!( $node, $new_config, $chanman_encoded, @@ -1431,7 +1431,7 @@ macro_rules! reload_node { ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr ) => { let config = $node.node.get_current_config(); - _reload_node_inner!( + $crate::_reload_node_inner!( $node, config, $chanman_encoded, @@ -2968,7 +2968,7 @@ pub fn check_payment_claimable( #[cfg(any(test, ldk_bench, feature = "_test_utils"))] macro_rules! 
expect_payment_claimable { ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => { - expect_payment_claimable!( + $crate::expect_payment_claimable!( $node, $expected_payment_hash, $expected_payment_secret, From 77d260485e9924bc752d9b2a0db135ee7e1e29f4 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 6 Feb 2026 16:13:19 -0500 Subject: [PATCH 13/23] Update upgrade tests: 0.2- to 0.5 doesn't work XXX --- lightning-tests/Cargo.toml | 1 + lightning-tests/src/lib.rs | 1 - .../src/upgrade_downgrade_tests.rs | 276 ++++++++++++------ 3 files changed, 181 insertions(+), 97 deletions(-) diff --git a/lightning-tests/Cargo.toml b/lightning-tests/Cargo.toml index 4e8d330089d..7830792b121 100644 --- a/lightning-tests/Cargo.toml +++ b/lightning-tests/Cargo.toml @@ -15,6 +15,7 @@ lightning-types = { path = "../lightning-types", features = ["_test_utils"] } lightning-invoice = { path = "../lightning-invoice", default-features = false } lightning-macros = { path = "../lightning-macros" } lightning = { path = "../lightning", features = ["_test_utils"] } +lightning_0_3 = { package = "lightning", git = "https://github.com/valentinewallace/rust-lightning", branch = "2026-02-dedup-htlc-fwd-data", features = ["_test_utils"] } lightning_0_2 = { package = "lightning", version = "0.2.0", features = ["_test_utils"] } lightning_0_1 = { package = "lightning", version = "0.1.7", features = ["_test_utils"] } lightning_0_0_125 = { package = "lightning", version = "0.0.125", features = ["_test_utils"] } diff --git a/lightning-tests/src/lib.rs b/lightning-tests/src/lib.rs index c028193d692..4249c957dde 100644 --- a/lightning-tests/src/lib.rs +++ b/lightning-tests/src/lib.rs @@ -1,4 +1,3 @@ -#[cfg_attr(test, macro_use)] extern crate lightning; #[cfg(all(test, not(taproot)))] diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs index 8df670321be..17df9f5adea 100644 --- 
a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -45,20 +45,37 @@ use lightning_0_0_125::ln::msgs::ChannelMessageHandler as _; use lightning_0_0_125::routing::router as router_0_0_125; use lightning_0_0_125::util::ser::Writeable as _; +use lightning_0_3::events::bump_transaction::sync::WalletSourceSync as _; +use lightning_0_3::events::{ + Event as Event_0_3, HTLCHandlingFailureType as HTLCHandlingFailureType_0_3, +}; +use lightning_0_3::expect_payment_claimable as expect_payment_claimable_0_3; +use lightning_0_3::get_event_msg as get_event_msg_0_3; +use lightning_0_3::ln::functional_test_utils as lightning_0_3_utils; +use lightning_0_3::ln::functional_test_utils::{ + PaymentFailedConditions as PaymentFailedConditions_0_3, ReconnectArgs as ReconnectArgs_0_3, + SendEvent as SendEvent_0_3, +}; +use lightning_0_3::ln::funding::SpliceContribution as SpliceContribution_0_3; +use lightning_0_3::ln::msgs::BaseMessageHandler as _; +use lightning_0_3::ln::msgs::ChannelMessageHandler as _; +use lightning_0_3::ln::msgs::MessageSendEvent as MessageSendEvent_0_3; +use lightning_0_3::ln::splicing_tests::lock_splice as lock_splice_0_3; +use lightning_0_3::ln::splicing_tests::splice_channel as splice_channel_0_3; +use lightning_0_3::ln::types::ChannelId as ChannelId_0_3; +use lightning_0_3::reload_node as reload_node_0_3; +use lightning_0_3::types::payment::{ + PaymentHash as PaymentHash_0_3, PaymentPreimage as PaymentPreimage_0_3, + PaymentSecret as PaymentSecret_0_3, +}; + use lightning::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER}; -use lightning::events::bump_transaction::sync::WalletSourceSync; -use lightning::events::{ClosureReason, Event, HTLCHandlingFailureType}; -use lightning::ln::functional_test_utils::*; -use lightning::ln::funding::SpliceContribution; -use lightning::ln::msgs::BaseMessageHandler as _; -use lightning::ln::msgs::ChannelMessageHandler as _; -use 
lightning::ln::msgs::MessageSendEvent; -use lightning::ln::splicing_tests::*; -use lightning::ln::types::ChannelId; +use lightning::check_spends; +use lightning::events::{ClosureReason, Event}; +use lightning::ln::functional_test_utils as lightning_local_utils; +use lightning::reload_node; use lightning::sign::OutputSpender; -use lightning_types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; - use bitcoin::script::Builder; use bitcoin::secp256k1::Secp256k1; use bitcoin::{opcodes, Amount, TxOut}; @@ -68,7 +85,7 @@ use std::sync::Arc; #[test] fn simple_upgrade() { // Tests a simple case of upgrading from LDK 0.1 with a pending payment - let (node_a_ser, node_b_ser, mon_a_ser, mon_b_ser, preimage); + let (node_a_ser, node_b_ser, mon_a_ser, mon_b_ser, preimage_bytes); { let chanmon_cfgs = lightning_0_1_utils::create_chanmon_cfgs(2); let node_cfgs = lightning_0_1_utils::create_node_cfgs(2, &chanmon_cfgs); @@ -79,7 +96,7 @@ fn simple_upgrade() { let payment_preimage = lightning_0_1_utils::route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - preimage = PaymentPreimage(payment_preimage.0 .0); + preimage_bytes = payment_preimage.0 .0; node_a_ser = nodes[0].node.encode(); node_b_ser = nodes[1].node.encode(); @@ -89,26 +106,43 @@ fn simple_upgrade() { // Create a dummy node to reload over with the 0.1 state - let mut chanmon_cfgs = create_chanmon_cfgs(2); + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(2); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(2, &chanmon_cfgs); let (persister_a, persister_b, chain_mon_a, chain_mon_b); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let node_chanmgrs = 
lightning_0_3_utils::create_node_chanmgrs(2, &node_cfgs, &[None, None]); let (node_a, node_b); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_0_3_utils::create_network(2, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = lightning_0_3_utils::test_default_channel_config(); let a_mons = &[&mon_a_ser[..]]; - reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); - reload_node!(nodes[1], config, &node_b_ser, &[&mon_b_ser], persister_b, chain_mon_b, node_b); - - reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - - claim_payment(&nodes[0], &[&nodes[1]], preimage); + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + reload_node_0_3!( + nodes[1], + config, + &node_b_ser, + &[&mon_b_ser], + persister_b, + chain_mon_b, + node_b + ); + + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + + let preimage = PaymentPreimage_0_3(preimage_bytes); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1]], preimage); } #[test] @@ -228,7 +262,7 @@ fn test_125_dangling_post_update_actions() { // Create a dummy node to reload over with the 0.0.125 state - let mut chanmon_cfgs = create_chanmon_cfgs(4); + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(4); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; @@ -236,14 +270,15 @@ fn test_125_dangling_post_update_actions() { chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[3].keys_manager.disable_all_state_policy_checks = true; - let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let node_cfgs = lightning_local_utils::create_node_cfgs(4, &chanmon_cfgs); let (persister, chain_mon); - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, 
&[None, None, None, None]); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let node; - let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_local_utils::create_network(4, &node_cfgs, &node_chanmgrs); // Finally, reload the node in the latest LDK. This previously failed. - let config = test_default_channel_config(); + let config = lightning_local_utils::test_default_channel_config(); reload_node!(nodes[3], config, &node_d_ser, &[&mon_ser], persister, chain_mon, node); } @@ -283,14 +318,14 @@ fn test_0_1_legacy_remote_key_derivation() { } // Create a dummy node to reload over with the 0.1 state - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(2); + let node_cfgs = lightning_local_utils::create_node_cfgs(2, &chanmon_cfgs); let (persister_a, persister_b, chain_mon_a, chain_mon_b); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let node_chanmgrs = lightning_local_utils::create_node_chanmgrs(2, &node_cfgs, &[None, None]); let (node_a, node_b); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_local_utils::create_network(2, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = lightning_local_utils::test_default_channel_config(); let a_mons = &[&mon_a_ser[..]]; reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); reload_node!(nodes[1], config, &node_b_ser, &[&mon_b_ser], persister_b, chain_mon_b, node_b); @@ -299,13 +334,13 @@ fn test_0_1_legacy_remote_key_derivation() { let node_b_id = nodes[1].node.get_our_node_id(); - mine_transaction(&nodes[0], &commitment_tx[0]); + lightning_local_utils::mine_transaction(&nodes[0], &commitment_tx[0]); let reason = ClosureReason::CommitmentTxConfirmed; - 
check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100_000); - check_added_monitors(&nodes[0], 1); - check_closed_broadcast(&nodes[0], 1, false); + lightning_local_utils::check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100_000); + lightning_local_utils::check_added_monitors(&nodes[0], 1); + lightning_local_utils::check_closed_broadcast(&nodes[0], 1, false); - connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); + lightning_local_utils::connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let mut spendable_event = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(spendable_event.len(), 1); if let Event::SpendableOutputs { outputs, channel_id: ev_id } = spendable_event.pop().unwrap() { @@ -420,7 +455,7 @@ fn do_test_0_1_htlc_forward_after_splice(fail_htlc: bool) { } // Create a dummy node to reload over with the 0.1 state - let mut chanmon_cfgs = create_chanmon_cfgs(3); + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; @@ -431,73 +466,105 @@ fn do_test_0_1_htlc_forward_after_splice(fail_htlc: bool) { chanmon_cfgs[1].tx_broadcaster.blocks = node_b_blocks; chanmon_cfgs[2].tx_broadcaster.blocks = node_c_blocks; - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let (node_a, node_b, node_c); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = 
lightning_0_3_utils::test_default_channel_config(); let a_mons = &[&mon_a_1_ser[..]]; - reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; - reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser, + b_mons, + persister_b, + chain_mon_b, + node_b + ); let c_mons = &[&mon_c_1_ser[..]]; - reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + reload_node_0_3!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); - reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); reconnect_b_c_args.send_channel_ready = (true, true); reconnect_b_c_args.send_announcement_sigs = (true, true); - reconnect_nodes(reconnect_b_c_args); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); - let contribution = SpliceContribution::splice_out(vec![TxOut { + let contribution = SpliceContribution_0_3::splice_out(vec![TxOut { value: Amount::from_sat(1_000), script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), }]); - let splice_tx = splice_channel(&nodes[0], &nodes[1], ChannelId(chan_id_bytes_a), contribution); + let splice_tx = + splice_channel_0_3(&nodes[0], &nodes[1], ChannelId_0_3(chan_id_bytes_a), contribution); for node in nodes.iter() { - mine_transaction(node, &splice_tx); - connect_blocks(node, ANTI_REORG_DELAY - 1); + lightning_0_3_utils::mine_transaction(node, &splice_tx); + lightning_0_3_utils::connect_blocks(node, ANTI_REORG_DELAY - 1); } - let splice_locked = 
get_event_msg!(nodes[0], MessageSendEvent::SendSpliceLocked, node_b_id); - lock_splice(&nodes[0], &nodes[1], &splice_locked, false); + let splice_locked = + get_event_msg_0_3!(nodes[0], MessageSendEvent_0_3::SendSpliceLocked, node_b_id); + lock_splice_0_3(&nodes[0], &nodes[1], &splice_locked, false); for node in nodes.iter() { - connect_blocks(node, EXTRA_BLOCKS_BEFORE_FAIL - ANTI_REORG_DELAY); + lightning_0_3_utils::connect_blocks(node, EXTRA_BLOCKS_BEFORE_FAIL - ANTI_REORG_DELAY); } // Now release the HTLC to be failed back to node A nodes[1].node.process_pending_htlc_forwards(); - let pay_secret = PaymentSecret(payment_secret_bytes); - let pay_hash = PaymentHash(payment_hash_bytes); - let pay_preimage = PaymentPreimage(payment_preimage_bytes); + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_preimage = PaymentPreimage_0_3(payment_preimage_bytes); if fail_htlc { - let failure = HTLCHandlingFailureType::Forward { + let failure = HTLCHandlingFailureType_0_3::Forward { node_id: Some(node_c_id), - channel_id: ChannelId(chan_id_bytes_b), + channel_id: ChannelId_0_3(chan_id_bytes_b), }; - expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[failure]); - check_added_monitors(&nodes[1], 1); + lightning_0_3_utils::expect_and_process_pending_htlcs_and_htlc_handling_failed( + &nodes[1], + &[failure], + ); + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); - let updates = get_htlc_update_msgs(&nodes[1], &node_a_id); + let updates = lightning_0_3_utils::get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); - do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); - let conditions = PaymentFailedConditions::new(); - expect_payment_failed_conditions(&nodes[0], pay_hash, false, conditions); + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[0], + &nodes[1], + 
&updates.commitment_signed, + false, + false, + ); + let conditions = PaymentFailedConditions_0_3::new(); + lightning_0_3_utils::expect_payment_failed_conditions( + &nodes[0], pay_hash, false, conditions, + ); } else { - check_added_monitors(&nodes[1], 1); - let forward_event = SendEvent::from_node(&nodes[1]); + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); let commitment = &forward_event.commitment_msg; - do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[2], &nodes[1], commitment, false, false, + ); - expect_and_process_pending_htlcs(&nodes[2], false); - expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000); - claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); } } @@ -632,32 +699,49 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { } // Create a dummy node to reload over with the 0.2 state - let mut chanmon_cfgs = create_chanmon_cfgs(3); + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); - let node_chanmgrs = create_node_chanmgrs(3, 
&node_cfgs, &[None, None, None]); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let (node_a, node_b, node_c); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = lightning_0_3_utils::test_default_channel_config(); let a_mons = &[&mon_a_1_ser[..]]; - reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; - reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser, + b_mons, + persister_b, + chain_mon_b, + node_b + ); let c_mons = &[&mon_c_1_ser[..]]; - reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + reload_node_0_3!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); - reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); reconnect_b_c_args.send_channel_ready = (true, true); reconnect_b_c_args.send_announcement_sigs = (true, true); - reconnect_nodes(reconnect_b_c_args); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); // Now release the HTLC from node_b to node_c, to be claimed back to node_a nodes[1].node.process_pending_htlc_forwards(); @@ -666,7 +750,7 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); let (intercept_id, 
expected_outbound_amt_msat) = match events[0] { - Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } => { + Event_0_3::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } => { (intercept_id, expected_outbound_amount_msat) }, _ => panic!(), @@ -675,7 +759,7 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { .node .forward_intercepted_htlc( intercept_id, - &ChannelId(chan_id_bytes_b_c), + &ChannelId_0_3(chan_id_bytes_b_c), nodes[2].node.get_our_node_id(), expected_outbound_amt_msat, ) @@ -683,17 +767,17 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { nodes[1].node.process_pending_htlc_forwards(); } - let pay_secret = PaymentSecret(payment_secret_bytes); - let pay_hash = PaymentHash(payment_hash_bytes); - let pay_preimage = PaymentPreimage(payment_preimage_bytes); + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_preimage = PaymentPreimage_0_3(payment_preimage_bytes); - check_added_monitors(&nodes[1], 1); - let forward_event = SendEvent::from_node(&nodes[1]); + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); let commitment = &forward_event.commitment_msg; - do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + lightning_0_3_utils::do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); - expect_and_process_pending_htlcs(&nodes[2], false); - expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000); - claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); } From 
7c79e67332289fc7ef20e3234bed9f3ad968519d Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 16 Jan 2026 17:38:57 -0500 Subject: [PATCH 14/23] Elide legacy manager pending HTLC maps on read/write In LDK 0.3 we added support for reconstructing the ChannelManager's pending forward HTLCs maps from channel data as part of removing the requirement to regularly persist the manager, but until now we still would write/read those maps within the manager to maintain compat with 0.2-. Also 0.3 added a new field to Channel that allowed the map reconstruction. Now that a few versions have passed we have more confidence that the new field is being written, here we deprecate persistence of the old legacy maps and only attempt to read them if the manager's serialized version indicates that they were written. Upcoming commits will ensure we error if the new field is missing. XXX clean up claude tests --- .../src/upgrade_downgrade_tests.rs | 640 ++++++++++++++++++ lightning/src/ln/channelmanager.rs | 339 +++------- .../upgrade-0.2-pending-htlc.txt | 9 + 3 files changed, 752 insertions(+), 236 deletions(-) create mode 100644 pending_changelog/upgrade-0.2-pending-htlc.txt diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs index 17df9f5adea..e900aeb381b 100644 --- a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -51,6 +51,8 @@ use lightning_0_3::events::{ }; use lightning_0_3::expect_payment_claimable as expect_payment_claimable_0_3; use lightning_0_3::get_event_msg as get_event_msg_0_3; +use lightning_0_3::get_monitor as get_monitor_0_3; +use lightning_0_3::ln::channelmanager::PaymentId as PaymentId_0_3; use lightning_0_3::ln::functional_test_utils as lightning_0_3_utils; use lightning_0_3::ln::functional_test_utils::{ PaymentFailedConditions as PaymentFailedConditions_0_3, ReconnectArgs as ReconnectArgs_0_3, @@ -60,21 +62,29 @@ use
lightning_0_3::ln::funding::SpliceContribution as SpliceContribution_0_3; use lightning_0_3::ln::msgs::BaseMessageHandler as _; use lightning_0_3::ln::msgs::ChannelMessageHandler as _; use lightning_0_3::ln::msgs::MessageSendEvent as MessageSendEvent_0_3; +use lightning_0_3::ln::outbound_payment::RecipientOnionFields as RecipientOnionFields_0_3; use lightning_0_3::ln::splicing_tests::lock_splice as lock_splice_0_3; use lightning_0_3::ln::splicing_tests::splice_channel as splice_channel_0_3; use lightning_0_3::ln::types::ChannelId as ChannelId_0_3; use lightning_0_3::reload_node as reload_node_0_3; +use lightning_0_3::routing::router as router_0_3; use lightning_0_3::types::payment::{ PaymentHash as PaymentHash_0_3, PaymentPreimage as PaymentPreimage_0_3, PaymentSecret as PaymentSecret_0_3, }; +use lightning_0_3::util::ser::Writeable as _; use lightning::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER}; use lightning::check_spends; use lightning::events::{ClosureReason, Event}; +use lightning::expect_payment_claimable; use lightning::ln::functional_test_utils as lightning_local_utils; +use lightning::ln::functional_test_utils::{ReconnectArgs, SendEvent}; +use lightning::ln::msgs::ChannelMessageHandler as _; use lightning::reload_node; use lightning::sign::OutputSpender; +use lightning::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; +use lightning::util::ser::Writeable as _; use bitcoin::script::Builder; use bitcoin::secp256k1::Secp256k1; @@ -781,3 +791,633 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); } + +#[test] +fn test_0_3_pending_forward_upgrade() { + // Tests upgrading from 0.3 with a pending HTLC forward. 
+ // Phase 1: Create state in 0.3 with a pending forward (HTLC locked on inbound edge of node B) + // Phase 2: Reload with local lightning and complete the payment + let (node_a_ser, node_b_ser, node_c_ser, mon_a_1_ser, mon_b_1_ser, mon_b_2_ser, mon_c_1_ser); + let (node_a_id, node_b_id, _node_c_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + + { + let chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + _node_c_id = nodes[2].node.get_our_node_id(); + let chan_id_a = lightning_0_3_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + + let chan_id_b = lightning_0_3_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + + // Ensure all nodes are at the same initial height. 
+ let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_0_3_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_0_3_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + let pay_params = router_0_3::PaymentParameters::from_node_id( + _node_c_id, + lightning_0_3_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_3::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_0_3_utils::get_route(&nodes[0], &route_params).unwrap(); + + let onion = RecipientOnionFields_0_3::secret_only(secret); + let id = PaymentId_0_3(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_3_utils::check_added_monitors(&nodes[0], 1); + let send_event = SendEvent_0_3::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[1], + &nodes[0], + &send_event.commitment_msg, + false, + false, + ); + // Process the pending update_add_htlcs but don't forward yet + nodes[1].node.test_process_pending_update_add_htlcs(); + let events = nodes[1].node.get_and_clear_pending_events(); + assert!(events.is_empty()); + + node_a_ser = nodes[0].node.encode(); + node_b_ser = nodes[1].node.encode(); + node_c_ser = nodes[2].node.encode(); + mon_a_1_ser = get_monitor_0_3!(nodes[0], chan_id_a).encode(); + mon_b_1_ser = get_monitor_0_3!(nodes[1], chan_id_a).encode(); + mon_b_2_ser = get_monitor_0_3!(nodes[1], chan_id_b).encode(); + mon_c_1_ser = get_monitor_0_3!(nodes[2], chan_id_b).encode(); + } + + // Create a dummy node to reload over with the 0.3 state + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(3); + + // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_local_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_local_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_local_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser[..]]; + reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; + reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + let c_mons = 
&[&mon_c_1_ser[..]]; + reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + + lightning_local_utils::reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + lightning_local_utils::reconnect_nodes(reconnect_b_c_args); + + // Now release the HTLC from node_b to node_c, to be claimed back to node_a + nodes[1].node.process_pending_htlc_forwards(); + + let pay_secret = PaymentSecret(payment_secret_bytes); + let pay_hash = PaymentHash(payment_hash_bytes); + let pay_preimage = PaymentPreimage(payment_preimage_bytes); + + lightning_local_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + lightning_local_utils::do_commitment_signed_dance( + &nodes[2], &nodes[1], commitment, false, false, + ); + + lightning_local_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000); + lightning_local_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); +} + +#[test] +fn test_0_2_pending_forward_upgrade_fails() { + // Tests that upgrading directly from 0.2 to local lightning fails with DecodeError::InvalidValue + // when there's a pending HTLC forward, because in 0.5 we started requiring new pending_htlc + // fields that started being written in 0.3. 
+ // XXX update this + use lightning::ln::channelmanager::ChannelManagerReadArgs; + use lightning::ln::msgs::DecodeError; + use lightning::util::ser::ReadableArgs; + + let (node_b_ser, mon_b_1_ser, mon_b_2_ser); + + { + let chanmon_cfgs = lightning_0_2_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_2_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_0_2_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_0_2_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_a = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + + let chan_id_b = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + + // Send HTLC from node_a to node_c, hold at node_b (don't call process_pending_htlc_forwards) + let node_a_id = nodes[0].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let (_preimage, hash, secret) = + lightning_0_2_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + + let pay_params = router_0_2::PaymentParameters::from_node_id( + node_c_id, + lightning_0_2_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_2::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_0_2_utils::get_route(&nodes[0], &route_params).unwrap(); + + let onion = RecipientOnionFields_0_2::secret_only(secret); + let id = PaymentId_0_2(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_2_utils::check_added_monitors(&nodes[0], 1); + let send_event = lightning_0_2_utils::SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + commitment_signed_dance_0_2!(nodes[1], nodes[0], send_event.commitment_msg, false); + + // Process the pending HTLC to create a pending forward (but don't actually forward it) + nodes[1].node.test_process_pending_update_add_htlcs(); + + // Serialize node_b with the pending forward + node_b_ser = nodes[1].node.encode(); + mon_b_1_ser = get_monitor_0_2!(nodes[1], chan_id_a).encode(); + mon_b_2_ser = get_monitor_0_2!(nodes[1], chan_id_b).encode(); + } + + // Try to reload using local lightning - this should fail with DecodeError::InvalidValue + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(1); + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_local_utils::create_node_cfgs(1, &chanmon_cfgs); + let node_chanmgrs = lightning_local_utils::create_node_chanmgrs(1, &node_cfgs, &[None]); + let nodes = lightning_local_utils::create_network(1, &node_cfgs, &node_chanmgrs); + + // Read the monitors first + use lightning::util::test_channel_signer::TestChannelSigner; + let mut monitor_read_1 = &mon_b_1_ser[..]; + let (_, monitor_1) = <( + bitcoin::BlockHash, + lightning::chain::channelmonitor::ChannelMonitor, + )>::read( + &mut monitor_read_1, (nodes[0].keys_manager, nodes[0].keys_manager) + ) + .unwrap(); + + let mut monitor_read_2 = &mon_b_2_ser[..]; + let (_, monitor_2) = <( + bitcoin::BlockHash, + lightning::chain::channelmonitor::ChannelMonitor, + )>::read( + &mut monitor_read_2, (nodes[0].keys_manager, nodes[0].keys_manager) + ) + .unwrap(); + + // Try to read the ChannelManager - this should fail + use lightning::util::hash_tables::new_hash_map; + let mut channel_monitors = new_hash_map(); + channel_monitors.insert(monitor_1.channel_id(), &monitor_1); + channel_monitors.insert(monitor_2.channel_id(), &monitor_2); + + let config = lightning_local_utils::test_default_channel_config(); + let mut node_read = &node_b_ser[..]; + let 
read_args = ChannelManagerReadArgs { + config, + entropy_source: nodes[0].keys_manager, + node_signer: nodes[0].keys_manager, + signer_provider: nodes[0].keys_manager, + fee_estimator: nodes[0].fee_estimator, + router: nodes[0].router, + message_router: nodes[0].message_router, + chain_monitor: nodes[0].chain_monitor, + tx_broadcaster: nodes[0].tx_broadcaster, + logger: nodes[0].logger, + channel_monitors, + }; + + let result = + <(bitcoin::BlockHash, lightning::ln::functional_test_utils::TestChannelManager)>::read( + &mut node_read, + read_args, + ); + + assert!(matches!(result, Err(DecodeError::InvalidValue))); +} + +#[test] +fn test_0_2_to_0_3_to_local_pending_forward_upgrade() { + // Tests that upgrading from 0.2 to 0.3 to local with a pending HTLC forward works. + // The key is that 0.3 can read 0.2's format and re-serialize it in the new format + // that local can then read. + let (node_a_ser_0_3, node_b_ser_0_3, node_c_ser_0_3); + let (mon_a_1_ser_0_3, mon_b_1_ser_0_3, mon_b_2_ser_0_3, mon_c_1_ser_0_3); + let (node_a_id, node_b_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + let (chan_id_a_bytes, chan_id_b_bytes); + + // Phase 1: Create network on 0.2 with pending HTLC forward + let (node_a_ser_0_2, node_b_ser_0_2, node_c_ser_0_2); + let (mon_a_1_ser_0_2, mon_b_1_ser_0_2, mon_b_2_ser_0_2, mon_c_1_ser_0_2); + { + let chanmon_cfgs = lightning_0_2_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_2_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_0_2_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_0_2_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + let chan_id_a = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + chan_id_a_bytes = 
chan_id_a.0; + + let chan_id_b = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + chan_id_b_bytes = chan_id_b.0; + + // Ensure all nodes are at the same initial height. + let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_0_2_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_0_2_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + let pay_params = router_0_2::PaymentParameters::from_node_id( + node_c_id, + lightning_0_2_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_2::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_0_2_utils::get_route(&nodes[0], &route_params).unwrap(); + + let onion = RecipientOnionFields_0_2::secret_only(secret); + let id = PaymentId_0_2(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_2_utils::check_added_monitors(&nodes[0], 1); + let send_event = lightning_0_2_utils::SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + commitment_signed_dance_0_2!(nodes[1], nodes[0], send_event.commitment_msg, false); + // Process the pending update_add_htlcs to create pending forward state + nodes[1].node.test_process_pending_update_add_htlcs(); + let events = nodes[1].node.get_and_clear_pending_events(); + assert!(events.is_empty()); + + node_a_ser_0_2 = nodes[0].node.encode(); + node_b_ser_0_2 = nodes[1].node.encode(); + node_c_ser_0_2 = nodes[2].node.encode(); + mon_a_1_ser_0_2 = get_monitor_0_2!(nodes[0], chan_id_a).encode(); + mon_b_1_ser_0_2 = get_monitor_0_2!(nodes[1], chan_id_a).encode(); + mon_b_2_ser_0_2 = get_monitor_0_2!(nodes[1], chan_id_b).encode(); + mon_c_1_ser_0_2 = get_monitor_0_2!(nodes[2], chan_id_b).encode(); + } + + // Phase 2: Reload on 0.3, forward the HTLC (but don't claim), and re-serialize + { + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_0_3_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser_0_2[..]]; + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser_0_2, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + let b_mons = &[&mon_b_1_ser_0_2[..], &mon_b_2_ser_0_2[..]]; + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser_0_2, + b_mons, + persister_b, + chain_mon_b, + node_b + ); + let c_mons = &[&mon_c_1_ser_0_2[..]]; 
+ reload_node_0_3!( + nodes[2], + config, + &node_c_ser_0_2, + c_mons, + persister_c, + chain_mon_c, + node_c + ); + + // Reconnect nodes after reload + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); + + // Forward the HTLC from node_b to node_c (but don't claim yet) + nodes[1].node.process_pending_htlc_forwards(); + + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[2], &nodes[1], commitment, false, false, + ); + + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + + // Re-serialize in 0.3 format - now the HTLC has been forwarded (not just pending) + node_a_ser_0_3 = nodes[0].node.encode(); + node_b_ser_0_3 = nodes[1].node.encode(); + node_c_ser_0_3 = nodes[2].node.encode(); + + let chan_id_a = ChannelId_0_3(chan_id_a_bytes); + let chan_id_b = ChannelId_0_3(chan_id_b_bytes); + + mon_a_1_ser_0_3 = get_monitor_0_3!(nodes[0], chan_id_a).encode(); + mon_b_1_ser_0_3 = get_monitor_0_3!(nodes[1], chan_id_a).encode(); + mon_b_2_ser_0_3 = get_monitor_0_3!(nodes[1], chan_id_b).encode(); + mon_c_1_ser_0_3 = get_monitor_0_3!(nodes[2], chan_id_b).encode(); + } + + // Phase 3: Reload on local and claim the payment (HTLC already forwarded in Phase 2) + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(3); + 
chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_local_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_local_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_local_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser_0_3[..]]; + reload_node!( + nodes[0], + config.clone(), + &node_a_ser_0_3, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + let b_mons = &[&mon_b_1_ser_0_3[..], &mon_b_2_ser_0_3[..]]; + reload_node!( + nodes[1], + config.clone(), + &node_b_ser_0_3, + b_mons, + persister_b, + chain_mon_b, + node_b + ); + let c_mons = &[&mon_c_1_ser_0_3[..]]; + reload_node!(nodes[2], config, &node_c_ser_0_3, c_mons, persister_c, chain_mon_c, node_c); + + lightning_local_utils::reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + lightning_local_utils::reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2])); + + // The HTLC was already forwarded and claimable event generated in Phase 2. + // After reload, just claim the payment - the HTLC is already locked on node C. + let pay_preimage = PaymentPreimage(payment_preimage_bytes); + lightning_local_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); +} + +#[test] +fn test_local_to_0_3_pending_forward_downgrade() { + // Tests downgrading from local lightning to 0.3 with a pending HTLC forward works. 
+ // Phase 1: Create state in local lightning with a pending forward + // Phase 2: Reload with 0.3 and complete the payment + use lightning::ln::types::ChannelId; + + let (node_a_ser, node_b_ser, node_c_ser, mon_a_1_ser, mon_b_1_ser, mon_b_2_ser, mon_c_1_ser); + let (node_a_id, node_b_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + let (chan_id_a_bytes, chan_id_b_bytes); + + // Phase 1: Create network on local lightning with pending HTLC forward + { + let chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_local_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_local_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + let chan_id_a = lightning_local_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + chan_id_a_bytes = chan_id_a.0; + + let chan_id_b = lightning_local_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + chan_id_b_bytes = chan_id_b.0; + + // Ensure all nodes are at the same initial height. 
+ let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_local_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_local_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + use lightning::routing::router as local_router; + let pay_params = local_router::PaymentParameters::from_node_id( + node_c_id, + lightning_local_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + local_router::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_local_utils::get_route(&nodes[0], &route_params).unwrap(); + + use lightning::ln::channelmanager::PaymentId as LocalPaymentId; + use lightning::ln::outbound_payment::RecipientOnionFields as LocalRecipientOnionFields; + let onion = LocalRecipientOnionFields::secret_only(secret); + let id = LocalPaymentId(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_local_utils::check_added_monitors(&nodes[0], 1); + let send_event = SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + lightning_local_utils::do_commitment_signed_dance( + &nodes[1], + &nodes[0], + &send_event.commitment_msg, + false, + false, + ); + node_a_ser = nodes[0].node.encode(); + node_b_ser = nodes[1].node.encode(); + node_c_ser = nodes[2].node.encode(); + mon_a_1_ser = lightning::get_monitor!(nodes[0], ChannelId(chan_id_a_bytes)).encode(); + mon_b_1_ser = lightning::get_monitor!(nodes[1], ChannelId(chan_id_a_bytes)).encode(); + mon_b_2_ser = lightning::get_monitor!(nodes[1], ChannelId(chan_id_b_bytes)).encode(); + mon_c_1_ser = lightning::get_monitor!(nodes[2], ChannelId(chan_id_b_bytes)).encode(); + } + + // Phase 2: Reload on 0.3 and complete the payment + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_0_3_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser[..]]; + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser, + b_mons, + persister_b, + chain_mon_b, + node_b + ); + let c_mons = &[&mon_c_1_ser[..]]; + reload_node_0_3!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], 
&nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); + + // Now release the HTLC from node_b to node_c, to be claimed back to node_a + nodes[1].node.process_pending_htlc_forwards(); + + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_preimage = PaymentPreimage_0_3(payment_preimage_bytes); + + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + lightning_0_3_utils::do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); +} diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 808e776fed6..9219b12984d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -16628,14 +16628,15 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures { features } -const SERIALIZATION_VERSION: u8 = 1; +// Bumped to 5 in 0.5 because we stop reading/writing legacy ChannelManager pending HTLC maps and +// instead reconstruct them from `Channel{Monitor}` data as part of removing the requirement to +// regularly persist the `ChannelManager`. +const SERIALIZATION_VERSION: u8 = 5; const MIN_SERIALIZATION_VERSION: u8 = 1; -// We plan to start writing this version in 0.5. 
-// -// LDK 0.5+ will reconstruct the set of pending HTLCs from `Channel{Monitor}` data that started +// LDK 0.5+ reconstructs the set of pending HTLCs from `Channel{Monitor}` data that started // being written in 0.3, ignoring legacy `ChannelManager` HTLC maps on read and not writing them. -// LDK 0.5+ will automatically fail to read if the pending HTLC set cannot be reconstructed, i.e. +// LDK 0.5+ automatically fails to read if the pending HTLC set cannot be reconstructed, i.e. // if we were last written with pending HTLCs on 0.2- or if the new 0.3+ fields are missing. // // If 0.3 or 0.4 reads this manager version, it knows that the legacy maps were not written and @@ -17108,24 +17109,6 @@ impl< } } - { - let forward_htlcs = self.forward_htlcs.lock().unwrap(); - (forward_htlcs.len() as u64).write(writer)?; - for (short_channel_id, pending_forwards) in forward_htlcs.iter() { - short_channel_id.write(writer)?; - (pending_forwards.len() as u64).write(writer)?; - for forward in pending_forwards { - forward.write(writer)?; - } - } - } - - let mut decode_update_add_htlcs_opt = None; - let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap(); - if !decode_update_add_htlcs.is_empty() { - decode_update_add_htlcs_opt = Some(decode_update_add_htlcs); - } - let claimable_payments = self.claimable_payments.lock().unwrap(); let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap(); @@ -17172,8 +17155,6 @@ impl< } } - let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); - // Since some FundingNegotiation variants are not persisted, any splice in such state must // be failed upon reload. 
However, as the necessary information for the SpliceFailed event // is not persisted, the event itself needs to be persisted even though it hasn't been @@ -17269,11 +17250,6 @@ impl< } } - let mut pending_intercepted_htlcs = None; - if our_pending_intercepts.len() != 0 { - pending_intercepted_htlcs = Some(our_pending_intercepts); - } - let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments); if pending_claiming_payments.as_ref().unwrap().is_empty() { // LDK versions prior to 0.0.113 do not know how to read the pending claimed payments @@ -17296,7 +17272,7 @@ impl< write_tlv_fields!(writer, { (1, pending_outbound_payments_no_retry, required), - (2, pending_intercepted_htlcs, option), + // 2 was previously used for the pending_intercepted_htlcs map. (3, pending_outbound_payments, required), (4, pending_claiming_payments, option), (5, self.our_network_pubkey, required), @@ -17307,7 +17283,7 @@ impl< (10, legacy_in_flight_monitor_updates, option), (11, self.probing_cookie_secret, required), (13, htlc_onion_fields, optional_vec), - (14, decode_update_add_htlcs_opt, option), + // 14 was previously used for the decode_update_add_htlcs map. (15, self.inbound_payment_id_secret, required), (17, in_flight_monitor_updates, option), (19, peer_storage_dir, optional_vec), @@ -17387,12 +17363,6 @@ pub(super) struct ChannelManagerData { in_flight_monitor_updates: HashMap<(PublicKey, ChannelId), Vec>, peer_storage_dir: Vec<(PublicKey, Vec)>, async_receive_offer_cache: AsyncReceiveOfferCache, - // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of - // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from - // `Channel{Monitor}` data. - forward_htlcs_legacy: HashMap>, - pending_intercepted_htlcs_legacy: HashMap, - decode_update_add_htlcs_legacy: HashMap>, // The `ChannelManager` version that was written. 
version: u8, } @@ -17440,26 +17410,24 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> channels.push(channel); } - let forward_htlcs_legacy: HashMap> = - if version < RECONSTRUCT_HTLCS_FROM_CHANS_VERSION { - let forward_htlcs_count: u64 = Readable::read(reader)?; - let mut fwds = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); - for _ in 0..forward_htlcs_count { - let short_channel_id = Readable::read(reader)?; - let pending_forwards_count: u64 = Readable::read(reader)?; - let mut pending_forwards = Vec::with_capacity(cmp::min( - pending_forwards_count as usize, - MAX_ALLOC_SIZE / mem::size_of::(), - )); - for _ in 0..pending_forwards_count { - pending_forwards.push(Readable::read(reader)?); - } - fwds.insert(short_channel_id, pending_forwards); + if version < RECONSTRUCT_HTLCS_FROM_CHANS_VERSION { + let forward_htlcs_count: u64 = Readable::read(reader)?; + // This map is written if version = 1 (LDK versions 0.4-) only. + let mut _forward_htlcs_legacy: HashMap> = + hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); + for _ in 0..forward_htlcs_count { + let short_channel_id = Readable::read(reader)?; + let pending_forwards_count: u64 = Readable::read(reader)?; + let mut pending_forwards = Vec::with_capacity(cmp::min( + pending_forwards_count as usize, + MAX_ALLOC_SIZE / mem::size_of::(), + )); + for _ in 0..pending_forwards_count { + pending_forwards.push(Readable::read(reader)?); } - fwds - } else { - new_hash_map() - }; + _forward_htlcs_legacy.insert(short_channel_id, pending_forwards); + } + } let claimable_htlcs_count: u64 = Readable::read(reader)?; let mut claimable_htlcs_list = @@ -17545,9 +17513,13 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> }; } - let mut pending_intercepted_htlcs_legacy: Option> = - None; - let mut decode_update_add_htlcs_legacy: Option>> = + // Read for compatibility with 0.4- but no longer used in 0.5+, instead these maps are + // 
reconstructed during runtime from decode_update_add_htlcs, via the next call to + // process_pending_htlc_forwards. + let mut _pending_intercepted_htlcs_legacy: Option< + HashMap, + > = None; + let mut _decode_update_add_htlcs_legacy: Option>> = None; // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. let mut pending_outbound_payments_no_retry: Option>> = @@ -17575,7 +17547,7 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new(); read_tlv_fields!(reader, { (1, pending_outbound_payments_no_retry, option), - (2, pending_intercepted_htlcs_legacy, option), + (2, _pending_intercepted_htlcs_legacy, (legacy, HashMap, |_| None)), (3, pending_outbound_payments, option), (4, pending_claiming_payments, option), (5, received_network_pubkey, option), @@ -17586,7 +17558,7 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> (10, legacy_in_flight_monitor_updates, option), (11, probing_cookie_secret, option), (13, claimable_htlc_onion_fields, optional_vec), - (14, decode_update_add_htlcs_legacy, option), + (14, _decode_update_add_htlcs_legacy, (legacy, HashMap>, |_| None)), (15, inbound_payment_id_secret, option), (17, in_flight_monitor_updates, option), (19, peer_storage_dir, optional_vec), @@ -17719,13 +17691,10 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> best_block_height, best_block_hash, channels, - forward_htlcs_legacy, claimable_payments, peer_init_features, pending_events_read, highest_seen_timestamp, - pending_intercepted_htlcs_legacy: pending_intercepted_htlcs_legacy - .unwrap_or_else(new_hash_map), pending_outbound_payments, pending_claiming_payments: pending_claiming_payments.unwrap_or_else(new_hash_map), received_network_pubkey, @@ -17733,8 +17702,6 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> .unwrap_or_else(Vec::new), 
fake_scid_rand_bytes, probing_cookie_secret, - decode_update_add_htlcs_legacy: decode_update_add_htlcs_legacy - .unwrap_or_else(new_hash_map), inbound_payment_id_secret, in_flight_monitor_updates: in_flight_monitor_updates.unwrap_or_default(), peer_storage_dir: peer_storage_dir.unwrap_or_default(), @@ -18025,19 +17992,16 @@ impl< best_block_height, best_block_hash, channels, - mut forward_htlcs_legacy, claimable_payments, peer_init_features, mut pending_events_read, highest_seen_timestamp, - mut pending_intercepted_htlcs_legacy, pending_outbound_payments, pending_claiming_payments, received_network_pubkey, monitor_update_blocked_actions_per_peer, mut fake_scid_rand_bytes, mut probing_cookie_secret, - mut decode_update_add_htlcs_legacy, mut inbound_payment_id_secret, mut in_flight_monitor_updates, peer_storage_dir, @@ -18588,40 +18552,6 @@ impl< pending_background_events.push(new_event); } - // In LDK 0.2 and below, the `ChannelManager` would track all payments and HTLCs internally and - // persist that state, relying on it being up-to-date on restart. Newer versions are moving - // towards reducing this reliance on regular persistence of the `ChannelManager`, and instead - // reconstruct HTLC/payment state based on `Channel{Monitor}` data if - // `reconstruct_manager_from_monitors` is set below. Currently we set in tests randomly to - // ensure the legacy codepaths also have test coverage. 
- #[cfg(not(test))] - let reconstruct_manager_from_monitors = _version >= RECONSTRUCT_HTLCS_FROM_CHANS_VERSION; - #[cfg(test)] - let reconstruct_manager_from_monitors = - args.reconstruct_manager_from_monitors.unwrap_or_else(|| { - use core::hash::{BuildHasher, Hasher}; - - match std::env::var("LDK_TEST_REBUILD_MGR_FROM_MONITORS") { - Ok(val) => match val.as_str() { - "1" => true, - "0" => false, - _ => panic!( - "LDK_TEST_REBUILD_MGR_FROM_MONITORS must be 0 or 1, got: {}", - val - ), - }, - Err(_) => { - let rand_val = - std::collections::hash_map::RandomState::new().build_hasher().finish(); - if rand_val % 2 == 0 { - true - } else { - false - } - }, - } - }); - // If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we // should ensure we try them again on the inbound edge. We put them here and do so after we // have a fully-constructed `ChannelManager` at the end. @@ -18651,6 +18581,7 @@ impl< } } }; + { // If we're tracking pending payments, ensure we haven't lost any by looking at the // ChannelMonitor data for any channels for which we do not have authorative state @@ -18670,36 +18601,31 @@ impl< let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); - if reconstruct_manager_from_monitors { - if let Some(chan) = peer_state.channel_by_id.get(channel_id) { - if let Some(funded_chan) = chan.as_funded() { - let scid_alias = funded_chan.context.outbound_scid_alias(); - let inbound_committed_update_adds = - funded_chan.inbound_committed_unresolved_htlcs(); - for (payment_hash, htlc) in inbound_committed_update_adds { - match htlc { - InboundUpdateAdd::WithOnion { update_add_htlc } => { - // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized - // `Channel` as part of removing the requirement to regularly persist the - // `ChannelManager`. 
- decode_update_add_htlcs - .entry(scid_alias) - .or_insert_with(Vec::new) - .push(update_add_htlc); - }, - InboundUpdateAdd::Forwarded { - hop_data, - outbound_amt_msat, - } => { - already_forwarded_htlcs - .entry((hop_data.channel_id, payment_hash)) - .or_insert_with(Vec::new) - .push((hop_data, outbound_amt_msat)); - }, - InboundUpdateAdd::Legacy => { - return Err(DecodeError::InvalidValue) - }, - } + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(funded_chan) = chan.as_funded() { + let scid_alias = funded_chan.context.outbound_scid_alias(); + let inbound_committed_update_adds = + funded_chan.inbound_committed_unresolved_htlcs(); + for (payment_hash, htlc) in inbound_committed_update_adds { + match htlc { + InboundUpdateAdd::WithOnion { update_add_htlc } => { + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel` as part of removing the requirement to regularly persist the + // `ChannelManager`. + decode_update_add_htlcs + .entry(scid_alias) + .or_insert_with(Vec::new) + .push(update_add_htlc); + }, + InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat } => { + already_forwarded_htlcs + .entry((hop_data.channel_id, payment_hash)) + .or_insert_with(Vec::new) + .push((hop_data, outbound_amt_msat)); + }, + InboundUpdateAdd::Legacy => { + return Err(DecodeError::InvalidValue) + }, } } } @@ -18743,7 +18669,7 @@ impl< let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); - if reconstruct_manager_from_monitors && !is_channel_closed { + if !is_channel_closed { if let Some(chan) = peer_state.channel_by_id.get(channel_id) { if let Some(funded_chan) = chan.as_funded() { for (payment_hash, prev_hop) in funded_chan.outbound_htlc_forwards() @@ -18777,65 +18703,20 @@ impl< let htlc_id = SentHTLCId::from_source(&htlc_source); match htlc_source { HTLCSource::PreviousHopData(prev_hop_data) => 
{ - let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { - info.prev_funding_outpoint == prev_hop_data.outpoint - && info.prev_htlc_id == prev_hop_data.htlc_id - }; - - // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed - // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from - // those added HTLCs if they were already forwarded to the outbound edge. Otherwise, - // we'll double-forward. - if reconstruct_manager_from_monitors { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - &prev_hop_data, - "HTLC already forwarded to the outbound edge", - &&logger, - ); - prune_forwarded_htlc( - &mut already_forwarded_htlcs, - &prev_hop_data, - &htlc.payment_hash, - ); - } - - // The ChannelMonitor is now responsible for this HTLC's - // failure/success and will let us know what its outcome is. If we - // still have an entry for this HTLC in `forward_htlcs_legacy`, - // `pending_intercepted_htlcs_legacy`, or - // `decode_update_add_htlcs_legacy`, we were apparently not persisted - // after the monitor was when forwarding the payment. + // We always add all inbound committed HTLCs to `decode_update_add_htlcs` in the + // above loop, but we need to prune from those added HTLCs if they were already + // forwarded to the outbound edge. Otherwise, we'll double-forward. 
dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs_legacy, + &mut decode_update_add_htlcs, &prev_hop_data, - "HTLC was forwarded to the closed channel", + "HTLC already forwarded to the outbound edge", &&logger, ); - forward_htlcs_legacy.retain(|_, forwards| { - forwards.retain(|forward| { - if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - false - } else { true } - } else { true } - }); - !forwards.is_empty() - }); - pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - pending_events_read.retain(|(event, _)| { - if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { - intercepted_id != ev_id - } else { true } - }); - false - } else { true } - }); + prune_forwarded_htlc( + &mut already_forwarded_htlcs, + &prev_hop_data, + &htlc.payment_hash, + ); }, HTLCSource::OutboundRoute { payment_id, @@ -19242,55 +19123,41 @@ impl< } } - if reconstruct_manager_from_monitors { - // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. - // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. - for (src, payment_hash, _, _, _, _) in failed_htlcs.iter() { - if let HTLCSource::PreviousHopData(prev_hop_data) = src { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was failed backwards during manager read", - &args.logger, - ); - prune_forwarded_htlc(&mut already_forwarded_htlcs, prev_hop_data, payment_hash); - } + // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. 
+ // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. + for (src, payment_hash, _, _, _, _) in failed_htlcs.iter() { + if let HTLCSource::PreviousHopData(prev_hop_data) = src { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was failed backwards during manager read", + &args.logger, + ); + prune_forwarded_htlc(&mut already_forwarded_htlcs, prev_hop_data, payment_hash); } + } - // See above comment on `failed_htlcs`. - for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { - for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was already decoded and marked as a claimable payment", - &args.logger, - ); - } + // See above comment on `failed_htlcs`. + for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, + ); } } - let (decode_update_add_htlcs, forward_htlcs, pending_intercepted_htlcs) = - if reconstruct_manager_from_monitors { - (decode_update_add_htlcs, new_hash_map(), new_hash_map()) - } else { - ( - decode_update_add_htlcs_legacy, - forward_htlcs_legacy, - pending_intercepted_htlcs_legacy, - ) - }; - - // If we have a pending intercept HTLC present but no corresponding event, add that now rather - // than relying on the user having persisted the event prior to shutdown. - for (id, fwd) in pending_intercepted_htlcs.iter() { - if !pending_events_read.iter().any( - |(ev, _)| matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id), - ) { - match create_htlc_intercepted_event(*id, fwd) { - Ok(ev) => pending_events_read.push_back((ev, None)), - Err(()) => debug_assert!(false), - } + // See above comment on `failed_htlcs`. 
+ for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, + ); } } @@ -19346,9 +19213,9 @@ impl< inbound_payment_key: expanded_inbound_key, pending_outbound_payments: pending_outbounds, - pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs), + pending_intercepted_htlcs: Mutex::new(new_hash_map()), - forward_htlcs: Mutex::new(forward_htlcs), + forward_htlcs: Mutex::new(new_hash_map()), decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, diff --git a/pending_changelog/upgrade-0.2-pending-htlc.txt b/pending_changelog/upgrade-0.2-pending-htlc.txt new file mode 100644 index 00000000000..6ca11755ee9 --- /dev/null +++ b/pending_changelog/upgrade-0.2-pending-htlc.txt @@ -0,0 +1,9 @@ +Backwards Compatibility +======================= + + * Upgrading directly from 0.2 to 0.5 with pending HTLC forwards is not supported. + If you have pending HTLC forwards when upgrading from 0.2, you must first upgrade + to 0.3, forward the HTLCs (completing at least the outbound commitment), then + upgrade to 0.5. Attempting to upgrade directly from 0.2 to 0.5 with pending HTLC + forwards will result in a `DecodeError::InvalidValue` when reading the + `ChannelManager`. From 0127fb77793bc9136c6f9c999de328f2883ce6cd Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 3 Feb 2026 17:08:15 -0500 Subject: [PATCH 15/23] Remove now-unused reconstruct_manager test arg See previous commit, but now in prod reconstruct_manager_from_monitors will always be set, so there's no need to have an option to set it in tests. 
--- lightning/src/ln/channelmanager.rs | 11 -------- lightning/src/ln/functional_test_utils.rs | 32 +++-------------------- lightning/src/ln/reload_tests.rs | 8 ++---- 3 files changed, 6 insertions(+), 45 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 9219b12984d..37ccbaae6a6 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17818,15 +17818,6 @@ pub struct ChannelManagerReadArgs< /// /// This is not exported to bindings users because we have no HashMap bindings pub channel_monitors: HashMap>, - - /// Whether the `ChannelManager` should attempt to reconstruct its set of pending HTLCs from - /// `Channel{Monitor}` data rather than its own persisted maps, which is planned to become - /// the default behavior in upcoming versions. - /// - /// If `None`, whether we reconstruct or use the legacy maps will be decided randomly during - /// `ChannelManager::from_channel_manager_data`. - #[cfg(test)] - pub reconstruct_manager_from_monitors: Option, } impl< @@ -17864,8 +17855,6 @@ impl< channel_monitors: hash_map_from_iter( channel_monitors.drain(..).map(|monitor| (monitor.channel_id(), monitor)), ), - #[cfg(test)] - reconstruct_manager_from_monitors: None, } } } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e5603bfddc6..8508322b63a 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -911,8 +911,6 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { tx_broadcaster: &broadcaster, logger: &self.logger, channel_monitors, - #[cfg(test)] - reconstruct_manager_from_monitors: None, }, ) .unwrap(); @@ -1311,7 +1309,7 @@ fn check_claimed_htlcs_match_route<'a, 'b, 'c>( pub fn _reload_node<'a, 'b, 'c>( node: &'a Node<'a, 'b, 'c>, config: UserConfig, chanman_encoded: &[u8], - monitors_encoded: &[&[u8]], _reconstruct_manager_from_monitors: Option, + monitors_encoded: &[&[u8]], ) -> 
TestChannelManager<'b, 'c> { let mut monitors_read = Vec::with_capacity(monitors_encoded.len()); for encoded in monitors_encoded { @@ -1345,8 +1343,6 @@ pub fn _reload_node<'a, 'b, 'c>( tx_broadcaster: node.tx_broadcaster, logger: node.logger, channel_monitors, - #[cfg(test)] - reconstruct_manager_from_monitors: _reconstruct_manager_from_monitors, }, ) .unwrap() @@ -1368,7 +1364,7 @@ pub fn _reload_node<'a, 'b, 'c>( #[macro_export] macro_rules! _reload_node_inner { ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: - ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr + ident, $new_chain_monitor: ident, $new_channelmanager: ident ) => { let chanman_encoded = $chanman_encoded; @@ -1388,7 +1384,6 @@ macro_rules! _reload_node_inner { $new_config, &chanman_encoded, $monitors_encoded, - $reconstruct_pending_htlcs, ); $node.node = &$new_channelmanager; $node.onion_messenger.set_offers_handler(&$new_channelmanager); @@ -1408,8 +1403,7 @@ macro_rules! reload_node { $monitors_encoded, $persister, $new_chain_monitor, - $new_channelmanager, - None + $new_channelmanager ); }; // Reload the node with the new provided config @@ -1421,25 +1415,7 @@ macro_rules! reload_node { $monitors_encoded, $persister, $new_chain_monitor, - $new_channelmanager, - None - ); - }; - // Reload the node and have the `ChannelManager` use new codepaths that reconstruct its set of - // pending HTLCs from `Channel{Monitor}` data. 
- ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: - ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr - ) => { - let config = $node.node.get_current_config(); - $crate::_reload_node_inner!( - $node, - config, - $chanman_encoded, - $monitors_encoded, - $persister, - $new_chain_monitor, - $new_channelmanager, - $reconstruct_pending_htlcs + $new_channelmanager ); }; } diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index fa0c77bc9d4..1ee63fd1745 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -438,7 +438,6 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { tx_broadcaster: nodes[0].tx_broadcaster, logger: &logger, channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(), - reconstruct_manager_from_monitors: None, }) { } else { panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return"); }; @@ -457,7 +456,6 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { tx_broadcaster: nodes[0].tx_broadcaster, logger: &logger, channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(), - reconstruct_manager_from_monitors: None, }).unwrap(); nodes_0_deserialized = nodes_0_deserialized_tmp; assert!(nodes_0_read.is_empty()); @@ -1948,8 +1946,7 @@ fn test_reload_node_with_preimage_in_monitor_claims_htlc() { &[&mon_0_1_serialized, &mon_1_2_serialized], persister, new_chain_monitor, - nodes_1_deserialized, - Some(true) + nodes_1_deserialized ); // When the claim is reconstructed during reload, a PaymentForwarded event is generated. 
@@ -2057,8 +2054,7 @@ fn test_reload_node_without_preimage_fails_htlc() { &[&mon_0_1_serialized, &mon_1_2_serialized], persister, new_chain_monitor, - nodes_1_deserialized, - Some(true) + nodes_1_deserialized ); // After reload, nodes[1] should have generated an HTLCHandlingFailed event. From 71fd3991022e88dd1ca83eae71191f099b63ac4b Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 6 Jan 2026 13:29:12 -0500 Subject: [PATCH 16/23] Pass logger into FundedChannel::read Used in the next commit when we log on some read errors. --- lightning/src/ln/channel.rs | 16 +++++++++------- lightning/src/ln/channelmanager.rs | 1 + 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index e783f483063..802babc0948 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -15105,13 +15105,13 @@ impl Writeable for FundedChannel { } } -impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> - ReadableArgs<(&'a ES, &'b SP, &'c ChannelTypeFeatures)> for FundedChannel +impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> + ReadableArgs<(&'a ES, &'b SP, &'c L, &'d ChannelTypeFeatures)> for FundedChannel { fn read( - reader: &mut R, args: (&'a ES, &'b SP, &'c ChannelTypeFeatures), + reader: &mut R, args: (&'a ES, &'b SP, &'c L, &'d ChannelTypeFeatures), ) -> Result { - let (entropy_source, signer_provider, our_supported_features) = args; + let (entropy_source, signer_provider, logger, our_supported_features) = args; let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); if ver <= 2 { return Err(DecodeError::UnknownVersion); @@ -16859,9 +16859,11 @@ mod tests { let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64); let features = channelmanager::provided_channel_type_features(&config); - let decoded_chan = - FundedChannel::read(&mut reader, (&&keys_provider, &&keys_provider, &features)) - .unwrap(); + let decoded_chan = 
FundedChannel::read( + &mut reader, + (&&keys_provider, &&keys_provider, &&logger, &features), + ) + .unwrap(); assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs); assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates); } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 37ccbaae6a6..e2e29e69825 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17404,6 +17404,7 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> ( args.entropy_source, args.signer_provider, + args.logger, &provided_channel_type_features(&args.config), ), )?; From 27c450e1eb5d218f261fab417a76397b5dceb5df Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 6 Jan 2026 13:47:27 -0500 Subject: [PATCH 17/23] Remove deprecated InboundHTLCResolution::Resolved variant In 0.3, we added a new field to Channel as part of adding support for reconstructing the ChannelManager's pending forward HTLC set from Channel{Monitor} data, moving towards removing the requirement of regularly persisting the ChannelManager. This new field cannot be set for HTLCs received on LDK 0.1 or earlier that are using this legacy ::Resolved variant. In this version, we fully rely on the new Channel field being set and cease handling legacy HTLCs entirely, and thus must fully deprecate the ::Resolved variant and require all inbound HTLCs be in state InboundHTLCResolution::Pending, which we do here. Further cleanups are coming in upcoming commits.
--- lightning/src/ln/channel.rs | 96 +++++++++---------------------------- 1 file changed, 22 insertions(+), 74 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 802babc0948..9884ac6e373 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -50,10 +50,9 @@ use crate::ln::channel_state::{ OutboundHTLCDetails, OutboundHTLCStateDetails, }; use crate::ln::channelmanager::{ - self, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, HTLCPreviousHopData, - HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, PendingHTLCStatus, - RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, - MIN_CLTV_EXPIRY_DELTA, + self, ChannelReadyOrder, FundingConfirmedMessage, HTLCPreviousHopData, HTLCSource, + OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, RAACommitmentOrder, SentHTLCId, + BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::interactivetxs::{ @@ -149,12 +148,6 @@ enum InboundHTLCRemovalReason { #[cfg_attr(test, derive(Debug))] #[derive(Clone)] enum InboundHTLCResolution { - /// Resolved implies the action we must take with the inbound HTLC has already been determined, - /// i.e., we already know whether it must be failed back or forwarded. - // - // TODO: Once this variant is removed, we should also clean up - // [`MonitorRestoreUpdates::accepted_htlcs`] as the path will be unreachable. - Resolved { pending_htlc_status: PendingHTLCStatus }, /// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed /// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both /// nodes for the state update in which it was proposed. 
@@ -162,9 +155,7 @@ enum InboundHTLCResolution { } impl_writeable_tlv_based_enum!(InboundHTLCResolution, - (0, Resolved) => { - (0, pending_htlc_status, required), - }, + // 0 used to be used for InboundHTLCResolution::Resolved in 0.4 and below. (2, Pending) => { (0, update_add_htlc, required), }, @@ -301,7 +292,6 @@ impl InboundHTLCState { InboundHTLCResolution::Pending { update_add_htlc } => { update_add_htlc.hold_htlc.is_some() }, - InboundHTLCResolution::Resolved { .. } => false, }, InboundHTLCState::Committed { .. } | InboundHTLCState::LocalRemoved(_) => false, } @@ -8870,12 +8860,9 @@ where } log_trace!(logger, "Updating HTLCs on receipt of RAA..."); - let mut to_forward_infos = Vec::new(); let mut pending_update_adds = Vec::new(); let mut revoked_htlcs = Vec::new(); let mut finalized_claimed_htlcs = Vec::new(); - let mut update_fail_htlcs = Vec::new(); - let mut update_fail_malformed_htlcs = Vec::new(); let mut static_invoices = Vec::new(); let mut require_commitment = false; let mut value_to_self_msat_diff: i64 = 0; @@ -8951,42 +8938,6 @@ where state { match resolution { - InboundHTLCResolution::Resolved { pending_htlc_status } => { - match pending_htlc_status { - PendingHTLCStatus::Fail(fail_msg) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash); - require_commitment = true; - match fail_msg { - HTLCFailureMsg::Relay(msg) => { - htlc.state = InboundHTLCState::LocalRemoved( - InboundHTLCRemovalReason::FailRelay( - msg.clone().into(), - ), - ); - update_fail_htlcs.push(msg) - }, - HTLCFailureMsg::Malformed(msg) => { - htlc.state = InboundHTLCState::LocalRemoved( - InboundHTLCRemovalReason::FailMalformed { - sha256_of_onion: msg.sha256_of_onion, - failure_code: msg.failure_code, - }, - ); - update_fail_malformed_htlcs.push(msg) - }, - } - }, - PendingHTLCStatus::Forward(forward_info) => { - log_trace!(logger, " ...promoting inbound 
AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash); - to_forward_infos.push((forward_info, htlc.htlc_id)); - htlc.state = InboundHTLCState::Committed { - // HTLCs will only be in state `InboundHTLCResolution::Resolved` if they were - // received on LDK 0.1-. - update_add_htlc: InboundUpdateAdd::Legacy, - }; - }, - } - }, InboundHTLCResolution::Pending { update_add_htlc } => { log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash); pending_update_adds.push(update_add_htlc.clone()); @@ -9112,7 +9063,7 @@ where false, true, false, - to_forward_infos, + Vec::new(), revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9139,9 +9090,11 @@ where release_state_str ); if self.context.channel_state.can_generate_new_commitment() { - log_debug!(logger, "Responding with a commitment update with {} HTLCs failed for channel {}", - update_fail_htlcs.len() + update_fail_malformed_htlcs.len(), - &self.context.channel_id); + log_debug!( + logger, + "Responding with a commitment update for channel {}", + &self.context.channel_id + ); } else { let reason = if self.context.channel_state.is_local_stfu_sent() { "exits quiescence" @@ -9157,7 +9110,7 @@ where false, true, false, - to_forward_infos, + Vec::new(), revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9171,7 +9124,7 @@ where false, false, false, - to_forward_infos, + Vec::new(), revoked_htlcs, finalized_claimed_htlcs, logger, @@ -15146,8 +15099,9 @@ impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> let counterparty_next_commitment_transaction_number = Readable::read(reader)?; let value_to_self_msat = Readable::read(reader)?; - let pending_inbound_htlc_count: u64 = Readable::read(reader)?; + let logger = WithContext::from(logger, None, Some(channel_id), None); + let pending_inbound_htlc_count: u64 = Readable::read(reader)?; let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min( pending_inbound_htlc_count as 
usize, DEFAULT_MAX_HTLCS as usize, @@ -15160,23 +15114,17 @@ impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> payment_hash: Readable::read(reader)?, state: match ::read(reader)? { 1 => { - let resolution = if ver <= 3 { - InboundHTLCResolution::Resolved { - pending_htlc_status: Readable::read(reader)?, - } - } else { - Readable::read(reader)? - }; + let resolution = Readable::read(reader).map_err(|e| { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + e + })?; InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) }, 2 => { - let resolution = if ver <= 3 { - InboundHTLCResolution::Resolved { - pending_htlc_status: Readable::read(reader)?, - } - } else { - Readable::read(reader)? - }; + let resolution = Readable::read(reader).map_err(|e| { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + e + })?; InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) }, 3 => InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }, From eebbbcaacd1af7157df5a8f418ff88b9f93d5297 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 6 Jan 2026 16:17:12 -0500 Subject: [PATCH 18/23] Simplify InboundHTLCStates' htlc resolution type Previously, several variants of InboundHTLCState contained an InboundHTLCResolution enum. Now that the resolution enum only has one variant, we can get rid of it and simplify the parent InboundHTLCState type. 
--- lightning/src/ln/channel.rs | 137 ++++++++++++++++++++---------------- 1 file changed, 75 insertions(+), 62 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 9884ac6e373..f36fc223b6c 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -144,28 +144,11 @@ enum InboundHTLCRemovalReason { Fulfill { preimage: PaymentPreimage, attribution_data: Option }, } -/// Represents the resolution status of an inbound HTLC. -#[cfg_attr(test, derive(Debug))] -#[derive(Clone)] -enum InboundHTLCResolution { - /// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed - /// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both - /// nodes for the state update in which it was proposed. - Pending { update_add_htlc: msgs::UpdateAddHTLC }, -} - -impl_writeable_tlv_based_enum!(InboundHTLCResolution, - // 0 used to be used for InboundHTLCResolution::Resolved in 0.4 and below. - (2, Pending) => { - (0, update_add_htlc, required), - }, -); - #[cfg_attr(test, derive(Debug))] enum InboundHTLCState { /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an /// update_add_htlc message for this HTLC. - RemoteAnnounced(InboundHTLCResolution), + RemoteAnnounced(msgs::UpdateAddHTLC), /// Included in a received commitment_signed message (implying we've /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous /// state (see the example below). We have not yet included this HTLC in a @@ -195,13 +178,13 @@ enum InboundHTLCState { /// Implies AwaitingRemoteRevoke. /// /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md - AwaitingRemoteRevokeToAnnounce(InboundHTLCResolution), + AwaitingRemoteRevokeToAnnounce(msgs::UpdateAddHTLC), /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it). 
/// We have also included this HTLC in our latest commitment_signed and are now just waiting /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the /// channel (before it can then get forwarded and/or removed). /// Implies AwaitingRemoteRevoke. - AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution), + AwaitingAnnouncedRemoteRevoke(msgs::UpdateAddHTLC), /// An HTLC irrevocably committed in the latest commitment transaction, ready to be forwarded or /// removed. Committed { @@ -286,12 +269,10 @@ impl InboundHTLCState { /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc fn should_hold_htlc(&self) -> bool { match self { - InboundHTLCState::RemoteAnnounced(res) - | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(res) - | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(res) => match res { - InboundHTLCResolution::Pending { update_add_htlc } => { - update_add_htlc.hold_htlc.is_some() - }, + InboundHTLCState::RemoteAnnounced(update_add_htlc) + | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add_htlc) + | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc) => { + update_add_htlc.hold_htlc.is_some() }, InboundHTLCState::Committed { .. 
} | InboundHTLCState::LocalRemoved(_) => false, } @@ -7867,9 +7848,7 @@ where amount_msat: msg.amount_msat, payment_hash: msg.payment_hash, cltv_expiry: msg.cltv_expiry, - state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Pending { - update_add_htlc: msg.clone(), - }), + state: InboundHTLCState::RemoteAnnounced(msg.clone()) }); Ok(()) } @@ -8415,11 +8394,10 @@ where } for htlc in self.context.pending_inbound_htlcs.iter_mut() { - if let &InboundHTLCState::RemoteAnnounced(ref htlc_resolution) = &htlc.state { + if let &InboundHTLCState::RemoteAnnounced(ref update_add) = &htlc.state { log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", &htlc.payment_hash, &self.context.channel_id); - htlc.state = - InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution.clone()); + htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add.clone()); need_commitment = true; } } @@ -8930,24 +8908,22 @@ where InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }; mem::swap(&mut state, &mut htlc.state); - if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state { + if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add) = state { log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash); - htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution); + htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add); require_commitment = true; - } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) = + } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc) = state { - match resolution { - InboundHTLCResolution::Pending { update_add_htlc } => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash); - pending_update_adds.push(update_add_htlc.clone()); - 
htlc.state = InboundHTLCState::Committed { - update_add_htlc: InboundUpdateAdd::WithOnion { - update_add_htlc, - }, - }; - }, - } + log_trace!( + logger, + " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", + &htlc.payment_hash + ); + pending_update_adds.push(update_add_htlc.clone()); + htlc.state = InboundHTLCState::Committed { + update_add_htlc: InboundUpdateAdd::WithOnion { update_add_htlc }, + }; } } } @@ -12856,10 +12832,10 @@ where // is acceptable. for htlc in self.context.pending_inbound_htlcs.iter_mut() { let new_state = - if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = + if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref update_add) = &htlc.state { - Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone())) + Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add.clone())) } else { None }; @@ -14569,6 +14545,41 @@ impl Readable for AnnouncementSigsState { } } +/// Represents the resolution status of an inbound HTLC. Previously we had multiple variants here, +/// now we only use it for backwards compatibility when (de)serializing [`InboundHTLCState`]s. +enum WriteableLegacyHTLCResolution<'a> { + Pending { update_add_htlc: &'a msgs::UpdateAddHTLC }, +} + +// We can't use `impl_writeable_tlv_based_enum` due to the lifetime. +impl<'a> Writeable for WriteableLegacyHTLCResolution<'a> { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + match self { + Self::Pending { update_add_htlc } => { + 2u8.write(writer)?; + crate::_encode_varint_length_prefixed_tlv!(writer, { + (0, update_add_htlc, required) + }); + }, + } + + Ok(()) + } +} + +/// Represents the resolution status of an inbound HTLC. Previously we had multiple variants here, +/// now we only use it for backwards compatibility when (de)serializing [`InboundHTLCState`]s. 
+enum ReadableLegacyHTLCResolution { + Pending { update_add_htlc: msgs::UpdateAddHTLC }, +} + +impl_writeable_tlv_based_enum!(ReadableLegacyHTLCResolution, + // 0 used to be used for the ::Resolved variant in 0.2 and below. + (2, Pending) => { + (0, update_add_htlc, required), + }, +); + impl Writeable for FundedChannel { fn write(&self, writer: &mut W) -> Result<(), io::Error> { // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been @@ -14653,13 +14664,13 @@ impl Writeable for FundedChannel { htlc.payment_hash.write(writer)?; match &htlc.state { &InboundHTLCState::RemoteAnnounced(_) => unreachable!(), - &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => { + &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref update_add_htlc) => { 1u8.write(writer)?; - htlc_resolution.write(writer)?; + WriteableLegacyHTLCResolution::Pending { update_add_htlc }.write(writer)?; }, - &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => { + &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref update_add_htlc) => { 2u8.write(writer)?; - htlc_resolution.write(writer)?; + WriteableLegacyHTLCResolution::Pending { update_add_htlc }.write(writer)?; }, &InboundHTLCState::Committed { ref update_add_htlc } => { 3u8.write(writer)?; @@ -15114,18 +15125,20 @@ impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> payment_hash: Readable::read(reader)?, state: match ::read(reader)? { 1 => { - let resolution = Readable::read(reader).map_err(|e| { - log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); - e - })?; - InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) + let ReadableLegacyHTLCResolution::Pending { update_add_htlc } = + Readable::read(reader).map_err(|e| { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. 
HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + e + })?; + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add_htlc) }, 2 => { - let resolution = Readable::read(reader).map_err(|e| { - log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); - e - })?; - InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) + let ReadableLegacyHTLCResolution::Pending { update_add_htlc } = + Readable::read(reader).map_err(|e| { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + e + })?; + InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc) }, 3 => InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }, 4 => { From b04661280d171b3a16ccf2ae0232f9029f50fe6b Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 28 Jan 2026 16:47:26 -0500 Subject: [PATCH 19/23] Remove now-unused InboundUpdateAdd::Legacy This version of LDK no longer supports HTLCs created before 0.3, which allows us to remove this variant that will only be present if an HTLC was received on a pre-0.3 LDK version. See the past few commits. --- lightning/src/ln/channel.rs | 41 ++++++++++++++++++++++-------- lightning/src/ln/channelmanager.rs | 3 --- 2 files changed, 31 insertions(+), 13 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index f36fc223b6c..542d956b0b1 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -301,16 +301,13 @@ pub(super) enum InboundUpdateAdd { /// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded outbound_amt_msat: u64, }, - /// This HTLC was received pre-LDK 0.3, before we started persisting the onion for inbound - /// committed HTLCs. 
- Legacy, } impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, (0, WithOnion) => { (0, update_add_htlc, required), }, - (2, Legacy) => {}, + // 2 was previously used for the ::Legacy variant, used for HTLCs received on LDK 0.1-. (4, Forwarded) => { (0, hop_data, required), (2, outbound_amt_msat, required), @@ -8905,7 +8902,10 @@ where }; if swap { let mut state = - InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }; + InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed { + sha256_of_onion: [0; 32], + failure_code: 0, + }); mem::swap(&mut state, &mut htlc.state); if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add) = state { @@ -15069,6 +15069,23 @@ impl Writeable for FundedChannel { } } +fn dummy_prev_hop_data() -> HTLCPreviousHopData { + let txid_hash = Sha256d::from_bytes_ref(&[0; 32]); + HTLCPreviousHopData { + prev_outbound_scid_alias: 0, + user_channel_id: None, + htlc_id: 0, + incoming_packet_shared_secret: [0; 32], + phantom_shared_secret: None, + trampoline_shared_secret: None, + blinded_failure: None, + channel_id: ChannelId([0; 32]), + outpoint: OutPoint { txid: Txid::from_raw_hash(*txid_hash), index: 0 }, + counterparty_node_id: None, + cltv_expiry: None, + } +} + impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> ReadableArgs<(&'a ES, &'b SP, &'c L, &'d ChannelTypeFeatures)> for FundedChannel { @@ -15140,7 +15157,10 @@ impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> })?; InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc) }, - 3 => InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }, + 3 => InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Forwarded { + hop_data: dummy_prev_hop_data(), + outbound_amt_msat: 0, + }}, 4 => { let reason = match ::read(reader)? 
{ 0 => InboundHTLCRemovalReason::FailRelay(msgs::OnionErrorPacket { @@ -15998,9 +16018,10 @@ mod tests { use crate::chain::BestBlock; use crate::ln::chan_utils::{self, commit_tx_fee_sat, ChannelTransactionParameters}; use crate::ln::channel::{ - AwaitingChannelReadyFlags, ChannelState, FundedChannel, HTLCCandidate, HTLCInitiator, - HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, InboundUpdateAdd, - InboundV1Channel, OutboundHTLCOutput, OutboundHTLCState, OutboundV1Channel, + dummy_prev_hop_data, AwaitingChannelReadyFlags, ChannelState, FundedChannel, HTLCCandidate, + HTLCInitiator, HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, + InboundUpdateAdd, InboundV1Channel, OutboundHTLCOutput, OutboundHTLCState, + OutboundV1Channel, }; use crate::ln::channel::{ MAX_FUNDING_SATOSHIS_NO_WUMBO, MIN_THEIR_CHAN_RESERVE_SATOSHIS, @@ -16044,7 +16065,7 @@ mod tests { use std::cmp; fn dummy_inbound_update_add() -> InboundUpdateAdd { - InboundUpdateAdd::Legacy + InboundUpdateAdd::Forwarded { hop_data: dummy_prev_hop_data(), outbound_amt_msat: 0 } } #[test] diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e2e29e69825..441a8a7e2e7 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18613,9 +18613,6 @@ impl< .or_insert_with(Vec::new) .push((hop_data, outbound_amt_msat)); }, - InboundUpdateAdd::Legacy => { - return Err(DecodeError::InvalidValue) - }, } } } From 87a33b8c20c418c9f38a595ee8ef8ec2f29e363a Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 6 Jan 2026 15:19:33 -0500 Subject: [PATCH 20/23] Remove now-unused param from monitor_updating_paused In the previous commit, we removed the InboundHTLCResolution::Resolved enum variant, which caused us to never provide any HTLCs in this now-removed parameter. 
--- lightning/src/ln/channel.rs | 79 ++++--------------------------------- 1 file changed, 7 insertions(+), 72 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 542d956b0b1..10b638d54dd 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7585,7 +7585,6 @@ where false, Vec::new(), Vec::new(), - Vec::new(), logger, ); UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat } @@ -8076,15 +8075,7 @@ where &self.context.channel_id() ); - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); self.context .interactive_tx_signing_session .as_mut() @@ -8188,15 +8179,7 @@ where .as_mut() .expect("Signing session must exist for negotiated pending splice") .received_commitment_signed(); - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); self.context.monitor_pending_tx_signatures = true; Ok(self.push_ret_blockable_mon_update(monitor_update)) @@ -8496,7 +8479,6 @@ where false, Vec::new(), Vec::new(), - Vec::new(), logger, ); return Ok(self.push_ret_blockable_mon_update(monitor_update)); @@ -8696,15 +8678,7 @@ where if update_fee.is_some() { "a fee update, " } else { "" }, update_add_count, update_fulfill_count, update_fail_count); - self.monitor_updating_paused( - false, - true, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), logger); (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) } else { (None, Vec::new()) @@ -9039,7 +9013,6 @@ where false, true, false, - Vec::new(), revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9086,7 +9059,6 @@ where false, true, false, - Vec::new(), revoked_htlcs, 
finalized_claimed_htlcs, logger, @@ -9100,7 +9072,6 @@ where false, false, false, - Vec::new(), revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9405,7 +9376,6 @@ where /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress fn monitor_updating_paused( &mut self, resend_raa: bool, resend_commitment: bool, resend_channel_ready: bool, - pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pending_finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, logger: &L, ) { @@ -9414,7 +9384,6 @@ where self.context.monitor_pending_revoke_and_ack |= resend_raa; self.context.monitor_pending_commitment_signed |= resend_commitment; self.context.monitor_pending_channel_ready |= resend_channel_ready; - self.context.monitor_pending_forwards.extend(pending_forwards); self.context.monitor_pending_failures.extend(pending_fails); self.context.monitor_pending_finalized_fulfills.extend(pending_finalized_claimed_htlcs); self.context.channel_state.set_monitor_update_in_progress(); @@ -10630,15 +10599,7 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); self.push_ret_blockable_mon_update(monitor_update) } else { None @@ -11379,15 +11340,7 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); let monitor_update = self.push_ret_blockable_mon_update(monitor_update); let announcement_sigs = @@ -13056,15 +13009,7 @@ where let can_add_htlc = send_res.map_err(|(_, msg)| ChannelError::Ignore(msg))?; if can_add_htlc { let monitor_update = self.build_commitment_no_status_check(logger); - 
self.monitor_updating_paused( - false, - true, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), logger); Ok(self.push_ret_blockable_mon_update(monitor_update)) } else { Ok(None) @@ -13184,15 +13129,7 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - &&logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), &&logger); self.push_ret_blockable_mon_update(monitor_update) } else { None @@ -13801,7 +13738,6 @@ impl OutboundV1Channel { need_channel_ready, Vec::new(), Vec::new(), - Vec::new(), logger, ); Ok((channel, channel_monitor)) @@ -14100,7 +14036,6 @@ impl InboundV1Channel { need_channel_ready, Vec::new(), Vec::new(), - Vec::new(), logger, ); From 86601653ab476cd9ddf72850a4083923c79aeda3 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 6 Jan 2026 16:31:30 -0500 Subject: [PATCH 21/23] Delete now-unused ChannelContext::monitor_pending_forwards We stopped adding any HTLCs to this vec a few commits ago when we removed support for HTLCs that were originally received on LDK 0.1. --- lightning/src/ln/channel.rs | 29 +++++++++-------------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 10b638d54dd..7b28ecc4fe0 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1150,6 +1150,7 @@ pub(super) struct MonitorRestoreUpdates { /// A `CommitmentUpdate` to be sent to our channel peer. 
pub commitment_update: Option, pub commitment_order: RAACommitmentOrder, + // TODO: get rid of this pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pub finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, @@ -3059,7 +3060,6 @@ pub(super) struct ChannelContext { // responsible for some of the HTLCs here or not - we don't know whether the update in question // completed or not. We currently ignore these fields entirely when force-closing a channel, // but need to handle this somehow or we run the risk of losing HTLCs! - monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>, monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, monitor_pending_finalized_fulfills: Vec<(HTLCSource, Option)>, monitor_pending_update_adds: Vec, @@ -3763,7 +3763,6 @@ impl ChannelContext { monitor_pending_channel_ready: false, monitor_pending_revoke_and_ack: false, monitor_pending_commitment_signed: false, - monitor_pending_forwards: Vec::new(), monitor_pending_failures: Vec::new(), monitor_pending_finalized_fulfills: Vec::new(), monitor_pending_update_adds: Vec::new(), @@ -3997,7 +3996,6 @@ impl ChannelContext { monitor_pending_channel_ready: false, monitor_pending_revoke_and_ack: false, monitor_pending_commitment_signed: false, - monitor_pending_forwards: Vec::new(), monitor_pending_failures: Vec::new(), monitor_pending_finalized_fulfills: Vec::new(), monitor_pending_update_adds: Vec::new(), @@ -9470,8 +9468,6 @@ where let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger); - let mut accepted_htlcs = Vec::new(); - mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards); let mut failed_htlcs = Vec::new(); mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures); let mut finalized_claimed_htlcs = Vec::new(); @@ -9492,7 +9488,7 @@ where self.context.monitor_pending_commitment_signed = false; return 
MonitorRestoreUpdates { raa: None, commitment_update: None, commitment_order: RAACommitmentOrder::RevokeAndACKFirst, - accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, + accepted_htlcs: Vec::new(), failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None, channel_ready_order, committed_outbound_htlc_sources }; @@ -9523,7 +9519,7 @@ where if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, match commitment_order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); MonitorRestoreUpdates { - raa, commitment_update, commitment_order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, + raa, commitment_update, commitment_order, accepted_htlcs: Vec::new(), failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures, channel_ready_order, committed_outbound_htlc_sources } @@ -14781,11 +14777,8 @@ impl Writeable for FundedChannel { self.context.monitor_pending_revoke_and_ack.write(writer)?; self.context.monitor_pending_commitment_signed.write(writer)?; - (self.context.monitor_pending_forwards.len() as u64).write(writer)?; - for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() { - pending_forward.write(writer)?; - htlc_id.write(writer)?; - } + // Previously used for monitor_pending_forwards prior to LDK 0.5. 
+ 0u64.write(writer)?; (self.context.monitor_pending_failures.len() as u64).write(writer)?; for &(ref htlc_source, ref payment_hash, ref fail_reason) in @@ -15227,13 +15220,10 @@ impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> let monitor_pending_revoke_and_ack = Readable::read(reader)?; let monitor_pending_commitment_signed = Readable::read(reader)?; - let monitor_pending_forwards_count: u64 = Readable::read(reader)?; - let mut monitor_pending_forwards = Vec::with_capacity(cmp::min( - monitor_pending_forwards_count as usize, - DEFAULT_MAX_HTLCS as usize, - )); - for _ in 0..monitor_pending_forwards_count { - monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?)); + let monitor_pending_forwards_count_legacy: u64 = Readable::read(reader)?; + if monitor_pending_forwards_count_legacy != 0 { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + return Err(DecodeError::InvalidValue); } let monitor_pending_failures_count: u64 = Readable::read(reader)?; @@ -15837,7 +15827,6 @@ impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> monitor_pending_channel_ready, monitor_pending_revoke_and_ack, monitor_pending_commitment_signed, - monitor_pending_forwards, monitor_pending_failures, monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(), monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or_default(), From f79003e1c56a884809503d0bb46fbf3a75c01399 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 6 Jan 2026 16:40:27 -0500 Subject: [PATCH 22/23] Delete now-unused MonitorRestoreUpdates::accepted_htlcs We stopped adding any HTLCs to this vec a few commits ago when we removed support for HTLCs that were originally received on LDK 0.1 --- lightning/src/ln/channel.rs | 14 +++++------ lightning/src/ln/channelmanager.rs | 37 ++++++++---------------------- 2 files changed, 15 
insertions(+), 36 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 7b28ecc4fe0..9199ac83c2d 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -51,8 +51,8 @@ use crate::ln::channel_state::{ }; use crate::ln::channelmanager::{ self, ChannelReadyOrder, FundingConfirmedMessage, HTLCPreviousHopData, HTLCSource, - OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, RAACommitmentOrder, SentHTLCId, - BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, + OpenChannelMessage, PaymentClaimDetails, RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, + MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::interactivetxs::{ @@ -1150,8 +1150,6 @@ pub(super) struct MonitorRestoreUpdates { /// A `CommitmentUpdate` to be sent to our channel peer. pub commitment_update: Option, pub commitment_order: RAACommitmentOrder, - // TODO: get rid of this - pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pub finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, /// Inbound update_adds that are now irrevocably committed to this channel and are ready for the @@ -9488,9 +9486,9 @@ where self.context.monitor_pending_commitment_signed = false; return MonitorRestoreUpdates { raa: None, commitment_update: None, commitment_order: RAACommitmentOrder::RevokeAndACKFirst, - accepted_htlcs: Vec::new(), failed_htlcs, finalized_claimed_htlcs, pending_update_adds, - funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None, - channel_ready_order, committed_outbound_htlc_sources + failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, + channel_ready, announcement_sigs, tx_signatures: None, channel_ready_order, + committed_outbound_htlc_sources }; } @@ -9519,7 +9517,7 @@ where if commitment_update.is_some() { "a" } else { "no" }, if 
raa.is_some() { "an" } else { "no" }, match commitment_order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); MonitorRestoreUpdates { - raa, commitment_update, commitment_order, accepted_htlcs: Vec::new(), failed_htlcs, finalized_claimed_htlcs, + raa, commitment_update, commitment_order, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures, channel_ready_order, committed_outbound_htlc_sources } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 441a8a7e2e7..700c05f8717 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1404,7 +1404,6 @@ enum PostMonitorUpdateChanResume { counterparty_node_id: PublicKey, unbroadcasted_batch_funding_txid: Option, update_actions: Vec, - htlc_forwards: Option, decode_update_add_htlcs: Option<(u64, Vec)>, finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, @@ -9583,7 +9582,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ &self, channel_id: ChannelId, counterparty_node_id: PublicKey, unbroadcasted_batch_funding_txid: Option, update_actions: Vec, - htlc_forwards: Option, decode_update_add_htlcs: Option<(u64, Vec)>, finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, @@ -9644,9 +9642,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self.handle_monitor_update_completion_actions(update_actions); - if let Some(forwards) = htlc_forwards { - self.forward_htlcs(&mut [forwards][..]); - } if let Some(decode) = decode_update_add_htlcs { self.push_decode_update_add_htlcs(decode); } @@ -10102,13 +10097,12 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ None }; - let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( + let decode_update_add_htlcs = self.handle_channel_resumption( pending_msg_events, chan, updates.raa, updates.commitment_update, updates.commitment_order, - updates.accepted_htlcs, updates.pending_update_adds, updates.funding_broadcastable, updates.channel_ready, @@ -10129,7 +10123,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ counterparty_node_id, unbroadcasted_batch_funding_txid, update_actions, - htlc_forwards, decode_update_add_htlcs, finalized_claimed_htlcs: updates.finalized_claimed_htlcs, failed_htlcs: updates.failed_htlcs, @@ -10220,7 +10213,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ counterparty_node_id, unbroadcasted_batch_funding_txid, update_actions, - htlc_forwards, decode_update_add_htlcs, finalized_claimed_htlcs, failed_htlcs, @@ -10231,7 +10223,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ counterparty_node_id, unbroadcasted_batch_funding_txid, update_actions, - htlc_forwards, decode_update_add_htlcs, finalized_claimed_htlcs, failed_htlcs, @@ -10247,17 +10238,16 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ fn handle_channel_resumption(&self, pending_msg_events: &mut Vec, channel: &mut FundedChannel, raa: Option, commitment_update: Option, commitment_order: RAACommitmentOrder, - pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec, - funding_broadcastable: Option, + pending_update_adds: Vec, funding_broadcastable: Option, channel_ready: Option, announcement_sigs: Option, tx_signatures: Option, tx_abort: Option, channel_ready_order: ChannelReadyOrder, - ) -> (Option<(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec)>) { + ) -> Option<(u64, Vec)> { let logger = WithChannelContext::from(&self.logger, &channel.context, None); - log_trace!(logger, "Handling channel resumption with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", + log_trace!(logger, "Handling channel resumption with {} RAA, {} commitment update, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", if raa.is_some() { "an" } else { "no" }, if commitment_update.is_some() { "a" } else { "no" }, - pending_forwards.len(), pending_update_adds.len(), + pending_update_adds.len(), if funding_broadcastable.is_some() { "" } else { "not " }, if channel_ready.is_some() { "sending" } else { "without" }, if announcement_sigs.is_some() { "sending" } else { "without" }, @@ -10268,14 +10258,6 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let counterparty_node_id = channel.context.get_counterparty_node_id(); let outbound_scid_alias = channel.context.outbound_scid_alias(); - let mut htlc_forwards = None; - if !pending_forwards.is_empty() { - htlc_forwards = Some(( - outbound_scid_alias, channel.context.get_counterparty_node_id(), - channel.funding.get_funding_txo().unwrap(), channel.context.channel_id(), - channel.context.get_user_id(), pending_forwards - )); - } let mut decode_update_add_htlcs = None; if !pending_update_adds.is_empty() { decode_update_add_htlcs = Some((outbound_scid_alias, pending_update_adds)); @@ -10362,7 +10344,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, None => { debug_assert!(false, "Channel resumed without a funding txo, this should never happen!"); - return (htlc_forwards, decode_update_add_htlcs); + return decode_update_add_htlcs; } }; } else { @@ -10380,7 +10362,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ emit_initial_channel_ready_event!(pending_events, channel); } - (htlc_forwards, decode_update_add_htlcs) + decode_update_add_htlcs } #[rustfmt::skip] @@ -12485,12 +12467,11 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } } let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take(); - let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( + let decode_update_add_htlcs = self.handle_channel_resumption( &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.commitment_order, - Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs, + Vec::new(), None, responses.channel_ready, responses.announcement_sigs, responses.tx_signatures, responses.tx_abort, responses.channel_ready_order, ); - debug_assert!(htlc_forwards.is_none()); debug_assert!(decode_update_add_htlcs.is_none()); if let Some(upd) = channel_update { peer_state.pending_msg_events.push(upd); From 21709105dcdf2cd203b26ff098303d0959ae50b0 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 7 Jan 2026 16:50:37 -0500 Subject: [PATCH 23/23] Delete now-unused PendingHTLCStatus We stopped using this struct a few commits ago when we removed support for HTLCs that were originally received on LDK 0.1. --- lightning/src/ln/channelmanager.rs | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 700c05f8717..e253a860e44 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -188,23 +188,6 @@ use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use core::time::Duration; use core::{cmp, mem}; -// We hold various information about HTLC relay in the HTLC objects in Channel itself: -// -// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should -// forward the HTLC with information it will give back to us when it does so, or if it should Fail -// the HTLC with the relevant message for the Channel to handle giving to the remote peer. 
-// -// Once said HTLC is committed in the Channel, if the PendingHTLCStatus indicated Forward, the -// Channel will return the PendingHTLCInfo back to us, and we will create an HTLCForwardInfo -// with it to track where it came from (in case of onwards-forward error), waiting a random delay -// before we forward it. -// -// We will then use HTLCForwardInfo's PendingHTLCInfo to construct an outbound HTLC, with a -// relevant HTLCSource::PreviousHopData filled in to indicate where it came from (which we can use -// to either fail-backwards or fulfill the HTLC backwards along the relevant path). -// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is -// our payment, which we can use to decode errors or inform the user that the payment was sent. - /// Information about where a received HTLC('s onion) has indicated the HTLC should go. #[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug #[cfg_attr(test, derive(Debug, PartialEq))] @@ -437,14 +420,6 @@ pub(super) enum HTLCFailureMsg { Malformed(msgs::UpdateFailMalformedHTLC), } -/// Stores whether we can't forward an HTLC or relevant forwarding info -#[cfg_attr(test, derive(Debug))] -#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug -pub(super) enum PendingHTLCStatus { - Forward(PendingHTLCInfo), - Fail(HTLCFailureMsg), -} - #[cfg_attr(test, derive(Clone, Debug, PartialEq))] pub(super) struct PendingAddHTLCInfo { pub(super) forward_info: PendingHTLCInfo, @@ -16759,11 +16734,6 @@ impl Readable for HTLCFailureMsg { } } -impl_writeable_tlv_based_enum_legacy!(PendingHTLCStatus, ; - (0, Forward), - (1, Fail), -); - impl_writeable_tlv_based_enum!(BlindedFailure, (0, FromIntroductionNode) => {}, (2, FromBlindedNode) => {},