diff --git a/lightning-types/src/features.rs b/lightning-types/src/features.rs
index 148242e6fce..84538ad2617 100644
--- a/lightning-types/src/features.rs
+++ b/lightning-types/src/features.rs
@@ -82,6 +82,8 @@
 //! (see [BOLT PR #1228](https://github.com/lightning/bolts/pull/1228) for more info).
 //! - `Splice` - Allows replacing the currently-locked funding transaction with a new one
 //!     (see [BOLT PR #1160](https://github.com/lightning/bolts/pull/1160) for more information).
+//! - `HtlcHold` - Requires/supports holding HTLCs and forwarding on receipt of an onion message
+//!     (see [BOLT PR #989](https://github.com/lightning/bolts/pull/989/files) for more information).
 //!
 //! LDK knows about the following features, but does not support them:
 //! - `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be
@@ -166,6 +168,10 @@ mod sealed {
 		ZeroConf,
 		// Byte 7
 		Trampoline | SimpleClose | Splice,
+		// Byte 8 - 130
+		,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+		// Byte 131
+		HtlcHold,
 	]
 );
 define_context!(
@@ -191,6 +197,10 @@ mod sealed {
 		,,,,,,,,,,,,,,,,,,,,,,,,
 		// Byte 32
 		DnsResolver,
+		// Byte 33 - 130
+		,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+		// Byte 131
+		HtlcHold,
 	]
 );
 define_context!(ChannelContext, []);
@@ -700,6 +710,17 @@ mod sealed {
 		supports_dns_resolution,
 		requires_dns_resolution
 	);
+	define_feature!(
+		1053, // The BOLTs PR uses feature bit 52/53, so add +1000 for the experimental bit
+		HtlcHold,
+		[InitContext, NodeContext],
+		"Feature flags for holding HTLCs and forwarding on receipt of an onion message",
+		set_htlc_hold_optional,
+		set_htlc_hold_required,
+		clear_htlc_hold,
+		supports_htlc_hold,
+		requires_htlc_hold
+	);
 
 	// Note: update the module-level docs when a new feature bit is added!
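> Reviewer note (not part of the diff): the `define_feature!` invocation above generates the listed accessors on the feature sets of both contexts. A minimal sketch of exercising them, assuming `lightning-types` as a dependency:

```rust
use lightning_types::features::{InitFeatures, NodeFeatures};

fn main() {
    // Bit 1053 is the experimental optional bit (the BOLTs PR's 52/53 plus 1000).
    let mut init = InitFeatures::empty();
    init.set_htlc_hold_optional();
    assert!(init.supports_htlc_hold());
    assert!(!init.requires_htlc_hold());

    // The feature is defined for both InitContext and NodeContext, so the same
    // accessors exist on NodeFeatures.
    let mut node = NodeFeatures::empty();
    node.set_htlc_hold_required();
    assert!(node.requires_htlc_hold());
}
```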
diff --git a/lightning/src/blinded_path/message.rs b/lightning/src/blinded_path/message.rs
index 7d721cd1fdc..e291c83b66c 100644
--- a/lightning/src/blinded_path/message.rs
+++ b/lightning/src/blinded_path/message.rs
@@ -19,7 +19,7 @@ use crate::blinded_path::{BlindedHop, BlindedPath, Direction, IntroductionNode,
 use crate::crypto::streams::ChaChaPolyReadAdapter;
 use crate::io;
 use crate::io::Cursor;
-use crate::ln::channelmanager::PaymentId;
+use crate::ln::channelmanager::{InterceptId, PaymentId};
 use crate::ln::msgs::DecodeError;
 use crate::ln::onion_utils;
 use crate::offers::nonce::Nonce;
@@ -556,7 +556,7 @@ pub enum AsyncPaymentsContext {
 	},
 	/// Context contained within the reply [`BlindedMessagePath`] we put in outbound
 	/// [`HeldHtlcAvailable`] messages, provided back to us in corresponding [`ReleaseHeldHtlc`]
-	/// messages.
+	/// messages if we are an always-online sender paying an async recipient.
 	///
 	/// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable
 	/// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
@@ -577,6 +577,17 @@ pub enum AsyncPaymentsContext {
 		/// able to trivially ask if we're online forever.
 		path_absolute_expiry: core::time::Duration,
 	},
+	/// Context contained within the reply [`BlindedMessagePath`] put in outbound
+	/// [`HeldHtlcAvailable`] messages, provided back to the async sender's always-online
+	/// counterparty in corresponding [`ReleaseHeldHtlc`] messages.
+	///
+	/// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable
+	/// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
+	ReleaseHeldHtlc {
+		/// An identifier for the HTLC that should be released by us as the sender's always-online
+		/// channel counterparty to the often-offline recipient.
+		intercept_id: InterceptId,
+	},
 }
 
 impl_writeable_tlv_based_enum!(MessageContext,
@@ -632,6 +643,9 @@ impl_writeable_tlv_based_enum!(AsyncPaymentsContext,
 		(2, invoice_slot, required),
 		(4, path_absolute_expiry, required),
 	},
+	(6, ReleaseHeldHtlc) => {
+		(0, intercept_id, required),
+	},
 );
 
 /// Contains a simple nonce for use in a blinded path's context.
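> Reviewer note: the new context variant above is what lets the holder match a returning `ReleaseHeldHtlc` onion message to a specific held HTLC. A hedged sketch of constructing that context (module paths as in the diff; illustrative only):

```rust
use lightning::blinded_path::message::{AsyncPaymentsContext, MessageContext};
use lightning::ln::channelmanager::InterceptId;

// The holder stamps its reply path with this context. When the recipient's
// ReleaseHeldHtlc message arrives over that path, the embedded intercept_id
// identifies which pending HTLC to release.
fn release_context(intercept_id: InterceptId) -> MessageContext {
    MessageContext::AsyncPayments(AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id })
}

fn main() {
    let _ctx = release_context(InterceptId([42; 32]));
}
```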
diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs
index a8e7af23984..25fa5e71e5d 100644
--- a/lightning/src/ln/blinded_payment_tests.rs
+++ b/lightning/src/ln/blinded_payment_tests.rs
@@ -1522,6 +1522,7 @@ fn update_add_msg(
 		onion_routing_packet,
 		skimmed_fee_msat: None,
 		blinding_point,
+		hold_htlc: None,
 	}
 }
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 1065803fe76..15b04942a8b 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -28,6 +28,7 @@ use bitcoin::{secp256k1, sighash, TxIn};
 #[cfg(splicing)]
 use bitcoin::{FeeRate, Sequence};
 
+use crate::blinded_path::message::BlindedMessagePath;
 use crate::chain::chaininterface::{
 	fee_for_weight, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator,
 };
@@ -283,6 +284,24 @@ impl InboundHTLCState {
 			_ => None,
 		}
 	}
+
+	/// Whether we need to hold onto this HTLC until receipt of a corresponding [`ReleaseHeldHtlc`]
+	/// onion message.
+	///
+	/// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
+	fn should_hold_htlc(&self) -> bool {
+		match self {
+			InboundHTLCState::RemoteAnnounced(res)
+			| InboundHTLCState::AwaitingRemoteRevokeToAnnounce(res)
+			| InboundHTLCState::AwaitingAnnouncedRemoteRevoke(res) => match res {
+				InboundHTLCResolution::Pending { update_add_htlc } => {
+					update_add_htlc.hold_htlc.is_some()
+				},
+				InboundHTLCResolution::Resolved { .. } => false,
+			},
+			InboundHTLCState::Committed | InboundHTLCState::LocalRemoved(_) => false,
+		}
+	}
 }
 
 struct InboundHTLCOutput {
@@ -1606,12 +1625,12 @@ where
 	}
 
 	#[rustfmt::skip]
-	pub fn signer_maybe_unblocked<L: Deref>(
-		&mut self, chain_hash: ChainHash, logger: &L,
-	) -> Option<SignerResumeUpdates> where L::Target: Logger {
+	pub fn signer_maybe_unblocked<L: Deref, CBP>(
+		&mut self, chain_hash: ChainHash, logger: &L, path_for_release_htlc: CBP
+	) -> Option<SignerResumeUpdates> where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath {
 		match &mut self.phase {
 			ChannelPhase::Undefined => unreachable!(),
-			ChannelPhase::Funded(chan) => Some(chan.signer_maybe_unblocked(logger)),
+			ChannelPhase::Funded(chan) => Some(chan.signer_maybe_unblocked(logger, path_for_release_htlc)),
 			ChannelPhase::UnfundedOutboundV1(chan) => {
 				let (open_channel, funding_created) = chan.signer_maybe_unblocked(chain_hash, logger);
 				Some(SignerResumeUpdates {
@@ -8712,13 +8731,14 @@ where
 	/// successfully and we should restore normal operation. Returns messages which should be sent
 	/// to the remote side.
 	#[rustfmt::skip]
-	pub fn monitor_updating_restored<L: Deref, NS: Deref>(
+	pub fn monitor_updating_restored<L: Deref, NS: Deref, CBP>(
 		&mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
-		user_config: &UserConfig, best_block_height: u32
+		user_config: &UserConfig, best_block_height: u32, path_for_release_htlc: CBP
 	) -> MonitorRestoreUpdates
 	where
 		L::Target: Logger,
-		NS::Target: NodeSigner
+		NS::Target: NodeSigner,
+		CBP: Fn(u64) -> BlindedMessagePath
 	{
 		assert!(self.context.channel_state.is_monitor_update_in_progress());
 		self.context.channel_state.clear_monitor_update_in_progress();
@@ -8787,7 +8807,7 @@ where
 		}
 
 		let mut raa = if self.context.monitor_pending_revoke_and_ack {
-			self.get_last_revoke_and_ack(logger)
+			self.get_last_revoke_and_ack(path_for_release_htlc, logger)
 		} else { None };
 		let mut commitment_update = if self.context.monitor_pending_commitment_signed {
 			self.get_last_commitment_update_for_send(logger).ok()
@@ -8877,7 +8897,9 @@ where
 	/// Indicates that the signer may have some signatures for us, so we should retry if we're
 	/// blocked.
 	#[rustfmt::skip]
-	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
+	pub fn signer_maybe_unblocked<L: Deref, CBP>(
+		&mut self, logger: &L, path_for_release_htlc: CBP
+	) -> SignerResumeUpdates where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath {
 		if !self.holder_commitment_point.can_advance() {
 			log_trace!(logger, "Attempting to update holder per-commitment point...");
 			self.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger);
@@ -8905,7 +8927,7 @@ where
 		} else { None };
 		let mut revoke_and_ack = if self.context.signer_pending_revoke_and_ack {
 			log_trace!(logger, "Attempting to generate pending revoke and ack...");
-			self.get_last_revoke_and_ack(logger)
+			self.get_last_revoke_and_ack(path_for_release_htlc, logger)
 		} else { None };
 
 		if self.context.resend_order == RAACommitmentOrder::CommitmentFirst
@@ -8976,9 +8998,12 @@ where
 		}
 	}
 
-	fn get_last_revoke_and_ack<L: Deref>(&mut self, logger: &L) -> Option<msgs::RevokeAndACK>
+	fn get_last_revoke_and_ack<L: Deref, CBP>(
+		&mut self, path_for_release_htlc: CBP, logger: &L,
+	) -> Option<msgs::RevokeAndACK>
 	where
 		L::Target: Logger,
+		CBP: Fn(u64) -> BlindedMessagePath,
 	{
 		debug_assert!(
 			self.holder_commitment_point.next_transaction_number() <= INITIAL_COMMITMENT_NUMBER - 2
@@ -8991,6 +9016,14 @@ where
 			.ok();
 		if let Some(per_commitment_secret) = per_commitment_secret {
 			if self.holder_commitment_point.can_advance() {
+				let mut release_htlc_message_paths = Vec::new();
+				for htlc in &self.context.pending_inbound_htlcs {
+					if htlc.state.should_hold_htlc() {
+						let path = path_for_release_htlc(htlc.htlc_id);
+						release_htlc_message_paths.push((htlc.htlc_id, path));
+					}
+				}
+
 				self.context.signer_pending_revoke_and_ack = false;
 				return Some(msgs::RevokeAndACK {
 					channel_id: self.context.channel_id,
@@ -8998,6 +9031,7 @@ where
 					next_per_commitment_point: self.holder_commitment_point.next_point(),
 					#[cfg(taproot)]
 					next_local_nonce: None,
+					release_htlc_message_paths,
 				});
 			}
 		}
@@ -9045,6 +9079,7 @@ where
 				onion_routing_packet: (**onion_packet).clone(),
 				skimmed_fee_msat: htlc.skimmed_fee_msat,
 				blinding_point: htlc.blinding_point,
+				hold_htlc: None, // Will be set by the async sender when support is added
 			});
 		}
 	}
@@ -9144,13 +9179,15 @@ where
 	/// May panic if some calls other than message-handling calls (which will all Err immediately)
 	/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
 	#[rustfmt::skip]
-	pub fn channel_reestablish<L: Deref, NS: Deref>(
+	pub fn channel_reestablish<L: Deref, NS: Deref, CBP>(
 		&mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
-		chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
+		chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock,
+		path_for_release_htlc: CBP,
 	) -> Result<ReestablishResponses, ChannelError>
 	where
 		L::Target: Logger,
-		NS::Target: NodeSigner
+		NS::Target: NodeSigner,
+		CBP: Fn(u64) -> BlindedMessagePath
 	{
 		if !self.context.channel_state.is_peer_disconnected() {
 			// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
@@ -9369,7 +9406,7 @@ where
 				self.context.monitor_pending_revoke_and_ack = true;
 				None
 			} else {
-				self.get_last_revoke_and_ack(logger)
+				self.get_last_revoke_and_ack(path_for_release_htlc, logger)
 			}
 		} else {
 			debug_assert!(false, "All values should have been handled in the four cases above");
@@ -16633,6 +16670,7 @@ mod tests {
 			chain_hash,
 			&config,
 			0,
+			|_| unreachable!()
 		);
 
 		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
@@ -16647,6 +16685,7 @@ mod tests {
 			chain_hash,
 			&config,
 			0,
+			|_| unreachable!()
 		);
 		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
 		// as the funding transaction depends on all channels in the batch becoming ready.
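> Reviewer note: the behavioral core of `get_last_revoke_and_ack` above is the per-HTLC path collection — every pending inbound HTLC still flagged `hold_htlc` gets a fresh reply path from the `path_for_release_htlc` callback. A standalone model of that selection, with simplified stand-in types (not LDK code):

```rust
struct Htlc { htlc_id: u64, hold: bool }
struct Path(u64); // stand-in for BlindedMessagePath

// Pair each still-held inbound HTLC's id with a reply path from the callback,
// mirroring the loop that fills release_htlc_message_paths in the diff.
fn collect_release_paths(htlcs: &[Htlc], path_for: impl Fn(u64) -> Path) -> Vec<(u64, Path)> {
    htlcs.iter().filter(|h| h.hold).map(|h| (h.htlc_id, path_for(h.htlc_id))).collect()
}

fn main() {
    let htlcs = [Htlc { htlc_id: 1, hold: false }, Htlc { htlc_id: 2, hold: true }];
    let paths = collect_release_paths(&htlcs, Path);
    assert_eq!(paths.len(), 1);
    assert_eq!(paths[0].0, 2);
}
```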
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index dfc10e84a2a..e5b501f512d 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -229,6 +229,9 @@ pub enum PendingHTLCRouting {
 		blinded: Option<BlindedForward>,
 		/// The absolute CLTV of the inbound HTLC
 		incoming_cltv_expiry: Option<u32>,
+		/// Whether this HTLC should be held by our node until we receive a corresponding
+		/// [`ReleaseHeldHtlc`] onion message.
+		hold_htlc: Option<()>,
 	},
 	/// An HTLC which should be forwarded on to another Trampoline node.
 	TrampolineForward {
@@ -371,6 +374,15 @@ impl PendingHTLCRouting {
 			Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
 		}
 	}
+
+	/// Whether this HTLC should be held by our node until we receive a corresponding
+	/// [`ReleaseHeldHtlc`] onion message.
+	pub(super) fn should_hold_htlc(&self) -> bool {
+		match self {
+			Self::Forward { hold_htlc: Some(()), .. } => true,
+			_ => false,
+		}
+	}
 }
 
 /// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it
@@ -638,9 +650,40 @@ impl Readable for PaymentId {
 /// An identifier used to uniquely identify an intercepted HTLC to LDK.
 ///
 /// This is not exported to bindings users as we just use [u8; 32] directly
-#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
+#[derive(Hash, Copy, Clone, PartialEq, Eq)]
 pub struct InterceptId(pub [u8; 32]);
 
+impl InterceptId {
+	/// This intercept id corresponds to an HTLC that will be forwarded on
+	/// [`ChannelManager::forward_intercepted_htlc`].
+	fn from_incoming_shared_secret(ss: &[u8; 32]) -> Self {
+		Self(Sha256::hash(ss).to_byte_array())
+	}
+
+	/// This intercept id corresponds to an HTLC that will be forwarded on receipt of a
+	/// [`ReleaseHeldHtlc`] onion message.
+	fn from_htlc_id_and_chan_id(
+		htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
+	) -> Self {
+		let mut sha = Sha256::engine();
+		sha.input(&htlc_id.to_be_bytes());
+		sha.input(&channel_id.0);
+		sha.input(&counterparty_node_id.serialize());
+		Self(Sha256::from_engine(sha).to_byte_array())
+	}
+}
+
+impl Borrow<[u8]> for InterceptId {
+	fn borrow(&self) -> &[u8] {
+		&self.0[..]
+	}
+}
+
+impl_fmt_traits! {
+	impl fmt_traits for InterceptId {
+		const LENGTH: usize = 32;
+	}
+}
+
 impl Writeable for InterceptId {
 	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
 		self.0.write(w)
@@ -2506,8 +2549,8 @@ where
 // `total_consistency_lock`
 //  |
 //  |__`forward_htlcs`
-//  |   |
-//  |   |__`pending_intercepted_htlcs`
+//  |
+//  |__`pending_intercepted_htlcs`
 //  |
 //  |__`decode_update_add_htlcs`
 //  |
@@ -2598,8 +2641,14 @@ pub struct ChannelManager<
 	pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
 	#[cfg(not(test))]
 	forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
-	/// Storage for HTLCs that have been intercepted and bubbled up to the user. We hold them here
-	/// until the user tells us what we should do with them.
+	/// Storage for HTLCs that have been intercepted.
+	///
+	/// These HTLCs fall into two categories:
+	/// 1. HTLCs that are bubbled up to the user and held until the invocation of
+	///    [`ChannelManager::forward_intercepted_htlc`] or [`ChannelManager::fail_intercepted_htlc`]
+	///    (or timeout)
+	/// 2. HTLCs that are being held on behalf of an often-offline sender until receipt of a
+	///    [`ReleaseHeldHtlc`] onion message from an often-offline recipient
 	///
 	/// See `ChannelManager` struct-level documentation for lock order requirements.
 	pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
@@ -3394,18 +3443,20 @@ macro_rules! emit_initial_channel_ready_event {
 /// set for this channel is empty!
 macro_rules! handle_monitor_update_completion {
 	($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+		let channel_id = $chan.context.channel_id();
+		let counterparty_node_id = $chan.context.get_counterparty_node_id();
 		#[cfg(debug_assertions)]
 		{
 			let in_flight_updates =
-				$peer_state.in_flight_monitor_updates.get(&$chan.context.channel_id());
+				$peer_state.in_flight_monitor_updates.get(&channel_id);
 			assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true));
 			assert_eq!($chan.blocked_monitor_updates_pending(), 0);
 		}
 		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
 		let mut updates = $chan.monitor_updating_restored(&&logger,
 			&$self.node_signer, $self.chain_hash, &*$self.config.read().unwrap(),
-			$self.best_block.read().unwrap().height);
-		let counterparty_node_id = $chan.context.get_counterparty_node_id();
+			$self.best_block.read().unwrap().height,
+			|htlc_id| $self.path_for_release_held_htlc(htlc_id, &channel_id, &counterparty_node_id));
 		let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
 			// We only send a channel_update in the case where we are just now sending a
 			// channel_ready and the channel is in a usable state. We may re-send a
@@ -3421,7 +3472,7 @@ macro_rules! handle_monitor_update_completion {
 		} else { None };
 
 		let update_actions = $peer_state.monitor_update_blocked_actions
-			.remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
+			.remove(&channel_id).unwrap_or(Vec::new());
 
 		let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
 			&mut $peer_state.pending_msg_events, $chan, updates.raa,
@@ -3433,7 +3484,6 @@ macro_rules! handle_monitor_update_completion {
 			$peer_state.pending_msg_events.push(upd);
 		}
 
-		let channel_id = $chan.context.channel_id();
 		let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid(&$chan.funding);
 		core::mem::drop($peer_state_lock);
 		core::mem::drop($per_peer_state_lock);
@@ -5418,6 +5468,18 @@ where
 		res
 	}
 
+	/// If we are holding an HTLC on behalf of an often-offline sender, this method allows us to
+	/// create a path for the sender to use as the reply path when they send the recipient a
+	/// [`HeldHtlcAvailable`] onion message, so that the recipient's [`ReleaseHeldHtlc`] response
+	/// will be received by our node.
+	fn path_for_release_held_htlc(
+		&self, htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
+	) -> BlindedMessagePath {
+		let intercept_id =
+			InterceptId::from_htlc_id_and_chan_id(htlc_id, channel_id, counterparty_node_id);
+		self.flow.path_for_release_held_htlc(intercept_id, &*self.entropy_source)
+	}
+
 	/// Signals that no further attempts for the given payment should occur. Useful if you have a
 	/// pending outbound payment with retries remaining, but wish to stop retrying the payment before
 	/// retries are exhausted.
@@ -6282,11 +6344,19 @@ where
 		})?;
 
 		let routing = match payment.forward_info.routing {
-			PendingHTLCRouting::Forward { onion_packet, blinded, incoming_cltv_expiry, .. } => {
+			PendingHTLCRouting::Forward {
+				onion_packet,
+				blinded,
+				incoming_cltv_expiry,
+				hold_htlc,
+				..
+			} => {
+				debug_assert!(hold_htlc.is_none(),
+					"Held intercept HTLCs should not be surfaced in an event until the recipient comes online");
 				PendingHTLCRouting::Forward {
 					onion_packet,
 					blinded,
 					incoming_cltv_expiry,
+					hold_htlc,
 					short_channel_id: next_hop_scid,
 				}
 			},
@@ -6420,6 +6490,32 @@ where
 			});
 			let shared_secret = next_hop.shared_secret().secret_bytes();
 
+			// Nodes shouldn't expect us to hold HTLCs for them if we don't advertise htlc_hold feature
+			// support.
+			//
+			// If we wanted to pretend to be a node that didn't understand the feature at all here, the
+			// correct behavior would've been to disconnect the sender when we first received the
+			// update_add message. However, this would make the `UserConfig::enable_htlc_hold` option
+			// unsafe -- if our node switched the config option from on to off just after the sender
+			// enqueued their update_add + CS, the sender would continue retransmitting those messages
+			// and we would keep disconnecting them until the HTLC timed out.
+			if update_add_htlc.hold_htlc.is_some()
+				&& !BaseMessageHandler::provided_node_features(self).supports_htlc_hold()
+			{
+				let reason = LocalHTLCFailureReason::TemporaryNodeFailure;
+				let htlc_fail = self.htlc_failure_from_update_add_err(
+					&update_add_htlc,
+					&incoming_counterparty_node_id,
+					reason,
+					is_intro_node_blinded_forward,
+					&shared_secret,
+				);
+				let failure_type =
+					get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash);
+				htlc_fails.push((htlc_fail, failure_type, reason.into()));
+				continue;
+			}
+
 			// Process the HTLC on the incoming channel.
 			match self.do_funded_channel_callback(
 				incoming_scid,
@@ -10661,6 +10757,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 					let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
 					let payment_hash = forward_info.payment_hash;
 
+					let logger = WithContext::from(
+						&self.logger,
+						None,
+						Some(prev_channel_id),
+						Some(payment_hash),
+					);
 					let pending_add = PendingAddHTLCInfo {
 						prev_short_channel_id,
 						prev_counterparty_node_id,
@@ -10670,77 +10772,99 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 						prev_user_channel_id,
 						forward_info,
 					};
-					match forward_htlcs.entry(scid) {
-						hash_map::Entry::Occupied(mut entry) => {
-							entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add));
-						},
-						hash_map::Entry::Vacant(entry) => {
-							if !is_our_scid
-								&& pending_add.forward_info.incoming_amt_msat.is_some()
-								&& fake_scid::is_valid_intercept(
-									&self.fake_scid_rand_bytes,
-									scid,
-									&self.chain_hash,
-								) {
-								let intercept_id = InterceptId(
-									Sha256::hash(&pending_add.forward_info.incoming_shared_secret)
-										.to_byte_array(),
+					let mut fail_intercepted_htlc = |pending_add: PendingAddHTLCInfo| {
+						let htlc_source =
+							HTLCSource::PreviousHopData(pending_add.htlc_previous_hop_data());
+						let reason = HTLCFailReason::from_failure_code(
+							LocalHTLCFailureReason::UnknownNextPeer,
+						);
+						let failure_type = HTLCHandlingFailureType::InvalidForward {
+							requested_forward_scid: scid,
+						};
+						failed_intercept_forwards.push((
+							htlc_source,
+							payment_hash,
+							reason,
+							failure_type,
+						));
+					};
+
+					// In the case that we have an HTLC that we're supposed to hold onto until the
+					// recipient comes online *and* the outbound scid is encoded as
+					// `fake_scid::is_valid_intercept`, we should first wait for the recipient to come
+					// online before generating an `HTLCIntercepted` event, since the event cannot be
+					// acted on until the recipient is online to cooperatively open the JIT channel. Once
+					// we receive the `ReleaseHeldHtlc` message from the recipient, we will circle back
+					// here and resume generating the event below.
+					if pending_add.forward_info.routing.should_hold_htlc() {
+						let intercept_id = InterceptId::from_htlc_id_and_chan_id(
+							prev_htlc_id,
+							&prev_channel_id,
+							&prev_counterparty_node_id,
+						);
+						let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
+						match held_htlcs.entry(intercept_id) {
+							hash_map::Entry::Vacant(entry) => {
+								log_trace!(
+									logger,
+									"Intercepted held HTLC with id {}, holding until the recipient is online",
+									intercept_id
 								);
-								let mut pending_intercepts =
-									self.pending_intercepted_htlcs.lock().unwrap();
-								match pending_intercepts.entry(intercept_id) {
-									hash_map::Entry::Vacant(entry) => {
-										new_intercept_events.push_back((
-											events::Event::HTLCIntercepted {
-												requested_next_hop_scid: scid,
-												payment_hash,
-												inbound_amount_msat: pending_add
-													.forward_info
-													.incoming_amt_msat
-													.unwrap(),
-												expected_outbound_amount_msat: pending_add
-													.forward_info
-													.outgoing_amt_msat,
-												intercept_id,
-											},
-											None,
-										));
-										entry.insert(pending_add);
+								entry.insert(pending_add);
+							},
+							hash_map::Entry::Occupied(_) => {
+								debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id");
+								fail_intercepted_htlc(pending_add);
+							},
+						}
+					} else if !is_our_scid
+						&& pending_add.forward_info.incoming_amt_msat.is_some()
+						&& fake_scid::is_valid_intercept(
+							&self.fake_scid_rand_bytes,
+							scid,
+							&self.chain_hash,
+						) {
+						let intercept_id = InterceptId::from_incoming_shared_secret(
+							&pending_add.forward_info.incoming_shared_secret,
+						);
+						let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
+						match pending_intercepts.entry(intercept_id) {
+							hash_map::Entry::Vacant(entry) => {
+								new_intercept_events.push_back((
+									events::Event::HTLCIntercepted {
+										requested_next_hop_scid: scid,
+										payment_hash,
+										inbound_amount_msat: pending_add
+											.forward_info
+											.incoming_amt_msat
+											.unwrap(),
+										expected_outbound_amount_msat: pending_add
+											.forward_info
+											.outgoing_amt_msat,
+										intercept_id,
 									},
-									hash_map::Entry::Occupied(_) => {
-										let logger = WithContext::from(
-											&self.logger,
-											None,
-											Some(prev_channel_id),
-											Some(payment_hash),
-										);
-										log_info!(
+									None,
+								));
+								entry.insert(pending_add);
+							},
+							hash_map::Entry::Occupied(_) => {
+								log_info!(
 										logger,
 										"Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}",
 										scid
 									);
-										let htlc_source = HTLCSource::PreviousHopData(
-											pending_add.htlc_previous_hop_data(),
-										);
-										let reason = HTLCFailReason::from_failure_code(
-											LocalHTLCFailureReason::UnknownNextPeer,
-										);
-										let failure_type =
-											HTLCHandlingFailureType::InvalidForward {
-												requested_forward_scid: scid,
-											};
-										failed_intercept_forwards.push((
-											htlc_source,
-											payment_hash,
-											reason,
-											failure_type,
-										));
-									},
-								}
-							} else {
+								fail_intercepted_htlc(pending_add);
+							},
+						}
+					} else {
+						match forward_htlcs.entry(scid) {
+							hash_map::Entry::Occupied(mut entry) => {
+								entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add));
+							},
+							hash_map::Entry::Vacant(entry) => {
 								entry.insert(vec![HTLCForwardInfo::AddHTLC(pending_add)]);
-							}
-						},
+							},
+						}
 					}
 				}
 			}
@@ -11054,6 +11178,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			self.chain_hash,
 			&self.config.read().unwrap(),
 			&*self.best_block.read().unwrap(),
+			|htlc_id| self.path_for_release_held_htlc(htlc_id, &msg.channel_id, counterparty_node_id)
 		);
 		let responses = try_channel_entry!(self, peer_state, res, chan_entry);
 		let mut channel_update = None;
@@ -11529,9 +11654,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		// Returns whether we should remove this channel as it's just been closed.
 		let unblock_chan = |chan: &mut Channel<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| -> Option<ShutdownResult> {
+			let channel_id = chan.context().channel_id();
 			let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
 			let node_id = chan.context().get_counterparty_node_id();
-			if let Some(msgs) = chan.signer_maybe_unblocked(self.chain_hash, &&logger) {
+			if let Some(msgs) = chan.signer_maybe_unblocked(
+				self.chain_hash, &&logger,
+				|htlc_id| self.path_for_release_held_htlc(htlc_id, &channel_id, &node_id)
+			) {
 				if let Some(msg) = msgs.open_channel {
 					pending_msg_events.push(MessageSendEvent::SendOpenChannel {
 						node_id,
@@ -11552,7 +11681,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				}
 				let cu_msg = msgs.commitment_update.map(|updates| MessageSendEvent::UpdateHTLCs {
 					node_id,
-					channel_id: chan.context().channel_id(),
+					channel_id,
 					updates,
 				});
 				let raa_msg = msgs.revoke_and_ack.map(|msg| MessageSendEvent::SendRevokeAndACK {
@@ -14652,18 +14781,64 @@ where
 	}
 
 	fn handle_release_held_htlc(&self, _message: ReleaseHeldHtlc, context: AsyncPaymentsContext) {
-		let payment_id = match context {
-			AsyncPaymentsContext::OutboundPayment { payment_id } => payment_id,
-			_ => return,
-		};
+		match context {
+			AsyncPaymentsContext::OutboundPayment { payment_id } => {
+				if let Err(e) = self.send_payment_for_static_invoice(payment_id) {
+					log_trace!(
+						self.logger,
+						"Failed to release held HTLC with payment id {}: {:?}",
+						payment_id,
+						e
+					);
+				}
+			},
+			AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id } => {
+				let mut htlc = {
+					let mut pending_intercept_htlcs =
+						self.pending_intercepted_htlcs.lock().unwrap();
+					match pending_intercept_htlcs.remove(&intercept_id) {
+						Some(htlc) => htlc,
+						None => {
+							log_trace!(
+								self.logger,
+								"Failed to release HTLC with intercept_id {}: HTLC not found",
+								intercept_id
+							);
+							return;
+						},
+					}
+				};
+				match htlc.forward_info.routing {
+					PendingHTLCRouting::Forward { ref mut hold_htlc, .. } => {
+						debug_assert!(hold_htlc.is_some());
+						*hold_htlc = None;
+					},
+					_ => {
+						debug_assert!(false, "HTLC intercepts can only be forwards");
+						return;
+					},
+				}
 
-		if let Err(e) = self.send_payment_for_static_invoice(payment_id) {
-			log_trace!(
-				self.logger,
-				"Failed to release held HTLC with payment id {}: {:?}",
-				payment_id,
-				e
-			);
+				let logger = WithContext::from(
+					&self.logger,
+					Some(htlc.prev_counterparty_node_id),
+					Some(htlc.prev_channel_id),
+					Some(htlc.forward_info.payment_hash),
+				);
+				log_trace!(logger, "Releasing held htlc with intercept_id {}", intercept_id);
+
+				let mut per_source_pending_forward = [(
+					htlc.prev_short_channel_id,
+					htlc.prev_counterparty_node_id,
+					htlc.prev_funding_outpoint,
+					htlc.prev_channel_id,
+					htlc.prev_user_channel_id,
+					vec![(htlc.forward_info, htlc.prev_htlc_id)],
+				)];
+				self.forward_htlcs(&mut per_source_pending_forward);
+				PersistenceNotifierGuard::notify_on_drop(self);
+			},
+			_ => return,
 		}
 	}
@@ -14847,6 +15022,13 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
 		features.set_anchor_zero_fee_commitments_optional();
 	}
 
+	// Nodes that hold HTLCs are expected to be always-online, so only advertise the htlc_hold
+	// feature if we are configured to support it.
+	#[cfg(test)]
+	if config.enable_htlc_hold {
+		features.set_htlc_hold_optional();
+	}
+
 	features
 }
@@ -14871,6 +15053,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting,
 		(1, blinded, option),
 		(2, short_channel_id, required),
 		(3, incoming_cltv_expiry, option),
+		(4, hold_htlc, option),
 	},
 	(1, Receive) => {
 		(0, payment_data, required),
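> Reviewer note: the two `InterceptId` constructors above deliberately hash different inputs — user-intercepted HTLCs are keyed by the incoming shared secret, while held HTLCs are keyed by `(htlc_id, channel_id, counterparty)` so the id can be recomputed later when building the reply path. A standalone model using the `bitcoin` hashes API directly (the real constructors are private to `channelmanager`):

```rust
use bitcoin::hashes::{sha256::Hash as Sha256, Hash, HashEngine};

// Key for HTLCs surfaced to the user via Event::HTLCIntercepted.
fn id_from_shared_secret(ss: &[u8; 32]) -> [u8; 32] {
    Sha256::hash(ss).to_byte_array()
}

// Key for HTLCs held until a ReleaseHeldHtlc message arrives. Both the RAA
// path builder and the release handler can recompute it from data they hold.
fn id_from_htlc(htlc_id: u64, channel_id: &[u8; 32], counterparty: &[u8; 33]) -> [u8; 32] {
    let mut engine = Sha256::engine();
    engine.input(&htlc_id.to_be_bytes());
    engine.input(channel_id);
    engine.input(counterparty);
    Sha256::from_engine(engine).to_byte_array()
}

fn main() {
    assert_ne!(id_from_shared_secret(&[1; 32]), id_from_htlc(0, &[1; 32], &[2; 33]));
}
```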
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 03fd8167b77..4d5355a8d78 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -2288,6 +2288,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() {
 			onion_routing_packet,
 			skimmed_fee_msat: None,
 			blinding_point: None,
+			hold_htlc: None,
 		};
 		nodes[0].node.handle_update_add_htlc(node_b_id, &update_add_htlc);
 	}
@@ -6547,6 +6548,7 @@ pub fn test_counterparty_raa_skip_no_crash() {
 		next_per_commitment_point,
 		#[cfg(taproot)]
 		next_local_nonce: None,
+		release_htlc_message_paths: Vec::new(),
 	};
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &raa);
 	assert_eq!(
diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs
index f90b8b880bc..dc5d07c180e 100644
--- a/lightning/src/ln/htlc_reserve_unit_tests.rs
+++ b/lightning/src/ln/htlc_reserve_unit_tests.rs
@@ -835,6 +835,7 @@ pub fn do_test_fee_spike_buffer(cfg: Option<UserConfig>, htlc_fails: bool) {
 		onion_routing_packet: onion_packet,
 		skimmed_fee_msat: None,
 		blinding_point: None,
+		hold_htlc: None,
 	};
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &msg);
@@ -935,6 +936,7 @@ pub fn do_test_fee_spike_buffer(cfg: Option<UserConfig>, htlc_fails: bool) {
 		next_per_commitment_point: next_local_point,
 		#[cfg(taproot)]
 		next_local_nonce: None,
+		release_htlc_message_paths: Vec::new(),
 	};
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg);
 	expect_and_process_pending_htlcs(&nodes[1], false);
@@ -1072,6 +1074,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
 		onion_routing_packet: onion_packet,
 		skimmed_fee_msat: None,
 		blinding_point: None,
+		hold_htlc: None,
 	};
 
 	nodes[0].node.handle_update_add_htlc(node_b_id, &msg);
@@ -1255,6 +1258,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
 		onion_routing_packet: onion_packet,
 		skimmed_fee_msat: None,
 		blinding_point: None,
+		hold_htlc: None,
 	};
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &msg);
@@ -1637,6 +1641,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
 		onion_routing_packet: onion_packet.clone(),
 		skimmed_fee_msat: None,
 		blinding_point: None,
+		hold_htlc: None,
 	};
 
 	for i in 0..50 {
@@ -2242,6 +2247,7 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) {
 		onion_routing_packet,
 		skimmed_fee_msat: None,
 		blinding_point: None,
+		hold_htlc: None,
 	};
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &msg);
@@ -2376,6 +2382,7 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) {
 		next_per_commitment_point: next_local_point,
 		#[cfg(taproot)]
 		next_local_nonce: None,
+		release_htlc_message_paths: Vec::new(),
 	};
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg);
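> Reviewer note: both new wire fields ride in the experimental TLV range of their messages — type 75537, mirroring `skimmed_fee_msat`'s 65537 — as the `msgs.rs` hunks below show. Because the types are odd, BOLT 1's "it's OK to be odd" rule means an unaware peer simply skips them, which is why the `channelmanager.rs` hunk above explicitly fails held HTLCs when `htlc_hold` support isn't advertised rather than relying on the TLV being understood. A quick standalone check of how such a type number is encoded on the wire (BOLT 1 BigSize; not LDK code):

```rust
// BigSize (BOLT 1): one byte below 0xfd, then 0xfd + u16, 0xfe + u32, 0xff + u64.
fn bigsize(v: u64) -> Vec<u8> {
    match v {
        0..=0xfc => vec![v as u8],
        0xfd..=0xffff => {
            let mut b = vec![0xfd];
            b.extend_from_slice(&(v as u16).to_be_bytes());
            b
        },
        0x1_0000..=0xffff_ffff => {
            let mut b = vec![0xfe];
            b.extend_from_slice(&(v as u32).to_be_bytes());
            b
        },
        _ => {
            let mut b = vec![0xff];
            b.extend_from_slice(&v.to_be_bytes());
            b
        },
    }
}

fn main() {
    // 75537 = 0x12711: five bytes on the wire, and odd, so skippable by old peers.
    assert_eq!(bigsize(75537), vec![0xfe, 0x00, 0x01, 0x27, 0x11]);
    assert_eq!(bigsize(65537), vec![0xfe, 0x00, 0x01, 0x00, 0x01]);
}
```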
diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs
index 0a6817e7712..b3b21340554 100644
--- a/lightning/src/ln/msgs.rs
+++ b/lightning/src/ln/msgs.rs
@@ -31,6 +31,7 @@ use bitcoin::secp256k1::ecdsa::Signature;
 use bitcoin::secp256k1::PublicKey;
 use bitcoin::{secp256k1, Transaction, Witness};
 
+use crate::blinded_path::message::BlindedMessagePath;
 use crate::blinded_path::payment::{
 	BlindedPaymentTlvs, ForwardTlvs, ReceiveTlvs, UnauthenticatedReceiveTlvs,
 };
@@ -765,6 +766,11 @@ pub struct UpdateAddHTLC {
 	/// Provided if we are relaying or receiving a payment within a blinded path, to decrypt the onion
 	/// routing packet and the recipient-provided encrypted payload within.
 	pub blinding_point: Option<PublicKey>,
+	/// Set to `Some` if the sender wants the receiver of this message to hold onto this HTLC until
+	/// receipt of a [`ReleaseHeldHtlc`] onion message from the payment recipient.
+	///
+	/// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
+	pub hold_htlc: Option<()>,
 }
 
 /// An onion message to be sent to or received from a peer
@@ -883,6 +889,13 @@ pub struct RevokeAndACK {
 	#[cfg(taproot)]
 	/// Musig nonce the recipient should use in their next commitment signature message
 	pub next_local_nonce: Option<musig2::types::PublicNonce>,
+	/// A list of `(htlc_id, blinded_path)` pairs. The receiver of this message will use the blinded
+	/// paths as reply paths to [`HeldHtlcAvailable`] onion messages that they send to the
+	/// often-offline recipient of this HTLC. The `htlc_id` tells the receiver of this message which
+	/// held HTLC a given blinded path corresponds to.
+	///
+	/// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable
+	pub release_htlc_message_paths: Vec<(u64, BlindedMessagePath)>,
 }
 
 /// An [`update_fee`] message to be sent to or received from a peer
@@ -3255,7 +3268,9 @@ impl_writeable_msg!(RevokeAndACK, {
 	channel_id,
 	per_commitment_secret,
 	next_per_commitment_point
-}, {});
+}, {
+	(75537, release_htlc_message_paths, optional_vec)
+});
 
 #[cfg(taproot)]
 impl_writeable_msg!(RevokeAndACK, {
@@ -3263,7 +3278,8 @@ impl_writeable_msg!(RevokeAndACK, {
 	per_commitment_secret,
 	next_per_commitment_point
 }, {
-	(4, next_local_nonce, option)
+	(4, next_local_nonce, option),
+	(75537, release_htlc_message_paths, optional_vec)
 });
 
 impl_writeable_msg!(Shutdown, {
@@ -3350,7 +3366,10 @@ impl_writeable_msg!(UpdateAddHTLC, {
 	onion_routing_packet,
 }, {
 	(0, blinding_point, option),
-	(65537, skimmed_fee_msat, option)
+	(65537, skimmed_fee_msat, option),
+	// TODO: currently we may fail to read the `ChannelManager` if we write a new even TLV in this
+	// message and then downgrade. Once this is fixed, update the type here to match BOLTs PR 989.
+	(75537, hold_htlc, option),
 });
 
 impl LengthReadable for OnionMessage {
@@ -5847,6 +5866,7 @@ mod tests {
 			onion_routing_packet,
 			skimmed_fee_msat: None,
 			blinding_point: None,
+			hold_htlc: None,
 		};
 		let encoded_value = update_add_htlc.encode();
 		let target_value = <Vec<u8>>::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d32144668701144760101010101010101010101010101010101010101010101010101010101010101000c89d4ff031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202").unwrap();
@@ -5967,6 +5987,7 @@ mod tests {
 			next_per_commitment_point: pubkey_1,
 			#[cfg(taproot)]
 			next_local_nonce: None,
+			release_htlc_message_paths: Vec::new(),
 		};
 		let encoded_value = raa.encode();
 		let target_value = <Vec<u8>>::from_hex("02020202020202020202020202020202020202020202020202020202020202020101010101010101010101010101010101010101010101010101010101010101031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f").unwrap();
diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs
index 79952faca9a..0934c6c812b 100644
--- a/lightning/src/ln/onion_payment.rs
+++ b/lightning/src/ln/onion_payment.rs
@@ -190,6 +190,7 @@ pub(super) fn create_fwd_pending_htlc_info(
 			onion_packet: outgoing_packet,
 			short_channel_id,
 			incoming_cltv_expiry: Some(msg.cltv_expiry),
+			hold_htlc: msg.hold_htlc,
 			blinded: intro_node_blinding_point.or(msg.blinding_point)
 				.map(|bp| BlindedForward {
 					inbound_blinding_point: bp,
@@ -753,6 +754,7 @@ mod tests {
 			onion_routing_packet,
 			skimmed_fee_msat: None,
 			blinding_point: None,
+			hold_htlc: None,
 		}
 	}
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 0af0463134e..3dab164e619 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -5017,6 +5017,7 @@ fn peel_payment_onion_custom_tlvs() {
 		skimmed_fee_msat: None,
 		onion_routing_packet,
 		blinding_point: None,
+		hold_htlc: None,
 	};
 	let peeled_onion = crate::ln::onion_payment::peel_payment_onion(
 		&update_add,
diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs
index 9040f785753..90252392baf 100644
--- a/lightning/src/offers/flow.rs
+++ b/lightning/src/offers/flow.rs
@@ -32,7 +32,7 @@ use crate::prelude::*;
 use crate::chain::BestBlock;
 use crate::ln::channel_state::ChannelDetails;
-use crate::ln::channelmanager::{PaymentId, CLTV_FAR_FAR_AWAY};
+use crate::ln::channelmanager::{InterceptId, PaymentId, CLTV_FAR_FAR_AWAY};
 use crate::ln::inbound_payment;
 use crate::offers::async_receive_offer_cache::AsyncReceiveOfferCache;
 use crate::offers::invoice::{
@@ -52,7 +52,7 @@ use crate::onion_message::async_payments::{
 	StaticInvoicePersisted,
 };
 use crate::onion_message::messenger::{
-	Destination, MessageRouter, MessageSendInstructions, Responder,
+	Destination, MessageRouter, MessageSendInstructions, Responder, PADDED_PATH_LENGTH,
 };
 use crate::onion_message::offers::OffersMessage;
 use crate::onion_message::packet::OnionMessageContents;
@@ -1163,6 +1163,33 @@ where
 		Ok(())
 	}
 
+	/// If we are holding an HTLC on behalf of an often-offline sender, this method allows us to
+	/// create a path for the sender to use as the reply path when they send the recipient a
+	/// [`HeldHtlcAvailable`] onion message, so that the recipient's [`ReleaseHeldHtlc`] response
+	/// will be received by our node.
+	///
+	/// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable
+	/// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
+	pub fn path_for_release_held_htlc<ES: Deref>(
+		&self, intercept_id: InterceptId, entropy: ES,
+	) -> BlindedMessagePath
+	where
+		ES::Target: EntropySource,
+	{
+		// In the future, we should support multi-hop paths here.
+		let context =
+			MessageContext::AsyncPayments(AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id });
+		let num_dummy_hops = PADDED_PATH_LENGTH.saturating_sub(1);
+		BlindedMessagePath::new_with_dummy_hops(
+			&[],
+			self.get_our_node_id(),
+			num_dummy_hops,
+			self.receive_auth_key,
+			context,
+			&*entropy,
+			&self.secp_ctx,
+		)
+	}
+
 	/// Enqueues the created [`DNSSECQuery`] to be sent to the counterparty.
 	///
 	/// # Peers
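> Reviewer note: in `path_for_release_held_htlc` above, the reply path has no intermediate hops — it is introduced by and terminates at our own node — so it is padded with `PADDED_PATH_LENGTH - 1` dummy hops to avoid advertising that fact. A sketch of the arithmetic; the constant's value here is an assumption for illustration (the real one lives in `onion_message::messenger`):

```rust
// Hypothetical value; the real constant is defined in LDK's messenger module.
const PADDED_PATH_LENGTH: usize = 4;

// One slot in the padded path is always the terminating node (us); the
// remainder is filled with dummy hops.
fn num_dummy_hops(intermediate_hops: usize) -> usize {
    PADDED_PATH_LENGTH.saturating_sub(intermediate_hops + 1)
}

fn main() {
    // The diff passes &[] for intermediate hops, matching its
    // PADDED_PATH_LENGTH.saturating_sub(1).
    assert_eq!(num_dummy_hops(0), PADDED_PATH_LENGTH - 1);
}
```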
diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs
index a0c74673799..f0c0330e14d 100644
--- a/lightning/src/util/config.rs
+++ b/lightning/src/util/config.rs
@@ -935,6 +935,18 @@ pub struct UserConfig {
 	///
 	/// Default value: `false`
 	pub enable_dual_funded_channels: bool,
+	/// LDK supports a feature for always-online nodes such that these nodes can hold onto an HTLC
+	/// from an often-offline channel peer until the often-offline payment recipient sends an onion
+	/// message telling the always-online node to release the HTLC. If this is set to `true`, our
+	/// node will carry out this feature for channel peers that request it.
+	///
+	/// This should only be set to `true` for nodes which expect to be online reliably.
+	///
+	/// Setting this to `true` may break backwards compatibility with LDK versions < 0.2.
+	///
+	/// Default value: `false`
+	#[cfg(test)]
+	pub enable_htlc_hold: bool,
 }
 
 impl Default for UserConfig {
@@ -949,6 +961,8 @@ impl Default for UserConfig {
 			accept_intercept_htlcs: false,
 			manually_handle_bolt12_invoices: false,
 			enable_dual_funded_channels: false,
+			#[cfg(test)]
+			enable_htlc_hold: false,
 		}
 	}
 }
diff --git a/lightning/src/util/ser_macros.rs b/lightning/src/util/ser_macros.rs
index ea7a3e8a2a2..647e7c77a6c 100644
--- a/lightning/src/util/ser_macros.rs
+++ b/lightning/src/util/ser_macros.rs
@@ -700,7 +700,7 @@ macro_rules! impl_writeable_msg {
 		impl $crate::util::ser::Writeable for $st {
 			fn write<W: $crate::util::ser::Writer>(&self, w: &mut W) -> Result<(), $crate::io::Error> {
 				$( self.$field.write(w)?; )*
-				$crate::encode_tlv_stream!(w, {$(($type, self.$tlvfield.as_ref(), $fieldty)),*});
+				$crate::encode_tlv_stream!(w, {$(($type, &self.$tlvfield, $fieldty)),*});
 				Ok(())
 			}
 		}
@@ -713,7 +713,7 @@ macro_rules! impl_writeable_msg {
 				$crate::decode_tlv_stream!(r, {$(($type, $tlvfield, $fieldty)),*});
 				Ok(Self {
 					$($field,)*
-					$($tlvfield),*
+					$($tlvfield: $crate::_init_tlv_based_struct_field!($tlvfield, $fieldty)),*
 				})
 			}
 		}
diff --git a/pending_changelog/4045-sender-lsp.txt b/pending_changelog/4045-sender-lsp.txt
new file mode 100644
index 00000000000..fa4243b5b7f
--- /dev/null
+++ b/pending_changelog/4045-sender-lsp.txt
@@ -0,0 +1,4 @@
+## Backwards Compat
+
+* Downgrading to prior versions of LDK after setting `UserConfig::enable_htlc_hold` may cause
+  `ChannelManager` deserialization to fail or HTLCs to time out (#4045, #4046)
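> Reviewer note: the new config knob is gated `#[cfg(test)]` in this diff, so the sketch below only compiles inside LDK's test builds today; once the flag ships publicly, opting in would look like this. The field name comes from the `config.rs` hunk above; everything else is standard LDK:

```rust
use lightning::util::config::UserConfig;

// An always-online node (e.g. an LSP) opting into holding HTLCs for its
// often-offline peers. Note the backwards-compatibility caveat in the
// changelog above: prior LDK versions may fail to deserialize a
// ChannelManager once held HTLCs are pending.
fn htlc_hold_config() -> UserConfig {
    let mut config = UserConfig::default();
    config.enable_htlc_hold = true; // advertises the optional htlc_hold bit
    config
}
```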