From b916c3596d085dcbfab7ac2e91b8e4df26267b53 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Thu, 19 Feb 2026 04:45:50 +0000 Subject: [PATCH 1/9] Add inbound and outbound checks for zero reserve channels The goal is to prevent any commitments with no outputs, since these are not broadcastable. --- lightning/src/ln/channel.rs | 72 +++++++----- lightning/src/sign/tx_builder.rs | 181 ++++++++++++++++++++++++++++--- 2 files changed, 214 insertions(+), 39 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 9361cd3c749..6feb35b2eda 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2786,12 +2786,26 @@ impl FundingScope { .funding_pubkey = counterparty_funding_pubkey; // New reserve values are based on the new channel value and are v2-specific - let counterparty_selected_channel_reserve_satoshis = - get_v2_channel_reserve_satoshis(post_channel_value, MIN_CHAN_DUST_LIMIT_SATOSHIS); - let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis( - post_channel_value, - context.counterparty_dust_limit_satoshis, - ); + let counterparty_selected_channel_reserve_satoshis = if prev_funding + .counterparty_selected_channel_reserve_satoshis + .expect("counterparty reserve is set") + == 0 + { + // If we previously had a 0-value reserve, continue with the same reserve + 0 + } else { + get_v2_channel_reserve_satoshis(post_channel_value, MIN_CHAN_DUST_LIMIT_SATOSHIS) + }; + let holder_selected_channel_reserve_satoshis = + if prev_funding.holder_selected_channel_reserve_satoshis == 0 { + // If the counterparty previously had a 0-value reserve, continue with the same reserve + 0 + } else { + get_v2_channel_reserve_satoshis( + post_channel_value, + context.counterparty_dust_limit_satoshis, + ) + }; Self { channel_transaction_parameters: post_channel_transaction_parameters, @@ -5032,27 +5046,27 @@ impl ChannelContext { )); } - if funding.is_outbound() { - let (local_stats, _local_htlcs) = self - 
.get_next_local_commitment_stats( - funding, - Some(HTLCAmountDirection { outbound: false, amount_msat: msg.amount_msat }), - include_counterparty_unknown_htlcs, - fee_spike_buffer_htlc, - self.feerate_per_kw, - dust_exposure_limiting_feerate, - ) - .map_err(|()| { - ChannelError::close(String::from("Balance exhausted on local commitment")) - })?; - // Check that they won't violate our local required channel reserve by adding this HTLC. - if local_stats.commitment_stats.holder_balance_msat + let (local_stats, _local_htlcs) = self + .get_next_local_commitment_stats( + funding, + Some(HTLCAmountDirection { outbound: false, amount_msat: msg.amount_msat }), + include_counterparty_unknown_htlcs, + fee_spike_buffer_htlc, + self.feerate_per_kw, + dust_exposure_limiting_feerate, + ) + .map_err(|()| { + ChannelError::close(String::from("Balance exhausted on local commitment")) + })?; + + // Check that they won't violate our local required channel reserve by adding this HTLC. + if funding.is_outbound() + && local_stats.commitment_stats.holder_balance_msat < funding.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 - { - return Err(ChannelError::close( - "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned() - )); - } + { + return Err(ChannelError::close( + "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned() + )); } Ok(()) @@ -5146,6 +5160,12 @@ impl ChannelContext { let commitment_txid = { let trusted_tx = commitment_data.tx.trust(); let bitcoin_tx = trusted_tx.built_transaction(); + if bitcoin_tx.transaction.output.is_empty() { + return Err(ChannelError::close( + "Commitment tx from peer has 0 outputs".to_owned(), + )); + } + let sighash = bitcoin_tx.get_sighash_all(&funding_script, funding.get_value_satoshis()); log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}", diff 
--git a/lightning/src/sign/tx_builder.rs b/lightning/src/sign/tx_builder.rs index 4273b62c7b7..4d62d28d9ad 100644 --- a/lightning/src/sign/tx_builder.rs +++ b/lightning/src/sign/tx_builder.rs @@ -206,6 +206,36 @@ fn get_dust_exposure_stats( } } +fn check_no_outputs( + is_outbound_from_holder: bool, holder_balance_before_fee_msat: u64, + counterparty_balance_before_fee_msat: u64, feerate_per_kw: u32, nondust_htlc_count: usize, + broadcaster_dust_limit_satoshis: u64, channel_type: &ChannelTypeFeatures, +) -> Result<(), ()> { + let commit_tx_fee_sat = commit_tx_fee_sat(feerate_per_kw, nondust_htlc_count, channel_type); + + let (real_holder_balance_msat, real_counterparty_balance_msat) = if is_outbound_from_holder { + ( + holder_balance_before_fee_msat.checked_sub(commit_tx_fee_sat * 1000).ok_or(())?, + counterparty_balance_before_fee_msat, + ) + } else { + ( + holder_balance_before_fee_msat, + counterparty_balance_before_fee_msat.checked_sub(commit_tx_fee_sat * 1000).ok_or(())?, + ) + }; + + // Make sure the commitment transaction has at least one output + let dust_limit_msat = broadcaster_dust_limit_satoshis * 1000; + if real_holder_balance_msat < dust_limit_msat + && real_counterparty_balance_msat < dust_limit_msat + && nondust_htlc_count == 0 + { + return Err(()); + } + Ok(()) +} + fn get_next_commitment_stats( local: bool, is_outbound_from_holder: bool, channel_value_satoshis: u64, value_to_holder_msat: u64, next_commitment_htlcs: &[HTLCAmountDirection], @@ -250,6 +280,15 @@ fn get_next_commitment_stats( channel_type, )?; + let (dust_exposure_msat, _extra_accepted_htlc_dust_exposure_msat) = get_dust_exposure_stats( + local, + next_commitment_htlcs, + feerate_per_kw, + dust_exposure_limiting_feerate, + broadcaster_dust_limit_satoshis, + channel_type, + ); + // Calculate fees on commitment transaction let nondust_htlc_count = next_commitment_htlcs .iter() @@ -257,18 +296,25 @@ fn get_next_commitment_stats( !htlc.is_dust(local, feerate_per_kw, 
broadcaster_dust_limit_satoshis, channel_type) }) .count(); - let commit_tx_fee_sat = commit_tx_fee_sat( + + // For zero-reserve channels, we check two things independently: + // 1) Given the current set of HTLCs and feerate, does the commitment have at least one output ? + check_no_outputs( + is_outbound_from_holder, + holder_balance_before_fee_msat, + counterparty_balance_before_fee_msat, feerate_per_kw, - nondust_htlc_count + addl_nondust_htlc_count, + nondust_htlc_count, + broadcaster_dust_limit_satoshis, channel_type, - ); + )?; - let (dust_exposure_msat, _extra_accepted_htlc_dust_exposure_msat) = get_dust_exposure_stats( - local, - next_commitment_htlcs, + // 2) Now including any additional non-dust HTLCs (usually the fee spike buffer HTLC), does the funder cover + // this bigger transaction fee ? The funder can dip below their dust limit to cover this case, as the + // commitment will have at least one output: the non-dust fee spike buffer HTLC offered by the counterparty. + let commit_tx_fee_sat = commit_tx_fee_sat( feerate_per_kw, - dust_exposure_limiting_feerate, - broadcaster_dust_limit_satoshis, + nondust_htlc_count + addl_nondust_htlc_count, channel_type, ); @@ -316,7 +362,7 @@ fn get_available_balances( if channel_type.supports_anchor_zero_fee_commitments() { 0 } else { 1 }; // Note that the feerate is 0 in zero-fee commitment channels, so this statement is a noop - let local_feerate = feerate_per_kw + let spiked_feerate = feerate_per_kw * if is_outbound_from_holder && !channel_type.supports_anchors_zero_fee_htlc_tx() { crate::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32 } else { @@ -328,19 +374,19 @@ fn get_available_balances( .filter(|htlc| { !htlc.is_dust( true, - local_feerate, + spiked_feerate, channel_constraints.holder_dust_limit_satoshis, channel_type, ) }) .count(); let local_max_commit_tx_fee_sat = commit_tx_fee_sat( - local_feerate, + spiked_feerate, local_nondust_htlc_count + fee_spike_buffer_htlc + 1, channel_type, ); let 
local_min_commit_tx_fee_sat = commit_tx_fee_sat( - local_feerate, + spiked_feerate, local_nondust_htlc_count + fee_spike_buffer_htlc, channel_type, ); @@ -512,7 +558,49 @@ fn get_available_balances( available_capacity_msat = 0; } - #[allow(deprecated)] // TODO: Remove once balance_msat is removed + // Now adjust our min and max size HTLC to make sure both the local and the remote commitments still have + // at least one output at the spiked feerate. + + let remote_nondust_htlc_count = pending_htlcs + .iter() + .filter(|htlc| { + !htlc.is_dust( + false, + spiked_feerate, + channel_constraints.counterparty_dust_limit_satoshis, + channel_type, + ) + }) + .count(); + + let (next_outbound_htlc_minimum_msat, available_capacity_msat) = + adjust_boundaries_if_max_dust_htlc_produces_no_output( + true, + is_outbound_from_holder, + local_balance_before_fee_msat, + remote_balance_before_fee_msat, + local_nondust_htlc_count, + spiked_feerate, + channel_constraints.holder_dust_limit_satoshis, + channel_type, + next_outbound_htlc_minimum_msat, + available_capacity_msat, + ); + + let (next_outbound_htlc_minimum_msat, available_capacity_msat) = + adjust_boundaries_if_max_dust_htlc_produces_no_output( + false, + is_outbound_from_holder, + local_balance_before_fee_msat, + remote_balance_before_fee_msat, + remote_nondust_htlc_count, + spiked_feerate, + channel_constraints.counterparty_dust_limit_satoshis, + channel_type, + next_outbound_htlc_minimum_msat, + available_capacity_msat, + ); + crate::ln::channel::AvailableBalances { inbound_capacity_msat: remote_balance_before_fee_msat .saturating_sub(channel_constraints.holder_selected_channel_reserve_satoshis * 1000), @@ -522,6 +610,73 @@ fn get_available_balances( } } +fn adjust_boundaries_if_max_dust_htlc_produces_no_output( + local: bool, is_outbound_from_holder: bool, holder_balance_before_fee_msat: u64, + counterparty_balance_before_fee_msat: u64, nondust_htlc_count: usize, feerate_per_kw: u32, + dust_limit_satoshis: u64, 
channel_type: &ChannelTypeFeatures, + next_outbound_htlc_minimum_msat: u64, available_capacity_msat: u64, +) -> (u64, u64) { + let tx_fee_sat = commit_tx_fee_sat(feerate_per_kw, nondust_htlc_count, channel_type); + let (holder_balance_msat, counterparty_balance_msat) = if is_outbound_from_holder { + ( + holder_balance_before_fee_msat.saturating_sub(tx_fee_sat.saturating_mul(1000)), + counterparty_balance_before_fee_msat, + ) + } else { + ( + holder_balance_before_fee_msat, + counterparty_balance_before_fee_msat.saturating_sub(tx_fee_sat.saturating_mul(1000)), + ) + }; + + let (htlc_success_tx_fee_sat, htlc_timeout_tx_fee_sat) = + second_stage_tx_fees_sat(channel_type, feerate_per_kw); + let min_nondust_htlc_sat = + dust_limit_satoshis + if local { htlc_timeout_tx_fee_sat } else { htlc_success_tx_fee_sat }; + let max_dust_htlc_msat = (min_nondust_htlc_sat.saturating_mul(1000)).saturating_sub(1); + + // If the biggest dust HTLC produces no outputs, then we have to say something... + let dust_limit_msat = dust_limit_satoshis.saturating_mul(1000); + if holder_balance_msat.saturating_sub(max_dust_htlc_msat) < dust_limit_msat + && counterparty_balance_msat < dust_limit_msat + && nondust_htlc_count == 0 + { + // If we are allowed to send non-dust HTLCs, set the min HTLC to the smallest non-dust HTLC... + if available_capacity_msat >= min_nondust_htlc_sat.saturating_mul(1000) { + ( + cmp::max( + min_nondust_htlc_sat.saturating_mul(1000), + next_outbound_htlc_minimum_msat, + ), + available_capacity_msat, + ) + // Otherwise, set the max HTLC to the biggest that still leaves our main balance output untrimmed. + // Note that this will be a dust HTLC. 
+ } else { + // Remember we've got no non-dust HTLCs on the commitment here, + // so we just account for a single non-dust HTLC + let fee_spike_buffer_sat = commit_tx_fee_sat(feerate_per_kw, 1, channel_type); + // We must cover the greater of + // 1) The dust_limit_satoshis plus the fee of the existing commitment at the spiked feerate. + // 2) The fee of the commitment with an additional non-dust HTLC, aka the fee spike buffer HTLC. + // In this case we don't mind the holder balance output dropping below the dust limit, as + // this additional non-dust HTLC will create the single remaining output on the commitment. + let min_balance_msat = + cmp::max(dust_limit_satoshis + tx_fee_sat, fee_spike_buffer_sat) * 1000; + ( + next_outbound_htlc_minimum_msat, + cmp::min( + holder_balance_before_fee_msat.saturating_sub(min_balance_msat), + available_capacity_msat, + ), + ) + } + // Otherwise, it is impossible to produce no outputs with this upcoming HTLC add, so we stay quiet + } else { + (next_outbound_htlc_minimum_msat, available_capacity_msat) + } +} + pub(crate) trait TxBuilder { fn get_channel_stats( &self, local: bool, is_outbound_from_holder: bool, channel_value_satoshis: u64, From 53fc9a354c385ef760ac10ec6bf7cda141681767 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Thu, 26 Feb 2026 03:10:47 +0000 Subject: [PATCH 2/9] Add 0-reserve to `accept_inbound_channel_from_trusted_peer` This new flag sets 0-reserve for the channel opener.
--- .../tests/lsps2_integration_tests.rs | 4 +- lightning/src/events/mod.rs | 2 +- lightning/src/ln/async_signer_tests.rs | 8 +++- lightning/src/ln/chanmon_update_fail_tests.rs | 8 +++- lightning/src/ln/channel.rs | 37 ++++++++++-------- lightning/src/ln/channel_open_tests.rs | 6 ++- lightning/src/ln/channel_type_tests.rs | 7 ++++ lightning/src/ln/channelmanager.rs | 38 +++++++++++++------ lightning/src/ln/functional_test_utils.rs | 4 +- lightning/src/ln/priv_short_conf_tests.rs | 14 +++++-- lightning/src/util/config.rs | 4 +- 11 files changed, 92 insertions(+), 40 deletions(-) diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs index 33a6dd697cf..121e622f585 100644 --- a/lightning-liquidity/tests/lsps2_integration_tests.rs +++ b/lightning-liquidity/tests/lsps2_integration_tests.rs @@ -1513,10 +1513,12 @@ fn create_channel_with_manual_broadcast( Event::OpenChannelRequest { temporary_channel_id, .. } => { client_node .node - .accept_inbound_channel_from_trusted_peer_0conf( + .accept_inbound_channel_from_trusted_peer( &temporary_channel_id, &service_node_id, user_channel_id, + true, + false, None, ) .unwrap(); diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 3f6bb0efb01..1f11d3c0ee2 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -1643,7 +1643,7 @@ pub enum Event { /// Furthermore, note that if [`ChannelTypeFeatures::supports_zero_conf`] returns true on this type, /// the resulting [`ChannelManager`] will not be readable by versions of LDK prior to /// 0.0.107. Channels setting this type also need to get manually accepted via - /// [`crate::ln::channelmanager::ChannelManager::accept_inbound_channel_from_trusted_peer_0conf`], + /// [`crate::ln::channelmanager::ChannelManager::accept_inbound_channel_from_trusted_peer`], /// or will be rejected otherwise. 
/// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 451af3918bf..66cc56cd620 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -78,10 +78,12 @@ fn do_test_open_channel(zero_conf: bool) { Event::OpenChannelRequest { temporary_channel_id, .. } => { nodes[1] .node - .accept_inbound_channel_from_trusted_peer_0conf( + .accept_inbound_channel_from_trusted_peer( temporary_channel_id, &node_a_id, 0, + true, + false, None, ) .expect("Unable to accept inbound zero-conf channel"); @@ -383,10 +385,12 @@ fn do_test_funding_signed_0conf(signer_ops: Vec) { Event::OpenChannelRequest { temporary_channel_id, .. } => { nodes[1] .node - .accept_inbound_channel_from_trusted_peer_0conf( + .accept_inbound_channel_from_trusted_peer( temporary_channel_id, &node_a_id, 0, + true, + false, None, ) .expect("Unable to accept inbound zero-conf channel"); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index cd32d219b93..5b21d2fb5b3 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -3235,7 +3235,9 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { if use_0conf { nodes[1] .node - .accept_inbound_channel_from_trusted_peer_0conf(&chan_id, &node_a_id, 0, None) + .accept_inbound_channel_from_trusted_peer( + &chan_id, &node_a_id, 0, true, false, None, + ) .unwrap(); } else { nodes[1].node.accept_inbound_channel(&chan_id, &node_a_id, 0, None).unwrap(); @@ -3344,7 +3346,9 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo if use_0conf { nodes[1] .node - .accept_inbound_channel_from_trusted_peer_0conf(&chan_id, &node_a_id, 0, None) + .accept_inbound_channel_from_trusted_peer( + &chan_id, &node_a_id, 0, true, false, None, + ) .unwrap(); } else { 
nodes[1].node.accept_inbound_channel(&chan_id, &node_a_id, 0, None).unwrap(); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 6feb35b2eda..b5209685c03 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -3670,7 +3670,7 @@ impl ChannelContext { } } - if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS && holder_selected_channel_reserve_satoshis != 0 { // Protocol level safety check in place, although it should never happen because // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS` return Err(ChannelError::close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); @@ -3682,7 +3682,7 @@ impl ChannelContext { log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.", msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS); } - if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis { + if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis && holder_selected_channel_reserve_satoshis != 0 { return Err(ChannelError::close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis))); } @@ -13661,7 +13661,7 @@ impl InboundV1Channel { fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig, - current_chain_height: u32, logger: &L, is_0conf: bool, + current_chain_height: u32, logger: &L, 
is_0conf: bool, is_0reserve: bool, ) -> Result, ChannelError> { let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id), None); @@ -13669,7 +13669,11 @@ impl InboundV1Channel { // support this channel type. let channel_type = channel_type_from_open_channel(&msg.common_fields, our_supported_features)?; - let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.common_fields.funding_satoshis, config); + let holder_selected_channel_reserve_satoshis = if is_0reserve { + 0 + } else { + get_holder_selected_channel_reserve_satoshis(msg.common_fields.funding_satoshis, config) + }; let counterparty_pubkeys = ChannelPublicKeys { funding_pubkey: msg.common_fields.funding_pubkey, revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint), @@ -14061,10 +14065,11 @@ impl PendingV2Channel { let channel_value_satoshis = our_funding_contribution_sats.saturating_add(msg.common_fields.funding_satoshis); + // TODO(zero_reserve): support reading and writing the `disable_channel_reserve` field let counterparty_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis( - channel_value_satoshis, msg.common_fields.dust_limit_satoshis); - let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis( channel_value_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS); + let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis( + channel_value_satoshis, msg.common_fields.dust_limit_satoshis); let channel_type = channel_type_from_open_channel(&msg.common_fields, our_supported_features)?; @@ -15932,7 +15937,7 @@ mod tests { // Make sure A's dust limit is as we expect. 
let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network), &&logger).unwrap(); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); - let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap(); + let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false, /*is_0reserve=*/false).unwrap(); // Node B --> Node A: accept channel, explicitly setting B's dust limit. let mut accept_channel_msg = node_b_chan.accept_inbound_channel(&&logger).unwrap(); @@ -16077,7 +16082,7 @@ mod tests { // Create Node B's channel by receiving Node A's open_channel message let open_channel_msg = node_a_chan.get_open_channel(chain_hash, &&logger).unwrap(); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); - let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap(); + let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false, /*is_0reserve=*/false).unwrap(); // Node B --> Node A: accept channel let 
accept_channel_msg = node_b_chan.accept_inbound_channel(&&logger).unwrap(); @@ -16152,12 +16157,12 @@ mod tests { // Test that `InboundV1Channel::new` creates a channel with the correct value for // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value, // which is set to the lower bound - 1 (2%) of the `channel_value`. - let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap(); + let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false, /*is_0reserve=*/false).unwrap(); let chan_3_value_msat = chan_3.funding.get_value_satoshis() * 1000; assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64); // Test with the upper bound - 1 of valid values (99%). 
- let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap(); + let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false, /*is_0reserve=*/false).unwrap(); let chan_4_value_msat = chan_4.funding.get_value_satoshis() * 1000; assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64); @@ -16176,14 +16181,14 @@ mod tests { // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%) // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1. 
- let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap(); + let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false, /*is_0reserve=*/false).unwrap(); let chan_7_value_msat = chan_7.funding.get_value_satoshis() * 1000; assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64); // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value // than 100. 
- let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap(); + let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false, /*is_0reserve=*/false).unwrap(); let chan_8_value_msat = chan_8.funding.get_value_satoshis() * 1000; assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat); } @@ -16236,7 +16241,7 @@ mod tests { inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32; if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 { - let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap(); + let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false, /*is_0reserve=*/false).unwrap(); let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, 
(chan.funding.get_value_satoshis() as f64 * inbound_selected_channel_reserve_perc) as u64); @@ -16244,7 +16249,7 @@ mod tests { assert_eq!(chan_inbound_node.funding.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve); } else { // Channel Negotiations failed - let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false); + let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false, /*is_0reserve=*/false); assert!(result.is_err()); } } @@ -16271,7 +16276,7 @@ mod tests { // Make sure A's dust limit is as we expect. 
let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network), &&logger).unwrap(); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); - let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap(); + let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false, /*is_0reserve=*/false).unwrap(); // Node B --> Node A: accept channel, explicitly setting B's dust limit. let mut accept_channel_msg = node_b_chan.accept_inbound_channel(&&logger).unwrap(); @@ -16375,6 +16380,7 @@ mod tests { 0, &&logger, false, + false, ) .unwrap(); outbound_chan @@ -18030,6 +18036,7 @@ mod tests { 0, &&logger, true, // Allow node b to send a 0conf channel_ready. + false, ).unwrap(); let accept_channel_msg = node_b_chan.accept_inbound_channel(&&logger).unwrap(); diff --git a/lightning/src/ln/channel_open_tests.rs b/lightning/src/ln/channel_open_tests.rs index 08cabc053c5..d1354ca35d4 100644 --- a/lightning/src/ln/channel_open_tests.rs +++ b/lightning/src/ln/channel_open_tests.rs @@ -157,10 +157,12 @@ fn test_0conf_limiting() { Event::OpenChannelRequest { temporary_channel_id, .. 
} => { nodes[1] .node - .accept_inbound_channel_from_trusted_peer_0conf( + .accept_inbound_channel_from_trusted_peer( &temporary_channel_id, &last_random_pk, 23, + true, + false, None, ) .unwrap(); @@ -970,6 +972,7 @@ pub fn test_user_configurable_csv_delay() { 0, &nodes[0].logger, /*is_0conf=*/ false, + /*is_0reserve=*/ false, ) { match error { ChannelError::Close((err, _)) => { @@ -1030,6 +1033,7 @@ pub fn test_user_configurable_csv_delay() { 0, &nodes[0].logger, /*is_0conf=*/ false, + /*is_0reserve=*/ false, ) { match error { ChannelError::Close((err, _)) => { diff --git a/lightning/src/ln/channel_type_tests.rs b/lightning/src/ln/channel_type_tests.rs index 2b069a6d314..5315250441a 100644 --- a/lightning/src/ln/channel_type_tests.rs +++ b/lightning/src/ln/channel_type_tests.rs @@ -168,6 +168,7 @@ fn test_zero_conf_channel_type_support() { 0, &&logger, /*is_0conf=*/ false, + /*is_0reserve=*/ false, ); assert!(res.is_ok()); } @@ -283,6 +284,7 @@ fn do_test_supports_channel_type(config: UserConfig, expected_channel_type: Chan 0, &&logger, /*is_0conf=*/ false, + /*is_0reserve=*/ false, ) .unwrap(); @@ -351,6 +353,7 @@ fn test_rejects_if_channel_type_not_set() { 0, &&logger, /*is_0conf=*/ false, + /*is_0reserve=*/ false, ); assert!(channel_b.is_err()); @@ -369,6 +372,7 @@ fn test_rejects_if_channel_type_not_set() { 0, &&logger, /*is_0conf=*/ false, + /*is_0reserve=*/ false, ) .unwrap(); @@ -435,6 +439,7 @@ fn test_rejects_if_channel_type_differ() { 0, &&logger, /*is_0conf=*/ false, + /*is_0reserve=*/ false, ) .unwrap(); @@ -519,6 +524,7 @@ fn test_rejects_simple_anchors_channel_type() { 0, &&logger, /*is_0conf=*/ false, + /*is_0reserve=*/ false, ); assert!(res.is_err()); @@ -559,6 +565,7 @@ fn test_rejects_simple_anchors_channel_type() { 0, &&logger, /*is_0conf=*/ false, + /*is_0reserve=*/ false, ) .unwrap(); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ada27af749f..f57d860f274 100644 --- 
a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10710,10 +10710,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// /// The `user_channel_id` parameter will be provided back in /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond - /// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call. + /// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer` call. /// /// Note that this method will return an error and reject the channel, if it requires support - /// for zero confirmations. Instead, `accept_inbound_channel_from_trusted_peer_0conf` must be + /// for zero confirmations. Instead, `accept_inbound_channel_from_trusted_peer` must be /// used to accept such channels. /// /// NOTE: LDK makes no attempt to prevent the counterparty from using non-standard inputs which @@ -10730,19 +10730,24 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ temporary_channel_id, counterparty_node_id, false, + false, user_channel_id, config_overrides, ) } - /// Accepts a request to open a channel after a [`Event::OpenChannelRequest`], treating - /// it as confirmed immediately. + /// Accepts a request to open a channel after a [`Event::OpenChannelRequest`]. Unlike + /// [`ChannelManager::accept_inbound_channel`], this method allows some combination of the + /// zero-conf and zero-reserve features to be set for the channel, see a description of these + /// features below. /// /// The `user_channel_id` parameter will be provided back in /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond - /// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call. + /// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer` call. 
+ /// + /// # Zero-conf /// - /// Unlike [`ChannelManager::accept_inbound_channel`], this method accepts the incoming channel + /// If `accept_0conf` is set, the method accepts the incoming channel /// and (if the counterparty agrees), enables forwarding of payments immediately. /// /// This fully trusts that the counterparty has honestly and correctly constructed the funding @@ -10751,16 +10756,26 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// If it does not confirm before we decide to close the channel, or if the funding transaction /// does not pay to the correct script the correct amount, *you will lose funds*. /// + /// # Zero-reserve + /// + /// If `accept_0reserve` is set, the method accepts the incoming channel and sets the reserve the counterparty + /// must keep at all times in the channel to zero. + /// + /// This allows the counterparty to spend their entire channel balance, and attempt to force-close the channel + /// with a revoked commitment transaction **for free**. + /// /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id - pub fn accept_inbound_channel_from_trusted_peer_0conf( + pub fn accept_inbound_channel_from_trusted_peer( &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, - user_channel_id: u128, config_overrides: Option, + user_channel_id: u128, accept_0conf: bool, accept_0reserve: bool, + config_overrides: Option, ) -> Result<(), APIError> { self.do_accept_inbound_channel( temporary_channel_id, counterparty_node_id, - true, + accept_0conf, + accept_0reserve, user_channel_id, config_overrides, ) @@ -10769,7 +10784,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ /// TODO(dual_funding): Allow contributions, pass intended amount and inputs fn do_accept_inbound_channel( &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, - accept_0conf: bool, user_channel_id: u128, + accept_0conf: bool, accept_0reserve: bool, user_channel_id: u128, config_overrides: Option, ) -> Result<(), APIError> { let mut config = self.config.read().unwrap().clone(); @@ -10819,6 +10834,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ best_block_height, &self.logger, accept_0conf, + accept_0reserve, ) .map_err(|err| { MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id) @@ -10910,7 +10926,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; debug_assert!(peer_state.is_connected); peer_state.pending_msg_events.push(send_msg_err_event); - let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned(); + let err_str = "Please use accept_inbound_channel_from_trusted_peer to accept channels with zero confirmations.".to_owned(); log_error!(logger, "{}", err_str); return Err(APIError::APIMisuseError { err: err_str }); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 2d971c3a100..9d1d1086180 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1638,10 +1638,12 @@ pub fn exchange_open_accept_zero_conf_chan<'a, 'b, 'c, 'd>( Event::OpenChannelRequest { temporary_channel_id, .. 
} => { receiver .node - .accept_inbound_channel_from_trusted_peer_0conf( + .accept_inbound_channel_from_trusted_peer( &temporary_channel_id, &initiator_node_id, 0, + true, + false, None, ) .unwrap(); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index ffe5ea6cbb1..642e31c1b01 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -774,7 +774,7 @@ fn test_simple_0conf_channel() { // If our peer tells us they will accept our channel with 0 confs, and we funded the channel, // we should trust the funding won't be double-spent (assuming `trust_own_funding_0conf` is // set)! - // Further, if we `accept_inbound_channel_from_trusted_peer_0conf`, `channel_ready` messages + // Further, if we `accept_inbound_channel_from_trusted_peer`, `channel_ready` messages // should fly immediately and the channel should be available for use as soon as they are // received. @@ -818,10 +818,12 @@ fn test_0conf_channel_with_async_monitor() { Event::OpenChannelRequest { temporary_channel_id, .. } => { nodes[1] .node - .accept_inbound_channel_from_trusted_peer_0conf( + .accept_inbound_channel_from_trusted_peer( &temporary_channel_id, &node_a_id, 0, + true, + false, None, ) .unwrap(); @@ -1369,10 +1371,12 @@ fn test_zero_conf_accept_reject() { // Assert we can accept via the 0conf method assert!(nodes[1] .node - .accept_inbound_channel_from_trusted_peer_0conf( + .accept_inbound_channel_from_trusted_peer( &temporary_channel_id, &node_a_id, 0, + true, + false, None ) .is_ok()); @@ -1411,10 +1415,12 @@ fn test_connect_before_funding() { Event::OpenChannelRequest { temporary_channel_id, .. 
} => { nodes[1] .node - .accept_inbound_channel_from_trusted_peer_0conf( + .accept_inbound_channel_from_trusted_peer( &temporary_channel_id, &node_a_id, 0, + true, + false, None, ) .unwrap(); diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index e4158910b9a..14c507184ac 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -31,11 +31,11 @@ pub struct ChannelHandshakeConfig { /// A lower-bound of `1` is applied, requiring all channels to have a confirmed commitment /// transaction before operation. If you wish to accept channels with zero confirmations, /// manually accept them via [`Event::OpenChannelRequest`] using - /// [`ChannelManager::accept_inbound_channel_from_trusted_peer_0conf`]. + /// [`ChannelManager::accept_inbound_channel_from_trusted_peer`]. /// /// Default value: `6` /// - /// [`ChannelManager::accept_inbound_channel_from_trusted_peer_0conf`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel_from_trusted_peer_0conf + /// [`ChannelManager::accept_inbound_channel_from_trusted_peer`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel_from_trusted_peer /// [`Event::OpenChannelRequest`]: crate::events::Event::OpenChannelRequest pub minimum_depth: u32, /// Set to the number of blocks we require our counterparty to wait to claim their money (ie From da0520818548390796f7a8389601ec08dd7361f9 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Thu, 19 Feb 2026 07:32:12 +0000 Subject: [PATCH 3/9] Add `ChannelManager::create_channel_to_trusted_peer_0reserve` This new method sets 0-reserve for the channel accepter. 
--- lightning/src/ln/channel.rs | 50 +++++++++++++++----------- lightning/src/ln/channel_open_tests.rs | 1 + lightning/src/ln/channel_type_tests.rs | 7 ++++ lightning/src/ln/channelmanager.rs | 42 ++++++++++++++++++++-- 4 files changed, 78 insertions(+), 22 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index b5209685c03..ab4d0bb7fa9 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -4400,7 +4400,7 @@ impl ChannelContext { if channel_reserve_satoshis > funding.get_value_satoshis() { return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", channel_reserve_satoshis, funding.get_value_satoshis()))); } - if common_fields.dust_limit_satoshis > funding.holder_selected_channel_reserve_satoshis { + if common_fields.dust_limit_satoshis > funding.holder_selected_channel_reserve_satoshis && funding.holder_selected_channel_reserve_satoshis != 0 { return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", common_fields.dust_limit_satoshis, funding.holder_selected_channel_reserve_satoshis))); } if channel_reserve_satoshis > funding.get_value_satoshis() - funding.holder_selected_channel_reserve_satoshis { @@ -13285,15 +13285,20 @@ impl OutboundV1Channel { pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, - outbound_scid_alias: u64, temporary_channel_id: Option, logger: L + outbound_scid_alias: u64, temporary_channel_id: Option, logger: L, is_0reserve: bool, ) -> Result, APIError> { - let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); - if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - // Protocol level safety 
 check in place, although it should never happen because - // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS` - return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below \ - implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) }); - } + let holder_selected_channel_reserve_satoshis = if is_0reserve { + 0 + } else { + let reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); + if reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + // Protocol level safety check in place, although it should never happen because + // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS` + return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below \ + implementation limit dust_limit_satoshis {}", reserve_satoshis) }); + } + reserve_satoshis + }; let channel_keys_id = signer_provider.generate_channel_keys_id(false, user_id); let holder_signer = signer_provider.derive_channel_signer(channel_keys_id); @@ -15875,6 +15880,7 @@ mod tests { 42, None, &logger, + false, ); match res { Err(APIError::IncompatibleShutdownScript { script }) => { @@ -15901,7 +15907,7 @@ mod tests { let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); + let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger, false).unwrap(); // Now change the fee so we can check that the fee in the open_channel message is the // same as the old fee.
@@ -15931,7 +15937,7 @@ mod tests { let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let mut config = UserConfig::default(); config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; - let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 100_000_000, 42, &config, 0, 42, None, &logger).unwrap(); + let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 100_000_000, 42, &config, 0, 42, None, &logger, false).unwrap(); // Create Node B's channel by receiving Node A's open_channel message // Make sure A's dust limit is as we expect. @@ -16022,7 +16028,7 @@ mod tests { let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let mut config = UserConfig::default(); config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; - let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10_000_000, 100_000_000, 42, &config, 0, 42, None, &logger).unwrap(); + let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10_000_000, 100_000_000, 42, &config, 0, 42, None, &logger, false).unwrap(); chan.context.counterparty_max_htlc_value_in_flight_msat = 1_000_000_000; let commitment_tx_fee_0_htlcs = commit_tx_fee_sat(chan.context.feerate_per_kw, 0, chan.funding.get_channel_type()) * 1000; @@ -16077,7 +16083,7 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let 
config = UserConfig::default(); - let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); + let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger, false).unwrap(); // Create Node B's channel by receiving Node A's open_channel message let open_channel_msg = node_a_chan.get_open_channel(chain_hash, &&logger).unwrap(); @@ -16143,12 +16149,12 @@ mod tests { // Test that `OutboundV1Channel::new` creates a channel with the correct value for // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value, // which is set to the lower bound + 1 (2%) of the `channel_value`. - let mut chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None, &logger).unwrap(); + let mut chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None, &logger, false).unwrap(); let chan_1_value_msat = chan_1.funding.get_value_satoshis() * 1000; assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64); // Test with the upper bound - 1 of valid values (99%). 
- let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None, &logger).unwrap(); + let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None, &logger, false).unwrap(); let chan_2_value_msat = chan_2.funding.get_value_satoshis() * 1000; assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64); @@ -16168,14 +16174,14 @@ mod tests { // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%) // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1. - let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None, &logger).unwrap(); + let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None, &logger, false).unwrap(); let chan_5_value_msat = chan_5.funding.get_value_satoshis() * 1000; assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64); // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value // than 100. 
- let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None, &logger).unwrap(); + let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None, &logger, false).unwrap(); let chan_6_value_msat = chan_6.funding.get_value_satoshis() * 1000; assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat); @@ -16231,7 +16237,7 @@ mod tests { let mut outbound_node_config = UserConfig::default(); outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32; - let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None, &logger).unwrap(); + let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None, &logger, false).unwrap(); let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.funding.get_value_satoshis() as f64 * outbound_selected_channel_reserve_perc) as u64); assert_eq!(chan.funding.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve); @@ -16270,7 +16276,7 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = 
UserConfig::default(); - let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); + let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger, false).unwrap(); // Create Node B's channel by receiving Node A's open_channel message // Make sure A's dust limit is as we expect. @@ -16362,6 +16368,7 @@ mod tests { 42, None, &logger, + false, ) .unwrap(); let open_channel_msg = &outbound_chan @@ -16719,6 +16726,7 @@ mod tests { 42, None, &*logger, + false, ) .unwrap(); // Nothing uses their network key in this test chan.context.holder_dust_limit_satoshis = 546; @@ -17443,6 +17451,7 @@ mod tests { 0, None, &*logger, + false, ) .unwrap(); @@ -18018,7 +18027,8 @@ mod tests { 0, 42, None, - &logger + &logger, + false, ).unwrap(); let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network), &&logger).unwrap(); diff --git a/lightning/src/ln/channel_open_tests.rs b/lightning/src/ln/channel_open_tests.rs index d1354ca35d4..87d3186c6ef 100644 --- a/lightning/src/ln/channel_open_tests.rs +++ b/lightning/src/ln/channel_open_tests.rs @@ -940,6 +940,7 @@ pub fn test_user_configurable_csv_delay() { 42, None, &logger, + false, ) { match error { APIError::APIMisuseError { err } => { diff --git a/lightning/src/ln/channel_type_tests.rs b/lightning/src/ln/channel_type_tests.rs index 5315250441a..de2f339adc5 100644 --- a/lightning/src/ln/channel_type_tests.rs +++ b/lightning/src/ln/channel_type_tests.rs @@ -144,6 +144,7 @@ fn test_zero_conf_channel_type_support() { 42, None, &logger, + false, ) .unwrap(); @@ -245,6 +246,7 @@ fn do_test_supports_channel_type(config: UserConfig, expected_channel_type: Chan 42, None, 
&logger, + false, ) .unwrap(); assert_eq!( @@ -266,6 +268,7 @@ fn do_test_supports_channel_type(config: UserConfig, expected_channel_type: Chan 42, None, &logger, + false, ) .unwrap(); @@ -332,6 +335,7 @@ fn test_rejects_if_channel_type_not_set() { 42, None, &logger, + false, ) .unwrap(); @@ -420,6 +424,7 @@ fn test_rejects_if_channel_type_differ() { 42, None, &logger, + false, ) .unwrap(); @@ -504,6 +509,7 @@ fn test_rejects_simple_anchors_channel_type() { 42, None, &logger, + false, ) .unwrap(); @@ -546,6 +552,7 @@ fn test_rejects_simple_anchors_channel_type() { 42, None, &logger, + false, ) .unwrap(); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f57d860f274..20432f7854d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3670,8 +3670,46 @@ impl< /// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id /// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id /// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id + pub fn create_channel( + &self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, + user_channel_id: u128, temporary_channel_id: Option, + override_config: Option, + ) -> Result { + self.create_channel_internal( + their_network_key, + channel_value_satoshis, + push_msat, + user_channel_id, + temporary_channel_id, + override_config, + false, + ) + } + + /// Creates a new outbound channel to the given remote node and with the given value. + /// + /// The only difference between this method and [`ChannelManager::create_channel`] is that this method sets + /// the reserve the counterparty must keep at all times in the channel to zero. This allows the counterparty to + /// spend their entire channel balance, and attempt to force-close the channel with a revoked commitment + /// transaction **for free**. 
+ pub fn create_channel_to_trusted_peer_0reserve( + &self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, + user_channel_id: u128, temporary_channel_id: Option, + override_config: Option, + ) -> Result { + self.create_channel_internal( + their_network_key, + channel_value_satoshis, + push_msat, + user_channel_id, + temporary_channel_id, + override_config, + true, + ) + } + #[rustfmt::skip] - pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, temporary_channel_id: Option, override_config: Option) -> Result { + fn create_channel_internal(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, temporary_channel_id: Option, override_config: Option, is_0reserve: bool) -> Result { if channel_value_satoshis < 1000 { return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) }); } @@ -3707,7 +3745,7 @@ impl< }; match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, their_features, channel_value_satoshis, push_msat, user_channel_id, config, - self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &self.logger) + self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &self.logger, is_0reserve) { Ok(res) => res, Err(e) => { From 1905f94e72277ab6971e7648291c182107aa273a Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Sun, 8 Feb 2026 01:24:17 +0000 Subject: [PATCH 4/9] Shakedown zero reserve channels --- lightning/src/ln/functional_test_utils.rs | 123 +++ lightning/src/ln/htlc_reserve_unit_tests.rs | 822 +++++++++++++++++++- 2 files changed, 941 insertions(+), 4 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 9d1d1086180..1a8e92d24ef 100644 --- 
a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2137,6 +2137,129 @@ pub fn update_nodes_with_chan_announce<'a, 'b, 'c, 'd>( } } +pub fn handle_and_accept_open_zero_reserve_channel( + node: &Node, counterparty_id: PublicKey, msg: &OpenChannel, +) { + node.node.handle_open_channel(counterparty_id, &msg); + let events = node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match &events[0] { + Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => { + node.node + .accept_inbound_channel_from_trusted_peer( + temporary_channel_id, + counterparty_node_id, + 42, + false, + true, + None, + ) + .unwrap(); + }, + _ => panic!("Unexpected event"), + }; +} + +pub fn exchange_open_accept_zero_reserve_chan<'a, 'b, 'c, 'd>( + node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64, +) -> ChannelId { + let node_a_id = node_a.node.get_our_node_id(); + let node_b_id = node_b.node.get_our_node_id(); + + let create_chan_id = + node_a.node.create_channel(node_b_id, channel_value, push_msat, 42, None, None).unwrap(); + let open_channel_msg = get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b_id); + assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id); + assert_eq!( + node_a + .node + .list_channels() + .iter() + .find(|channel| channel.channel_id == create_chan_id) + .unwrap() + .user_channel_id, + 42 + ); + handle_and_accept_open_zero_reserve_channel(&node_b, node_a_id, &open_channel_msg); + + let accept_channel_msg = get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a_id); + assert_eq!(accept_channel_msg.common_fields.temporary_channel_id, create_chan_id); + node_a.node.handle_accept_channel(node_b_id, &accept_channel_msg); + assert_ne!( + node_b + .node + .list_channels() + .iter() + .find(|channel| channel.channel_id == create_chan_id) + .unwrap() + .user_channel_id, + 0 + ); + + create_chan_id +} 
+ +pub fn create_zero_reserve_chan_between_nodes_with_value_init<'a, 'b, 'c>( + node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, push_msat: u64, +) -> Transaction { + let create_chan_id = + exchange_open_accept_zero_reserve_chan(node_a, node_b, channel_value, push_msat); + sign_funding_transaction(node_a, node_b, channel_value, create_chan_id) +} + +pub fn create_zero_reserve_chan_between_nodes_with_value_a<'a, 'b, 'c: 'd, 'd>( + node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64, +) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), ChannelId, Transaction) { + let tx = create_zero_reserve_chan_between_nodes_with_value_init( + node_a, + node_b, + channel_value, + push_msat, + ); + let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx); + (msgs, chan_id, tx) +} + +pub fn create_zero_reserve_chan_between_nodes_with_value<'a, 'b, 'c: 'd, 'd>( + node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64, +) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction) { + let (channel_ready, channel_id, tx) = create_zero_reserve_chan_between_nodes_with_value_a( + node_a, + node_b, + channel_value, + push_msat, + ); + let (announcement, as_update, bs_update) = + create_chan_between_nodes_with_value_b(node_a, node_b, &channel_ready); + (announcement, as_update, bs_update, channel_id, tx) +} + +pub fn create_announced_zero_reserve_chan_between_nodes_with_value<'a, 'b, 'c: 'd, 'd>( + nodes: &'a Vec>, a: usize, b: usize, channel_value: u64, push_msat: u64, +) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction) { + let chan_announcement = create_zero_reserve_chan_between_nodes_with_value( + &nodes[a], + &nodes[b], + channel_value, + push_msat, + ); + update_nodes_with_chan_announce( + nodes, + a, + b, + &chan_announcement.0, + &chan_announcement.1, + &chan_announcement.2, + ); + 
(chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4) +} + +pub fn create_announced_zero_reserve_chan_between_nodes<'a, 'b, 'c: 'd, 'd>( + nodes: &'a Vec>, a: usize, b: usize, +) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction) { + create_announced_zero_reserve_chan_between_nodes_with_value(nodes, a, b, 100000, 10001) +} + pub fn do_check_spends Option>( tx: &Transaction, get_output: F, ) { diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index d88b9a2dc3f..b24049f9669 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -2,12 +2,13 @@ use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentPurpose}; use crate::ln::chan_utils::{ - self, commitment_tx_base_weight, second_stage_tx_fees_sat, CommitmentTransaction, - COMMITMENT_TX_WEIGHT_PER_HTLC, + self, commit_tx_fee_sat, commitment_tx_base_weight, second_stage_tx_fees_sat, + CommitmentTransaction, COMMITMENT_TX_WEIGHT_PER_HTLC, }; use crate::ln::channel::{ - get_holder_selected_channel_reserve_satoshis, Channel, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, - MIN_AFFORDABLE_HTLC_COUNT, MIN_CHAN_DUST_LIMIT_SATOSHIS, + get_holder_selected_channel_reserve_satoshis, Channel, ANCHOR_OUTPUT_VALUE_SATOSHI, + FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, + MIN_CHAN_DUST_LIMIT_SATOSHIS, }; use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder}; use crate::ln::functional_test_utils::*; @@ -2439,3 +2440,816 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { check_added_monitors(&nodes[1], 3); } } + +#[test] +fn test_create_channel_to_trusted_peer_0reserve() { + let mut config = test_default_channel_config(); + + // Legacy channels + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; + let channel_type = 
do_test_create_channel_to_trusted_peer_0reserve(config.clone()); + assert_eq!(channel_type, ChannelTypeFeatures::only_static_remote_key()); + + // Anchor channels + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; + let channel_type = do_test_create_channel_to_trusted_peer_0reserve(config.clone()); + assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()); + + // 0FC channels + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; + let channel_type = do_test_create_channel_to_trusted_peer_0reserve(config.clone()); + assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_fee_commitments()); +} + +#[cfg(test)] +fn do_test_create_channel_to_trusted_peer_0reserve(mut config: UserConfig) -> ChannelTypeFeatures { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let channel_value_sat = 100_000; + + let temp_channel_id = nodes[0] + .node + .create_channel_to_trusted_peer_0reserve(node_b_id, channel_value_sat, 0, 42, None, None) + .unwrap(); + let mut open_channel_message = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel_message); + let mut accept_channel_message = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); + let funding_tx = 
sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id); + let funding_msgs = + create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx); + create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0); + + let details = &nodes[0].node.list_channels()[0]; + let reserve_sat = details.unspendable_punishment_reserve.unwrap(); + assert_ne!(reserve_sat, 0); + let channel_type = details.channel_type.clone().unwrap(); + let feerate_per_kw = details.feerate_sat_per_1000_weight.unwrap(); + let anchors_sat = + if channel_type == ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() { + 2 * 330 + } else { + 0 + }; + let spike_multiple = if channel_type == ChannelTypeFeatures::only_static_remote_key() { + FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32 + } else { + 1 + }; + let spiked_feerate = spike_multiple * feerate_per_kw; + let reserved_commit_tx_fee_sat = chan_utils::commit_tx_fee_sat( + spiked_feerate, + 2, // We reserve space for two HTLCs, the next outbound non-dust HTLC, and the fee spike buffer HTLC + &channel_type, + ); + + let max_outbound_htlc_sat = + channel_value_sat - anchors_sat - reserved_commit_tx_fee_sat - reserve_sat; + assert_eq!(details.next_outbound_htlc_limit_msat, max_outbound_htlc_sat * 1000); + send_payment(&nodes[0], &[&nodes[1]], max_outbound_htlc_sat * 1000); + + let details = &nodes[1].node.list_channels()[0]; + assert_eq!(details.unspendable_punishment_reserve.unwrap(), 0); + assert_eq!(details.next_outbound_htlc_limit_msat, max_outbound_htlc_sat * 1000); + send_payment(&nodes[1], &[&nodes[0]], max_outbound_htlc_sat * 1000); + + channel_type +} + +#[test] +fn test_accept_inbound_channel_from_trusted_peer_0reserve() { + let mut config = test_default_channel_config(); + + // Legacy channels + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; + let channel_type = 
do_test_accept_inbound_channel_from_trusted_peer_0reserve(config.clone()); + assert_eq!(channel_type, ChannelTypeFeatures::only_static_remote_key()); + + // Anchor channels + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; + let channel_type = do_test_accept_inbound_channel_from_trusted_peer_0reserve(config.clone()); + assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()); + + // 0FC channels + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; + let channel_type = do_test_accept_inbound_channel_from_trusted_peer_0reserve(config.clone()); + assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_fee_commitments()); +} + +#[cfg(test)] +fn do_test_accept_inbound_channel_from_trusted_peer_0reserve( + mut config: UserConfig, +) -> ChannelTypeFeatures { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let channel_value_sat = 100_000; + let (_, _, _chan_id, _) = create_announced_zero_reserve_chan_between_nodes_with_value( + &nodes, + 0, + 1, + channel_value_sat, + 0, + ); + let details = &nodes[0].node.list_channels()[0]; + assert_eq!(details.unspendable_punishment_reserve.unwrap(), 0); + let channel_type = details.channel_type.clone().unwrap(); + let feerate_per_kw = details.feerate_sat_per_1000_weight.unwrap(); + let anchors_sat = + if channel_type == ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() { + 2 * 330 + } else { + 0 + }; + let spike_multiple = if channel_type == 
ChannelTypeFeatures::only_static_remote_key() { + FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32 + } else { + 1 + }; + let spiked_feerate = spike_multiple * feerate_per_kw; + let reserved_commit_tx_fee_sat = chan_utils::commit_tx_fee_sat( + spiked_feerate, + 2, // We reserve space for two HTLCs, the next outbound non-dust HTLC, and the fee spike buffer HTLC + &channel_type, + ); + + let max_outbound_htlc_sat = channel_value_sat - reserved_commit_tx_fee_sat - anchors_sat; + assert_eq!(details.next_outbound_htlc_limit_msat, max_outbound_htlc_sat * 1000); + send_payment(&nodes[0], &[&nodes[1]], max_outbound_htlc_sat * 1000); + + let details = &nodes[1].node.list_channels()[0]; + let reserve_sat = details.unspendable_punishment_reserve.unwrap(); + assert_ne!(reserve_sat, 0); + let max_outbound_htlc_sat = max_outbound_htlc_sat - reserve_sat; + assert_eq!(details.next_outbound_htlc_limit_msat, max_outbound_htlc_sat * 1000); + send_payment(&nodes[1], &[&nodes[0]], max_outbound_htlc_sat * 1000); + + channel_type +} + +enum NoOutputs { + PaymentSucceeds, + FailsReceiverUpdateAddHTLC, + ReceiverCanAcceptHTLCA, + ReceiverCanAcceptHTLCB, +} + +#[test] +fn test_zero_reserve_no_outputs() { + let mut config = test_default_channel_config(); + + // Legacy channels + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; + + let channel_type = do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::PaymentSucceeds); + assert_eq!(channel_type, ChannelTypeFeatures::only_static_remote_key()); + let channel_type = + do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::ReceiverCanAcceptHTLCA); + assert_eq!(channel_type, ChannelTypeFeatures::only_static_remote_key()); + let channel_type = + do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::ReceiverCanAcceptHTLCB); + assert_eq!(channel_type, ChannelTypeFeatures::only_static_remote_key()); + let channel_type = + 
do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::FailsReceiverUpdateAddHTLC); + assert_eq!(channel_type, ChannelTypeFeatures::only_static_remote_key()); + + // Anchor channels + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; + + let channel_type = do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::PaymentSucceeds); + assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()); + + let channel_type = + do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::ReceiverCanAcceptHTLCA); + assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()); + + let channel_type = + do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::FailsReceiverUpdateAddHTLC); + assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()); + + // 0FC channels + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; + + let channel_type = do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::PaymentSucceeds); + assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_fee_commitments()); + + let channel_type = + do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::ReceiverCanAcceptHTLCA); + assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_fee_commitments()); + + let channel_type = + do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::FailsReceiverUpdateAddHTLC); + assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_fee_commitments()); +} + +fn do_test_zero_reserve_no_outputs( + mut config: UserConfig, no_outputs_case: NoOutputs, +) -> ChannelTypeFeatures { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + 
+ let channel_type = if config.channel_handshake_config.negotiate_anchor_zero_fee_commitments { + ChannelTypeFeatures::anchors_zero_fee_commitments() + } else if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx { + ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() + } else { + ChannelTypeFeatures::only_static_remote_key() + }; + + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let feerate_per_kw = 253; + let spike_multiple = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; + let anchors_sat = 2 * ANCHOR_OUTPUT_VALUE_SATOSHI; + let dust_limit_satoshis: u64 = 546; + let channel_value_sat = if channel_type == ChannelTypeFeatures::only_static_remote_key() { + // This is the fundee 1000sat reserve + 2 min HTLCs + 1002 + } else if channel_type == ChannelTypeFeatures::anchors_zero_fee_commitments() { + // This is the fundee 1000sat reserve + 2 min HTLCs + 1002 + } else if channel_type == ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() { + // min opener balance is the fee for 4 HTLCs, the anchors, and the dust limit + let min_channel_size = + commit_tx_fee_sat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) + + anchors_sat + dust_limit_satoshis; + assert!(min_channel_size > 1002); + min_channel_size + } else { + panic!("Unexpected channel type"); + }; + + // Create a channel with an identical, high dust limit and zero-reserve on both sides to make our lives easier + + nodes[0] + .node + .create_channel_to_trusted_peer_0reserve(node_b_id, channel_value_sat, 0, 42, None, None) + .unwrap(); + + let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + open_channel.common_fields.dust_limit_satoshis = dust_limit_satoshis; + nodes[1].node.handle_open_channel(node_a_id, 
&open_channel); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::OpenChannelRequest { temporary_channel_id: chan_id, .. } => { + nodes[1] + .node + .accept_inbound_channel_from_trusted_peer( + &chan_id, &node_a_id, 0, false, true, None, + ) + .unwrap(); + }, + _ => panic!("Unexpected event"), + }; + + let mut accept_channel_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + accept_channel_msg.common_fields.dust_limit_satoshis = dust_limit_satoshis; + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_msg); + + let (chan_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, channel_value_sat, 42); + + nodes[0].node.funding_transaction_generated(chan_id, node_b_id, tx.clone()).unwrap(); + nodes[1].node.handle_funding_created( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id), + ); + check_added_monitors(&nodes[1], 1); + expect_channel_pending_event(&nodes[1], &node_a_id); + + nodes[0].node.handle_funding_signed( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id), + ); + check_added_monitors(&nodes[0], 1); + expect_channel_pending_event(&nodes[0], &node_b_id); + + let (channel_ready, channel_id) = + create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); + let (announcement, as_update, bs_update) = + create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready); + update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update); + + { + let mut per_peer_lock; + let mut peer_state_lock; + let channel = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, channel_id); + if let Some(mut chan) = channel.as_funded_mut() { + chan.context.holder_dust_limit_satoshis = dust_limit_satoshis; + } else { + panic!("Unexpected Channel phase"); + } + } + + { + let mut per_peer_lock; + let mut peer_state_lock; + let 
channel = + get_channel_ref!(nodes[1], nodes[0], per_peer_lock, peer_state_lock, channel_id); + if let Some(mut chan) = channel.as_funded_mut() { + chan.context.holder_dust_limit_satoshis = dust_limit_satoshis; + } else { + panic!("Unexpected Channel phase"); + } + } + + let (sender_amount_msat, receiver_amount_msat) = if channel_type + == ChannelTypeFeatures::only_static_remote_key() + { + // We can't afford the fee for an additional non-dust HTLC + the fee spike HTLC, so we can only send + // dust HTLCs... + // We don't bother to add the second stage tx fees, these would only make this min bigger + let min_nondust_htlc_sat = dust_limit_satoshis; + assert!( + channel_value_sat + - commit_tx_fee_sat(spike_multiple * feerate_per_kw, 2, &channel_type) + < min_nondust_htlc_sat + ); + // But sending a big (not biggest) dust HTLC trims our balance output! + let max_dust_htlc = dust_limit_satoshis - 1; + assert!( + channel_value_sat - commit_tx_fee_sat(feerate_per_kw, 0, &channel_type) - max_dust_htlc + < dust_limit_satoshis + ); + // We cannot trim our own balance output, otherwise we'd have no outputs on the commitment. We must + // also reserve enough fees to pay for an incoming non-dust HTLC, aka the fee spike buffer HTLC. + let min_value_sat = core::cmp::max( + commit_tx_fee_sat(spike_multiple * feerate_per_kw, 0, &channel_type) + + dust_limit_satoshis, + commit_tx_fee_sat(spike_multiple * feerate_per_kw, 1, &channel_type), + ); + // At this point the tighter requirement is "must have an output" + assert!( + commit_tx_fee_sat(spike_multiple * feerate_per_kw, 0, &channel_type) + + dust_limit_satoshis + > commit_tx_fee_sat(spike_multiple * feerate_per_kw, 1, &channel_type) + ); + // But say at 9sat/vb with default dust limit, + // the tighter requirement is actually "must have funds for an inbound HTLC" ! 
+ assert!( + commit_tx_fee_sat(9 * 250, 0, &channel_type) + 354 + < commit_tx_fee_sat(9 * 250, 1, &channel_type) + ); + let sender_amount_msat = (channel_value_sat - min_value_sat) * 1000; + let details_0 = &nodes[0].node.list_channels()[0]; + assert_eq!(details_0.next_outbound_htlc_minimum_msat, 1000); + assert_eq!(details_0.next_outbound_htlc_limit_msat, sender_amount_msat); + assert!( + details_0.next_outbound_htlc_limit_msat > details_0.next_outbound_htlc_minimum_msat + ); + + match no_outputs_case { + NoOutputs::PaymentSucceeds => (sender_amount_msat, sender_amount_msat), + NoOutputs::ReceiverCanAcceptHTLCA => { + // A dust HTLC with 1msat added to it will break counterparty `can_accept_incoming_htlc` + // validation, as this dust HTLC would push the holder's balance output below the + // dust limit at the spike multiple feerate. + (sender_amount_msat, sender_amount_msat + 1) + }, + NoOutputs::ReceiverCanAcceptHTLCB => { + // In `validate_update_add_htlc`, we check that there is still some output present on + // the commitment given the *current* set of HTLCs, and the *current* feerate. So this + // HTLC will pass at `validate_update_add_htlc`, but will fail in + // `can_accept_incoming_htlc` due to failed fee spike buffer checks. 
+ let receiver_amount_msat = (channel_value_sat + - commit_tx_fee_sat(feerate_per_kw, 0, &channel_type) + - dust_limit_satoshis) + * 1000; + (sender_amount_msat, receiver_amount_msat) + }, + NoOutputs::FailsReceiverUpdateAddHTLC => { + // Same value as above, just add 1msat, and this fails at `validate_update_add_htlc` + let receiver_amount_msat = (channel_value_sat + - commit_tx_fee_sat(feerate_per_kw, 0, &channel_type) + - dust_limit_satoshis) + * 1000; + (sender_amount_msat, receiver_amount_msat + 1) + }, + } + } else if channel_type == ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() { + // We can afford the fee for an additional non-dust HTLC plus the fee spike HTLC, so we can send + // non-dust HTLCs + assert!( + channel_value_sat - anchors_sat - commit_tx_fee_sat(feerate_per_kw, 2, &channel_type) + > dust_limit_satoshis + ); + // But sending the biggest dust HTLC possible trims our balance output! + let max_dust_htlc = dust_limit_satoshis - 1; + assert!( + channel_value_sat + - anchors_sat - commit_tx_fee_sat(feerate_per_kw, 0, &channel_type) + - max_dust_htlc < dust_limit_satoshis + ); + // So we can *only* send non-dust HTLCs + let details_0 = &nodes[0].node.list_channels()[0]; + assert_eq!(details_0.next_outbound_htlc_minimum_msat, dust_limit_satoshis * 1000); + assert_eq!( + details_0.next_outbound_htlc_limit_msat, + (channel_value_sat - anchors_sat - commit_tx_fee_sat(feerate_per_kw, 2, &channel_type)) + * 1000 + ); + + // Send the smallest non-dust HTLC possible, this will pass both holder and counterparty validation + // + // One msat below the non-dust HTLC value will break counterparty validation at + // `validate_update_add_htlc`. This is why we don't bother taking a look at the range between the + // failure of `can_accept_incoming_htlc` and the failure of `validate_update_add_htlc`. 
+ let sender_amount_msat = dust_limit_satoshis * 1000; + + match no_outputs_case { + NoOutputs::PaymentSucceeds => (sender_amount_msat, sender_amount_msat), + NoOutputs::ReceiverCanAcceptHTLCA => (sender_amount_msat, sender_amount_msat), + NoOutputs::ReceiverCanAcceptHTLCB => panic!("This case is not run"), + NoOutputs::FailsReceiverUpdateAddHTLC => (sender_amount_msat, sender_amount_msat - 1), + } + } else if channel_type == ChannelTypeFeatures::anchors_zero_fee_commitments() { + // We can afford to send a non-dust HTLC + assert!(channel_value_sat > dust_limit_satoshis); + // But sending the biggest dust HTLC possible trims our balance output! + let max_dust_htlc = dust_limit_satoshis - 1; + assert!(channel_value_sat - max_dust_htlc < dust_limit_satoshis); + // So we can *only* send non-dust HTLCs + let details_0 = &nodes[0].node.list_channels()[0]; + assert_eq!(details_0.next_outbound_htlc_minimum_msat, dust_limit_satoshis * 1000); + // 0FC + 0-reserve baby! + assert_eq!(details_0.next_outbound_htlc_limit_msat, channel_value_sat * 1000); + + // Send the smallest non-dust HTLC possible, this will pass both holder and counterparty validation + // + // One msat below the non-dust HTLC value will break counterparty validation at + // `validate_update_add_htlc`. + let sender_amount_msat = dust_limit_satoshis * 1000; + + match no_outputs_case { + NoOutputs::PaymentSucceeds => (sender_amount_msat, sender_amount_msat), + NoOutputs::ReceiverCanAcceptHTLCA => (sender_amount_msat, sender_amount_msat), + NoOutputs::ReceiverCanAcceptHTLCB => panic!("This case is not run"), + NoOutputs::FailsReceiverUpdateAddHTLC => (sender_amount_msat, sender_amount_msat - 1), + } + } else { + panic!("Unexpected channel type"); + }; + + if let NoOutputs::PaymentSucceeds = no_outputs_case { + send_payment(&nodes[0], &[&nodes[1]], sender_amount_msat); + // Node 1 the fundee has 0-reserve too, so whatever they receive, they can send right back! 
+ // Node 0 should *always* have the funds to cover the fee of a single non-dust HTLC from node 1. + assert_eq!( + nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat, + sender_amount_msat + ); + send_payment(&nodes[1], &[&nodes[0]], sender_amount_msat); + } else { + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], sender_amount_msat); + let secp_ctx = Secp256k1::new(); + let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); + let cur_height = nodes[0].node.best_block.read().unwrap().height + 1; + let onion_keys = + onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv); + let recipient_onion_fields = + RecipientOnionFields::secret_only(payment_secret, sender_amount_msat); + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::test_build_onion_payloads( + &route.paths[0], + &recipient_onion_fields, + cur_height, + &None, + None, + None, + ) + .unwrap(); + assert_eq!(htlc_msat, sender_amount_msat); + let onion_packet = + onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash) + .unwrap(); + let msg = msgs::UpdateAddHTLC { + channel_id, + htlc_id: 0, + amount_msat: receiver_amount_msat, + payment_hash, + cltv_expiry: htlc_cltv, + onion_routing_packet: onion_packet, + skimmed_fee_msat: None, + blinding_point: None, + hold_htlc: None, + accountable: None, + }; + + nodes[1].node.handle_update_add_htlc(node_a_id, &msg); + + if let NoOutputs::FailsReceiverUpdateAddHTLC = no_outputs_case { + nodes[1].logger.assert_log_contains( + "lightning::ln::channelmanager", + "Remote HTLC add would overdraw remaining funds", + 3, + ); + assert_eq!(nodes[1].node.list_channels().len(), 0); + let err_msg = check_closed_broadcast(&nodes[1], 1, true).pop().unwrap(); + assert_eq!(err_msg.data, "Remote HTLC add would overdraw remaining funds"); + let reason = ClosureReason::ProcessingError { + err: "Remote HTLC add would overdraw remaining funds".to_string(), + }; + 
check_added_monitors(&nodes[1], 1); + check_closed_event(&nodes[1], 1, reason, &[node_a_id], channel_value_sat); + + return channel_type; + } + + // Now manually create the commitment_signed message corresponding to the update_add + // nodes[0] just sent. In the code for construction of this message, "local" refers + // to the sender of the message, and "remote" refers to the receiver. + + let feerate_per_kw = get_feerate!(nodes[0], nodes[1], channel_id); + + const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; + + let (local_secret, next_local_point) = { + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); + let local_chan = + chan_lock.channel_by_id.get(&channel_id).and_then(Channel::as_funded).unwrap(); + let chan_signer = local_chan.get_signer(); + // Make the signer believe we validated another commitment, so we can release the secret + chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; + + ( + chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(), + chan_signer + .as_ref() + .get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx) + .unwrap(), + ) + }; + let remote_point = { + let per_peer_lock; + let mut peer_state_lock; + + let channel = + get_channel_ref!(nodes[1], nodes[0], per_peer_lock, peer_state_lock, channel_id); + let chan_signer = channel.as_funded().unwrap().get_signer(); + chan_signer + .as_ref() + .get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx) + .unwrap() + }; + + // Build the remote commitment transaction so we can sign it, and then later use the + // signature for the commitment_signed message. 
+ let accepted_htlc_info = chan_utils::HTLCOutputInCommitment { + offered: false, + amount_msat: receiver_amount_msat, + cltv_expiry: htlc_cltv, + payment_hash, + transaction_output_index: Some(1), + }; + + let local_chan_balance_msat = channel_value_sat * 1000; + let commitment_number = INITIAL_COMMITMENT_NUMBER - 1; + + let res = { + let per_peer_lock; + let mut peer_state_lock; + + let channel = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, channel_id); + let chan_signer = channel.as_funded().unwrap().get_signer(); + + let (commitment_tx, _stats) = SpecTxBuilder {}.build_commitment_transaction( + false, + commitment_number, + &remote_point, + &channel.funding().channel_transaction_parameters, + &secp_ctx, + local_chan_balance_msat, + vec![accepted_htlc_info], + feerate_per_kw, + dust_limit_satoshis, + &nodes[0].logger, + ); + let params = &channel.funding().channel_transaction_parameters; + chan_signer + .as_ecdsa() + .unwrap() + .sign_counterparty_commitment( + params, + &commitment_tx, + Vec::new(), + Vec::new(), + &secp_ctx, + ) + .unwrap() + }; + + let commit_signed_msg = msgs::CommitmentSigned { + channel_id, + signature: res.0, + htlc_signatures: res.1, + funding_txid: None, + #[cfg(taproot)] + partial_signature_with_nonce: None, + }; + + // Send the commitment_signed message to the nodes[1]. + nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); + let _ = nodes[1].node.get_and_clear_pending_msg_events(); + + // Send the RAA to nodes[1]. 
+ let raa_msg = msgs::RevokeAndACK { + channel_id, + per_commitment_secret: local_secret, + next_per_commitment_point: next_local_point, + #[cfg(taproot)] + next_local_nonce: None, + release_htlc_message_paths: Vec::new(), + }; + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); + expect_and_process_pending_htlcs(&nodes[1], false); + + if channel_type == ChannelTypeFeatures::only_static_remote_key() { + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash }] + ); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + + // Make sure the HTLC failed in the way we expect. + match events[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, + .. + } => { + assert_eq!(update_fail_htlcs.len(), 1); + update_fail_htlcs[0].clone() + }, + _ => panic!("Unexpected event"), + }; + nodes[1].logger.assert_log( + "lightning::ln::channel", + "Attempting to fail HTLC due to balance exhausted on remote commitment".to_string(), + 1, + ); + + check_added_monitors(&nodes[1], 3); + } else { + expect_payment_claimable!(nodes[1], payment_hash, payment_secret, receiver_amount_msat); + check_added_monitors(&nodes[1], 2); + } + } + + channel_type +} + +#[test] +fn test_zero_reserve_zero_conf_combined() { + // Test that zero-reserve and zero-conf features work together: a channel that + // is immediately usable (no confirmations needed) and has zero reserve for the opener. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut config = test_default_channel_config(); + config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let channel_value_sat = 100_000; + + // Node 0 creates a channel to node 1. + nodes[0].node.create_channel(node_b_id, channel_value_sat, 0, 42, None, None).unwrap(); + let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + + // Node 1 accepts with both zero-conf AND zero-reserve. + nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::OpenChannelRequest { temporary_channel_id: chan_id, .. } => { + nodes[1] + .node + .accept_inbound_channel_from_trusted_peer(&chan_id, &node_a_id, 0, true, true, None) + .unwrap(); + }, + _ => panic!("Unexpected event"), + }; + + // Verify zero-conf: minimum_depth should be 0. + let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + assert_eq!(accept_channel.common_fields.minimum_depth, 0); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); + + // Create the funding transaction (no block confirmations needed for zero-conf). 
+ let (temporary_channel_id, tx, _) = + create_funding_transaction(&nodes[0], &node_b_id, channel_value_sat, 42); + nodes[0] + .node + .funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()) + .unwrap(); + let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); + + // Node 1 handles funding_created and immediately sends both FundingSigned and ChannelReady. + nodes[1].node.handle_funding_created(node_a_id, &funding_created); + check_added_monitors(&nodes[1], 1); + let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(bs_signed_locked.len(), 2); + + let as_channel_ready; + match &bs_signed_locked[0] { + MessageSendEvent::SendFundingSigned { node_id, msg } => { + assert_eq!(*node_id, node_a_id); + nodes[0].node.handle_funding_signed(node_b_id, &msg); + expect_channel_pending_event(&nodes[0], &node_b_id); + expect_channel_pending_event(&nodes[1], &node_a_id); + check_added_monitors(&nodes[0], 1); + + assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); + nodes[0].tx_broadcaster.clear(); + + as_channel_ready = + get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id); + }, + _ => panic!("Unexpected event"), + } + match &bs_signed_locked[1] { + MessageSendEvent::SendChannelReady { node_id, msg } => { + assert_eq!(*node_id, node_a_id); + nodes[0].node.handle_channel_ready(node_b_id, &msg); + expect_channel_ready_event(&nodes[0], &node_b_id); + }, + _ => panic!("Unexpected event"), + } + + nodes[1].node.handle_channel_ready(node_a_id, &as_channel_ready); + expect_channel_ready_event(&nodes[1], &node_a_id); + + let as_channel_update = + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); + let bs_channel_update = + get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); + nodes[0].node.handle_channel_update(node_b_id, 
&bs_channel_update); + nodes[1].node.handle_channel_update(node_a_id, &as_channel_update); + + // Channel should be immediately usable without any block confirmations. + assert_eq!(nodes[0].node.list_usable_channels().len(), 1); + assert_eq!(nodes[1].node.list_usable_channels().len(), 1); + + // Verify zero-reserve: opener (node 0) should have 0 reserve. + let details_a = &nodes[0].node.list_channels()[0]; + let node_0_reserve = details_a.unspendable_punishment_reserve.unwrap(); + let node_0_max_htlc = details_a.next_outbound_htlc_limit_msat; + let channel_type = details_a.channel_type.clone().unwrap(); + assert_eq!(node_0_reserve, 0); + assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()); + assert!(details_a.is_usable); + assert_eq!(details_a.confirmations.unwrap(), 0); + assert_eq!( + node_0_max_htlc, + (channel_value_sat - commit_tx_fee_sat(253, 2, &channel_type) - 2 * 330) * 1000 + ); + + // Verify acceptor (node 1) has a non-zero reserve. + let details_b = &nodes[1].node.list_channels()[0]; + assert_ne!(details_b.unspendable_punishment_reserve.unwrap(), 0); + assert!(details_b.is_usable); + + // Send payments in both directions to verify the combined feature works end-to-end. + send_payment(&nodes[0], &[&nodes[1]], node_0_max_htlc); + + let details_b = &nodes[1].node.list_channels()[0]; + let node_1_reserve = details_b.unspendable_punishment_reserve.unwrap(); + let node_1_max_htlc = details_b.next_outbound_htlc_limit_msat; + assert_eq!(node_1_reserve, 1000); + assert_eq!(node_1_max_htlc, node_0_max_htlc - node_1_reserve * 1000); + send_payment(&nodes[1], &[&nodes[0]], node_1_max_htlc); +} From 4e6a7527ab5963376d214c7cbcfc913eb01821b4 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Thu, 26 Feb 2026 03:01:46 +0000 Subject: [PATCH 5/9] Format `ChannelManager::create_channel_internal` and... 
`ChannelContext::do_accept_channel_checks`, `ChannelContext::new_for_outbound_channel`, `ChannelContext::new_for_inbound_channel`, `InboundV1Channel::new`, `OutboundV1Channel::new`. --- lightning/src/ln/channel.rs | 731 ++++++++++++++++++++--------- lightning/src/ln/channelmanager.rs | 68 ++- 2 files changed, 559 insertions(+), 240 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index ab4d0bb7fa9..a55b59e9afd 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -3578,154 +3578,258 @@ impl InitialRemoteCommitmentReceiver for FundedChannel ChannelContext { - #[rustfmt::skip] fn new_for_inbound_channel<'a, ES: EntropySource, F: FeeEstimator, L: Logger>( - fee_estimator: &'a LowerBoundedFeeEstimator, - entropy_source: &'a ES, - signer_provider: &'a SP, - counterparty_node_id: PublicKey, - their_features: &'a InitFeatures, - user_id: u128, - config: &'a UserConfig, - current_chain_height: u32, - logger: &'a L, - is_0conf: bool, - our_funding_satoshis: u64, - counterparty_pubkeys: ChannelPublicKeys, - channel_type: ChannelTypeFeatures, - holder_selected_channel_reserve_satoshis: u64, - msg_channel_reserve_satoshis: u64, - msg_push_msat: u64, + fee_estimator: &'a LowerBoundedFeeEstimator, entropy_source: &'a ES, + signer_provider: &'a SP, counterparty_node_id: PublicKey, their_features: &'a InitFeatures, + user_id: u128, config: &'a UserConfig, current_chain_height: u32, logger: &'a L, + is_0conf: bool, our_funding_satoshis: u64, counterparty_pubkeys: ChannelPublicKeys, + channel_type: ChannelTypeFeatures, holder_selected_channel_reserve_satoshis: u64, + msg_channel_reserve_satoshis: u64, msg_push_msat: u64, open_channel_fields: msgs::CommonOpenChannelFields, ) -> Result<(FundingScope, ChannelContext), ChannelError> { - let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id), None); - let announce_for_forwarding = if (open_channel_fields.channel_flags & 
1) == 1 { true } else { false }; + let logger = WithContext::from( + logger, + Some(counterparty_node_id), + Some(open_channel_fields.temporary_channel_id), + None, + ); + let announce_for_forwarding = + if (open_channel_fields.channel_flags & 1) == 1 { true } else { false }; - let channel_value_satoshis = our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis); + let channel_value_satoshis = + our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis); let channel_keys_id = signer_provider.generate_channel_keys_id(true, user_id); let holder_signer = signer_provider.derive_channel_signer(channel_keys_id); if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT { - return Err(ChannelError::close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT))); + return Err(ChannelError::close(format!( + "Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", + config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT + ))); } if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { - return Err(ChannelError::close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis))); + return Err(ChannelError::close(format!( + "Funding must be smaller than the total bitcoin supply. It was {}", + channel_value_satoshis + ))); } if msg_channel_reserve_satoshis > channel_value_satoshis { - return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis))); + return Err(ChannelError::close(format!( + "Bogus channel_reserve_satoshis ({}). 
Must be no greater than channel_value_satoshis: {}", + msg_channel_reserve_satoshis, channel_value_satoshis + ))); } - let full_channel_value_msat = (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000; + let full_channel_value_msat = + (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000; if msg_push_msat > full_channel_value_msat { - return Err(ChannelError::close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat))); + return Err(ChannelError::close(format!( + "push_msat {} was larger than channel amount minus reserve ({})", + msg_push_msat, full_channel_value_msat + ))); } if open_channel_fields.dust_limit_satoshis > channel_value_satoshis { - return Err(ChannelError::close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis))); + return Err(ChannelError::close(format!( + "dust_limit_satoshis {} was larger than channel_value_satoshis {}. 
Peer never wants payout outputs?", + open_channel_fields.dust_limit_satoshis, channel_value_satoshis + ))); } if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat { - return Err(ChannelError::close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat))); + return Err(ChannelError::close(format!( + "Minimum htlc value ({}) was larger than full channel value ({})", + open_channel_fields.htlc_minimum_msat, full_channel_value_msat + ))); } - FundedChannel::::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?; + FundedChannel::::check_remote_fee( + &channel_type, + fee_estimator, + open_channel_fields.commitment_feerate_sat_per_1000_weight, + None, + &&logger, + )?; - let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); + let max_counterparty_selected_contest_delay = u16::min( + config.channel_handshake_limits.their_to_self_delay, + MAX_LOCAL_BREAKDOWN_TIMEOUT, + ); if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay { - return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay))); + return Err(ChannelError::close(format!( + "They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. 
Actual: {}", + max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay + ))); } if open_channel_fields.max_accepted_htlcs < 1 { - return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned())); + return Err(ChannelError::close( + "0 max_accepted_htlcs makes for a useless channel".to_owned(), + )); } if open_channel_fields.max_accepted_htlcs > max_htlcs(&channel_type) { - return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, max_htlcs(&channel_type)))); + return Err(ChannelError::close(format!( + "max_accepted_htlcs was {}. It must not be larger than {}", + open_channel_fields.max_accepted_htlcs, + max_htlcs(&channel_type) + ))); } // Now check against optional parameters as set by config... if channel_value_satoshis < config.channel_handshake_limits.min_funding_satoshis { - return Err(ChannelError::close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis))); + return Err(ChannelError::close(format!( + "Funding satoshis ({}) is less than the user specified limit ({})", + channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis + ))); } - if open_channel_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat { - return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat))); + if open_channel_fields.htlc_minimum_msat + > config.channel_handshake_limits.max_htlc_minimum_msat + { + return Err(ChannelError::close(format!( + "htlc_minimum_msat ({}) is higher than the user specified limit ({})", + open_channel_fields.htlc_minimum_msat, + config.channel_handshake_limits.max_htlc_minimum_msat + ))); } - if open_channel_fields.max_htlc_value_in_flight_msat 
< config.channel_handshake_limits.min_max_htlc_value_in_flight_msat { - return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat))); + if open_channel_fields.max_htlc_value_in_flight_msat + < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat + { + return Err(ChannelError::close(format!( + "max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", + open_channel_fields.max_htlc_value_in_flight_msat, + config.channel_handshake_limits.min_max_htlc_value_in_flight_msat + ))); } - if msg_channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis { - return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis))); + if msg_channel_reserve_satoshis + > config.channel_handshake_limits.max_channel_reserve_satoshis + { + return Err(ChannelError::close(format!( + "channel_reserve_satoshis ({}) is higher than the user specified limit ({})", + msg_channel_reserve_satoshis, + config.channel_handshake_limits.max_channel_reserve_satoshis + ))); } - if open_channel_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs { - return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs))); + if open_channel_fields.max_accepted_htlcs + < config.channel_handshake_limits.min_max_accepted_htlcs + { + return Err(ChannelError::close(format!( + "max_accepted_htlcs ({}) is less than the user specified limit ({})", + open_channel_fields.max_accepted_htlcs, + config.channel_handshake_limits.min_max_accepted_htlcs + ))); } if 
open_channel_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); + return Err(ChannelError::close(format!( + "dust_limit_satoshis ({}) is less than the implementation limit ({})", + open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS + ))); } - if open_channel_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); + if open_channel_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::close(format!( + "dust_limit_satoshis ({}) is greater than the implementation limit ({})", + open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS + ))); } // Convert things into internal flags and prep our state: if config.channel_handshake_limits.force_announced_channel_preference { if config.channel_handshake_config.announce_for_forwarding != announce_for_forwarding { - return Err(ChannelError::close("Peer tried to open channel but their announcement preference is different from ours".to_owned())); + return Err(ChannelError::close(String::from( + "Peer tried to open channel but their announcement preference is different from ours" + ))); } } - if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS && holder_selected_channel_reserve_satoshis != 0 { + if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS + && holder_selected_channel_reserve_satoshis != 0 + { // Protocol level safety check in place, although it should never happen because // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS` - return Err(ChannelError::close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). 
dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); + return Err(ChannelError::close(format!( + "Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", + holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS + ))); } if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat { - return Err(ChannelError::close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat))); + return Err(ChannelError::close(format!( + "Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", + holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat + ))); } if msg_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.", + log_debug!( + logger, + "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). 
We can broadcast \ + stale states without any risk, implying this channel is very insecure for our counterparty.", msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS); } - if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis && holder_selected_channel_reserve_satoshis != 0 { - return Err(ChannelError::close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis))); + if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis + && holder_selected_channel_reserve_satoshis != 0 + { + return Err(ChannelError::close(format!( + "Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", + open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis + ))); } // v1 channel opens set `our_funding_satoshis` to 0, and v2 channel opens set `msg_push_msat` to 0. debug_assert!(our_funding_satoshis == 0 || msg_push_msat == 0); let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat; - let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { - match &open_channel_fields.shutdown_scriptpubkey { - &Some(ref script) => { - // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything - if script.len() == 0 { - None - } else { - if !script::is_bolt2_compliant(&script, their_features) { - return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))) + let counterparty_shutdown_scriptpubkey = + if their_features.supports_upfront_shutdown_script() { + match &open_channel_fields.shutdown_scriptpubkey { + &Some(ref script) => { + // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. 
We don't enforce anything + if script.len() == 0 { + None + } else { + if !script::is_bolt2_compliant(&script, their_features) { + return Err(ChannelError::close(format!( + "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", + script + ))); + } + Some(script.clone()) } - Some(script.clone()) - } - }, - // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel - &None => { - return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned())); + }, + // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel + &None => { + return Err(ChannelError::close(String::from( + "Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out" + ))); + }, } - } - } else { None }; + } else { + None + }; - let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { - match signer_provider.get_shutdown_scriptpubkey() { - Ok(scriptpubkey) => Some(scriptpubkey), - Err(_) => return Err(ChannelError::close("Failed to get upfront shutdown scriptpubkey".to_owned())), - } - } else { None }; + let shutdown_scriptpubkey = + if config.channel_handshake_config.commit_upfront_shutdown_pubkey { + match signer_provider.get_shutdown_scriptpubkey() { + Ok(scriptpubkey) => Some(scriptpubkey), + Err(_) => { + return Err(ChannelError::close( + "Failed to get upfront shutdown scriptpubkey".to_owned(), + )) + }, + } + } else { + None + }; if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { if !shutdown_scriptpubkey.is_compatible(&their_features) { - return Err(ChannelError::close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey))); + return Err(ChannelError::close(format!( + "Provided a scriptpubkey format 
not accepted by peer: {}", + shutdown_scriptpubkey + ))); } } let destination_script = match signer_provider.get_destination_script(channel_keys_id) { Ok(script) => script, - Err(_) => return Err(ChannelError::close("Failed to get destination script".to_owned())), + Err(_) => { + return Err(ChannelError::close("Failed to get destination script".to_owned())) + }, }; let mut secp_ctx = Secp256k1::new(); @@ -3747,9 +3851,15 @@ impl ChannelContext { holder_selected_channel_reserve_satoshis, #[cfg(debug_assertions)] - holder_prev_commitment_tx_balance: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))), + holder_prev_commitment_tx_balance: Mutex::new(( + value_to_self_msat, + (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat), + )), #[cfg(debug_assertions)] - counterparty_prev_commitment_tx_balance: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))), + counterparty_prev_commitment_tx_balance: Mutex::new(( + value_to_self_msat, + (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat), + )), #[cfg(any(test, fuzzing))] next_local_fee: Mutex::new(PredictedNextFee::default()), @@ -3781,7 +3891,9 @@ impl ChannelContext { config: LegacyChannelConfig { options: config.channel_config.clone(), announce_for_forwarding, - commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, + commit_upfront_shutdown_pubkey: config + .channel_handshake_config + .commit_upfront_shutdown_pubkey, }, prev_config: None, @@ -3791,7 +3903,7 @@ impl ChannelContext { temporary_channel_id: Some(open_channel_fields.temporary_channel_id), channel_id: open_channel_fields.temporary_channel_id, channel_state: ChannelState::NegotiatingFunding( - NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT + NegotiatingFundingFlags::OUR_INIT_SENT | 
NegotiatingFundingFlags::THEIR_INIT_SENT, ), announcement_sigs_state: AnnouncementSigsState::NotSent, secp_ctx, @@ -3843,19 +3955,35 @@ impl ChannelContext { feerate_per_kw: open_channel_fields.commitment_feerate_sat_per_1000_weight, counterparty_dust_limit_satoshis: open_channel_fields.dust_limit_satoshis, holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, - counterparty_max_htlc_value_in_flight_msat: cmp::min(open_channel_fields.max_htlc_value_in_flight_msat, channel_value_satoshis * 1000), - holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config), + counterparty_max_htlc_value_in_flight_msat: cmp::min( + open_channel_fields.max_htlc_value_in_flight_msat, + channel_value_satoshis * 1000, + ), + holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat( + channel_value_satoshis, + &config.channel_handshake_config, + ), counterparty_htlc_minimum_msat: open_channel_fields.htlc_minimum_msat, - holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, + holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 + { + 1 + } else { + config.channel_handshake_config.our_htlc_minimum_msat + }, counterparty_max_accepted_htlcs: open_channel_fields.max_accepted_htlcs, - holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, max_htlcs(&channel_type)), + holder_max_accepted_htlcs: cmp::min( + config.channel_handshake_config.our_max_accepted_htlcs, + max_htlcs(&channel_type), + ), minimum_depth, counterparty_forwarding_info: None, is_batch_funding: None, - counterparty_next_commitment_point: Some(open_channel_fields.first_per_commitment_point), + counterparty_next_commitment_point: Some( + open_channel_fields.first_per_commitment_point, + ), counterparty_current_commitment_point: None, counterparty_node_id, 
@@ -3892,100 +4020,139 @@ impl ChannelContext { // check if the funder's amount for the initial commitment tx is sufficient // for full fee payment plus a few HTLCs to ensure the channel will be useful. - let funders_amount_msat = funding.get_value_satoshis() * 1000 - funding.get_value_to_self_msat(); + let funders_amount_msat = + funding.get_value_satoshis() * 1000 - funding.get_value_to_self_msat(); let htlc_candidate = None; let include_counterparty_unknown_htlcs = false; let addl_nondust_htlc_count = MIN_AFFORDABLE_HTLC_COUNT; - let dust_exposure_limiting_feerate = channel_context.get_dust_exposure_limiting_feerate(&fee_estimator, funding.get_channel_type()); - let (remote_stats, _remote_htlcs) = channel_context.get_next_remote_commitment_stats( - &funding, - htlc_candidate, - include_counterparty_unknown_htlcs, - addl_nondust_htlc_count, - channel_context.feerate_per_kw, - dust_exposure_limiting_feerate - ).map_err(|()| ChannelError::close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction.", funders_amount_msat / 1000)))?; + let dust_exposure_limiting_feerate = channel_context + .get_dust_exposure_limiting_feerate(&fee_estimator, funding.get_channel_type()); + let (remote_stats, _remote_htlcs) = channel_context + .get_next_remote_commitment_stats( + &funding, + htlc_candidate, + include_counterparty_unknown_htlcs, + addl_nondust_htlc_count, + channel_context.feerate_per_kw, + dust_exposure_limiting_feerate, + ) + .map_err(|()| { + ChannelError::close(format!( + "Funding amount ({} sats) can't even pay fee for initial commitment transaction.", + funders_amount_msat / 1000 + )) + })?; // While it's reasonable for us to not meet the channel reserve initially (if they don't // want to push much to us), our counterparty should always have more than our reserve. 
- if remote_stats.commitment_stats.counterparty_balance_msat / 1000 < funding.holder_selected_channel_reserve_satoshis { - return Err(ChannelError::close("Insufficient funding amount for initial reserve".to_owned())); + if remote_stats.commitment_stats.counterparty_balance_msat / 1000 + < funding.holder_selected_channel_reserve_satoshis + { + return Err(ChannelError::close( + "Insufficient funding amount for initial reserve".to_owned(), + )); } Ok((funding, channel_context)) } - #[rustfmt::skip] fn new_for_outbound_channel<'a, ES: EntropySource, F: FeeEstimator, L: Logger>( - fee_estimator: &'a LowerBoundedFeeEstimator, - entropy_source: &'a ES, - signer_provider: &'a SP, - counterparty_node_id: PublicKey, - their_features: &'a InitFeatures, - funding_satoshis: u64, - push_msat: u64, - user_id: u128, - config: &'a UserConfig, - current_chain_height: u32, - outbound_scid_alias: u64, + fee_estimator: &'a LowerBoundedFeeEstimator, entropy_source: &'a ES, + signer_provider: &'a SP, counterparty_node_id: PublicKey, their_features: &'a InitFeatures, + funding_satoshis: u64, push_msat: u64, user_id: u128, config: &'a UserConfig, + current_chain_height: u32, outbound_scid_alias: u64, temporary_channel_id_fn: Option ChannelId>, - holder_selected_channel_reserve_satoshis: u64, - channel_keys_id: [u8; 32], - holder_signer: SP::EcdsaSigner, - _logger: L, + holder_selected_channel_reserve_satoshis: u64, channel_keys_id: [u8; 32], + holder_signer: SP::EcdsaSigner, _logger: L, ) -> Result<(FundingScope, ChannelContext), APIError> { // This will be updated with the counterparty contribution if this is a dual-funded channel let channel_value_satoshis = funding_satoshis; let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay; - if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO { - return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, 
channel_value_satoshis)}); + if !their_features.supports_wumbo() + && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO + { + return Err(APIError::APIMisuseError { + err: format!( + "funding_value must not exceed {}, it was {}", + MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis + ), + }); } if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { - return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)}); + return Err(APIError::APIMisuseError { + err: format!( + "funding_value must be smaller than the total bitcoin supply, it was {}", + channel_value_satoshis + ), + }); } let channel_value_msat = channel_value_satoshis * 1000; if push_msat > channel_value_msat { - return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) }); + return Err(APIError::APIMisuseError { + err: format!( + "Push value ({}) was larger than channel_value ({})", + push_msat, channel_value_msat + ), + }); } if holder_selected_contest_delay < BREAKDOWN_TIMEOUT { - return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)}); + return Err(APIError::APIMisuseError { + err: format!( + "Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", + holder_selected_contest_delay + ), + }); } let channel_type = get_initial_channel_type(&config, their_features); debug_assert!(!channel_type.supports_any_optional_bits()); - debug_assert!(!channel_type.requires_unknown_bits_from(&channelmanager::provided_channel_type_features(&config))); + debug_assert!(!channel_type + .requires_unknown_bits_from(&channelmanager::provided_channel_type_features(&config))); - let commitment_feerate = selected_commitment_sat_per_1000_weight( - &fee_estimator, &channel_type, - ); + let commitment_feerate = + 
selected_commitment_sat_per_1000_weight(&fee_estimator, &channel_type); let value_to_self_msat = channel_value_satoshis * 1000 - push_msat; let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); - let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { - match signer_provider.get_shutdown_scriptpubkey() { - Ok(scriptpubkey) => Some(scriptpubkey), - Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}), - } - } else { None }; + let shutdown_scriptpubkey = + if config.channel_handshake_config.commit_upfront_shutdown_pubkey { + match signer_provider.get_shutdown_scriptpubkey() { + Ok(scriptpubkey) => Some(scriptpubkey), + Err(_) => { + return Err(APIError::ChannelUnavailable { + err: "Failed to get shutdown scriptpubkey".to_owned(), + }) + }, + } + } else { + None + }; if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { if !shutdown_scriptpubkey.is_compatible(&their_features) { - return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() }); + return Err(APIError::IncompatibleShutdownScript { + script: shutdown_scriptpubkey.clone(), + }); } } let destination_script = match signer_provider.get_destination_script(channel_keys_id) { Ok(script) => script, - Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}), + Err(_) => { + return Err(APIError::ChannelUnavailable { + err: "Failed to get destination script".to_owned(), + }) + }, }; let pubkeys = holder_signer.pubkeys(&secp_ctx); - let temporary_channel_id = temporary_channel_id_fn.map(|f| f(&pubkeys)) + let temporary_channel_id = temporary_channel_id_fn + .map(|f| f(&pubkeys)) .unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source)); let funding = FundingScope { @@ -3996,9 +4163,15 @@ impl ChannelContext { // We'll add our counterparty's `funding_satoshis` to these max 
commitment output assertions // when we receive `accept_channel2`. #[cfg(debug_assertions)] - holder_prev_commitment_tx_balance: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), + holder_prev_commitment_tx_balance: Mutex::new(( + channel_value_satoshis * 1000 - push_msat, + push_msat, + )), #[cfg(debug_assertions)] - counterparty_prev_commitment_tx_balance: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), + counterparty_prev_commitment_tx_balance: Mutex::new(( + channel_value_satoshis * 1000 - push_msat, + push_msat, + )), #[cfg(any(test, fuzzing))] next_local_fee: Mutex::new(PredictedNextFee::default()), @@ -4028,7 +4201,9 @@ impl ChannelContext { config: LegacyChannelConfig { options: config.channel_config.clone(), announce_for_forwarding: config.channel_handshake_config.announce_for_forwarding, - commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, + commit_upfront_shutdown_pubkey: config + .channel_handshake_config + .commit_upfront_shutdown_pubkey, }, prev_config: None, @@ -4091,11 +4266,22 @@ impl ChannelContext { counterparty_max_htlc_value_in_flight_msat: 0, // We'll adjust this to include our counterparty's `funding_satoshis` when we // receive `accept_channel2`. 
- holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config), + holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat( + channel_value_satoshis, + &config.channel_handshake_config, + ), counterparty_htlc_minimum_msat: 0, - holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, + holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 + { + 1 + } else { + config.channel_handshake_config.our_htlc_minimum_msat + }, counterparty_max_accepted_htlcs: 0, - holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, max_htlcs(&channel_type)), + holder_max_accepted_htlcs: cmp::min( + config.channel_handshake_config.our_max_accepted_htlcs, + max_htlcs(&channel_type), + ), minimum_depth: None, // Filled in in accept_channel counterparty_forwarding_info: None, @@ -4138,15 +4324,23 @@ impl ChannelContext { let htlc_candidate = None; let include_counterparty_unknown_htlcs = false; let addl_nondust_htlc_count = MIN_AFFORDABLE_HTLC_COUNT; - let dust_exposure_limiting_feerate = channel_context.get_dust_exposure_limiting_feerate(&fee_estimator, funding.get_channel_type()); - let _local_stats = channel_context.get_next_local_commitment_stats( - &funding, - htlc_candidate, - include_counterparty_unknown_htlcs, - addl_nondust_htlc_count, - channel_context.feerate_per_kw, - dust_exposure_limiting_feerate, - ).map_err(|()| APIError::APIMisuseError { err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction.", funding.get_value_to_self_msat() / 1000)})?; + let dust_exposure_limiting_feerate = channel_context + .get_dust_exposure_limiting_feerate(&fee_estimator, funding.get_channel_type()); + let _local_stats = channel_context + .get_next_local_commitment_stats( + &funding, + htlc_candidate, + 
include_counterparty_unknown_htlcs, + addl_nondust_htlc_count, + channel_context.feerate_per_kw, + dust_exposure_limiting_feerate, + ) + .map_err(|()| APIError::APIMisuseError { + err: format!( + "Funding amount ({}) can't even pay fee for initial commitment transaction.", + funding.get_value_to_self_msat() / 1000 + ), + })?; Ok((funding, channel_context)) } @@ -4372,103 +4566,181 @@ impl ChannelContext { /// Performs checks against necessary constraints after receiving either an `accept_channel` or /// `accept_channel2` message. - #[rustfmt::skip] pub fn do_accept_channel_checks( &mut self, funding: &mut FundingScope, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures, common_fields: &msgs::CommonAcceptChannelFields, channel_reserve_satoshis: u64, ) -> Result<(), ChannelError> { - let peer_limits = if let Some(ref limits) = self.inbound_handshake_limits_override { limits } else { default_limits }; + let peer_limits = if let Some(ref limits) = self.inbound_handshake_limits_override { + limits + } else { + default_limits + }; // Check sanity of message fields: if !funding.is_outbound() { - return Err(ChannelError::close("Got an accept_channel message from an inbound peer".to_owned())); + return Err(ChannelError::close( + "Got an accept_channel message from an inbound peer".to_owned(), + )); } - if !matches!(self.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) { - return Err(ChannelError::close("Got an accept_channel message at a strange time".to_owned())); + if !matches!(self.channel_state, ChannelState::NegotiatingFunding(flags) + if flags == NegotiatingFundingFlags::OUR_INIT_SENT) + { + return Err(ChannelError::close( + "Got an accept_channel message at a strange time".to_owned(), + )); } - let channel_type = common_fields.channel_type.as_ref() - .ok_or_else(|| ChannelError::close("option_channel_type assumed to be supported".to_owned()))?; + let channel_type = 
common_fields.channel_type.as_ref().ok_or_else(|| { + ChannelError::close("option_channel_type assumed to be supported".to_owned()) + })?; if channel_type != funding.get_channel_type() { - return Err(ChannelError::close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned())); + return Err(ChannelError::close(String::from( + "Channel Type in accept_channel didn't match the one sent in open_channel.", + ))); } if common_fields.dust_limit_satoshis > 21000000 * 100000000 { - return Err(ChannelError::close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", common_fields.dust_limit_satoshis))); + return Err(ChannelError::close(format!( + "Peer never wants payout outputs? dust_limit_satoshis was {}", + common_fields.dust_limit_satoshis + ))); } if channel_reserve_satoshis > funding.get_value_satoshis() { - return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", channel_reserve_satoshis, funding.get_value_satoshis()))); + return Err(ChannelError::close(format!( + "Bogus channel_reserve_satoshis ({}). 
Must not be greater than ({})", + channel_reserve_satoshis, + funding.get_value_satoshis() + ))); } - if common_fields.dust_limit_satoshis > funding.holder_selected_channel_reserve_satoshis && funding.holder_selected_channel_reserve_satoshis != 0 { - return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", common_fields.dust_limit_satoshis, funding.holder_selected_channel_reserve_satoshis))); + if common_fields.dust_limit_satoshis > funding.holder_selected_channel_reserve_satoshis + && funding.holder_selected_channel_reserve_satoshis != 0 + { + return Err(ChannelError::close(format!( + "Dust limit ({}) is bigger than our channel reserve ({})", + common_fields.dust_limit_satoshis, funding.holder_selected_channel_reserve_satoshis + ))); } - if channel_reserve_satoshis > funding.get_value_satoshis() - funding.holder_selected_channel_reserve_satoshis { - return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})", - channel_reserve_satoshis, funding.get_value_satoshis() - funding.holder_selected_channel_reserve_satoshis))); + if channel_reserve_satoshis + > funding.get_value_satoshis() - funding.holder_selected_channel_reserve_satoshis + { + return Err(ChannelError::close(format!( + "Bogus channel_reserve_satoshis ({}). 
Must not be greater than channel value minus our reserve ({})", + channel_reserve_satoshis, + funding.get_value_satoshis() - funding.holder_selected_channel_reserve_satoshis + ))); } - let full_channel_value_msat = (funding.get_value_satoshis() - channel_reserve_satoshis) * 1000; + let full_channel_value_msat = + (funding.get_value_satoshis() - channel_reserve_satoshis) * 1000; if common_fields.htlc_minimum_msat >= full_channel_value_msat { - return Err(ChannelError::close(format!("Minimum htlc value ({}) is full channel value ({})", common_fields.htlc_minimum_msat, full_channel_value_msat))); + return Err(ChannelError::close(format!( + "Minimum htlc value ({}) is full channel value ({})", + common_fields.htlc_minimum_msat, full_channel_value_msat + ))); } - let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); + let max_delay_acceptable = + u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); if common_fields.to_self_delay > max_delay_acceptable { - return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, common_fields.to_self_delay))); + return Err(ChannelError::close(format!( + "They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", + max_delay_acceptable, common_fields.to_self_delay + ))); } if common_fields.max_accepted_htlcs < 1 { - return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned())); + return Err(ChannelError::close( + "0 max_accepted_htlcs makes for a useless channel".to_owned(), + )); } let channel_type = funding.get_channel_type(); if common_fields.max_accepted_htlcs > max_htlcs(channel_type) { - return Err(ChannelError::close(format!("max_accepted_htlcs was {}. 
It must not be larger than {}", common_fields.max_accepted_htlcs, max_htlcs(channel_type)))); + return Err(ChannelError::close(format!( + "max_accepted_htlcs was {}. It must not be larger than {}", + common_fields.max_accepted_htlcs, + max_htlcs(channel_type) + ))); } // Now check against optional parameters as set by config... if common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat { - return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat))); + return Err(ChannelError::close(format!( + "htlc_minimum_msat ({}) is higher than the user specified limit ({})", + common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat + ))); } - if common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat { - return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat))); + if common_fields.max_htlc_value_in_flight_msat + < peer_limits.min_max_htlc_value_in_flight_msat + { + return Err(ChannelError::close(format!( + "max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", + common_fields.max_htlc_value_in_flight_msat, + peer_limits.min_max_htlc_value_in_flight_msat + ))); } if channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis { - return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis))); + return Err(ChannelError::close(format!( + "channel_reserve_satoshis ({}) is higher than the user specified limit ({})", + channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis + ))); } if common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs { - return 
Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs))); + return Err(ChannelError::close(format!( + "max_accepted_htlcs ({}) is less than the user specified limit ({})", + common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs + ))); } if common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); + return Err(ChannelError::close(format!( + "dust_limit_satoshis ({}) is less than the implementation limit ({})", + common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS + ))); } if common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); + return Err(ChannelError::close(format!( + "dust_limit_satoshis ({}) is greater than the implementation limit ({})", + common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS + ))); } if common_fields.minimum_depth > peer_limits.max_minimum_depth { - return Err(ChannelError::close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, common_fields.minimum_depth))); + return Err(ChannelError::close(format!( + "We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", + peer_limits.max_minimum_depth, common_fields.minimum_depth + ))); } - let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { - match &common_fields.shutdown_scriptpubkey { - &Some(ref script) => { - // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. 
We don't enforce anything - if script.len() == 0 { - None - } else { - if !script::is_bolt2_compliant(&script, their_features) { - return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))); + let counterparty_shutdown_scriptpubkey = + if their_features.supports_upfront_shutdown_script() { + match &common_fields.shutdown_scriptpubkey { + &Some(ref script) => { + // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything + if script.len() == 0 { + None + } else { + if !script::is_bolt2_compliant(&script, their_features) { + return Err(ChannelError::close(format!( + "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", + script + ))); + } + Some(script.clone()) } - Some(script.clone()) - } - }, - // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel - &None => { - return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned())); + }, + // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel + &None => { + return Err(ChannelError::close(String::from( + "Peer is signaling upfront_shutdown but we don't get any script. 
Use 0-length script to opt-out" + ))); + }, } - } - } else { None }; + } else { + None + }; self.counterparty_dust_limit_satoshis = common_fields.dust_limit_satoshis; - self.counterparty_max_htlc_value_in_flight_msat = cmp::min(common_fields.max_htlc_value_in_flight_msat, funding.get_value_satoshis() * 1000); + self.counterparty_max_htlc_value_in_flight_msat = cmp::min( + common_fields.max_htlc_value_in_flight_msat, + funding.get_value_satoshis() * 1000, + ); funding.counterparty_selected_channel_reserve_satoshis = Some(channel_reserve_satoshis); self.counterparty_htlc_minimum_msat = common_fields.htlc_minimum_msat; self.counterparty_max_accepted_htlcs = common_fields.max_accepted_htlcs; @@ -4483,20 +4755,23 @@ impl ChannelContext { funding_pubkey: common_fields.funding_pubkey, revocation_basepoint: RevocationBasepoint::from(common_fields.revocation_basepoint), payment_point: common_fields.payment_basepoint, - delayed_payment_basepoint: DelayedPaymentBasepoint::from(common_fields.delayed_payment_basepoint), - htlc_basepoint: HtlcBasepoint::from(common_fields.htlc_basepoint) + delayed_payment_basepoint: DelayedPaymentBasepoint::from( + common_fields.delayed_payment_basepoint, + ), + htlc_basepoint: HtlcBasepoint::from(common_fields.htlc_basepoint), }; - funding.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters { - selected_contest_delay: common_fields.to_self_delay, - pubkeys: counterparty_pubkeys, - }); + funding.channel_transaction_parameters.counterparty_parameters = + Some(CounterpartyChannelTransactionParameters { + selected_contest_delay: common_fields.to_self_delay, + pubkeys: counterparty_pubkeys, + }); self.counterparty_next_commitment_point = Some(common_fields.first_per_commitment_point); self.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey; self.channel_state = ChannelState::NegotiatingFunding( - NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT + 
NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT, ); self.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now. @@ -13281,21 +13556,27 @@ impl OutboundV1Channel { } #[allow(dead_code)] // TODO(dual_funding): Remove once opending V2 channels is enabled. - #[rustfmt::skip] pub fn new( - fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, - channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, - outbound_scid_alias: u64, temporary_channel_id: Option, logger: L, is_0reserve: bool, + fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, + counterparty_node_id: PublicKey, their_features: &InitFeatures, + channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, + current_chain_height: u32, outbound_scid_alias: u64, + temporary_channel_id: Option, logger: L, is_0reserve: bool, ) -> Result, APIError> { let holder_selected_channel_reserve_satoshis = if is_0reserve { 0 } else { - let reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); + let reserve_satoshis = + get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); if reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { // Protocol level safety check in place, although it should never happen because // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS` - return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below \ - implemention limit dust_limit_satoshis {}", reserve_satoshis) }); + return Err(APIError::APIMisuseError { + err: format!( + "Holder selected channel reserve below implemention limit dust_limit_satoshis {}", + reserve_satoshis + ), + }); } reserve_satoshis }; @@ -13303,9 +13584,8 @@ impl OutboundV1Channel { let channel_keys_id = 
signer_provider.generate_channel_keys_id(false, user_id); let holder_signer = signer_provider.derive_channel_signer(channel_keys_id); - let temporary_channel_id_fn = temporary_channel_id.map(|id| { - move |_: &ChannelPublicKeys| id - }); + let temporary_channel_id_fn = + temporary_channel_id.map(|id| move |_: &ChannelPublicKeys| id); let (funding, context) = ChannelContext::new_for_outbound_channel( fee_estimator, @@ -13327,7 +13607,10 @@ impl OutboundV1Channel { )?; let unfunded_context = UnfundedChannelContext { unfunded_channel_age_ticks: 0, - holder_commitment_point: HolderCommitmentPoint::new(&context.holder_signer, &context.secp_ctx), + holder_commitment_point: HolderCommitmentPoint::new( + &context.holder_signer, + &context.secp_ctx, + ), }; // We initialize `signer_pending_open_channel` to false, and leave setting the flag @@ -13661,18 +13944,23 @@ pub(super) fn channel_type_from_open_channel( impl InboundV1Channel { /// Creates a new channel from a remote sides' request for one. /// Assumes chain_hash has already been checked and corresponds with what we expect! - #[rustfmt::skip] pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig, current_chain_height: u32, logger: &L, is_0conf: bool, is_0reserve: bool, ) -> Result, ChannelError> { - let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id), None); + let logger = WithContext::from( + logger, + Some(counterparty_node_id), + Some(msg.common_fields.temporary_channel_id), + None, + ); // First check the channel type is known, failing before we do anything else if we don't // support this channel type. 
- let channel_type = channel_type_from_open_channel(&msg.common_fields, our_supported_features)?; + let channel_type = + channel_type_from_open_channel(&msg.common_fields, our_supported_features)?; let holder_selected_channel_reserve_satoshis = if is_0reserve { 0 @@ -13683,8 +13971,10 @@ impl InboundV1Channel { funding_pubkey: msg.common_fields.funding_pubkey, revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint), payment_point: msg.common_fields.payment_basepoint, - delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint), - htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint) + delayed_payment_basepoint: DelayedPaymentBasepoint::from( + msg.common_fields.delayed_payment_basepoint, + ), + htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint), }; let (funding, context) = ChannelContext::new_for_inbound_channel( @@ -13699,7 +13989,6 @@ impl InboundV1Channel { &&logger, is_0conf, 0, - counterparty_pubkeys, channel_type, holder_selected_channel_reserve_satoshis, @@ -13709,9 +13998,13 @@ impl InboundV1Channel { )?; let unfunded_context = UnfundedChannelContext { unfunded_channel_age_ticks: 0, - holder_commitment_point: HolderCommitmentPoint::new(&context.holder_signer, &context.secp_ctx), + holder_commitment_point: HolderCommitmentPoint::new( + &context.holder_signer, + &context.secp_ctx, + ), }; - let chan = Self { funding, context, unfunded_context, signer_pending_accept_channel: false }; + let chan = + Self { funding, context, unfunded_context, signer_pending_accept_channel: false }; Ok(chan) } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 20432f7854d..5378c9bff90 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3708,10 +3708,18 @@ impl< ) } - #[rustfmt::skip] - fn create_channel_internal(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, 
user_channel_id: u128, temporary_channel_id: Option, override_config: Option, is_0reserve: bool) -> Result { + fn create_channel_internal( + &self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, + user_channel_id: u128, temporary_channel_id: Option, + override_config: Option, is_0reserve: bool, + ) -> Result { if channel_value_satoshis < 1000 { - return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) }); + return Err(APIError::APIMisuseError { + err: format!( + "Channel value must be at least 1000 satoshis. It was {}", + channel_value_satoshis + ), + }); } let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); @@ -3720,17 +3728,26 @@ impl< let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(&their_network_key) - .ok_or_else(|| APIError::APIMisuseError{ err: format!("Not connected to node: {}", their_network_key) })?; + let peer_state_mutex = + per_peer_state.get(&their_network_key).ok_or_else(|| APIError::APIMisuseError { + err: format!("Not connected to node: {}", their_network_key), + })?; let mut peer_state = peer_state_mutex.lock().unwrap(); if !peer_state.is_connected { - return Err(APIError::APIMisuseError{ err: format!("Not connected to node: {}", their_network_key) }); + return Err(APIError::APIMisuseError { + err: format!("Not connected to node: {}", their_network_key), + }); } if let Some(temporary_channel_id) = temporary_channel_id { if peer_state.channel_by_id.contains_key(&temporary_channel_id) { - return Err(APIError::APIMisuseError{ err: format!("Channel with temporary channel ID {} already exists!", temporary_channel_id)}); + return Err(APIError::APIMisuseError { + err: format!( + "Channel with temporary channel ID {} already exists!", + temporary_channel_id + ), + }); } } @@ -3738,15 +3755,23 @@ impl< let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); let 
their_features = &peer_state.latest_features; let config = self.config.read().unwrap(); - let config = if let Some(config) = &override_config { - config - } else { - &*config - }; - match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, - their_features, channel_value_satoshis, push_msat, user_channel_id, config, - self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &self.logger, is_0reserve) - { + let config = if let Some(config) = &override_config { config } else { &*config }; + match OutboundV1Channel::new( + &self.fee_estimator, + &self.entropy_source, + &self.signer_provider, + their_network_key, + their_features, + channel_value_satoshis, + push_msat, + user_channel_id, + config, + self.best_block.read().unwrap().height, + outbound_scid_alias, + temporary_channel_id, + &self.logger, + is_0reserve, + ) { Ok(res) => res, Err(e) => { self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); @@ -3766,14 +3791,15 @@ impl< panic!("RNG is bad???"); } }, - hash_map::Entry::Vacant(entry) => { entry.insert(Channel::from(channel)); } + hash_map::Entry::Vacant(entry) => { + entry.insert(Channel::from(channel)); + }, } if let Some(msg) = res { - peer_state.pending_msg_events.push(MessageSendEvent::SendOpenChannel { - node_id: their_network_key, - msg, - }); + peer_state + .pending_msg_events + .push(MessageSendEvent::SendOpenChannel { node_id: their_network_key, msg }); } Ok(temporary_channel_id) } From 7fde002e7066ea6088f965b395c0328b85406e65 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Thu, 5 Mar 2026 09:00:32 +0000 Subject: [PATCH 6/9] Update `chanmon_consistency` to include 0FC and 0-reserve channels Co-Authored-By: HAL 9000 --- fuzz/src/chanmon_consistency.rs | 130 +++++++++++++++++++++++++------- 1 file changed, 102 insertions(+), 28 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 22006897a0f..2986a40f035 100644 
--- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -863,9 +863,15 @@ fn assert_action_timeout_awaiting_response(action: &msgs::ErrorAction) { )); } +pub enum ChanType { + Legacy, + KeyedAnchors, + ZeroFeeCommitments, +} + #[inline] pub fn do_test( - data: &[u8], underlying_out: Out, anchors: bool, + data: &[u8], underlying_out: Out, chan_type: ChanType, ) { let out = SearchingOutput::new(underlying_out); let broadcast_a = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); @@ -926,8 +932,19 @@ pub fn do_test( config.channel_config.forwarding_fee_proportional_millionths = 0; config.channel_handshake_config.announce_for_forwarding = true; config.reject_inbound_splices = false; - if !anchors { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; + match chan_type { + ChanType::Legacy => { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; + }, + ChanType::KeyedAnchors => { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; + }, + ChanType::ZeroFeeCommitments => { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; + }, } let network = Network::Bitcoin; let best_block_timestamp = genesis_block(network).header.time; @@ -978,8 +995,19 @@ pub fn do_test( config.channel_config.forwarding_fee_proportional_millionths = 0; config.channel_handshake_config.announce_for_forwarding = true; config.reject_inbound_splices = false; - if !anchors { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; + match chan_type { + ChanType::Legacy => { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; + 
config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; + }, + ChanType::KeyedAnchors => { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; + }, + ChanType::ZeroFeeCommitments => { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; + }, } let mut monitors = new_hash_map(); @@ -1078,8 +1106,23 @@ pub fn do_test( }}; } macro_rules! make_channel { - ($source: expr, $dest: expr, $source_monitor: expr, $dest_monitor: expr, $dest_keys_manager: expr, $chan_id: expr) => {{ - $source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None, None).unwrap(); + ($source: expr, $dest: expr, $source_monitor: expr, $dest_monitor: expr, $dest_keys_manager: expr, $chan_id: expr, $trusted_open: expr, $trusted_accept: expr) => {{ + if $trusted_open { + $source + .create_channel_to_trusted_peer_0reserve( + $dest.get_our_node_id(), + 100_000, + 42, + 0, + None, + None, + ) + .unwrap(); + } else { + $source + .create_channel($dest.get_our_node_id(), 100_000, 42, 0, None, None) + .unwrap(); + } let open_channel = { let events = $source.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1104,14 +1147,27 @@ pub fn do_test( random_bytes .copy_from_slice(&$dest_keys_manager.get_secure_random_bytes()[..16]); let user_channel_id = u128::from_be_bytes(random_bytes); - $dest - .accept_inbound_channel( - temporary_channel_id, - counterparty_node_id, - user_channel_id, - None, - ) - .unwrap(); + if $trusted_accept { + $dest + .accept_inbound_channel_from_trusted_peer( + temporary_channel_id, + counterparty_node_id, + user_channel_id, + false, + true, + None, + ) + .unwrap(); + } else { + $dest + .accept_inbound_channel( + temporary_channel_id, + counterparty_node_id, + user_channel_id, + None, + ) + .unwrap(); + } } else { panic!("Wrong 
event type"); } @@ -1287,12 +1343,16 @@ pub fn do_test( // Fuzz mode uses XOR-based hashing (all bytes XOR to one byte), and // versions 0-5 cause collisions between A-B and B-C channel pairs // (e.g., A-B with Version(1) collides with B-C with Version(3)). - make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 1); - make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 2); - make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 3); - make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 4); - make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 5); - make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 6); + // A-B: channel 2 A and B have 0-reserve (trusted open + trusted accept), + // channel 3 A has 0-reserve (trusted accept) + make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 1, false, false); + make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 2, true, true); + make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 3, false, true); + // B-C: channel 4 B has 0-reserve (via trusted accept), + // channel 5 C has 0-reserve (via trusted open) + make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 4, false, true); + make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 5, true, false); + make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 6, false, false); // Wipe the transactions-broadcasted set to make sure we don't broadcast any transactions // during normal operation in `test_return`. 
@@ -2301,7 +2361,7 @@ pub fn do_test( 0x80 => { let mut max_feerate = last_htlc_clear_fee_a; - if !anchors { + if matches!(chan_type, ChanType::Legacy) { max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; } if fee_est_a.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate { @@ -2316,7 +2376,7 @@ pub fn do_test( 0x84 => { let mut max_feerate = last_htlc_clear_fee_b; - if !anchors { + if matches!(chan_type, ChanType::Legacy) { max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; } if fee_est_b.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate { @@ -2331,7 +2391,7 @@ pub fn do_test( 0x88 => { let mut max_feerate = last_htlc_clear_fee_c; - if !anchors { + if matches!(chan_type, ChanType::Legacy) { max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; } if fee_est_c.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate { @@ -2783,12 +2843,26 @@ impl SearchingOutput { } pub fn chanmon_consistency_test(data: &[u8], out: Out) { - do_test(data, out.clone(), false); - do_test(data, out, true); + do_test(data, out.clone(), ChanType::Legacy); + do_test(data, out.clone(), ChanType::KeyedAnchors); + do_test(data, out, ChanType::ZeroFeeCommitments); } #[no_mangle] pub extern "C" fn chanmon_consistency_run(data: *const u8, datalen: usize) { - do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {}, false); - do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {}, true); + do_test( + unsafe { std::slice::from_raw_parts(data, datalen) }, + test_logger::DevNull {}, + ChanType::Legacy, + ); + do_test( + unsafe { std::slice::from_raw_parts(data, datalen) }, + test_logger::DevNull {}, + ChanType::KeyedAnchors, + ); + do_test( + unsafe { std::slice::from_raw_parts(data, datalen) }, + test_logger::DevNull {}, + ChanType::ZeroFeeCommitments, + ); } From da22c330c056f2de4163a4e0137e5e94dc35f1f4 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Thu, 12 Mar 
2026 19:53:22 +0000 Subject: [PATCH 7/9] f: tx_builder cleanups --- lightning/src/sign/tx_builder.rs | 70 +++++++++++++++----------------- 1 file changed, 33 insertions(+), 37 deletions(-) diff --git a/lightning/src/sign/tx_builder.rs b/lightning/src/sign/tx_builder.rs index 4d62d28d9ad..2c06c497902 100644 --- a/lightning/src/sign/tx_builder.rs +++ b/lightning/src/sign/tx_builder.rs @@ -206,34 +206,31 @@ fn get_dust_exposure_stats( } } -fn check_no_outputs( +fn has_output( is_outbound_from_holder: bool, holder_balance_before_fee_msat: u64, counterparty_balance_before_fee_msat: u64, feerate_per_kw: u32, nondust_htlc_count: usize, broadcaster_dust_limit_satoshis: u64, channel_type: &ChannelTypeFeatures, -) -> Result<(), ()> { +) -> bool { let commit_tx_fee_sat = commit_tx_fee_sat(feerate_per_kw, nondust_htlc_count, channel_type); let (real_holder_balance_msat, real_counterparty_balance_msat) = if is_outbound_from_holder { ( - holder_balance_before_fee_msat.checked_sub(commit_tx_fee_sat * 1000).ok_or(())?, + holder_balance_before_fee_msat.saturating_sub(commit_tx_fee_sat * 1000), counterparty_balance_before_fee_msat, ) } else { ( holder_balance_before_fee_msat, - counterparty_balance_before_fee_msat.checked_sub(commit_tx_fee_sat * 1000).ok_or(())?, + counterparty_balance_before_fee_msat.saturating_sub(commit_tx_fee_sat * 1000), ) }; // Make sure the commitment transaction has at least one output let dust_limit_msat = broadcaster_dust_limit_satoshis * 1000; - if real_holder_balance_msat < dust_limit_msat + let has_no_output = real_holder_balance_msat < dust_limit_msat && real_counterparty_balance_msat < dust_limit_msat - && nondust_htlc_count == 0 - { - return Err(()); - } - Ok(()) + && nondust_htlc_count == 0; + !has_no_output } fn get_next_commitment_stats( @@ -299,7 +296,7 @@ fn get_next_commitment_stats( // For zero-reserve channels, we check two things independently: // 1) Given the current set of HTLCs and feerate, does the commitment have at least one output ? 
- check_no_outputs( + if !has_output( is_outbound_from_holder, holder_balance_before_fee_msat, counterparty_balance_before_fee_msat, @@ -307,7 +304,9 @@ fn get_next_commitment_stats( nondust_htlc_count, broadcaster_dust_limit_satoshis, channel_type, - )?; + ) { + return Err(()); + } // 2) Now including any additional non-dust HTLCs (usually the fee spike buffer HTLC), does the funder cover // this bigger transaction fee ? The funder can dip below their dust limit to cover this case, as the @@ -612,35 +611,28 @@ fn get_available_balances( fn adjust_boundaries_if_max_dust_htlc_produces_no_output( local: bool, is_outbound_from_holder: bool, holder_balance_before_fee_msat: u64, - counterparty_balance_before_fee_msat: u64, nondust_htlc_count: usize, feerate_per_kw: u32, + counterparty_balance_before_fee_msat: u64, nondust_htlc_count: usize, spiked_feerate: u32, dust_limit_satoshis: u64, channel_type: &ChannelTypeFeatures, next_outbound_htlc_minimum_msat: u64, available_capacity_msat: u64, ) -> (u64, u64) { - let tx_fee_sat = commit_tx_fee_sat(feerate_per_kw, nondust_htlc_count, channel_type); - let (holder_balance_msat, counterparty_balance_msat) = if is_outbound_from_holder { - ( - holder_balance_before_fee_msat.saturating_sub(tx_fee_sat.saturating_mul(1000)), - counterparty_balance_before_fee_msat, - ) - } else { - ( - holder_balance_before_fee_msat, - counterparty_balance_before_fee_msat.saturating_sub(tx_fee_sat.saturating_mul(1000)), - ) - }; - + // First, determine the biggest dust HTLC we could send let (htlc_success_tx_fee_sat, htlc_timeout_tx_fee_sat) = - second_stage_tx_fees_sat(channel_type, feerate_per_kw); + second_stage_tx_fees_sat(channel_type, spiked_feerate); let min_nondust_htlc_sat = dust_limit_satoshis + if local { htlc_timeout_tx_fee_sat } else { htlc_success_tx_fee_sat }; let max_dust_htlc_msat = (min_nondust_htlc_sat.saturating_mul(1000)).saturating_sub(1); - // If the biggest dust HTLC produces no outputs, then we have to say something... 
- let dust_limit_msat = dust_limit_satoshis.saturating_mul(1000); - if holder_balance_msat.saturating_sub(max_dust_htlc_msat) < dust_limit_msat - && counterparty_balance_msat < dust_limit_msat - && nondust_htlc_count == 0 - { + // If this dust HTLC produces no outputs, then we have to say something! It is now possible to produce a + // commitment with no outputs. + if !has_output( + is_outbound_from_holder, + holder_balance_before_fee_msat.saturating_sub(max_dust_htlc_msat), + counterparty_balance_before_fee_msat, + spiked_feerate, + nondust_htlc_count, + dust_limit_satoshis, + channel_type, + ) { // If we are allowed to send non-dust HTLCs, set the min HTLC to the smallest non-dust HTLC... if available_capacity_msat >= min_nondust_htlc_sat.saturating_mul(1000) { ( @@ -653,18 +645,22 @@ fn adjust_boundaries_if_max_dust_htlc_produces_no_output( // Otherwise, set the max HTLC to the biggest that still leaves our main balance output untrimmed. // Note that this will be a dust HTLC. } else { - // Remember we've got no non-dust HTLCs on the commitment here, - // so we just account for a single non-dust HTLC - let fee_spike_buffer_sat = commit_tx_fee_sat(feerate_per_kw, 1, channel_type); + // Remember we've got no non-dust HTLCs on the commitment here + let current_spiked_tx_fee_sat = commit_tx_fee_sat(spiked_feerate, 0, channel_type); + let spike_buffer_tx_fee_sat = commit_tx_fee_sat(spiked_feerate, 1, channel_type); // We must cover the greater of // 1) The dust_limit_satoshis plus the fee of the existing commitment at the spiked feerate. // 2) The fee of the commitment with an additional non-dust HTLC, aka the fee spike buffer HTLC. // In this case we don't mind the holder balance output dropping below the dust limit, as // this additional non-dust HTLC will create the single remaining output on the commitment.
let min_balance_msat = - cmp::max(dust_limit_satoshis + tx_fee_sat, fee_spike_buffer_sat) * 1000; + cmp::max(dust_limit_satoshis + current_spiked_tx_fee_sat, spike_buffer_tx_fee_sat) + * 1000; ( next_outbound_htlc_minimum_msat, + // We make no assumptions about the size of `available_capacity_msat` passed to this + // function, we only care that the new `available_capacity_msat` is under + // `holder_balance_before_fee_msat - min_balance_msat` cmp::min( holder_balance_before_fee_msat.saturating_sub(min_balance_msat), available_capacity_msat, From 2b67511e37d322ed6d72732933660e44d6999716 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Thu, 12 Mar 2026 20:43:02 +0000 Subject: [PATCH 8/9] f: 0FC channels always have an output on the commitment, the P2A output Also take the opportunity to remove some test case runs that don't do anything novel compared to other runs. --- lightning/src/ln/htlc_reserve_unit_tests.rs | 87 ++++++++------------- lightning/src/sign/tx_builder.rs | 4 +- 2 files changed, 37 insertions(+), 54 deletions(-) diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index b24049f9669..60ba38fd439 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -2639,10 +2639,6 @@ fn test_zero_reserve_no_outputs() { let channel_type = do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::PaymentSucceeds); assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()); - let channel_type = - do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::ReceiverCanAcceptHTLCA); - assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()); - let channel_type = do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::FailsReceiverUpdateAddHTLC); assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()); @@ -2653,14 +2649,6 @@ fn test_zero_reserve_no_outputs() { let channel_type = 
do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::PaymentSucceeds); assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_fee_commitments()); - - let channel_type = - do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::ReceiverCanAcceptHTLCA); - assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_fee_commitments()); - - let channel_type = - do_test_zero_reserve_no_outputs(config.clone(), NoOutputs::FailsReceiverUpdateAddHTLC); - assert_eq!(channel_type, ChannelTypeFeatures::anchors_zero_fee_commitments()); } fn do_test_zero_reserve_no_outputs( @@ -2886,33 +2874,31 @@ fn do_test_zero_reserve_no_outputs( match no_outputs_case { NoOutputs::PaymentSucceeds => (sender_amount_msat, sender_amount_msat), - NoOutputs::ReceiverCanAcceptHTLCA => (sender_amount_msat, sender_amount_msat), + NoOutputs::ReceiverCanAcceptHTLCA => panic!("This case is not run"), NoOutputs::ReceiverCanAcceptHTLCB => panic!("This case is not run"), NoOutputs::FailsReceiverUpdateAddHTLC => (sender_amount_msat, sender_amount_msat - 1), } } else if channel_type == ChannelTypeFeatures::anchors_zero_fee_commitments() { // We can afford to send a non-dust HTLC assert!(channel_value_sat > dust_limit_satoshis); - // But sending the biggest dust HTLC possible trims our balance output! - let max_dust_htlc = dust_limit_satoshis - 1; - assert!(channel_value_sat - max_dust_htlc < dust_limit_satoshis); - // So we can *only* send non-dust HTLCs + // Sending the biggest dust HTLC possible trims our balance output! + let max_dust_htlc_sat = dust_limit_satoshis - 1; + assert!(channel_value_sat - max_dust_htlc_sat < dust_limit_satoshis); + // But we'll always have the P2A output on the commitment, so we are free to send any size HTLC, + // including those that result in only a single output on the commitment, the P2A output. 
let details_0 = &nodes[0].node.list_channels()[0]; - assert_eq!(details_0.next_outbound_htlc_minimum_msat, dust_limit_satoshis * 1000); + assert_eq!(details_0.next_outbound_htlc_minimum_msat, 1000); // 0FC + 0-reserve baby! assert_eq!(details_0.next_outbound_htlc_limit_msat, channel_value_sat * 1000); - // Send the smallest non-dust HTLC possible, this will pass both holder and counterparty validation - // - // One msat below the non-dust HTLC value will break counterparty validation at - // `validate_update_add_htlc`. - let sender_amount_msat = dust_limit_satoshis * 1000; + // Send the max size dust HTLC; this results in a commitment with only the P2A output present + let sender_amount_msat = max_dust_htlc_sat * 1000; match no_outputs_case { NoOutputs::PaymentSucceeds => (sender_amount_msat, sender_amount_msat), - NoOutputs::ReceiverCanAcceptHTLCA => (sender_amount_msat, sender_amount_msat), + NoOutputs::ReceiverCanAcceptHTLCA => panic!("This case is not run"), NoOutputs::ReceiverCanAcceptHTLCB => panic!("This case is not run"), - NoOutputs::FailsReceiverUpdateAddHTLC => (sender_amount_msat, sender_amount_msat - 1), + NoOutputs::FailsReceiverUpdateAddHTLC => panic!("This case is not run"), } } else { panic!("Unexpected channel type"); @@ -3093,37 +3079,32 @@ fn do_test_zero_reserve_no_outputs( nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); expect_and_process_pending_htlcs(&nodes[1], false); - if channel_type == ChannelTypeFeatures::only_static_remote_key() { - expect_htlc_handling_failed_destinations!( - nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::Receive { payment_hash }] - ); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash }] + ); - let events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); - // 
Make sure the HTLC failed in the way we expect. - match events[0] { - MessageSendEvent::UpdateHTLCs { - updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, - .. - } => { - assert_eq!(update_fail_htlcs.len(), 1); - update_fail_htlcs[0].clone() - }, - _ => panic!("Unexpected event"), - }; - nodes[1].logger.assert_log( - "lightning::ln::channel", - "Attempting to fail HTLC due to balance exhausted on remote commitment".to_string(), - 1, - ); + // Make sure the HTLC failed in the way we expect. + match events[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, + .. + } => { + assert_eq!(update_fail_htlcs.len(), 1); + update_fail_htlcs[0].clone() + }, + _ => panic!("Unexpected event"), + }; + nodes[1].logger.assert_log( + "lightning::ln::channel", + "Attempting to fail HTLC due to balance exhausted on remote commitment".to_string(), + 1, + ); - check_added_monitors(&nodes[1], 3); - } else { - expect_payment_claimable!(nodes[1], payment_hash, payment_secret, receiver_amount_msat); - check_added_monitors(&nodes[1], 2); - } + check_added_monitors(&nodes[1], 3); } channel_type diff --git a/lightning/src/sign/tx_builder.rs b/lightning/src/sign/tx_builder.rs index 2c06c497902..4265c2d6686 100644 --- a/lightning/src/sign/tx_builder.rs +++ b/lightning/src/sign/tx_builder.rs @@ -229,7 +229,9 @@ fn has_output( let dust_limit_msat = broadcaster_dust_limit_satoshis * 1000; let has_no_output = real_holder_balance_msat < dust_limit_msat && real_counterparty_balance_msat < dust_limit_msat - && nondust_htlc_count == 0; + && nondust_htlc_count == 0 + // 0FC channels always have a P2A output on the commitment transaction + && !channel_type.supports_anchor_zero_fee_commitments(); !has_no_output } From ba6bd6076f4381b92fc752660fe3bf833bca8f02 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Fri, 13 Mar 2026 03:32:20 +0000 Subject: [PATCH 9/9] f: underscore that there is no guarantee that the counterparty accepts 0-reserve --- 
lightning/src/ln/channelmanager.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 5378c9bff90..1e4c860d428 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3692,6 +3692,8 @@ impl< /// the reserve the counterparty must keep at all times in the channel to zero. This allows the counterparty to /// spend their entire channel balance, and attempt to force-close the channel with a revoked commitment /// transaction **for free**. + /// + /// Note that there is no guarantee that the counterparty accepts such a channel. pub fn create_channel_to_trusted_peer_0reserve( &self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, temporary_channel_id: Option, @@ -10828,6 +10830,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// This allows the counterparty to spend their entire channel balance, and attempt to force-close the channel /// with a revoked commitment transaction **for free**. /// + /// Note that there is no guarantee that the counterparty accepts such a channel themselves. + /// /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id pub fn accept_inbound_channel_from_trusted_peer(