Commit 2edc5b2

Refactor incoming HTLC accept checks out from Channel::update_add_htlc
In the future, we plan to completely remove `decode_update_add_htlc_onion` and replace it with a batched variant. This refactor, while improving readability in its current form, does not feature any functional changes and allows us to reuse the incoming HTLC acceptance checks in the batched variant.
1 parent cc7faa3 commit 2edc5b2
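
The refactor splits the old monolithic `Channel::update_add_htlc` into two pieces: a read-only `Channel::can_accept_incoming_htlc` that runs the policy ("soft-fail") checks and reports a failure reason plus a BOLT 4 error code, and a slimmed-down `update_add_htlc` that only enforces hard protocol violations and records the HTLC with whatever `PendingHTLCStatus` the caller settled on. The sketch below is a minimal, standalone illustration of that calling pattern, not code from this commit: the types are simplified stand-ins (`local_shutdown_sent`, the two-variant `PendingHTLCStatus`), and the real checks and failure handling are far more involved (see the diff below).

// Minimal standalone sketch, NOT code from this commit: the types below are
// simplified stand-ins for LDK's real `Channel`/`PendingHTLCStatus`.

#[derive(Debug)]
enum PendingHTLCStatus {
    Forward,   // the real variant carries a full PendingHTLCInfo
    Fail(u16), // the real variant carries an HTLCFailureMsg, not a bare code
}

struct Channel {
    local_shutdown_sent: bool, // stand-in for ChannelState::is_local_shutdown_sent()
}

impl Channel {
    /// Mirrors the new `Channel::can_accept_incoming_htlc`: read-only policy checks
    /// that report a reason and a BOLT 4 error code instead of mutating the status.
    fn can_accept_incoming_htlc(&self) -> Result<(), (&'static str, u16)> {
        if self.local_shutdown_sent {
            return Err(("Shutdown was already sent", 0x4000 | 8));
        }
        // ...dust-exposure and fee-spike-buffer checks elided in this sketch...
        Ok(())
    }

    /// Mirrors the slimmed-down `Channel::update_add_htlc`: it now just takes the
    /// status the caller already decided on and records the HTLC.
    fn update_add_htlc(&mut self, status: PendingHTLCStatus) -> Result<(), &'static str> {
        println!("recording inbound HTLC with status {:?}", status);
        Ok(())
    }
}

fn main() {
    let mut chan = Channel { local_shutdown_sent: true };

    // The caller (the ChannelManager code in the real change) runs the accept checks
    // first and downgrades a forwardable HTLC to a fail-backwards status on a soft failure...
    let mut status = PendingHTLCStatus::Forward;
    if let Err((reason, code)) = chan.can_accept_incoming_htlc() {
        println!("failing HTLC backwards: {reason} (code {code:#06x})");
        status = PendingHTLCStatus::Fail(code);
    }

    // ...and only then hands the HTLC to the channel.
    chan.update_add_htlc(status).unwrap();
}

In the real change, the `ChannelManager` code in channelmanager.rs plays the caller's role, as shown in the second file of the diff.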

File tree: 2 files changed (120 additions, 92 deletions)

lightning/src/ln/channel.rs

Lines changed: 84 additions & 57 deletions
@@ -4121,20 +4121,12 @@ impl<SP: Deref> Channel<SP> where
         Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger))
     }

-    pub fn update_add_htlc<F, FE: Deref, L: Deref>(
-        &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
-        create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
-    ) -> Result<(), ChannelError>
-    where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
-        FE::Target: FeeEstimator, L::Target: Logger,
-    {
+    pub fn update_add_htlc(
+        &mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus,
+    ) -> Result<(), ChannelError> {
         if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
             return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
         }
-        // We can't accept HTLCs sent after we've sent a shutdown.
-        if self.context.channel_state.is_local_shutdown_sent() {
-            pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
-        }
         // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
         if self.context.channel_state.is_remote_shutdown_sent() {
             return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
@@ -4153,7 +4145,6 @@ impl<SP: Deref> Channel<SP> where
         }

         let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
-        let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
         if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
             return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
         }
@@ -4182,34 +4173,6 @@ impl<SP: Deref> Channel<SP> where
             }
         }

-        let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
-        let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-            (0, 0)
-        } else {
-            let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
-            (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
-                dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
-        };
-        let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
-        if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
-            let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
-            if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
-                log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
-                    on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
-                pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-            }
-        }
-
-        let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
-        if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
-            let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
-            if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
-                log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
-                    on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
-                pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-            }
-        }
-
         let pending_value_to_self_msat =
             self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
         let pending_remote_value_msat =
@@ -4243,23 +4206,7 @@ impl<SP: Deref> Channel<SP> where
         } else {
             0
         };
-        if !self.context.is_outbound() {
-            // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
-            // the spec because the fee spike buffer requirement doesn't exist on the receiver's
-            // side, only on the sender's. Note that with anchor outputs we are no longer as
-            // sensitive to fee spikes, so we need to account for them.
-            let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
-            let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
-            if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-                remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
-            }
-            if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
-                // Note that if the pending_forward_status is not updated here, then it's because we're already failing
-                // the HTLC, i.e. its status is already set to failing.
-                log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
-                pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-            }
-        } else {
+        if self.context.is_outbound() {
             // Check that they won't violate our local required channel reserve by adding this HTLC.
             let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
             let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
@@ -6148,6 +6095,86 @@ impl<SP: Deref> Channel<SP> where
         })
     }

+    pub fn can_accept_incoming_htlc<F: Deref, L: Deref>(
+        &self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L
+    ) -> Result<(), (&'static str, u16)>
+    where
+        F::Target: FeeEstimator,
+        L::Target: Logger
+    {
+        if self.context.channel_state.is_local_shutdown_sent() {
+            return Err(("Shutdown was already sent", 0x4000|8))
+        }
+
+        let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+        let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
+        let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+        let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+            (0, 0)
+        } else {
+            let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
+            (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
+                dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
+        };
+        let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
+        if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
+            let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
+            if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+                log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
+                    on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+                return Err(("Exceeded our dust exposure limit on counterparty commitment tx", 0x1000|7))
+            }
+        }
+
+        let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
+        if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
+            let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
+            if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+                log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
+                    on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+                return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7))
+            }
+        }
+
+        let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+            ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
+        } else {
+            0
+        };
+
+        let mut removed_outbound_total_msat = 0;
+        for ref htlc in self.context.pending_outbound_htlcs.iter() {
+            if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
+                removed_outbound_total_msat += htlc.amount_msat;
+            } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
+                removed_outbound_total_msat += htlc.amount_msat;
+            }
+        }
+
+        let pending_value_to_self_msat =
+            self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
+        let pending_remote_value_msat =
+            self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
+
+        if !self.context.is_outbound() {
+            // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
+            // the spec because the fee spike buffer requirement doesn't exist on the receiver's
+            // side, only on the sender's. Note that with anchor outputs we are no longer as
+            // sensitive to fee spikes, so we need to account for them.
+            let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+            let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
+            if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
+            }
+            if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
+                log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
+                return Err(("Fee spike buffer violation", 0x1000|7));
+            }
+        }
+
+        Ok(())
+    }
+
     pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
         self.context.cur_holder_commitment_transaction_number + 1
     }
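
The `u16` codes returned by `can_accept_incoming_htlc` are BOLT 4 onion failure codes: `0x4000|8` is PERM|8 (`permanent_channel_failure`) and `0x1000|7` is UPDATE|7 (`temporary_channel_failure`), whose UPDATE bit tells the caller to attach our latest `channel_update` to the failure. A small standalone sketch of that decomposition follows; the constant names are illustrative, not identifiers from this tree.

// Standalone sketch of the failure-code flags, NOT code from this tree:
// `PERM` and `UPDATE` are illustrative names for the BOLT 4 flag bits.
const PERM: u16 = 0x4000;   // permanent failure
const UPDATE: u16 = 0x1000; // the failure should carry our latest channel_update

fn describe(code: u16) -> &'static str {
    if code == (PERM | 8) {
        "permanent_channel_failure (e.g. shutdown already sent)"
    } else if code == (UPDATE | 7) {
        "temporary_channel_failure (e.g. dust exposure or fee spike buffer)"
    } else {
        "other BOLT 4 failure"
    }
}

fn main() {
    for code in [PERM | 8, UPDATE | 7] {
        // The UPDATE bit is what the channelmanager.rs hunk below tests with
        // `(error_code & 0x1000) != 0` before fetching a channel_update to embed.
        println!("{code:#06x}: {} (update flag: {})", describe(code), (code & UPDATE) != 0);
    }
}

This is why the channelmanager.rs change below only calls `get_htlc_inbound_temp_fail_err_and_data` when the `0x1000` bit is set.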

lightning/src/ln/channelmanager.rs

Lines changed: 36 additions & 35 deletions
@@ -6814,52 +6814,53 @@ where
         match peer_state.channel_by_id.entry(msg.channel_id) {
             hash_map::Entry::Occupied(mut chan_phase_entry) => {
                 if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-                    let pending_forward_info = match decoded_hop_res {
+                    let mut pending_forward_info = match decoded_hop_res {
                         Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
                             self.construct_pending_htlc_status(
                                 msg, counterparty_node_id, shared_secret, next_hop,
                                 chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt,
                             ),
                         Err(e) => PendingHTLCStatus::Fail(e)
                     };
-                    let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+                    let logger = WithChannelContext::from(&self.logger, &chan.context);
+                    // If the update_add is completely bogus, the call will Err and we will close,
+                    // but if we've sent a shutdown and they haven't acknowledged it yet, we just
+                    // want to reject the new HTLC and fail it backwards instead of forwarding.
+                    if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) {
                         if msg.blinding_point.is_some() {
-                            return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
-                                msgs::UpdateFailMalformedHTLC {
-                                    channel_id: msg.channel_id,
-                                    htlc_id: msg.htlc_id,
-                                    sha256_of_onion: [0; 32],
-                                    failure_code: INVALID_ONION_BLINDING,
-                                }
-                            ))
-                        }
-                        // If the update_add is completely bogus, the call will Err and we will close,
-                        // but if we've sent a shutdown and they haven't acknowledged it yet, we just
-                        // want to reject the new HTLC and fail it backwards instead of forwarding.
-                        match pending_forward_info {
-                            PendingHTLCStatus::Forward(PendingHTLCInfo {
-                                ref incoming_shared_secret, ref routing, ..
-                            }) => {
-                                let reason = if routing.blinded_failure().is_some() {
-                                    HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
-                                } else if (error_code & 0x1000) != 0 {
-                                    let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
-                                    HTLCFailReason::reason(real_code, error_data)
-                                } else {
-                                    HTLCFailReason::from_failure_code(error_code)
-                                }.get_encrypted_failure_packet(incoming_shared_secret, &None);
-                                let msg = msgs::UpdateFailHTLC {
+                            pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
+                                msgs::UpdateFailMalformedHTLC {
                                     channel_id: msg.channel_id,
                                     htlc_id: msg.htlc_id,
-                                    reason
-                                };
-                                PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
-                            },
-                            _ => pending_forward_info
+                                    sha256_of_onion: [0; 32],
+                                    failure_code: INVALID_ONION_BLINDING,
+                                }
+                            ))
+                        } else {
+                            match pending_forward_info {
+                                PendingHTLCStatus::Forward(PendingHTLCInfo {
+                                    ref incoming_shared_secret, ref routing, ..
+                                }) => {
+                                    let reason = if routing.blinded_failure().is_some() {
+                                        HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
+                                    } else if (error_code & 0x1000) != 0 {
+                                        let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+                                        HTLCFailReason::reason(real_code, error_data)
+                                    } else {
+                                        HTLCFailReason::from_failure_code(error_code)
+                                    }.get_encrypted_failure_packet(incoming_shared_secret, &None);
+                                    let msg = msgs::UpdateFailHTLC {
+                                        channel_id: msg.channel_id,
+                                        htlc_id: msg.htlc_id,
+                                        reason
+                                    };
+                                    pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg));
+                                },
+                                _ => {},
+                            }
                         }
-                    };
-                    let logger = WithChannelContext::from(&self.logger, &chan.context);
-                    try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &&logger), chan_phase_entry);
+                    }
+                    try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info), chan_phase_entry);
                 } else {
                     return try_chan_phase_entry!(self, Err(ChannelError::Close(
                         "Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
