Commit 08aa8b7

Refactor incoming HTLC accept checks out from Channel::update_add_htlc
In the future, we plan to completely remove `decode_update_add_htlc_onion` and replace it with a batched variant. This refactor makes no functional changes: it improves readability in its current form and lets us reuse the incoming HTLC acceptance checks in the upcoming batched variant.
1 parent c0a2f6a commit 08aa8b7
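
A minimal sketch of the resulting call flow, using simplified stand-in types rather than the real LDK definitions: the acceptance checks live in a standalone, read-only method that reports a failure reason plus an onion error code, and the caller downgrades a failed check to a failing `PendingHTLCStatus` before handing it to the now-simpler `update_add_htlc`.

```rust
// Hypothetical, simplified stand-ins for illustration only; not the real
// `Channel`/`PendingHTLCStatus` types from lightning/src/ln.
enum PendingHTLCStatus {
    Forward,
    Fail(u16),
}

struct Channel {
    local_shutdown_sent: bool,
    over_dust_exposure: bool,
}

impl Channel {
    // Read-only acceptance check, analogous in shape to the new
    // `can_accept_incoming_htlc`: it returns a reason and an error code
    // instead of rewriting the pending status through a closure.
    fn can_accept_incoming_htlc(&self) -> Result<(), (&'static str, u16)> {
        if self.local_shutdown_sent {
            return Err(("Shutdown was already sent", 0x4000 | 8));
        }
        if self.over_dust_exposure {
            return Err(("Exceeded our dust exposure limit", 0x1000 | 7));
        }
        Ok(())
    }

    // Slimmed-down state update: it only records the (possibly failing) status
    // and no longer needs a fee estimator, logger, or status-rewriting closure.
    fn update_add_htlc(&mut self, status: PendingHTLCStatus) {
        match status {
            PendingHTLCStatus::Forward => println!("HTLC queued for forwarding"),
            PendingHTLCStatus::Fail(code) => println!("HTLC will be failed back with code {code:#x}"),
        }
    }
}

fn main() {
    let mut chan = Channel { local_shutdown_sent: true, over_dust_exposure: false };

    // The caller (ChannelManager in the real code) runs the check first and, on
    // failure, rejects the HTLC backwards instead of forwarding it.
    let mut status = PendingHTLCStatus::Forward;
    if let Err((reason, code)) = chan.can_accept_incoming_htlc() {
        println!("rejecting HTLC: {reason}");
        status = PendingHTLCStatus::Fail(code);
    }
    chan.update_add_htlc(status);
}
```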

File tree

2 files changed (+120, -92 lines)

lightning/src/ln/channel.rs

+84, -57
@@ -4118,20 +4118,12 @@ impl<SP: Deref> Channel<SP> where
 		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger))
 	}
 
-	pub fn update_add_htlc<F, FE: Deref, L: Deref>(
-		&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
-		create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
-	) -> Result<(), ChannelError>
-	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
-		FE::Target: FeeEstimator, L::Target: Logger,
-	{
+	pub fn update_add_htlc(
+		&mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus,
+	) -> Result<(), ChannelError> {
 		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
 			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
 		}
-		// We can't accept HTLCs sent after we've sent a shutdown.
-		if self.context.channel_state.is_local_shutdown_sent() {
-			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
-		}
 		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
 		if self.context.channel_state.is_remote_shutdown_sent() {
 			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
@@ -4150,7 +4142,6 @@ impl<SP: Deref> Channel<SP> where
 		}
 
 		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
-		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
 		if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
 			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
 		}
@@ -4179,34 +4170,6 @@ impl<SP: Deref> Channel<SP> where
 			}
 		}
 
-		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
-		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-			(0, 0)
-		} else {
-			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
-			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
-				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
-		};
-		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
-		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
-			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
-			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
-				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
-					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
-				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-			}
-		}
-
-		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
-		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
-			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
-			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
-				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
-					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
-				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-			}
-		}
-
 		let pending_value_to_self_msat =
 			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
 		let pending_remote_value_msat =
@@ -4240,23 +4203,7 @@ impl<SP: Deref> Channel<SP> where
 		} else {
 			0
 		};
-		if !self.context.is_outbound() {
-			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
-			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
-			// side, only on the sender's. Note that with anchor outputs we are no longer as
-			// sensitive to fee spikes, so we need to account for them.
-			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
-			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
-			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
-			}
-			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
-				// Note that if the pending_forward_status is not updated here, then it's because we're already failing
-				// the HTLC, i.e. its status is already set to failing.
-				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
-				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-			}
-		} else {
+		if self.context.is_outbound() {
 			// Check that they won't violate our local required channel reserve by adding this HTLC.
 			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
 			let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
@@ -6145,6 +6092,86 @@ impl<SP: Deref> Channel<SP> where
 		})
 	}
 
+	pub fn can_accept_incoming_htlc<F: Deref, L: Deref>(
+		&self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L
+	) -> Result<(), (&'static str, u16)>
+	where
+		F::Target: FeeEstimator,
+		L::Target: Logger
+	{
+		if self.context.channel_state.is_local_shutdown_sent() {
+			return Err(("Shutdown was already sent", 0x4000|8))
+		}
+
+		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
+		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+			(0, 0)
+		} else {
+			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
+			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
+				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
+		};
+		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
+		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
+			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
+			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
+					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+				return Err(("Exceeded our dust exposure limit on counterparty commitment tx", 0x1000|7))
+			}
+		}
+
+		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
+		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
+			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
+			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
+					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+				return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7))
+			}
+		}
+
+		let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+			ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
+		} else {
+			0
+		};
+
+		let mut removed_outbound_total_msat = 0;
+		for ref htlc in self.context.pending_outbound_htlcs.iter() {
+			if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
+				removed_outbound_total_msat += htlc.amount_msat;
+			} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
+				removed_outbound_total_msat += htlc.amount_msat;
+			}
+		}
+
+		let pending_value_to_self_msat =
+			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
+		let pending_remote_value_msat =
+			self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
+
+		if !self.context.is_outbound() {
+			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
+			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
+			// side, only on the sender's. Note that with anchor outputs we are no longer as
+			// sensitive to fee spikes, so we need to account for them.
+			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
+			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
+			}
+			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
+				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
+				return Err(("Fee spike buffer violation", 0x1000|7));
+			}
+		}
+
+		Ok(())
+	}
+
 	pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
 		self.context.cur_holder_commitment_transaction_number + 1
 	}

lightning/src/ln/channelmanager.rs

+36, -35
@@ -6796,52 +6796,53 @@ where
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
 				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-					let pending_forward_info = match decoded_hop_res {
+					let mut pending_forward_info = match decoded_hop_res {
 						Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
 							self.construct_pending_htlc_status(
 								msg, counterparty_node_id, shared_secret, next_hop,
 								chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt,
 							),
 						Err(e) => PendingHTLCStatus::Fail(e)
 					};
-					let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+					let logger = WithChannelContext::from(&self.logger, &chan.context);
+					// If the update_add is completely bogus, the call will Err and we will close,
+					// but if we've sent a shutdown and they haven't acknowledged it yet, we just
+					// want to reject the new HTLC and fail it backwards instead of forwarding.
+					if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) {
 						if msg.blinding_point.is_some() {
-							return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
-								msgs::UpdateFailMalformedHTLC {
-									channel_id: msg.channel_id,
-									htlc_id: msg.htlc_id,
-									sha256_of_onion: [0; 32],
-									failure_code: INVALID_ONION_BLINDING,
-								}
-							))
-						}
-						// If the update_add is completely bogus, the call will Err and we will close,
-						// but if we've sent a shutdown and they haven't acknowledged it yet, we just
-						// want to reject the new HTLC and fail it backwards instead of forwarding.
-						match pending_forward_info {
-							PendingHTLCStatus::Forward(PendingHTLCInfo {
-								ref incoming_shared_secret, ref routing, ..
-							}) => {
-								let reason = if routing.blinded_failure().is_some() {
-									HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
-								} else if (error_code & 0x1000) != 0 {
-									let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
-									HTLCFailReason::reason(real_code, error_data)
-								} else {
-									HTLCFailReason::from_failure_code(error_code)
-								}.get_encrypted_failure_packet(incoming_shared_secret, &None);
-								let msg = msgs::UpdateFailHTLC {
+							pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
+								msgs::UpdateFailMalformedHTLC {
 									channel_id: msg.channel_id,
 									htlc_id: msg.htlc_id,
-									reason
-								};
-								PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
-							},
-							_ => pending_forward_info
+									sha256_of_onion: [0; 32],
+									failure_code: INVALID_ONION_BLINDING,
+								}
+							))
+						} else {
+							match pending_forward_info {
+								PendingHTLCStatus::Forward(PendingHTLCInfo {
+									ref incoming_shared_secret, ref routing, ..
+								}) => {
+									let reason = if routing.blinded_failure().is_some() {
+										HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
+									} else if (error_code & 0x1000) != 0 {
+										let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+										HTLCFailReason::reason(real_code, error_data)
+									} else {
+										HTLCFailReason::from_failure_code(error_code)
+									}.get_encrypted_failure_packet(incoming_shared_secret, &None);
+									let msg = msgs::UpdateFailHTLC {
+										channel_id: msg.channel_id,
+										htlc_id: msg.htlc_id,
+										reason
+									};
+									pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg));
+								},
+								_ => {},
+							}
 						}
-					};
-					let logger = WithChannelContext::from(&self.logger, &chan.context);
-					try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &&logger), chan_phase_entry);
+					}
+					try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info), chan_phase_entry);
 				} else {
 					return try_chan_phase_entry!(self, Err(ChannelError::Close(
 						"Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);

0 commit comments
