Commit 5d187f6

Merge pull request #2529 from TheBlueMatt/2023-08-shutdown-remove-early-sign
Don't send init `closing_signed` too early after final HTLC removal
2 parents: 185fbc1 + 70b1866

2 files changed: +132 -1
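
In short: after our counterparty's `revoke_and_ack` removes the final HTLC (or fee update) from our in-memory channel state, the latest commitment transaction we could still broadcast contains that update until one more `commitment_signed` arrives, so the channel must not start `closing_signed` negotiation yet. A minimal, self-contained Rust sketch of that gating (hypothetical `ClosingGate` type and method names; the real state lives on `ChannelContext` and the check in `maybe_propose_closing_signed`, both shown in the diff below):

// Sketch only: models the flag this commit introduces, under the assumption
// that a peer `revoke_and_ack` can remove an update which is still present in
// our latest broadcastable local commitment transaction.
struct ClosingGate {
	// Mirrors `ChannelContext::expecting_peer_commitment_signed` in the diff.
	expecting_peer_commitment_signed: bool,
}

impl ClosingGate {
	// A peer `revoke_and_ack` just removed an update that is still in our
	// latest broadcastable local commitment transaction.
	fn on_update_cleared_by_raa(&mut self) {
		self.expecting_peer_commitment_signed = true;
	}

	// The peer's follow-up `commitment_signed` has been processed.
	fn on_commitment_signed(&mut self) {
		self.expecting_peer_commitment_signed = false;
	}

	// Whether we may now send our initial `closing_signed`.
	fn may_start_closing_negotiation(&self) -> bool {
		!self.expecting_peer_commitment_signed
	}
}

fn main() {
	let mut gate = ClosingGate { expecting_peer_commitment_signed: false };
	gate.on_update_cleared_by_raa();
	assert!(!gate.may_start_closing_negotiation()); // too early: a commitment_signed is owed
	gate.on_commitment_signed();
	assert!(gate.may_start_closing_negotiation()); // negotiation may begin
}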

lightning/src/ln/channel.rs (+31)
@@ -816,6 +816,19 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 	#[cfg(not(test))]
 	closing_fee_limits: Option<(u64, u64)>,
 
+	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
+	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
+	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
+	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
+	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
+	///
+	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
+	/// until we see a `commitment_signed` before doing so.
+	///
+	/// We don't bother to persist this - we anticipate this state won't last longer than a few
+	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
+	expecting_peer_commitment_signed: bool,
+
 	/// The hash of the block in which the funding transaction was included.
 	funding_tx_confirmed_in: Option<BlockHash>,
 	funding_tx_confirmation_height: u32,
@@ -3249,6 +3262,7 @@ impl<SP: Deref> Channel<SP> where
 		};
 
 		self.context.cur_holder_commitment_transaction_number -= 1;
+		self.context.expecting_peer_commitment_signed = false;
 		// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
 		// build_commitment_no_status_check() next which will reset this to RAAFirst.
 		self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
@@ -3514,6 +3528,7 @@ impl<SP: Deref> Channel<SP> where
 		// Take references explicitly so that we can hold multiple references to self.context.
 		let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
 		let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
+		let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
 
 		// We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
 		pending_inbound_htlcs.retain(|htlc| {
@@ -3522,6 +3537,7 @@ impl<SP: Deref> Channel<SP> where
 				if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
 					value_to_self_msat_diff += htlc.amount_msat as i64;
 				}
+				*expecting_peer_commitment_signed = true;
 				false
 			} else { true }
 		});
@@ -3581,6 +3597,7 @@ impl<SP: Deref> Channel<SP> where
 			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
 				log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
 				htlc.state = OutboundHTLCState::Committed;
+				*expecting_peer_commitment_signed = true;
 			}
 			if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
 				log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
@@ -3601,6 +3618,7 @@ impl<SP: Deref> Channel<SP> where
 					log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
 					self.context.feerate_per_kw = feerate;
 					self.context.pending_update_fee = None;
+					self.context.expecting_peer_commitment_signed = true;
 				},
 				FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
 				FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
@@ -4381,6 +4399,10 @@ impl<SP: Deref> Channel<SP> where
 		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
 		where F::Target: FeeEstimator, L::Target: Logger
 	{
+		// If we're waiting on a monitor persistence, that implies we're also waiting to send some
+		// message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
+		// initiate `closing_signed` negotiation until we're clear of all pending messages. Note
+		// that closing_negotiation_ready checks this case (as well as a few others).
 		if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
 			return Ok((None, None, None));
 		}
@@ -4392,6 +4414,12 @@ impl<SP: Deref> Channel<SP> where
 			return Ok((None, None, None));
 		}
 
+		// If we're waiting on a counterparty `commitment_signed` to clear some updates from our
+		// local commitment transaction, we can't yet initiate `closing_signed` negotiation.
+		if self.context.expecting_peer_commitment_signed {
+			return Ok((None, None, None));
+		}
+
 		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
 
 		assert!(self.context.shutdown_scriptpubkey.is_some());
@@ -6005,6 +6033,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 
 				last_sent_closing_fee: None,
 				pending_counterparty_closing_signed: None,
+				expecting_peer_commitment_signed: false,
 				closing_fee_limits: None,
 				target_closing_feerate_sats_per_kw: None,
 
@@ -6637,6 +6666,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 
 				last_sent_closing_fee: None,
 				pending_counterparty_closing_signed: None,
+				expecting_peer_commitment_signed: false,
 				closing_fee_limits: None,
 				target_closing_feerate_sats_per_kw: None,
 
@@ -7708,6 +7738,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 
 				last_sent_closing_fee: None,
 				pending_counterparty_closing_signed: None,
+				expecting_peer_commitment_signed: false,
 				closing_fee_limits: None,
 				target_closing_feerate_sats_per_kw,
 
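With the pieces above in place, `maybe_propose_closing_signed` gains one more early return. A condensed sketch of how the gating conditions compose, written as a free function over assumed boolean inputs rather than the real method signature:

// Sketch only: negotiation is deferred if we already sent a `closing_signed`
// fee proposal, if the channel is not yet quiescent (closing_negotiation_ready
// also covers cases like pending monitor persistence), or, newly, if we still
// owe our latest local commitment a counterparty `commitment_signed`.
fn should_defer_closing_signed(
	last_sent_closing_fee: bool,            // we already proposed a closing fee
	closing_negotiation_ready: bool,        // shutdown complete, channel quiescent
	expecting_peer_commitment_signed: bool, // the flag added by this commit
) -> bool {
	last_sent_closing_fee || !closing_negotiation_ready || expecting_peer_commitment_signed
}

fn main() {
	// Right after the peer's RAA removed the final HTLC: defer.
	assert!(should_defer_closing_signed(false, true, true));
	// After their follow-up commitment_signed lands: proceed.
	assert!(!should_defer_closing_signed(false, true, false));
}
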
lightning/src/ln/shutdown_tests.rs (+101 -1)
@@ -10,8 +10,9 @@
 //! Tests of our shutdown and closing_signed negotiation logic.
 
 use crate::sign::{EntropySource, SignerProvider};
+use crate::chain::ChannelMonitorUpdateStatus;
 use crate::chain::transaction::OutPoint;
-use crate::events::{MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use crate::events::{MessageSendEvent, HTLCDestination, MessageSendEventsProvider, ClosureReason};
 use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, ChannelShutdownState, ChannelDetails};
 use crate::routing::router::{PaymentParameters, get_route, RouteParameters};
 use crate::ln::msgs;
@@ -1237,3 +1238,102 @@ fn simple_target_feerate_shutdown() {
 	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
+
+fn do_outbound_update_no_early_closing_signed(use_htlc: bool) {
+	// Previously, if we have a pending inbound HTLC (or fee update) on a channel which has
+	// initiated shutdown, we'd send our initial closing_signed immediately after receiving the
+	// peer's last RAA to remove the HTLC/fee update, but before receiving their final
+	// commitment_signed for a commitment without the HTLC/with the new fee. This caused at least
+	// LDK peers to force-close as we initiated closing_signed prior to the channel actually being
+	// fully empty of pending updates/HTLCs.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+	send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+	let payment_hash_opt = if use_htlc {
+		Some(route_payment(&nodes[1], &[&nodes[0]], 10_000).1)
+	} else {
+		None
+	};
+
+	if use_htlc {
+		nodes[0].node.fail_htlc_backwards(&payment_hash_opt.unwrap());
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[0],
+			[HTLCDestination::FailedPayment { payment_hash: payment_hash_opt.unwrap() }]);
+	} else {
+		*chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap() *= 10;
+		nodes[0].node.timer_tick_occurred();
+	}
+	let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
+	check_added_monitors(&nodes[0], 1);
+
+	nodes[1].node.close_channel(&chan_id, &nodes[0].node.get_our_node_id()).unwrap();
+	let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+	nodes[0].node.close_channel(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+	let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+
+	nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown);
+	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_1_shutdown);
+
+	if use_htlc {
+		nodes[1].node.handle_update_fail_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+	} else {
+		nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &updates.update_fee.unwrap());
+	}
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
+	check_added_monitors(&nodes[1], 1);
+	let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
+	check_added_monitors(&nodes[0], 1);
+
+	// At this point the Channel on nodes[0] has no record of any HTLCs but the latest
+	// broadcastable commitment does contain the HTLC (but only the ChannelMonitor knows this).
+	// Thus, the channel should not yet initiate closing_signed negotiation (but previously did).
+	assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new());
+
+	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
+	check_added_monitors(&nodes[0], 1);
+	assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new());
+
+	expect_channel_shutdown_state!(nodes[0], chan_id, ChannelShutdownState::ResolvingHTLCs);
+	assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new());
+	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
+	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+
+	let as_raa_closing_signed = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(as_raa_closing_signed.len(), 2);
+
+	if let MessageSendEvent::SendRevokeAndACK { msg, .. } = &as_raa_closing_signed[0] {
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &msg);
+		check_added_monitors(&nodes[1], 1);
+		if use_htlc {
+			expect_payment_failed!(nodes[1], payment_hash_opt.unwrap(), true);
+		}
+	} else { panic!("Unexpected message {:?}", as_raa_closing_signed[0]); }
+
+	if let MessageSendEvent::SendClosingSigned { msg, .. } = &as_raa_closing_signed[1] {
+		nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &msg);
+	} else { panic!("Unexpected message {:?}", as_raa_closing_signed[1]); }
+
+	let bs_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &bs_closing_signed);
+	let (_, as_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &as_2nd_closing_signed.unwrap());
+	let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+	assert!(node_1_none.is_none());
+
+	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+}
+
+#[test]
+fn outbound_update_no_early_closing_signed() {
+	do_outbound_update_no_early_closing_signed(true);
+	do_outbound_update_no_early_closing_signed(false);
+}
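
A note on running it: from a rust-lightning checkout, standard cargo test filtering should pick this up, e.g. `cargo test -p lightning outbound_update_no_early_closing_signed` (assuming the crate name `lightning`, as in this repository). The `bool` parameter lets one test body cover both the HTLC-removal and the fee-update variants of the same race.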
