Skip to content

Commit f97a450

Browse files
committed
Remove the peer_disconnected no_connection_possible flag
Long ago, we used the `no_connection_possible` flag to signal that a peer has some unknown feature set or that some other condition prevents us from ever connecting to the given peer. In that case we'd automatically force-close all channels with the given peer. This was somewhat surprising to users, so we removed the automatic force-close, leaving the flag serving no LDK-internal purpose. Distilling the concept of "can we connect to this peer again in the future" into a simple flag turns out to be rife with edge cases, so users actually relying on the flag to force-close channels would likely encounter surprising behavior. Thus, there's really not a lot of reason to keep the flag, especially given it's untested and likely to be broken in subtle ways anyway.
1 parent 0f07d5c commit f97a450

14 files changed

+137
-163
lines changed

lightning-invoice/src/utils.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -842,13 +842,13 @@ mod test {
842842

843843
// With only one sufficient-value peer connected we should only get its hint
844844
scid_aliases.remove(&chan_b.0.short_channel_id_alias.unwrap());
845-
nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
845+
nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
846846
match_invoice_routes(Some(1_000_000_000), &nodes[0], scid_aliases.clone());
847847

848848
// If we don't have any sufficient-value peers connected we should get all hints with
849849
// sufficient value, even though there is a connected insufficient-value peer.
850850
scid_aliases.insert(chan_b.0.short_channel_id_alias.unwrap());
851-
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
851+
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
852852
match_invoice_routes(Some(1_000_000_000), &nodes[0], scid_aliases);
853853
}
854854

lightning-net-tokio/src/lib.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -643,7 +643,7 @@ mod tests {
643643
fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &UpdateFee) {}
644644
fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &AnnouncementSignatures) {}
645645
fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {}
646-
fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
646+
fn peer_disconnected(&self, their_node_id: &PublicKey) {
647647
if *their_node_id == self.expected_pubkey {
648648
self.disconnected_flag.store(true, Ordering::SeqCst);
649649
self.pubkey_disconnected.clone().try_send(()).unwrap();

lightning/src/ln/chanmon_update_fail_tests.rs

+26-26
Original file line numberDiff line numberDiff line change
@@ -181,8 +181,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
181181
assert_eq!(nodes[0].node.list_channels().len(), 1);
182182

183183
if disconnect {
184-
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
185-
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
184+
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
185+
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
186186
reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
187187
}
188188

@@ -234,8 +234,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
234234
assert_eq!(nodes[0].node.list_channels().len(), 1);
235235

236236
if disconnect {
237-
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
238-
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
237+
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
238+
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
239239
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
240240
}
241241

@@ -337,8 +337,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
337337
};
338338

339339
if disconnect_count & !disconnect_flags > 0 {
340-
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
341-
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
340+
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
341+
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
342342
}
343343

344344
// Now fix monitor updating...
@@ -348,8 +348,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
348348
check_added_monitors!(nodes[0], 0);
349349

350350
macro_rules! disconnect_reconnect_peers { () => { {
351-
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
352-
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
351+
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
352+
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
353353

354354
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
355355
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
@@ -1110,8 +1110,8 @@ fn test_monitor_update_fail_reestablish() {
11101110

11111111
let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
11121112

1113-
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1114-
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1113+
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1114+
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
11151115

11161116
nodes[2].node.claim_funds(payment_preimage);
11171117
check_added_monitors!(nodes[2], 1);
@@ -1146,8 +1146,8 @@ fn test_monitor_update_fail_reestablish() {
11461146
nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
11471147
check_added_monitors!(nodes[1], 1);
11481148

1149-
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1150-
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1149+
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1150+
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
11511151

11521152
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
11531153
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
@@ -1315,8 +1315,8 @@ fn claim_while_disconnected_monitor_update_fail() {
13151315
// Forward a payment for B to claim
13161316
let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
13171317

1318-
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1319-
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1318+
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1319+
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
13201320

13211321
nodes[1].node.claim_funds(payment_preimage_1);
13221322
check_added_monitors!(nodes[1], 1);
@@ -1451,8 +1451,8 @@ fn monitor_failed_no_reestablish_response() {
14511451

14521452
// Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
14531453
// is still failing to update monitors.
1454-
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1455-
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1454+
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1455+
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
14561456

14571457
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
14581458
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
@@ -1873,8 +1873,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
18731873
}
18741874

18751875
// Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
1876-
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1877-
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1876+
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1877+
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
18781878
reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
18791879
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
18801880
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -2041,8 +2041,8 @@ fn test_pending_update_fee_ack_on_reconnect() {
20412041
let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
20422042
// bs_first_raa is not delivered until it is re-generated after reconnect
20432043

2044-
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
2045-
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
2044+
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2045+
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
20462046

20472047
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
20482048
let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
@@ -2169,8 +2169,8 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
21692169
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
21702170
}
21712171

2172-
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
2173-
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
2172+
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2173+
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
21742174

21752175
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
21762176
let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
@@ -2303,9 +2303,9 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
23032303
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
23042304
reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
23052305
} else {
2306-
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
2306+
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
23072307
}
2308-
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
2308+
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
23092309

23102310
// Now reconnect the two
23112311
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
@@ -2493,8 +2493,8 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
24932493
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
24942494
}
24952495

2496-
nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
2497-
nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
2496+
nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
2497+
nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
24982498

24992499
if second_fails {
25002500
reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));

lightning/src/ln/channelmanager.rs

+6-6
Original file line numberDiff line numberDiff line change
@@ -6270,13 +6270,13 @@ where
62706270
let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
62716271
}
62726272

6273-
fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
6273+
fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
62746274
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
62756275
let mut failed_channels = Vec::new();
62766276
let mut per_peer_state = self.per_peer_state.write().unwrap();
62776277
let remove_peer = {
6278-
log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
6279-
log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
6278+
log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates.",
6279+
log_pubkey!(counterparty_node_id));
62806280
if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
62816281
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
62826282
let peer_state = &mut *peer_state_lock;
@@ -6332,7 +6332,7 @@ where
63326332

63336333
fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> {
63346334
if !init_msg.features.supports_static_remote_key() {
6335-
log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting with no_connection_possible", log_pubkey!(counterparty_node_id));
6335+
log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
63366336
return Err(());
63376337
}
63386338

@@ -8210,8 +8210,8 @@ mod tests {
82108210

82118211
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
82128212

8213-
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
8214-
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
8213+
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
8214+
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
82158215

82168216
nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
82178217
check_closed_broadcast!(nodes[0], true);

0 commit comments

Comments
 (0)