Skip to content

Commit 7f9ee5a

Browse files
committed
f basically rewrite the whole thing, this time with tests
1 parent 6f0354a commit 7f9ee5a

File tree

1 file changed

+161
-30
lines changed

1 file changed

+161
-30
lines changed

lightning/src/ln/channelmanager.rs

+161-30
Original file line numberDiff line numberDiff line change
@@ -4251,6 +4251,37 @@ where
42514251
Ok(())
42524252
}
42534253

4254+
/// Gets the number of peers which match the given filter which have unfunded inbound channels.
4255+
///
4256+
/// The filter is called for each node and provided with the number of unfunded channels the
4257+
/// node has.
4258+
///
4259+
/// If the peer in question has funded channels, however, zero will be returned.
4260+
fn peers_with_unfunded_channels<Filter>(&self, mut filter: Filter) -> usize
4261+
where Filter: FnMut(&PublicKey, &PeerState<<SP::Target as SignerProvider>::Signer>, usize) -> bool {
4262+
let mut peers_with_unfunded_channels = 0;
4263+
let best_block_height = self.best_block.read().unwrap().height();
4264+
{
4265+
let peer_state_lock = self.per_peer_state.read().unwrap();
4266+
for (node_id, peer_mtx) in peer_state_lock.iter() {
4267+
let peer = peer_mtx.lock().unwrap();
4268+
let mut num_unfunded_channels = 0;
4269+
for (_, chan) in peer.channel_by_id.iter() {
4270+
if !chan.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 &&
4271+
chan.get_funding_tx_confirmations(best_block_height) == 0
4272+
{
4273+
num_unfunded_channels += 1;
4274+
}
4275+
}
4276+
if !filter(node_id, &*peer, num_unfunded_channels) { continue; }
4277+
if num_unfunded_channels == peer.channel_by_id.len() {
4278+
peers_with_unfunded_channels += 1;
4279+
}
4280+
}
4281+
}
4282+
return peers_with_unfunded_channels;
4283+
}
4284+
42544285
fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
42554286
if msg.chain_hash != self.genesis_hash {
42564287
return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
@@ -4270,25 +4301,37 @@ where
42704301
if let None = peer_state_mutex_opt {
42714302
return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone()))
42724303
}
4273-
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
4274-
let peer_state = &mut *peer_state_lock;
42754304

4276-
let mut unfunded_channels = 0;
4277-
let best_block_height = self.best_block.read().unwrap().height();
4278-
for (_, chan) in peer_state.channel_by_id.iter() {
4279-
if chan.get_funding_tx_confirmations(best_block_height) == 0 && chan.minimum_depth() != 0 {
4280-
unfunded_channels += 1;
4281-
}
4282-
}
4283-
if unfunded_channels >= MAX_UNFUNDED_CHANS_PER_PEER {
4305+
// Get the number of peers with channels, but without funded ones. We don't care too much
4306+
// about peers that never open a channel, so we filter by peers that have at least one
4307+
// channel, and then limit the number of those with unfunded channels.
4308+
let mut this_node_unfunded_channels = 0;
4309+
let peers_with_unfunded_channels = self.peers_with_unfunded_channels(
4310+
|node_id, node, unfunded_channels| {
4311+
if node_id == counterparty_node_id {
4312+
this_node_unfunded_channels = unfunded_channels;
4313+
}
4314+
!node.channel_by_id.is_empty()
4315+
});
4316+
if this_node_unfunded_channels >= MAX_UNFUNDED_CHANS_PER_PEER {
42844317
return Err(MsgHandleErrInternal::send_err_msg_no_close(
42854318
format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
42864319
msg.temporary_channel_id.clone()));
42874320
}
4321+
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
4322+
let peer_state = &mut *peer_state_lock;
4323+
// If this peer already has some channels, a new channel won't increase our number of peers
4324+
// with unfunded channels, so as long as we aren't over the maximum number of unfunded
4325+
// channels per-peer we can accept channels from a peer with existing ones.
4326+
if peer_state.channel_by_id.is_empty() && peers_with_unfunded_channels >= MAX_NO_CHANNEL_PEERS {
4327+
return Err(MsgHandleErrInternal::send_err_msg_no_close(
4328+
"Have too many peers with unfunded channels, not accepting new ones".to_owned(),
4329+
msg.temporary_channel_id.clone()));
4330+
}
42884331

42894332
let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
42904333
counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
4291-
&self.default_configuration, best_block_height, &self.logger, outbound_scid_alias)
4334+
&self.default_configuration, self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias)
42924335
{
42934336
Err(e) => {
42944337
self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
@@ -6241,26 +6284,19 @@ where
62416284

62426285
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
62436286

6244-
let mut peers_with_unfunded_channels = 0;
6245-
let mut peer_has_channels = false;
6246-
let best_block_height = self.best_block.read().unwrap().height();
6247-
{
6248-
let peer_state_lock = self.per_peer_state.read().unwrap();
6249-
for (node_id, peer) in peer_state_lock.iter() {
6250-
if node_id == counterparty_node_id {
6251-
peer_has_channels = true;
6252-
break;
6253-
}
6254-
let mut has_funded_channels = false;
6255-
for (_, chan) in peer.lock().unwrap().channel_by_id.iter() {
6256-
if chan.get_funding_tx_confirmations(best_block_height) != 0 || chan.minimum_depth() == 0 {
6257-
has_funded_channels = true;
6258-
}
6287+
// If we have too many peers connected which don't have funded channels, disconnect the
6288+
// peer immediately. If we have a bunch of unfunded channels taking up space in memory for
6289+
// disconnected peers, we still let new peers connect, but we'll reject new channels from
6290+
// them.
6291+
let mut this_peer_has_funded_channels = false;
6292+
let connected_peers_with_unfunded_channels = self.peers_with_unfunded_channels(
6293+
|node_id, node, num_unfunded_channels| {
6294+
if node_id == counterparty_node_id && num_unfunded_channels != node.channel_by_id.len() {
6295+
this_peer_has_funded_channels = true;
62596296
}
6260-
if !has_funded_channels { peers_with_unfunded_channels += 1 }
6261-
}
6262-
}
6263-
if !peer_has_channels && inbound && peers_with_unfunded_channels >= MAX_NO_CHANNEL_PEERS {
6297+
node.is_connected
6298+
});
6299+
if inbound && !this_peer_has_funded_channels && connected_peers_with_unfunded_channels >= MAX_NO_CHANNEL_PEERS {
62646300
return Err(());
62656301
}
62666302

@@ -8449,6 +8485,101 @@ mod tests {
84498485
nodes[1].node.handle_update_fee(&unkown_public_key, &update_fee_msg);
84508486
}
84518487

8488+
#[test]
fn test_connection_limiting() {
	// Test that we limit un-channel'd peers and un-funded channels properly.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Note that create_network connects the nodes together for us

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
	let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	// Open MAX_UNFUNDED_CHANS_PER_PEER inbound channels on nodes[1], advancing only the
	// first one all the way to funding_signed (still 0 confirmations — its funding tx is
	// mined later in this test to flip nodes[0] into a "protected" peer).
	let mut funding_tx = None;
	for (idx, _) in (0..super::MAX_UNFUNDED_CHANS_PER_PEER).enumerate() {
		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
		let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());

		if idx == 0 {
			nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
			let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
			funding_tx = Some(tx.clone());
			nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx).unwrap();
			let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

			nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
			check_added_monitors!(nodes[1], 1);
			let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

			nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
			check_added_monitors!(nodes[0], 1);
		}
		// Reuse the same open_channel message with a fresh temporary_channel_id for each
		// additional channel.
		open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
	}

	// A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
	open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
	assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
		open_channel_msg.temporary_channel_id);

	// Further, because all of our channels with nodes[0] are inbound, and none of them funded,
	// it doesn't count as a "protected" peer, counting towards the MAX_NO_CHANNEL_PEERS limit.
	let mut peer_pks = Vec::with_capacity(super::MAX_NO_CHANNEL_PEERS);
	for _ in 1..super::MAX_NO_CHANNEL_PEERS {
		let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
		peer_pks.push(random_pk);
		nodes[1].node.peer_connected(&random_pk, &msgs::Init {
			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
	}
	// One more connection would exceed MAX_NO_CHANNEL_PEERS, so it must be refused.
	let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
		&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
	nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
		features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();

	// Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
	// them if we have too many un-channel'd peers.
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
	// Disconnecting drops all but one of nodes[0]'s unfunded channels (the one that reached
	// funding_signed survives the disconnect).
	let chan_closed_events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(chan_closed_events.len(), super::MAX_UNFUNDED_CHANS_PER_PEER - 1);
	for ev in chan_closed_events {
		if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
	}
	nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
		features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();

	// Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
	// Even though we accept one more connection from new peers, we won't actually let them
	// open channels.
	assert_eq!(peer_pks.len(), super::MAX_NO_CHANNEL_PEERS - 1);
	for peer_pk in peer_pks {
		nodes[1].node.handle_open_channel(&peer_pk, &open_channel_msg);
		get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pk);
		open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
	}
	nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
	assert_eq!(get_err_msg!(nodes[1], last_random_pk).channel_id,
		open_channel_msg.temporary_channel_id);

	// If we fund the first channel, nodes[0] has a live on-chain channel with us, it is now
	// "protected" and can connect again.
	mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
	get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());

	// Further, because the first channel was funded, we can open another channel with
	// last_random_pk.
	nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
	get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
}
8582+
84528583
#[cfg(anchors)]
84538584
#[test]
84548585
fn test_anchors_zero_fee_htlc_tx_fallback() {

0 commit comments

Comments
 (0)