Commit 5a0d6d0

Implement some rate limiting for onion messages.
In this commit, we add business logic for checking whether a peer's outbound buffer has room for onion messages and, if so, pulling them from an implementer of a new trait, OnionMessageProvider. Channel messages are prioritized over onion messages. The onion_message module remains private until further rate limiting is added.
1 parent 356ec9b commit 5a0d6d0
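
For orientation, here is a rough sketch of the provider contract this commit relies on. Only the next_onion_messages_for_peer signature is taken from the messenger.rs diff below and the trait name from the commit message; the stand-in types and doc comment are illustrative, not the actual definitions.

// Sketch of the assumed OnionMessageProvider contract; PublicKey and OnionMessage
// are stand-ins for the real key and msgs::OnionMessage types so the snippet
// stands alone.
type PublicKey = [u8; 33];
struct OnionMessage;

trait OnionMessageProvider {
	/// Returns up to `max_messages` onion messages currently pending for `peer_node_id`.
	fn next_onion_messages_for_peer(&self, peer_node_id: PublicKey, max_messages: usize) -> Vec<OnionMessage>;
}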

2 files changed: +54 -1 lines changed


lightning/src/ln/peer_handler.rs (+44 -1)
@@ -306,6 +306,10 @@ enum InitSyncTracker{
 /// forwarding gossip messages to peers altogether.
 const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2;
 
+/// The ratio between buffer sizes at which we stop sending initial sync messages vs when we pause
+/// forwarding onion messages to peers altogether.
+const OM_BUFFER_LIMIT_RATIO: usize = 2;
+
 /// When the outbound buffer has this many messages, we'll stop reading bytes from the peer until
 /// we have fewer than this many messages in the outbound buffer again.
 /// We also use this as the target number of outbound gossip messages to keep in the write buffer,
@@ -315,6 +319,10 @@ const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 10;
 /// the peer.
 const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO;
 
+/// When the outbound buffer has this many messages, we won't poll for new onion messages for this
+/// peer.
+const OUTBOUND_BUFFER_LIMIT_PAUSE_OMS: usize = 16;
+
 /// If we've sent a ping, and are still awaiting a response, we may need to churn our way through
 /// the socket receive buffer before receiving the ping.
 ///
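
As a quick sanity check on how these thresholds relate, the constant values below are all taken from this diff and its hunk headers: onion-message polling pauses slightly before gossip forwarding is dropped.

// Relationship between the buffer thresholds visible in this diff.
const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 10;
const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2;
const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize =
	OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO;
const OUTBOUND_BUFFER_LIMIT_PAUSE_OMS: usize = 16;

fn main() {
	// Gossip is dropped at 10 * 2 = 20 queued messages; onion-message polling
	// pauses earlier, at 16.
	assert_eq!(OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP, 20);
	assert!(OUTBOUND_BUFFER_LIMIT_PAUSE_OMS < OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP);
}
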
@@ -417,6 +425,14 @@ impl Peer {
 		}
 		true
 	}
+
+	/// Returns the number of onion messages we can fit in this peer's buffer.
+	fn onion_message_buffer_slots_available(&self) -> usize {
+		cmp::min(
+			OUTBOUND_BUFFER_LIMIT_PAUSE_OMS.saturating_sub(self.pending_outbound_buffer.len()),
+			(BUFFER_DRAIN_MSGS_PER_TICK * OM_BUFFER_LIMIT_RATIO).saturating_sub(self.msgs_sent_since_pong))
+	}
+
 }
 
 /// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g.
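
A standalone sketch of the slot calculation above, with hypothetical inputs. The two limits mirror the diff; the BUFFER_DRAIN_MSGS_PER_TICK value is an assumption for illustration, since that constant is not shown in this diff.

use std::cmp;

const OUTBOUND_BUFFER_LIMIT_PAUSE_OMS: usize = 16; // from this diff
const OM_BUFFER_LIMIT_RATIO: usize = 2;            // from this diff
const BUFFER_DRAIN_MSGS_PER_TICK: usize = 32;      // assumed value, not shown in this diff

// Mirrors onion_message_buffer_slots_available: available slots are capped both by
// how full the outbound buffer already is and by how much has been sent since the
// last pong.
fn slots_available(pending_outbound_msgs: usize, msgs_sent_since_pong: usize) -> usize {
	cmp::min(
		OUTBOUND_BUFFER_LIMIT_PAUSE_OMS.saturating_sub(pending_outbound_msgs),
		(BUFFER_DRAIN_MSGS_PER_TICK * OM_BUFFER_LIMIT_RATIO).saturating_sub(msgs_sent_since_pong))
}

fn main() {
	// 10 messages queued, 60 sent since the last pong: min(16 - 10, 64 - 60) = 4 slots.
	assert_eq!(slots_available(10, 60), 4);
	// A full outbound buffer pauses onion-message polling entirely.
	assert_eq!(slots_available(16, 0), 0);
}
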
@@ -824,8 +840,12 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 	/// ready to call [`write_buffer_space_avail`] again if a write call generated here isn't
 	/// sufficient!
 	///
+	/// If any bytes are written, [`process_events`] should be called afterwards.
+	// TODO: why?
+	///
 	/// [`send_data`]: SocketDescriptor::send_data
 	/// [`write_buffer_space_avail`]: PeerManager::write_buffer_space_avail
+	/// [`process_events`]: PeerManager::process_events
 	pub fn write_buffer_space_avail(&self, descriptor: &mut Descriptor) -> Result<(), PeerHandleError> {
 		let peers = self.peers.read().unwrap();
 		match peers.get(descriptor) {
@@ -1668,7 +1688,30 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 			}
 
 			for (descriptor, peer_mutex) in peers.iter() {
-				self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer_mutex.lock().unwrap());
+				let peer_node_id = {
+					let mut peer = peer_mutex.lock().unwrap();
+					self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer);
+					peer.their_node_id
+				};
+
+				// Only see if we have room for onion messages after we've written all channel messages, to
+				// ensure the latter take priority.
+				if let Some(peer_node_id) = peer_node_id {
+					loop {
+						let om_buffer_slots_avail = {
+							let peer = peer_mutex.lock().unwrap();
+							peer.onion_message_buffer_slots_available()
+						};
+						if om_buffer_slots_avail == 0 { break; }
+						let onion_msgs = self.message_handler.onion_message_handler.next_onion_messages_for_peer(
+							peer_node_id, om_buffer_slots_avail);
+						if onion_msgs.len() == 0 { break; }
+						for msg in onion_msgs {
+							self.enqueue_message(&mut *get_peer_for_forwarding!(&peer_node_id), &msg);
+						}
+						self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer_mutex.lock().unwrap());
+					}
+				}
 			}
 		}
 		if !peers_to_disconnect.is_empty() {
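
A self-contained miniature of the polling loop above: channel messages are flushed first, then onion messages are pulled in batches sized to the remaining slots until the buffer is full or the provider runs dry. MockProvider, the u8 stand-in messages, and the fixed slot cap are illustrative, not part of the diff.

struct MockProvider { pending: Vec<u8> }

impl MockProvider {
	// Same shape of contract as next_onion_messages_for_peer: hand back at most
	// `max_messages` pending messages.
	fn next_onion_messages(&mut self, max_messages: usize) -> Vec<u8> {
		let take = max_messages.min(self.pending.len());
		self.pending.drain(..take).collect()
	}
}

fn main() {
	const BUFFER_CAP: usize = 16; // stand-in for the slot calculation above
	let mut outbound_buffer: Vec<u8> = vec![0; 12]; // channel messages already queued
	let mut provider = MockProvider { pending: vec![1; 10] };

	loop {
		let slots = BUFFER_CAP.saturating_sub(outbound_buffer.len());
		if slots == 0 { break; }
		let onion_msgs = provider.next_onion_messages(slots);
		if onion_msgs.is_empty() { break; }
		outbound_buffer.extend(onion_msgs);
		// In the real code, do_attempt_write_data flushes the buffer to the socket here.
	}

	// Only 4 of the 10 pending onion messages fit behind the queued channel messages.
	assert_eq!(outbound_buffer.len(), 16);
	assert_eq!(provider.pending.len(), 6);
}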

lightning/src/onion_message/messenger.rs (+10)
@@ -292,6 +292,16 @@ impl<Signer: Sign, K: Deref, L: Deref> OnionMessageProvider for OnionMessenger<S
 	L::Target: Logger,
 {
 	fn next_onion_messages_for_peer(&self, peer_node_id: PublicKey, max_messages: usize) -> Vec<msgs::OnionMessage> {
+		let mut pending_msgs = self.pending_messages.lock().unwrap();
+		if let Some(msgs) = pending_msgs.get_mut(&peer_node_id) {
+			if max_messages >= msgs.len() {
+				let mut peer_pending_msgs = Vec::new();
+				mem::swap(msgs, &mut peer_pending_msgs);
+				return peer_pending_msgs
+			} else {
+				return msgs.split_off(max_messages)
+			}
+		}
 		Vec::new()
 	}
 }
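
A standalone illustration of the take-or-split logic above, using plain integers in place of onion messages; the take_up_to name exists only for this sketch.

use std::mem;

// Mirrors next_onion_messages_for_peer: if everything pending fits, hand over the
// whole queue via mem::swap; otherwise keep the first `max_messages` entries queued
// and return the remainder via split_off, as in the diff.
fn take_up_to(msgs: &mut Vec<u32>, max_messages: usize) -> Vec<u32> {
	if max_messages >= msgs.len() {
		let mut taken = Vec::new();
		mem::swap(msgs, &mut taken);
		taken
	} else {
		msgs.split_off(max_messages)
	}
}

fn main() {
	let mut queue = vec![1, 2, 3, 4, 5];
	let taken = take_up_to(&mut queue, 2);
	assert_eq!(queue, vec![1, 2]);    // the first two messages stay queued
	assert_eq!(taken, vec![3, 4, 5]); // the rest is returned
}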
