
Commit 0e2ee89

Implement some rate limiting for onion messages.
In this commit, we add business logic for checking whether a peer's outbound buffer has room for onion messages and, if so, pulling them from an implementer of a new trait, OnionMessageProvider. We make sure channel messages are prioritized over OMs. The onion_message module remains private until further rate limiting is added.
1 parent 356ec9b · commit 0e2ee89
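For context, the trait this commit wires into the PeerManager is not itself shown in the diff below. Based on the next_onion_messages_for_peer call sites it adds, here is a minimal sketch of what OnionMessageProvider plausibly looks like; the import paths are assumptions and any supertraits are omitted:

use bitcoin::secp256k1::PublicKey;
use lightning::ln::msgs;

/// Sketch only: something the PeerManager can poll for onion messages queued
/// for a given peer, bounded by the buffer space the caller has available.
pub trait OnionMessageProvider {
	/// Polls for onion messages queued for `peer_node_id`; `max_messages` is
	/// the number of buffer slots the caller currently has free.
	fn next_onion_messages_for_peer(&self, peer_node_id: PublicKey, max_messages: usize) -> Vec<msgs::OnionMessage>;
}

OnionMessenger (see the second file below) is the implementer the commit message refers to.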

2 files changed: +47 -0 lines changed

lightning/src/ln/peer_handler.rs (+37 lines)
@@ -306,6 +306,10 @@ enum InitSyncTracker{
 /// forwarding gossip messages to peers altogether.
 const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2;
 
+/// The ratio between buffer sizes at which we stop sending initial sync messages vs when we pause
+/// forwarding onion messages to peers altogether.
+const OM_BUFFER_LIMIT_RATIO: usize = 2;
+
 /// When the outbound buffer has this many messages, we'll stop reading bytes from the peer until
 /// we have fewer than this many messages in the outbound buffer again.
 /// We also use this as the target number of outbound gossip messages to keep in the write buffer,
@@ -315,6 +319,10 @@ const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 10;
 /// the peer.
 const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO;
 
+/// When the outbound buffer has this many messages, we won't poll for new onion messages for this
+/// peer.
+const OUTBOUND_BUFFER_LIMIT_PAUSE_OMS: usize = 8;
+
 /// If we've sent a ping, and are still awaiting a response, we may need to churn our way through
 /// the socket receive buffer before receiving the ping.
 ///
@@ -417,6 +425,13 @@ impl Peer {
 		}
 		true
 	}
+
+	/// Returns the number of onion messages we can fit in this peer's buffer.
+	fn onion_message_buffer_slots_available(&self) -> usize {
+		cmp::min(
+			OUTBOUND_BUFFER_LIMIT_PAUSE_OMS.saturating_sub(self.pending_outbound_buffer.len()),
+			(BUFFER_DRAIN_MSGS_PER_TICK * OM_BUFFER_LIMIT_RATIO).saturating_sub(self.msgs_sent_since_pong))
+	}
 }
 
 /// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g.
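A self-contained illustration of the arithmetic in onion_message_buffer_slots_available above: the slot count is the tighter of two limits, the remaining space under the OM buffer cap and the remaining per-ping-period message budget. BUFFER_DRAIN_MSGS_PER_TICK is not part of this diff, so the value 32 below is only an assumption made to get concrete numbers.

// Standalone sketch of the slot computation; BUFFER_DRAIN_MSGS_PER_TICK is an
// assumed value here, used only for illustration.
const OUTBOUND_BUFFER_LIMIT_PAUSE_OMS: usize = 8;
const OM_BUFFER_LIMIT_RATIO: usize = 2;
const BUFFER_DRAIN_MSGS_PER_TICK: usize = 32; // assumed, not taken from this diff

fn om_slots_available(pending_outbound_buffer_len: usize, msgs_sent_since_pong: usize) -> usize {
	std::cmp::min(
		OUTBOUND_BUFFER_LIMIT_PAUSE_OMS.saturating_sub(pending_outbound_buffer_len),
		(BUFFER_DRAIN_MSGS_PER_TICK * OM_BUFFER_LIMIT_RATIO).saturating_sub(msgs_sent_since_pong))
}

fn main() {
	// Plenty of room: empty buffer, few messages since the last pong.
	assert_eq!(om_slots_available(0, 0), 8);
	// The outbound buffer is the binding constraint: 8 - 6 = 2 slots.
	assert_eq!(om_slots_available(6, 0), 2);
	// The per-tick budget binds instead: (32 * 2) - 62 = 2 slots.
	assert_eq!(om_slots_available(0, 62), 2);
	// Either limit hitting zero pauses onion-message polling for this peer.
	assert_eq!(om_slots_available(8, 0), 0);
}

Once either limit reaches zero, no onion messages are pulled for that peer until buffer space drains (or, presumably, the since-pong counter resets).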
@@ -824,8 +839,11 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 	/// ready to call `[write_buffer_space_avail`] again if a write call generated here isn't
 	/// sufficient!
 	///
+	/// If any bytes are written, [`process_events`] should be called afterwards.
+	///
 	/// [`send_data`]: SocketDescriptor::send_data
 	/// [`write_buffer_space_avail`]: PeerManager::write_buffer_space_avail
+	/// [`process_events`]: PeerManager::process_events
 	pub fn write_buffer_space_avail(&self, descriptor: &mut Descriptor) -> Result<(), PeerHandleError> {
 		let peers = self.peers.read().unwrap();
 		match peers.get(descriptor) {
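The added doc line spells out a calling convention rather than new behavior: once write_buffer_space_avail has flushed bytes, process_events should run so the freed buffer slots can be refilled, which after this commit includes pulling onion messages. A rough, self-contained sketch of that ordering follows; FakePeerManager is a stand-in for illustration, not a rust-lightning type.

// Hedged sketch of the calling convention only. The real calls are
// PeerManager::write_buffer_space_avail and PeerManager::process_events.
struct FakePeerManager {
	wrote_bytes: bool,
}

impl FakePeerManager {
	// Stand-in for write_buffer_space_avail(descriptor): flush what we can.
	fn write_buffer_space_avail(&mut self) -> Result<(), ()> {
		self.wrote_bytes = true; // pretend some bytes went out on the socket
		Ok(())
	}
	// Stand-in for process_events(): refills freed buffer space, which after
	// this commit includes polling the onion message handler.
	fn process_events(&self) {}
}

// Called by the application's event loop when the OS reports the socket is
// writable again.
fn on_socket_writable(pm: &mut FakePeerManager) -> Result<(), ()> {
	pm.write_buffer_space_avail()?;
	if pm.wrote_bytes {
		// Per the new documentation: if any bytes were written, follow up
		// with process_events so more messages (including OMs) can be queued.
		pm.process_events();
	}
	Ok(())
}

fn main() {
	let mut pm = FakePeerManager { wrote_bytes: false };
	on_socket_writable(&mut pm).unwrap();
}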
@@ -1669,6 +1687,25 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
 			for (descriptor, peer_mutex) in peers.iter() {
 				self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer_mutex.lock().unwrap());
+
+				// Only see if we have room for onion messages after we've written all channel messages, to
+				// ensure the latter take priority.
+				loop {
+					let (peer_node_id, om_buffer_slots_avail) = {
+						let peer = peer_mutex.lock().unwrap();
+						if let Some(peer_node_id) = peer.their_node_id {
+							(peer_node_id, peer.onion_message_buffer_slots_available())
+						} else { break; }
+					};
+					if om_buffer_slots_avail == 0 { break; }
+					let onion_msgs = self.message_handler.onion_message_handler.next_onion_messages_for_peer(
+						peer_node_id, om_buffer_slots_avail);
+					if onion_msgs.len() == 0 { break; }
+					for msg in onion_msgs {
+						self.enqueue_message(&mut *get_peer_for_forwarding!(&peer_node_id), &msg);
+					}
+					self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer_mutex.lock().unwrap());
+				}
 			}
 		}
 		if !peers_to_disconnect.is_empty() {
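The comment in the loop above states the priority rule: queued channel messages are flushed first, and onion messages only ever fill whatever buffer slots are left, re-checking after each write since flushing to the socket can free more space. A toy, self-contained sketch of that bounded pull follows; the names are illustrative, not rust-lightning APIs.

use std::collections::VecDeque;

// Channel messages are already queued; onion messages only fill the slots
// remaining under the cap.
const BUFFER_LIMIT_PAUSE_OMS: usize = 8;

fn pull_onion_messages<F>(outbound: &mut VecDeque<String>, mut next_oms: F)
	where F: FnMut(usize) -> Vec<String>
{
	// The real code loops, interleaving do_attempt_write_data(), because each
	// flush can free more slots; a single pass is enough to show the bounding.
	let slots = BUFFER_LIMIT_PAUSE_OMS.saturating_sub(outbound.len());
	if slots == 0 { return; }
	for om in next_oms(slots) {
		outbound.push_back(om);
	}
}

fn main() {
	let mut outbound: VecDeque<String> = VecDeque::new();
	outbound.push_back("channel_reestablish".to_string()); // channel message queued first
	let mut pending_oms: Vec<String> = (0..20).map(|i| format!("onion_msg_{}", i)).collect();
	pull_onion_messages(&mut outbound, |max| {
		let take = max.min(pending_oms.len());
		pending_oms.drain(..take).collect()
	});
	// Only 7 of the 20 onion messages were pulled: the cap of 8 minus the one
	// channel message already in the buffer.
	assert_eq!(outbound.len(), 8);
	assert_eq!(pending_oms.len(), 13);
}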

lightning/src/onion_message/messenger.rs (+10 lines)
@@ -292,6 +292,16 @@ impl<Signer: Sign, K: Deref, L: Deref> OnionMessageProvider for OnionMessenger<S
 	L::Target: Logger,
 {
 	fn next_onion_messages_for_peer(&self, peer_node_id: PublicKey, max_messages: usize) -> Vec<msgs::OnionMessage> {
+		let mut pending_msgs = self.pending_messages.lock().unwrap();
+		if let Some(msgs) = pending_msgs.get_mut(&peer_node_id) {
+			if max_messages >= msgs.len() {
+				let mut peer_pending_msgs = Vec::new();
+				mem::swap(msgs, &mut peer_pending_msgs);
+				return peer_pending_msgs
+			} else {
+				return msgs.split_off(max_messages)
+			}
+		}
 		Vec::new()
 	}
 }
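For reference on the two branches in next_onion_messages_for_peer above: mem::swap hands back the peer's entire queue (leaving an empty Vec in the map) when everything fits under max_messages, while Vec::split_off(max_messages) returns the elements from index max_messages onward and keeps the first max_messages queued. A small standalone demonstration of both std behaviors:

use std::mem;

fn main() {
	// mem::swap: take the whole queue, leaving an empty Vec behind.
	let mut queue = vec![1, 2, 3];
	let mut taken = Vec::new();
	mem::swap(&mut queue, &mut taken);
	assert_eq!(taken, vec![1, 2, 3]);
	assert!(queue.is_empty());

	// Vec::split_off(n): returns elements [n..], keeps [0..n] in the original.
	let mut queue = vec![10, 20, 30, 40, 50];
	let tail = queue.split_off(2);
	assert_eq!(queue, vec![10, 20]);      // the first two stay queued
	assert_eq!(tail, vec![30, 40, 50]);   // the rest is returned to the caller
}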
