
Commit d5fbc04

Forward onion messages in PeerManager
Make sure channel messages are prioritized over onion messages (OMs), and that OMs are only written when there's sufficient space in the peer's buffer. We also take this opportunity to add a utility for deciding when to drop gossip.
1 parent 7ee6edc commit d5fbc04

1 file changed: lightning/src/ln/peer_handler.rs (+53, -11)
@@ -309,15 +309,23 @@ enum InitSyncTracker{
 /// forwarding gossip messages to peers altogether.
 const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2;
 
+/// The ratio between buffer sizes at which we stop sending initial sync messages vs when we pause
+/// forwarding onion messages to peers altogether.
+const OM_BUFFER_LIMIT_RATIO: usize = 2;
+
 /// When the outbound buffer has this many messages, we'll stop reading bytes from the peer until
 /// we have fewer than this many messages in the outbound buffer again.
-/// We also use this as the target number of outbound gossip messages to keep in the write buffer,
-/// refilled as we send bytes.
+/// We also use this as the target number of outbound gossip and onion messages to keep in the write
+/// buffer, refilled as we send bytes.
 const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 10;
 /// When the outbound buffer has this many messages, we'll simply skip relaying gossip messages to
 /// the peer.
 const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO;
 
+/// When the outbound buffer has this many messages, we won't poll for new onion messages for this
+/// peer.
+const OUTBOUND_BUFFER_LIMIT_PAUSE_OMS: usize = OUTBOUND_BUFFER_LIMIT_READ_PAUSE * OM_BUFFER_LIMIT_RATIO;
+
 /// If we've sent a ping, and are still awaiting a response, we may need to churn our way through
 /// the socket receive buffer before receiving the ping.
 ///
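
With the values shown in this hunk, both derived limits come out to 20 messages, so gossip broadcasts are dropped and onion-message polling is paused at the same buffer depth. A standalone sketch of the arithmetic (the constants are copied from the diff; the main wrapper exists only for illustration):

// Mirrors the constants added/used in this hunk, purely to show the arithmetic.
const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2;
const OM_BUFFER_LIMIT_RATIO: usize = 2;
const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 10;
const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize =
	OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO;
const OUTBOUND_BUFFER_LIMIT_PAUSE_OMS: usize =
	OUTBOUND_BUFFER_LIMIT_READ_PAUSE * OM_BUFFER_LIMIT_RATIO;

fn main() {
	// Gossip is dropped and onion-message polling is paused at the same depth: 20 messages.
	assert_eq!(OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP, 20);
	assert_eq!(OUTBOUND_BUFFER_LIMIT_PAUSE_OMS, 20);
}
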
@@ -393,6 +401,22 @@ impl Peer {
 			InitSyncTracker::NodesSyncing(pk) => pk < node_id,
 		}
 	}
+
+	/// Returns the number of onion messages we can fit in this peer's buffer.
+	fn onion_message_buffer_slots_available(&self) -> usize {
+		cmp::min(
+			OUTBOUND_BUFFER_LIMIT_PAUSE_OMS.saturating_sub(self.pending_outbound_buffer.len()),
+			(BUFFER_DRAIN_MSGS_PER_TICK * OM_BUFFER_LIMIT_RATIO).saturating_sub(self.msgs_sent_since_pong))
+	}
+
+	/// Returns whether this peer's buffer is full and we should drop gossip messages.
+	fn buffer_full_drop_gossip(&self) -> bool {
+		if self.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
+			|| self.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO {
+				return true
+		}
+		false
+	}
 }
 
 /// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g.
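
A minimal, self-contained sketch of how the new onion_message_buffer_slots_available formula behaves, using a stand-in struct instead of the real Peer. The value of BUFFER_DRAIN_MSGS_PER_TICK is defined elsewhere in peer_handler.rs and is not shown in this diff; 32 is assumed here purely for the example:

use std::cmp;

// Assumed for illustration; the real value lives elsewhere in peer_handler.rs.
const BUFFER_DRAIN_MSGS_PER_TICK: usize = 32;
const OM_BUFFER_LIMIT_RATIO: usize = 2;
const OUTBOUND_BUFFER_LIMIT_PAUSE_OMS: usize = 20;

// Stand-in for the fields of `Peer` that the helper reads.
struct PeerState { pending_outbound_msgs: usize, msgs_sent_since_pong: usize }

// Same formula as `onion_message_buffer_slots_available`: available room is capped both by
// the absolute buffer limit and by how many messages we've sent since the last pong.
fn om_slots_available(p: &PeerState) -> usize {
	cmp::min(
		OUTBOUND_BUFFER_LIMIT_PAUSE_OMS.saturating_sub(p.pending_outbound_msgs),
		(BUFFER_DRAIN_MSGS_PER_TICK * OM_BUFFER_LIMIT_RATIO).saturating_sub(p.msgs_sent_since_pong))
}

fn main() {
	// 5 messages queued, 10 sent since the last pong: limited by the buffer cap (20 - 5 = 15).
	assert_eq!(om_slots_available(&PeerState { pending_outbound_msgs: 5, msgs_sent_since_pong: 10 }), 15);
	// Buffer at the pause threshold: no onion messages are polled for this peer.
	assert_eq!(om_slots_available(&PeerState { pending_outbound_msgs: 20, msgs_sent_since_pong: 0 }), 0);
}
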
@@ -811,8 +835,12 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 	/// ready to call `[write_buffer_space_avail`] again if a write call generated here isn't
 	/// sufficient!
 	///
+	/// If any bytes are written, [`process_events`] should be called afterwards, since onion
+	/// messages are only polled for and queued to peers during [`process_events`].
+	///
 	/// [`send_data`]: SocketDescriptor::send_data
 	/// [`write_buffer_space_avail`]: PeerManager::write_buffer_space_avail
+	/// [`process_events`]: PeerManager::process_events
 	pub fn write_buffer_space_avail(&self, descriptor: &mut Descriptor) -> Result<(), PeerHandleError> {
 		let peers = self.peers.read().unwrap();
 		match peers.get(descriptor) {
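
To illustrate the call ordering the new documentation asks for, here is a rough caller-side sketch. Node, Sock, and the method bodies are hypothetical stand-ins (the real PeerManager is generic over several handler types, and its write_buffer_space_avail returns Result<(), PeerHandleError> as shown above); only the ordering is the point:

// Hypothetical stand-ins, for illustration only.
struct Sock;
struct Node;

impl Node {
	/// Stand-in for `PeerManager::write_buffer_space_avail`.
	fn write_buffer_space_avail(&self, _descriptor: &mut Sock) -> Result<(), ()> { Ok(()) }
	/// Stand-in for `PeerManager::process_events`.
	fn process_events(&self) {}
}

fn on_socket_writable(node: &Node, sock: &mut Sock) {
	// Tell the manager the socket drained, then run `process_events` so any buffer space
	// freed by the resulting writes can be refilled (e.g. with queued onion messages).
	// Calling `process_events` unconditionally is a simple, safe approximation of
	// "call it if any bytes were written".
	if node.write_buffer_space_avail(sock).is_ok() {
		node.process_events();
	}
}

fn main() {
	let node = Node;
	let mut sock = Sock;
	on_socket_writable(&node, &mut sock);
}
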
@@ -1335,9 +1363,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 					!peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
 					continue
 				}
-				if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
-					|| peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
-				{
+				if peer.buffer_full_drop_gossip() {
 					log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
 					continue;
 				}
@@ -1361,9 +1387,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 					!peer.should_forward_node_announcement(msg.contents.node_id) {
 					continue
 				}
-				if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
-					|| peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
-				{
+				if peer.buffer_full_drop_gossip() {
 					log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
 					continue;
 				}
@@ -1386,9 +1410,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 					!peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
 					continue
 				}
-				if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
-					|| peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
-				{
+				if peer.buffer_full_drop_gossip() {
 					log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
 					continue;
 				}
@@ -1412,6 +1434,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 	/// You don't have to call this function explicitly if you are using [`lightning-net-tokio`]
 	/// or one of the other clients provided in our language bindings.
 	///
+	/// Note that this method should be called again if any bytes are written.
+	///
 	/// Note that if there are any other calls to this function waiting on lock(s) this may return
 	/// without doing any work. All available events that need handling will be handled before the
 	/// other calls return.
@@ -1666,6 +1690,24 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
 			for (descriptor, peer_mutex) in peers.iter() {
 				self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer_mutex.lock().unwrap());
+
+				// Only see if we have room for onion messages after we've written all channel messages, to
+				// ensure channel messages take priority.
+				let (peer_node_id, om_buffer_slots_avail) = {
+					let peer = peer_mutex.lock().unwrap();
+					if let Some(peer_node_id) = peer.their_node_id {
+						(Some(peer_node_id.clone()), peer.onion_message_buffer_slots_available())
+					} else { (None, 0) }
+				};
+				if peer_node_id.is_some() && om_buffer_slots_avail > 0 {
+					for event in self.message_handler.onion_message_handler.next_onion_messages_for_peer(
+						peer_node_id.unwrap(), om_buffer_slots_avail)
+					{
+						if let MessageSendEvent::SendOnionMessage { ref node_id, ref msg } = event {
+							self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+						}
+					}
+				}
 			}
 		}
 		if !peers_to_disconnect.is_empty() {
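
For context on the other side of this call: the loop above asks the onion message handler for at most om_buffer_slots_avail messages per peer, which implies the handler keeps a per-peer queue and hands back only as many messages as the peer's buffer can currently take. A rough, self-contained sketch of that contract (the struct, its fields, and the u64/String stand-ins for node id and message are all hypothetical; only the drain-up-to-max_messages behaviour mirrors the call in this hunk):

use std::collections::{HashMap, VecDeque};

// Hypothetical per-peer queue illustrating the `next_onion_messages_for_peer` contract:
// hand back at most `max_messages` queued messages for the given peer, keeping the rest.
struct OnionMessageQueue {
	// `u64` stands in for the peer's public key; `String` stands in for an onion message.
	pending: HashMap<u64, VecDeque<String>>,
}

impl OnionMessageQueue {
	fn next_onion_messages_for_peer(&mut self, peer: u64, max_messages: usize) -> Vec<String> {
		let queue = match self.pending.get_mut(&peer) {
			Some(q) => q,
			None => return Vec::new(),
		};
		let count = max_messages.min(queue.len());
		queue.drain(..count).collect()
	}
}

fn main() {
	let mut q = OnionMessageQueue { pending: HashMap::new() };
	q.pending.insert(1, (0..5).map(|i| format!("om-{}", i)).collect());
	// Only two buffer slots are available, so only two messages are handed over;
	// the remaining three wait for a later `process_events` pass.
	assert_eq!(q.next_onion_messages_for_peer(1, 2).len(), 2);
	assert_eq!(q.pending[&1].len(), 3);
}
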
