@@ -166,6 +166,12 @@ impl BackgroundProcessor {
	/// [`FilesystemPersister::persist_network_graph`]: lightning_persister::FilesystemPersister::persist_network_graph
	/// [`NetworkGraph`]: lightning::routing::network_graph::NetworkGraph
	/// [`NetworkGraph::write`]: lightning::routing::network_graph::NetworkGraph#impl-Writeable
+ 	///
+ 	/// # Graph Sync
+ 	///
+ 	/// If a rapid graph sync is meant to run at startup, set `await_graph_sync_completion` to `true`
+ 	/// to indicate to [`BackgroundProcessor`] not to prune the [`NetworkGraph`] instance until
+ 	/// [`graph_sync_complete`] is called.
	pub fn start<
		Signer: 'static + Sign,
		CA: 'static + Deref + Send + Sync,
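As context for the new `await_graph_sync_completion` flag documented above: the change gates pruning on an atomic flag that the background loop loads with `Ordering::Acquire` and that `graph_sync_complete` later clears. A minimal, std-only sketch of that gating pattern (the `GraphPruneGate` type and its method names are illustrative, not part of this PR):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

/// Illustrative stand-in for the pruning gate: `true` while a rapid graph
/// sync is still pending, `false` once it has completed.
struct GraphPruneGate {
    awaiting_sync: AtomicBool,
}

impl GraphPruneGate {
    /// `await_sync` plays the role of the `await_graph_sync_completion` argument.
    fn new(await_sync: bool) -> Self {
        Self { awaiting_sync: AtomicBool::new(await_sync) }
    }

    /// What the background loop would ask before pruning.
    fn may_prune(&self) -> bool {
        !self.awaiting_sync.load(Ordering::Acquire)
    }

    /// Plays the role of `graph_sync_complete`: after this, pruning is allowed.
    fn sync_complete(&self) {
        self.awaiting_sync.store(false, Ordering::Release);
    }
}

fn main() {
    let gate = GraphPruneGate::new(true);
    assert!(!gate.may_prune()); // prune deferred while the sync is pending
    gate.sync_complete();
    assert!(gate.may_prune()); // pruning allowed once the sync has completed
}
```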
@@ -267,23 +273,26 @@ impl BackgroundProcessor {
					last_ping_call = Instant::now();
				}

- 				// The network graph must not be pruned while graph sync completion is pending
- 				let is_currently_awaiting_graph_sync = is_awaiting_graph_sync_completion_clone.load(Ordering::Acquire);
- 				if !is_currently_awaiting_graph_sync {
- 					// Note that we want to run a graph prune once not long after startup before
- 					// falling back to our usual hourly prunes. This avoids short-lived clients never
- 					// pruning their network graph. We run once 60 seconds after startup before
- 					// continuing our normal cadence.
- 					if last_prune_call.elapsed().as_secs() > if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER } {
- 						if let Some(ref handler) = net_graph_msg_handler {
- 							log_trace!(logger, "Pruning network graph of stale entries");
- 							handler.network_graph().remove_stale_channels();
- 							if let Err(e) = persister.persist_graph(handler.network_graph()) {
- 								log_error!(logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
- 							}
- 							last_prune_call = Instant::now();
- 							have_pruned = true;
+ 				// Note that we want to run a graph prune once not long after startup before
+ 				// falling back to our usual hourly prunes. This avoids short-lived clients never
+ 				// pruning their network graph. We run once 60 seconds after startup before
+ 				// continuing our normal cadence.
+ 				if last_prune_call.elapsed().as_secs() > if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER } {
+ 					if let Some(ref handler) = net_graph_msg_handler {
+ 						log_trace!(logger, "Assessing prunability of network graph");
+ 						// The network graph must not be pruned while graph sync completion is pending
+ 						let is_currently_awaiting_graph_sync = is_awaiting_graph_sync_completion_clone.load(Ordering::Acquire);
+ 						if is_currently_awaiting_graph_sync {
+ 							log_trace!(logger, "Not pruning network graph due to pending graph sync");
+ 							continue;
+ 						}
+ 						log_trace!(logger, "Pruning network graph of stale entries");
+ 						handler.network_graph().remove_stale_channels();
+ 						if let Err(e) = persister.persist_graph(handler.network_graph()) {
+ 							log_error!(logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
						}
+ 						last_prune_call = Instant::now();
+ 						have_pruned = true;
					}
				}
			}
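The reworked block above defers the awaiting-sync check until the prune timer has actually fired, and `continue`s without resetting `last_prune_call`, so the prune retries on a later iteration once the sync completes. A simplified, std-only sketch of that cadence (the 60-second first prune and hourly interval come from the comments above; the function shape, shortened timers, and polling interval are illustrative):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::time::{Duration, Instant};

/// Simplified prune cadence: the first prune fires `first_timer` after start,
/// later prunes every `prune_timer`, and pruning is skipped (without resetting
/// the timer) while `awaiting_sync` is still set.
fn prune_loop(
    awaiting_sync: &AtomicBool,
    first_timer: Duration,
    prune_timer: Duration,
    iterations: u32,
    mut prune: impl FnMut(),
) {
    let mut last_prune_call = Instant::now();
    let mut have_pruned = false;

    for _ in 0..iterations {
        thread::sleep(Duration::from_millis(10)); // illustrative polling interval

        let timer = if have_pruned { prune_timer } else { first_timer };
        if last_prune_call.elapsed() > timer {
            // Mirror the diff: while a graph sync is pending, skip this
            // iteration so pruning happens promptly once the sync completes.
            if awaiting_sync.load(Ordering::Acquire) {
                continue;
            }
            prune();
            last_prune_call = Instant::now();
            have_pruned = true;
        }
    }
}

fn main() {
    // In the real processor these would be FIRST_NETWORK_PRUNE_TIMER (~60s)
    // and NETWORK_PRUNE_TIMER (hourly); shortened here so the demo finishes.
    let awaiting_sync = AtomicBool::new(true);
    let mut prune_count = 0;

    prune_loop(&awaiting_sync, Duration::from_millis(20), Duration::from_millis(50), 5, || prune_count += 1);
    assert_eq!(prune_count, 0); // sync still pending: nothing pruned

    awaiting_sync.store(false, Ordering::Release);
    prune_loop(&awaiting_sync, Duration::from_millis(20), Duration::from_millis(50), 5, || prune_count += 1);
    assert!(prune_count >= 1); // prune runs once the flag is cleared
}
```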
@@ -330,7 +339,8 @@ impl BackgroundProcessor {
		self.stop_and_join_thread()
	}

- 	/// Signal to `BackgroundProcessor` that graph sync has completed
+ 	/// Signal to [`BackgroundProcessor`] that the initial rapid graph sync has completed.
+ 	///
	/// This function can only be called usefully once, so there is an implicit understanding
	/// that running graph sync multiple times after startup is API misuse.
	pub fn graph_sync_complete(&self) {
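Because the completion signal is a one-way latch, only the first call does anything useful. A hypothetical caller-side guard (not part of this PR) that makes the at-most-once expectation explicit could look like:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

/// Hypothetical caller-side guard: remembers whether completion was already
/// signalled so the underlying completion call is forwarded at most once.
struct OneShotSignal {
    fired: AtomicBool,
}

impl OneShotSignal {
    fn new() -> Self {
        Self { fired: AtomicBool::new(false) }
    }

    /// Returns `true` only for the first call; later calls are no-ops and can
    /// be logged or treated as API misuse by the caller.
    fn fire(&self) -> bool {
        !self.fired.swap(true, Ordering::AcqRel)
    }
}

fn main() {
    let signal = OneShotSignal::new();
    assert!(signal.fire());  // first call: forward to graph_sync_complete()
    assert!(!signal.fire()); // repeat call: misuse per the doc comment above
}
```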
@@ -448,7 +458,6 @@ mod tests {
		fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
			Self { manager_error: Some((error, message)), ..self }
		}
-
	}

	impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> super::Persister<Signer, M, T, K, F, L> for Persister where
@@ -467,7 +476,7 @@ mod tests {

		fn persist_graph(&self, network_graph: &NetworkGraph) -> Result<(), std::io::Error> {
			if let Some(sender) = &self.graph_sender {
- 				sender.send(network_graph.clone());
+ 				sender.send(network_graph.clone()).unwrap();
			};

			match self.graph_error {
@@ -770,29 +779,35 @@ mod tests {

	#[test]
	fn test_not_pruning_network_graph_until_graph_sync_completion() {
- 		let mut nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
- 		let channel_value = 100000;
+ 		let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
		let data_dir = nodes[0].persister.get_data_dir();
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
		let persister = Persister::new(data_dir.clone()).with_graph_sender(sender);
		let network_graph = nodes[0].network_graph.clone();
		let features = ChannelFeatures::empty();
- 		network_graph.update_channel_from_partial_announcement(42, 53, features, nodes[0].node.get_our_node_id(), nodes[1].node.get_our_node_id());
+ 		network_graph.update_channel_from_partial_announcement(42, 53, features, nodes[0].node.get_our_node_id(), nodes[1].node.get_our_node_id())
+ 			.expect("Failed to update channel from partial announcement");
		let original_graph_description = network_graph.to_string();
		assert!(original_graph_description.contains("42: features: 0000, node_one:"));

		let event_handler = |_: &_| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), true, nodes[0].logger.clone());

- 		let reception_result = receiver
- 			.recv_timeout(Duration::from_secs(EVENT_DEADLINE));
- 		assert!(reception_result.is_err());
+ 		loop {
+ 			let log_entries = nodes[0].logger.lines.lock().unwrap();
+ 			let expected_log_a = "Assessing prunability of network graph".to_string();
+ 			let expected_log_b = "Not pruning network graph due to pending graph sync".to_string();
+ 			if log_entries.get(&("lightning_background_processor".to_string(), expected_log_a)).is_some() &&
+ 				log_entries.get(&("lightning_background_processor".to_string(), expected_log_b)).is_some() {
+ 				break
+ 			}
+ 		}

		bg_processor.graph_sync_complete();

- 		let graph = receiver
- 			.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
- 			.expect("SpendableOutputs not handled within deadline");
+ 		let _ = receiver
+ 			.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 2))
+ 			.expect("Network graph not pruned within deadline");
		let current_graph_description = network_graph.to_string();
		assert_ne!(current_graph_description, original_graph_description);
		assert_eq!(current_graph_description.len(), 31);
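The test above observes the background thread indirectly: a bounded `sync_channel` inside the test persister reports when the graph is actually persisted, and polling the test logger confirms the prune was deferred first. A small, std-only sketch of the channel half of that pattern (the `TestPersister` type here is illustrative, not the crate's test helper):

```rust
use std::sync::mpsc::{sync_channel, SyncSender};
use std::time::Duration;

/// Illustrative persister double: forwards whatever it is asked to persist
/// over a channel so the test can block until persistence actually happens.
struct TestPersister {
    graph_sender: SyncSender<String>,
}

impl TestPersister {
    fn persist_graph(&self, graph_snapshot: &str) {
        // .unwrap() mirrors the change in the diff: a send failure means the
        // test's receiver is gone, which should fail the test loudly.
        self.graph_sender.send(graph_snapshot.to_string()).unwrap();
    }
}

fn main() {
    let (sender, receiver) = sync_channel(1);
    let persister = TestPersister { graph_sender: sender };

    // Simulate the background processor pruning and then persisting the graph.
    std::thread::spawn(move || persister.persist_graph("pruned graph"));

    // The test blocks with a deadline instead of sleeping a fixed amount.
    let persisted = receiver
        .recv_timeout(Duration::from_secs(5))
        .expect("Network graph not persisted within deadline");
    assert_eq!(persisted, "pruned graph");
}
```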