#![cfg_attr(docsrs, feature(doc_auto_cfg))]

#[macro_use] extern crate lightning;
+ extern crate lightning_rapid_gossip_sync;

use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
@@ -28,6 +29,7 @@ use std::thread;
use std::thread::JoinHandle;
use std::time::{Duration, Instant};
use std::ops::Deref;
+ use lightning_rapid_gossip_sync::RapidGossipSync;

/// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
/// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
@@ -151,6 +153,13 @@ impl BackgroundProcessor {
	/// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
	/// [`NetworkGraph`]: lightning::routing::network_graph::NetworkGraph
	/// [`NetworkGraph::write`]: lightning::routing::network_graph::NetworkGraph#impl-Writeable
+ 	///
+ 	/// # Rapid Gossip Sync
+ 	///
+ 	/// If rapid gossip sync is meant to run at startup, pass a [`RapidGossipSync`] reference via
+ 	/// `rapid_gossip_sync` to indicate to [`BackgroundProcessor`] that it must not prune the
+ 	/// [`NetworkGraph`] instance until the [`RapidGossipSync`] instance completes its first sync.
	pub fn start<
		'a,
		Signer: 'static + Sign,
@@ -175,9 +184,11 @@ impl BackgroundProcessor {
		PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, L, UMH>> + Send + Sync,
		S: 'static + Deref<Target = SC> + Send + Sync,
		SC: WriteableScore<'a>,
+ 		RGS: 'static + Deref<Target = RapidGossipSync<G>> + Send
	>(
		persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
- 		net_graph_msg_handler: Option<NG>, peer_manager: PM, logger: L, scorer: Option<S>
+ 		net_graph_msg_handler: Option<NG>, peer_manager: PM, logger: L, scorer: Option<S>,
+ 		rapid_gossip_sync: Option<RGS>
	) -> Self
	where
		CA::Target: 'static + chain::Access,
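
For orientation, here is a minimal call-site sketch of the extended signature. The names `network_graph`, `persister`, `event_handler`, `chain_monitor`, `channel_manager`, `net_graph_msg_handler`, `peer_manager`, `logger`, and `scorer` are placeholders for whatever the application already constructs; only the final argument is new.

```rust
// Hypothetical startup code; everything except the last argument is unchanged.
use std::sync::Arc;
use lightning_rapid_gossip_sync::RapidGossipSync;

// Wrap the shared network graph so snapshots can be applied to it later, and so the
// background processor can ask whether the first sync has completed before pruning.
let rapid_sync = Arc::new(RapidGossipSync::new(Arc::clone(&network_graph)));

let bg_processor = BackgroundProcessor::start(
	persister, event_handler, chain_monitor, channel_manager,
	net_graph_msg_handler, peer_manager, logger, Some(scorer),
	// Pass None here instead to keep the previous pruning behaviour.
	Some(Arc::clone(&rapid_sync)),
);
```
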
@@ -273,21 +284,36 @@ impl BackgroundProcessor {
				// continuing our normal cadence.
				if last_prune_call.elapsed().as_secs() > if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER } {
					if let Some(ref handler) = net_graph_msg_handler {
- 						log_trace!(logger, "Pruning network graph of stale entries");
- 						handler.network_graph().remove_stale_channels();
- 						if let Err(e) = persister.persist_graph(handler.network_graph()) {
- 							log_error!(logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
- 						}
- 					}
- 					if let Some(ref scorer) = scorer {
- 						log_trace!(logger, "Persisting scorer");
- 						if let Err(e) = persister.persist_scorer(&scorer) {
- 							log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
+ 						log_trace!(logger, "Assessing prunability of network graph");
+ 
+ 						// The network graph must not be pruned while rapid sync completion is pending
+ 						let should_prune = if let Some(rapid_sync) = rapid_gossip_sync.as_ref() {
+ 							rapid_sync.is_initial_sync_complete()
+ 						} else {
+ 							true
+ 						};
+ 
+ 						if should_prune {
+ 							log_trace!(logger, "Pruning network graph of stale entries");
+ 							handler.network_graph().remove_stale_channels();
+ 
+ 							if let Err(e) = persister.persist_graph(handler.network_graph()) {
+ 								log_error!(logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
+ 							}
+ 
+ 							last_prune_call = Instant::now();
+ 							have_pruned = true;
+ 						} else {
+ 							log_trace!(logger, "Not pruning network graph due to pending gossip sync");
						}
					}
+ 				}

- 					last_prune_call = Instant::now();
- 					have_pruned = true;
+ 				if let Some(ref scorer) = scorer {
+ 					log_trace!(logger, "Persisting scorer");
+ 					if let Err(e) = persister.persist_scorer(&scorer) {
+ 						log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
+ 					}
				}
			}

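The gating above only defers pruning; the application is expected to drive the sync and thereby release it. A rough sketch of the caller's side follows, using the same `update_network_graph` and `is_initial_sync_complete` calls exercised by the new test further down. It assumes `rapid_sync` is the instance handed to `BackgroundProcessor::start` and `snapshot` holds rapid-gossip-sync bytes the application has already fetched (the transport is not shown).

```rust
// Until a snapshot is applied, each prune attempt logs
// "Not pruning network graph due to pending gossip sync" and leaves the graph alone.
assert!(!rapid_sync.is_initial_sync_complete());

// Applying the snapshot marks the initial sync as complete...
rapid_sync.update_network_graph(&snapshot[..]).expect("invalid rapid gossip snapshot");
assert!(rapid_sync.is_initial_sync_complete());

// ...so the background thread's next pass will prune stale channels via
// remove_stale_channels() and persist the pruned graph through the Persister.
```
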
@@ -370,7 +396,7 @@ mod tests {
	use lightning::chain::transaction::OutPoint;
	use lightning::get_event_msg;
	use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, ChannelManager, SimpleArcChannelManager};
- 	use lightning::ln::features::InitFeatures;
+ 	use lightning::ln::features::{ChannelFeatures, InitFeatures};
	use lightning::ln::msgs::{ChannelMessageHandler, Init};
	use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
	use lightning::routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
@@ -385,8 +411,10 @@ mod tests {
	use std::fs;
	use std::path::PathBuf;
	use std::sync::{Arc, Mutex};
+ 	use std::sync::mpsc::SyncSender;
	use std::time::Duration;
	use lightning::routing::scoring::{FixedPenaltyScorer};
+ 	use lightning_rapid_gossip_sync::RapidGossipSync;
	use super::{BackgroundProcessor, FRESHNESS_TIMER};

	const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
@@ -428,6 +456,7 @@ mod tests {

	struct Persister {
		graph_error: Option<(std::io::ErrorKind, &'static str)>,
+ 		graph_persistence_notifier: Option<SyncSender<()>>,
		manager_error: Option<(std::io::ErrorKind, &'static str)>,
		scorer_error: Option<(std::io::ErrorKind, &'static str)>,
		filesystem_persister: FilesystemPersister,
@@ -436,13 +465,17 @@ mod tests {
	impl Persister {
		fn new(data_dir: String) -> Self {
			let filesystem_persister = FilesystemPersister::new(data_dir.clone());
- 			Self { graph_error: None, manager_error: None, scorer_error: None, filesystem_persister }
+ 			Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
		}

		fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
			Self { graph_error: Some((error, message)), ..self }
		}

+ 		fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
+ 			Self { graph_persistence_notifier: Some(sender), ..self }
+ 		}
+ 
		fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
			Self { manager_error: Some((error, message)), ..self }
		}
@@ -461,6 +494,10 @@ mod tests {
			}

			if key == "network_graph" {
+ 				if let Some(sender) = &self.graph_persistence_notifier {
+ 					sender.send(()).unwrap();
+ 				};
+ 
				if let Some((error, message)) = self.graph_error {
					return Err(std::io::Error::new(error, message))
				}
@@ -602,7 +639,8 @@ mod tests {
		let data_dir = nodes[0].persister.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir));
		let event_handler = |_: &_| {};
- 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+ 		let rapid_gossip_sync: Option<Arc<RapidGossipSync<Arc<NetworkGraph>>>> = None;
+ 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), rapid_gossip_sync);

		macro_rules! check_persisted_data {
			($node: expr, $filepath: expr) => {
@@ -667,7 +705,8 @@ mod tests {
		let data_dir = nodes[0].persister.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir));
		let event_handler = |_: &_| {};
- 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+ 		let rapid_gossip_sync: Option<Arc<RapidGossipSync<Arc<NetworkGraph>>>> = None;
+ 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), rapid_gossip_sync);
		loop {
			let log_entries = nodes[0].logger.lines.lock().unwrap();
			let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string();
@@ -690,7 +729,8 @@ mod tests {
		let data_dir = nodes[0].persister.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
		let event_handler = |_: &_| {};
- 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+ 		let rapid_gossip_sync: Option<Arc<RapidGossipSync<Arc<NetworkGraph>>>> = None;
+ 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), rapid_gossip_sync);
		match bg_processor.join() {
			Ok(_) => panic!("Expected error persisting manager"),
			Err(e) => {
@@ -707,7 +747,8 @@ mod tests {
		let data_dir = nodes[0].persister.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
		let event_handler = |_: &_| {};
- 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+ 		let rapid_gossip_sync: Option<Arc<RapidGossipSync<Arc<NetworkGraph>>>> = None;
+ 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), rapid_gossip_sync);

		match bg_processor.stop() {
			Ok(_) => panic!("Expected error persisting network graph"),
@@ -725,7 +766,8 @@ mod tests {
		let data_dir = nodes[0].persister.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
		let event_handler = |_: &_| {};
- 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+ 		let rapid_gossip_sync: Option<Arc<RapidGossipSync<Arc<NetworkGraph>>>> = None;
+ 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), rapid_gossip_sync);

		match bg_processor.stop() {
			Ok(_) => panic!("Expected error persisting scorer"),
@@ -748,7 +790,8 @@ mod tests {
		let event_handler = move |event: &Event| {
			sender.send(handle_funding_generation_ready!(event, channel_value)).unwrap();
		};
- 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+ 		let rapid_gossip_sync: Option<Arc<RapidGossipSync<Arc<NetworkGraph>>>> = None;
+ 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), rapid_gossip_sync);

		// Open a channel and check that the FundingGenerationReady event was handled.
		begin_open_channel!(nodes[0], nodes[1], channel_value);
@@ -773,7 +816,8 @@ mod tests {
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
		let event_handler = move |event: &Event| sender.send(event.clone()).unwrap();
		let persister = Arc::new(Persister::new(data_dir));
- 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+ 		let rapid_gossip_sync: Option<Arc<RapidGossipSync<Arc<NetworkGraph>>>> = None;
+ 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), rapid_gossip_sync);

		// Force close the channel and check that the SpendableOutputs event was handled.
		nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
@@ -791,6 +835,69 @@ mod tests {
		assert!(bg_processor.stop().is_ok());
	}

+ 	#[test]
+ 	fn test_not_pruning_network_graph_until_graph_sync_completion() {
+ 		let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
+ 		let data_dir = nodes[0].persister.get_data_dir();
+ 		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
+ 		let persister = Arc::new(Persister::new(data_dir.clone()).with_graph_persistence_notifier(sender));
+ 		let network_graph = nodes[0].network_graph.clone();
+ 		let rapid_sync = Arc::new(RapidGossipSync::new(network_graph.clone()));
+ 		let features = ChannelFeatures::empty();
+ 		network_graph.add_channel_from_partial_announcement(42, 53, features, nodes[0].node.get_our_node_id(), nodes[1].node.get_our_node_id())
+ 			.expect("Failed to update channel from partial announcement");
+ 		let original_graph_description = network_graph.to_string();
+ 		assert!(original_graph_description.contains("42: features: 0000, node_one:"));
+ 		assert_eq!(network_graph.read_only().channels().len(), 1);
+ 
+ 		let event_handler = |_: &_| {};
+ 		let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), Some(rapid_sync.clone()));
+ 
+ 		loop {
+ 			let log_entries = nodes[0].logger.lines.lock().unwrap();
+ 			let expected_log_a = "Assessing prunability of network graph".to_string();
+ 			let expected_log_b = "Not pruning network graph due to pending gossip sync".to_string();
+ 			if log_entries.get(&("lightning_background_processor".to_string(), expected_log_a)).is_some() &&
+ 				log_entries.get(&("lightning_background_processor".to_string(), expected_log_b)).is_some() {
+ 				break
+ 			}
+ 		}
+ 
+ 		let initialization_input = vec![
+ 			76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+ 			79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
+ 			0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
+ 			187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
+ 			157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
+ 			88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
+ 			204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
+ 			181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
+ 			110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
+ 			76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
+ 			226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
+ 			0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
+ 			0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
+ 		];
+ 		rapid_sync.update_network_graph(&initialization_input[..]).unwrap();
+ 
+ 		// this should have added two channels
+ 		assert_eq!(network_graph.read_only().channels().len(), 3);
+ 
+ 		let _ = receiver
+ 			.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 2))
+ 			.expect("Network graph not pruned within deadline");
+ 		let current_graph_description = network_graph.to_string();
+ 
+ 		background_processor.stop().unwrap();
+ 
+ 		assert_ne!(current_graph_description, original_graph_description);
+ 		assert!(!current_graph_description.contains("node_one:"));
+ 		assert!(!current_graph_description.contains("node_two:"));
+ 
+ 		// all channels should now be pruned
+ 		assert_eq!(network_graph.read_only().channels().len(), 0);
+ 	}
+ 
	#[test]
	fn test_invoice_payer() {
		let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
@@ -803,7 +910,8 @@ mod tests {
		let router = DefaultRouter::new(Arc::clone(&nodes[0].network_graph), Arc::clone(&nodes[0].logger), random_seed_bytes);
		let invoice_payer = Arc::new(InvoicePayer::new(Arc::clone(&nodes[0].node), router, Arc::clone(&nodes[0].scorer), Arc::clone(&nodes[0].logger), |_: &_| {}, Retry::Attempts(2)));
		let event_handler = Arc::clone(&invoice_payer);
- 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+ 		let rapid_gossip_sync: Option<Arc<RapidGossipSync<Arc<NetworkGraph>>>> = None;
+ 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), rapid_gossip_sync);
		assert!(bg_processor.stop().is_ok());
	}
}