
Commit d509e52

Indicate ongoing rapid sync to background processor.
Create a wrapper struct for rapid gossip sync that can be passed to BackgroundProcessor's start method, so that it only starts pruning the network graph once rapid gossip sync completes.
Parent: 0b77008
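For orientation, a minimal caller-side sketch of the new flow (not part of this commit; the persister, event_handler, chain_monitor, channel_manager, net_graph_msg_handler, peer_manager, logger, scorer and network_graph handles, as well as snapshot_bytes, are assumed to already exist in the caller's scope):

use std::sync::Arc;
use lightning_background_processor::BackgroundProcessor;
use lightning_rapid_gossip_sync::RapidGossipSync;

// Wrap the shared network graph so the background processor can ask the
// wrapper whether the initial rapid sync has completed before pruning.
let rapid_sync = Arc::new(RapidGossipSync::new(Arc::clone(&network_graph)));

// The wrapper is passed as the new trailing `rapid_gossip_sync` argument;
// passing `None` keeps the previous prune-on-timer behaviour.
let bg_processor = BackgroundProcessor::start(
    persister, event_handler, chain_monitor, channel_manager,
    net_graph_msg_handler, peer_manager, logger, Some(scorer),
    Some(Arc::clone(&rapid_sync)),
);

// Apply a gossip snapshot; once this first sync succeeds, the background
// processor resumes pruning stale channels on its normal timer.
rapid_sync.update_network_graph(&snapshot_bytes[..]).unwrap();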

6 files changed (+410, -248 lines)


fuzz/src/process_network_graph.rs (4 additions, 2 deletions)

@@ -1,11 +1,13 @@
-// Import that needs to be added manually
+// Imports that need to be added manually
+use lightning_rapid_gossip_sync::RapidGossipSync;
use utils::test_logger;

/// Actual fuzz test, method signature and name are fixed
fn do_test(data: &[u8]) {
    let block_hash = bitcoin::BlockHash::default();
    let network_graph = lightning::routing::network_graph::NetworkGraph::new(block_hash);
-   lightning_rapid_gossip_sync::processing::update_network_graph(&network_graph, data);
+   let rapid_sync = RapidGossipSync::new(&network_graph);
+   let _ = rapid_sync.update_network_graph(data);
}

/// Method that needs to be added manually, {name}_test

lightning-background-processor/Cargo.toml (1 addition, 0 deletions)

@@ -16,6 +16,7 @@ rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
bitcoin = "0.28.1"
lightning = { version = "0.0.106", path = "../lightning", features = ["std"] }
+lightning-rapid-gossip-sync = { version = "0.0.106", path = "../lightning-rapid-gossip-sync" }

[dev-dependencies]
lightning = { version = "0.0.106", path = "../lightning", features = ["_test_utils"] }

lightning-background-processor/src/lib.rs (132 additions, 19 deletions)
@@ -9,6 +9,7 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]

#[macro_use] extern crate lightning;
+extern crate lightning_rapid_gossip_sync;

use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
@@ -22,6 +23,7 @@ use lightning::routing::scoring::WriteableScore;
use lightning::util::events::{Event, EventHandler, EventsProvider};
use lightning::util::logger::Logger;
use lightning::util::persist::Persister;
+use lightning_rapid_gossip_sync::RapidGossipSync;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
@@ -142,6 +144,12 @@ impl BackgroundProcessor {
    /// functionality implemented by other handlers.
    /// * [`NetGraphMsgHandler`] if given will update the [`NetworkGraph`] based on payment failures.
    ///
+   /// # Rapid Gossip Sync
+   ///
+   /// If rapid gossip sync is meant to run at startup, pass an optional [`RapidGossipSync`]
+   /// to `rapid_gossip_sync` to indicate to [`BackgroundProcessor`] not to prune the
+   /// [`NetworkGraph`] instance until the [`RapidGossipSync`] instance completes its first sync.
+   ///
    /// [top-level documentation]: BackgroundProcessor
    /// [`join`]: Self::join
    /// [`stop`]: Self::stop
@@ -175,9 +183,11 @@
        PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, L, UMH>> + Send + Sync,
        S: 'static + Deref<Target = SC> + Send + Sync,
        SC: WriteableScore<'a>,
+       RGS: 'static + Deref<Target = RapidGossipSync<G>> + Send
    >(
        persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
-       net_graph_msg_handler: Option<NG>, peer_manager: PM, logger: L, scorer: Option<S>
+       net_graph_msg_handler: Option<NG>, peer_manager: PM, logger: L, scorer: Option<S>,
+       rapid_gossip_sync: Option<RGS>
    ) -> Self
    where
        CA::Target: 'static + chain::Access,
@@ -273,10 +283,26 @@
            // continuing our normal cadence.
            if last_prune_call.elapsed().as_secs() > if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER } {
                if let Some(ref handler) = net_graph_msg_handler {
-                   log_trace!(logger, "Pruning network graph of stale entries");
-                   handler.network_graph().remove_stale_channels();
-                   if let Err(e) = persister.persist_graph(handler.network_graph()) {
-                       log_error!(logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
+                   log_trace!(logger, "Assessing prunability of network graph");
+
+                   // The network graph must not be pruned while rapid sync completion is pending
+                   let should_prune = match rapid_gossip_sync.as_ref() {
+                       Some(rapid_sync) => rapid_sync.is_initial_sync_complete(),
+                       None => true
+                   };
+
+                   if should_prune {
+                       log_trace!(logger, "Pruning network graph of stale entries");
+                       handler.network_graph().remove_stale_channels();
+
+                       if let Err(e) = persister.persist_graph(handler.network_graph()) {
+                           log_error!(logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
+                       }
+
+                       last_prune_call = Instant::now();
+                       have_pruned = true;
+                   } else {
+                       log_trace!(logger, "Not pruning network graph due to pending gossip sync");
                    }
                }
                if let Some(ref scorer) = scorer {
@@ -285,9 +311,6 @@
                        log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
                    }
                }
-
-               last_prune_call = Instant::now();
-               have_pruned = true;
            }
        }
@@ -370,7 +393,7 @@ mod tests {
    use lightning::chain::transaction::OutPoint;
    use lightning::get_event_msg;
    use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, ChannelManager, SimpleArcChannelManager};
-   use lightning::ln::features::InitFeatures;
+   use lightning::ln::features::{ChannelFeatures, InitFeatures};
    use lightning::ln::msgs::{ChannelMessageHandler, Init};
    use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
    use lightning::routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
@@ -385,8 +408,10 @@ mod tests {
    use std::fs;
    use std::path::PathBuf;
    use std::sync::{Arc, Mutex};
+   use std::sync::mpsc::SyncSender;
    use std::time::Duration;
    use lightning::routing::scoring::{FixedPenaltyScorer};
+   use lightning_rapid_gossip_sync::RapidGossipSync;
    use super::{BackgroundProcessor, FRESHNESS_TIMER};

    const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
@@ -414,6 +439,7 @@
        logger: Arc<test_utils::TestLogger>,
        best_block: BestBlock,
        scorer: Arc<Mutex<FixedPenaltyScorer>>,
+       rapid_gossip_sync: Option<Arc<RapidGossipSync<Arc<NetworkGraph>>>>
    }

    impl Drop for Node {
@@ -428,6 +454,7 @@

    struct Persister {
        graph_error: Option<(std::io::ErrorKind, &'static str)>,
+       graph_persistence_notifier: Option<SyncSender<()>>,
        manager_error: Option<(std::io::ErrorKind, &'static str)>,
        scorer_error: Option<(std::io::ErrorKind, &'static str)>,
        filesystem_persister: FilesystemPersister,
@@ -436,13 +463,17 @@
    impl Persister {
        fn new(data_dir: String) -> Self {
            let filesystem_persister = FilesystemPersister::new(data_dir.clone());
-           Self { graph_error: None, manager_error: None, scorer_error: None, filesystem_persister }
+           Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
        }

        fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
            Self { graph_error: Some((error, message)), ..self }
        }

+       fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
+           Self { graph_persistence_notifier: Some(sender), ..self }
+       }
+
        fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
            Self { manager_error: Some((error, message)), ..self }
        }
@@ -461,6 +492,10 @@
            }

            if key == "network_graph" {
+               if let Some(sender) = &self.graph_persistence_notifier {
+                   sender.send(()).unwrap();
+               };
+
                if let Some((error, message)) = self.graph_error {
                    return Err(std::io::Error::new(error, message))
                }
@@ -504,7 +539,8 @@
            let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new() )};
            let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(Recipient::Node).unwrap(), &seed, logger.clone(), IgnoringMessageHandler{}));
            let scorer = Arc::new(Mutex::new(test_utils::TestScorer::with_penalty(0)));
-           let node = Node { node: manager, net_graph_msg_handler, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
+           let rapid_gossip_sync = None;
+           let node = Node { node: manager, net_graph_msg_handler, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer, rapid_gossip_sync };
            nodes.push(node);
        }

@@ -602,7 +638,7 @@
        let data_dir = nodes[0].persister.get_data_dir();
        let persister = Arc::new(Persister::new(data_dir));
        let event_handler = |_: &_| {};
-       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());

        macro_rules! check_persisted_data {
            ($node: expr, $filepath: expr) => {
@@ -667,7 +703,7 @@
        let data_dir = nodes[0].persister.get_data_dir();
        let persister = Arc::new(Persister::new(data_dir));
        let event_handler = |_: &_| {};
-       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
        loop {
            let log_entries = nodes[0].logger.lines.lock().unwrap();
            let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string();
@@ -690,7 +726,7 @@
        let data_dir = nodes[0].persister.get_data_dir();
        let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
        let event_handler = |_: &_| {};
-       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
        match bg_processor.join() {
            Ok(_) => panic!("Expected error persisting manager"),
            Err(e) => {
@@ -707,7 +743,7 @@
        let data_dir = nodes[0].persister.get_data_dir();
        let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
        let event_handler = |_: &_| {};
-       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());

        match bg_processor.stop() {
            Ok(_) => panic!("Expected error persisting network graph"),
@@ -725,7 +761,7 @@
        let data_dir = nodes[0].persister.get_data_dir();
        let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
        let event_handler = |_: &_| {};
-       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());

        match bg_processor.stop() {
            Ok(_) => panic!("Expected error persisting scorer"),
@@ -748,7 +784,7 @@
        let event_handler = move |event: &Event| {
            sender.send(handle_funding_generation_ready!(event, channel_value)).unwrap();
        };
-       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());

        // Open a channel and check that the FundingGenerationReady event was handled.
        begin_open_channel!(nodes[0], nodes[1], channel_value);
@@ -773,7 +809,7 @@
        let (sender, receiver) = std::sync::mpsc::sync_channel(1);
        let event_handler = move |event: &Event| sender.send(event.clone()).unwrap();
        let persister = Arc::new(Persister::new(data_dir));
-       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());

        // Force close the channel and check that the SpendableOutputs event was handled.
        nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
@@ -791,6 +827,83 @@
        assert!(bg_processor.stop().is_ok());
    }

+   #[test]
+   fn test_scorer_persistence() {
+       let nodes = create_nodes(2, "test_scorer_persistence".to_string());
+       let data_dir = nodes[0].persister.get_data_dir();
+       let persister = Arc::new(Persister::new(data_dir));
+       let event_handler = |_: &_| {};
+       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
+
+       loop {
+           let log_entries = nodes[0].logger.lines.lock().unwrap();
+           let expected_log = "Persisting scorer".to_string();
+           if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
+               break
+           }
+       }
+
+       assert!(bg_processor.stop().is_ok());
+   }
+
+   #[test]
+   fn test_not_pruning_network_graph_until_graph_sync_completion() {
+       let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
+       let data_dir = nodes[0].persister.get_data_dir();
+       let (sender, receiver) = std::sync::mpsc::sync_channel(1);
+       let persister = Arc::new(Persister::new(data_dir.clone()).with_graph_persistence_notifier(sender));
+       let network_graph = nodes[0].network_graph.clone();
+       let rapid_sync = Arc::new(RapidGossipSync::new(network_graph.clone()));
+       let features = ChannelFeatures::empty();
+       network_graph.add_channel_from_partial_announcement(42, 53, features, nodes[0].node.get_our_node_id(), nodes[1].node.get_our_node_id())
+           .expect("Failed to update channel from partial announcement");
+       let original_graph_description = network_graph.to_string();
+       assert!(original_graph_description.contains("42: features: 0000, node_one:"));
+       assert_eq!(network_graph.read_only().channels().len(), 1);
+
+       let event_handler = |_: &_| {};
+       let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), Some(rapid_sync.clone()));
+
+       loop {
+           let log_entries = nodes[0].logger.lines.lock().unwrap();
+           let expected_log_a = "Assessing prunability of network graph".to_string();
+           let expected_log_b = "Not pruning network graph due to pending gossip sync".to_string();
+           if log_entries.get(&("lightning_background_processor".to_string(), expected_log_a)).is_some() &&
+               log_entries.get(&("lightning_background_processor".to_string(), expected_log_b)).is_some() {
+               break
+           }
+       }
+
+       let initialization_input = vec![
+           76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+           79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
+           0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
+           187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
+           157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
+           88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
+           204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
+           181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
+           110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
+           76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
+           226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
+           0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
+           0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
+       ];
+       rapid_sync.update_network_graph(&initialization_input[..]).unwrap();
+
+       // this should have added two channels
+       assert_eq!(network_graph.read_only().channels().len(), 3);
+
+       let _ = receiver
+           .recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5))
+           .expect("Network graph not pruned within deadline");
+
+       background_processor.stop().unwrap();
+
+       // all channels should now be pruned
+       assert_eq!(network_graph.read_only().channels().len(), 0);
+   }
+
    #[test]
    fn test_invoice_payer() {
        let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
@@ -803,7 +916,7 @@
        let router = DefaultRouter::new(Arc::clone(&nodes[0].network_graph), Arc::clone(&nodes[0].logger), random_seed_bytes);
        let invoice_payer = Arc::new(InvoicePayer::new(Arc::clone(&nodes[0].node), router, Arc::clone(&nodes[0].scorer), Arc::clone(&nodes[0].logger), |_: &_| {}, Retry::Attempts(2)));
        let event_handler = Arc::clone(&invoice_payer);
-       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+       let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
        assert!(bg_processor.stop().is_ok());
    }
}
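As the tests above suggest, a caller that skips rapid gossip sync still has to name a concrete type for the new `RGS` generic when passing `None`. A small sketch, reusing the hypothetical handles from the earlier example and mirroring the tests' `Option<Arc<RapidGossipSync<Arc<NetworkGraph>>>>` field:

use std::sync::Arc;
use lightning::routing::network_graph::NetworkGraph;
use lightning_background_processor::BackgroundProcessor;
use lightning_rapid_gossip_sync::RapidGossipSync;

// An annotated `None` lets type inference resolve the otherwise-unused
// `RGS` type parameter of `start`.
let no_rapid_sync: Option<Arc<RapidGossipSync<Arc<NetworkGraph>>>> = None;
let bg_processor = BackgroundProcessor::start(
    persister, event_handler, chain_monitor, channel_manager,
    net_graph_msg_handler, peer_manager, logger, Some(scorer),
    no_rapid_sync,
);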
