Skip to content

Commit 312f765

Browse files
committed
Indicate ongoing rapid sync to background processor.
Create a wrapper struct for rapid gossip sync that can be passed to BackgroundProcessor's start method, allowing it to begin pruning the network graph only once the rapid gossip sync has completed.
1 parent 0b77008 commit 312f765

File tree

6 files changed

+420
-248
lines changed

6 files changed

+420
-248
lines changed

fuzz/src/process_network_graph.rs

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,13 @@
1-
// Import that needs to be added manually
1+
// Imports that need to be added manually
2+
use lightning_rapid_gossip_sync::RapidGossipSync;
23
use utils::test_logger;
34

45
/// Actual fuzz test, method signature and name are fixed
56
fn do_test(data: &[u8]) {
67
let block_hash = bitcoin::BlockHash::default();
78
let network_graph = lightning::routing::network_graph::NetworkGraph::new(block_hash);
8-
lightning_rapid_gossip_sync::processing::update_network_graph(&network_graph, data);
9+
let rapid_sync = RapidGossipSync::new(&network_graph);
10+
let _ = rapid_sync.update_network_graph(data);
911
}
1012

1113
/// Method that needs to be added manually, {name}_test

lightning-background-processor/Cargo.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ rustdoc-args = ["--cfg", "docsrs"]
1616
[dependencies]
1717
bitcoin = "0.28.1"
1818
lightning = { version = "0.0.106", path = "../lightning", features = ["std"] }
19+
lightning-rapid-gossip-sync = { version = "0.0.106", path = "../lightning-rapid-gossip-sync" }
1920

2021
[dev-dependencies]
2122
lightning = { version = "0.0.106", path = "../lightning", features = ["_test_utils"] }

lightning-background-processor/src/lib.rs

Lines changed: 134 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
1010

1111
#[macro_use] extern crate lightning;
12+
extern crate lightning_rapid_gossip_sync;
1213

1314
use lightning::chain;
1415
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
@@ -22,6 +23,7 @@ use lightning::routing::scoring::WriteableScore;
2223
use lightning::util::events::{Event, EventHandler, EventsProvider};
2324
use lightning::util::logger::Logger;
2425
use lightning::util::persist::Persister;
26+
use lightning_rapid_gossip_sync::RapidGossipSync;
2527
use std::sync::Arc;
2628
use std::sync::atomic::{AtomicBool, Ordering};
2729
use std::thread;
@@ -142,6 +144,12 @@ impl BackgroundProcessor {
142144
/// functionality implemented by other handlers.
143145
/// * [`NetGraphMsgHandler`] if given will update the [`NetworkGraph`] based on payment failures.
144146
///
147+
/// # Rapid Gossip Sync
148+
///
149+
/// If rapid gossip sync is meant to run at startup, pass an optional [`RapidGossipSync`]
150+
/// to `rapid_gossip_sync` to indicate to [`BackgroundProcessor`] not to prune the
151+
/// [`NetworkGraph`] instance until the [`RapidGossipSync`] instance completes its first sync.
152+
///
145153
/// [top-level documentation]: BackgroundProcessor
146154
/// [`join`]: Self::join
147155
/// [`stop`]: Self::stop
@@ -175,9 +183,11 @@ impl BackgroundProcessor {
175183
PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, L, UMH>> + Send + Sync,
176184
S: 'static + Deref<Target = SC> + Send + Sync,
177185
SC: WriteableScore<'a>,
186+
RGS: 'static + Deref<Target = RapidGossipSync<G>> + Send
178187
>(
179188
persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
180-
net_graph_msg_handler: Option<NG>, peer_manager: PM, logger: L, scorer: Option<S>
189+
net_graph_msg_handler: Option<NG>, peer_manager: PM, logger: L, scorer: Option<S>,
190+
rapid_gossip_sync: Option<RGS>
181191
) -> Self
182192
where
183193
CA::Target: 'static + chain::Access,
@@ -272,22 +282,37 @@ impl BackgroundProcessor {
272282
// pruning their network graph. We run once 60 seconds after startup before
273283
// continuing our normal cadence.
274284
if last_prune_call.elapsed().as_secs() > if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER } {
275-
if let Some(ref handler) = net_graph_msg_handler {
276-
log_trace!(logger, "Pruning network graph of stale entries");
277-
handler.network_graph().remove_stale_channels();
278-
if let Err(e) = persister.persist_graph(handler.network_graph()) {
285+
// The network graph must not be pruned while rapid sync completion is pending
286+
log_trace!(logger, "Assessing prunability of network graph");
287+
let graph_to_prune = match rapid_gossip_sync.as_ref() {
288+
Some(rapid_sync) => {
289+
if rapid_sync.is_initial_sync_complete() {
290+
Some(rapid_sync.network_graph())
291+
} else {
292+
None
293+
}
294+
},
295+
None => net_graph_msg_handler.as_ref().map(|handler| handler.network_graph())
296+
};
297+
298+
if let Some(network_graph_reference) = graph_to_prune {
299+
network_graph_reference.remove_stale_channels();
300+
301+
if let Err(e) = persister.persist_graph(network_graph_reference) {
279302
log_error!(logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
280303
}
304+
305+
last_prune_call = Instant::now();
306+
have_pruned = true;
307+
} else {
308+
log_trace!(logger, "Not pruning network graph, either due to pending rapid gossip sync or absence of a prunable graph.");
281309
}
282310
if let Some(ref scorer) = scorer {
283311
log_trace!(logger, "Persisting scorer");
284312
if let Err(e) = persister.persist_scorer(&scorer) {
285313
log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
286314
}
287315
}
288-
289-
last_prune_call = Instant::now();
290-
have_pruned = true;
291316
}
292317
}
293318

@@ -370,7 +395,7 @@ mod tests {
370395
use lightning::chain::transaction::OutPoint;
371396
use lightning::get_event_msg;
372397
use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, ChannelManager, SimpleArcChannelManager};
373-
use lightning::ln::features::InitFeatures;
398+
use lightning::ln::features::{ChannelFeatures, InitFeatures};
374399
use lightning::ln::msgs::{ChannelMessageHandler, Init};
375400
use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
376401
use lightning::routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
@@ -385,8 +410,10 @@ mod tests {
385410
use std::fs;
386411
use std::path::PathBuf;
387412
use std::sync::{Arc, Mutex};
413+
use std::sync::mpsc::SyncSender;
388414
use std::time::Duration;
389415
use lightning::routing::scoring::{FixedPenaltyScorer};
416+
use lightning_rapid_gossip_sync::RapidGossipSync;
390417
use super::{BackgroundProcessor, FRESHNESS_TIMER};
391418

392419
const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
@@ -414,6 +441,7 @@ mod tests {
414441
logger: Arc<test_utils::TestLogger>,
415442
best_block: BestBlock,
416443
scorer: Arc<Mutex<FixedPenaltyScorer>>,
444+
rapid_gossip_sync: Option<Arc<RapidGossipSync<Arc<NetworkGraph>>>>
417445
}
418446

419447
impl Drop for Node {
@@ -428,6 +456,7 @@ mod tests {
428456

429457
struct Persister {
430458
graph_error: Option<(std::io::ErrorKind, &'static str)>,
459+
graph_persistence_notifier: Option<SyncSender<()>>,
431460
manager_error: Option<(std::io::ErrorKind, &'static str)>,
432461
scorer_error: Option<(std::io::ErrorKind, &'static str)>,
433462
filesystem_persister: FilesystemPersister,
@@ -436,13 +465,17 @@ mod tests {
436465
impl Persister {
437466
fn new(data_dir: String) -> Self {
438467
let filesystem_persister = FilesystemPersister::new(data_dir.clone());
439-
Self { graph_error: None, manager_error: None, scorer_error: None, filesystem_persister }
468+
Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
440469
}
441470

442471
fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
443472
Self { graph_error: Some((error, message)), ..self }
444473
}
445474

475+
fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
476+
Self { graph_persistence_notifier: Some(sender), ..self }
477+
}
478+
446479
fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
447480
Self { manager_error: Some((error, message)), ..self }
448481
}
@@ -461,6 +494,10 @@ mod tests {
461494
}
462495

463496
if key == "network_graph" {
497+
if let Some(sender) = &self.graph_persistence_notifier {
498+
sender.send(()).unwrap();
499+
};
500+
464501
if let Some((error, message)) = self.graph_error {
465502
return Err(std::io::Error::new(error, message))
466503
}
@@ -504,7 +541,8 @@ mod tests {
504541
let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new() )};
505542
let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(Recipient::Node).unwrap(), &seed, logger.clone(), IgnoringMessageHandler{}));
506543
let scorer = Arc::new(Mutex::new(test_utils::TestScorer::with_penalty(0)));
507-
let node = Node { node: manager, net_graph_msg_handler, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
544+
let rapid_gossip_sync = None;
545+
let node = Node { node: manager, net_graph_msg_handler, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer, rapid_gossip_sync };
508546
nodes.push(node);
509547
}
510548

@@ -602,7 +640,7 @@ mod tests {
602640
let data_dir = nodes[0].persister.get_data_dir();
603641
let persister = Arc::new(Persister::new(data_dir));
604642
let event_handler = |_: &_| {};
605-
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
643+
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
606644

607645
macro_rules! check_persisted_data {
608646
($node: expr, $filepath: expr) => {
@@ -667,7 +705,7 @@ mod tests {
667705
let data_dir = nodes[0].persister.get_data_dir();
668706
let persister = Arc::new(Persister::new(data_dir));
669707
let event_handler = |_: &_| {};
670-
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
708+
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
671709
loop {
672710
let log_entries = nodes[0].logger.lines.lock().unwrap();
673711
let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string();
@@ -690,7 +728,7 @@ mod tests {
690728
let data_dir = nodes[0].persister.get_data_dir();
691729
let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
692730
let event_handler = |_: &_| {};
693-
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
731+
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
694732
match bg_processor.join() {
695733
Ok(_) => panic!("Expected error persisting manager"),
696734
Err(e) => {
@@ -707,7 +745,7 @@ mod tests {
707745
let data_dir = nodes[0].persister.get_data_dir();
708746
let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
709747
let event_handler = |_: &_| {};
710-
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
748+
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
711749

712750
match bg_processor.stop() {
713751
Ok(_) => panic!("Expected error persisting network graph"),
@@ -725,7 +763,7 @@ mod tests {
725763
let data_dir = nodes[0].persister.get_data_dir();
726764
let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
727765
let event_handler = |_: &_| {};
728-
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
766+
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
729767

730768
match bg_processor.stop() {
731769
Ok(_) => panic!("Expected error persisting scorer"),
@@ -748,7 +786,7 @@ mod tests {
748786
let event_handler = move |event: &Event| {
749787
sender.send(handle_funding_generation_ready!(event, channel_value)).unwrap();
750788
};
751-
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
789+
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
752790

753791
// Open a channel and check that the FundingGenerationReady event was handled.
754792
begin_open_channel!(nodes[0], nodes[1], channel_value);
@@ -773,7 +811,7 @@ mod tests {
773811
let (sender, receiver) = std::sync::mpsc::sync_channel(1);
774812
let event_handler = move |event: &Event| sender.send(event.clone()).unwrap();
775813
let persister = Arc::new(Persister::new(data_dir));
776-
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
814+
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
777815

778816
// Force close the channel and check that the SpendableOutputs event was handled.
779817
nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
@@ -791,6 +829,83 @@ mod tests {
791829
assert!(bg_processor.stop().is_ok());
792830
}
793831

832+
#[test]
833+
fn test_scorer_persistence() {
834+
let nodes = create_nodes(2, "test_scorer_persistence".to_string());
835+
let data_dir = nodes[0].persister.get_data_dir();
836+
let persister = Arc::new(Persister::new(data_dir));
837+
let event_handler = |_: &_| {};
838+
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
839+
840+
loop {
841+
let log_entries = nodes[0].logger.lines.lock().unwrap();
842+
let expected_log = "Persisting scorer".to_string();
843+
if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
844+
break
845+
}
846+
}
847+
848+
assert!(bg_processor.stop().is_ok());
849+
}
850+
851+
#[test]
852+
fn test_not_pruning_network_graph_until_graph_sync_completion() {
853+
let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
854+
let data_dir = nodes[0].persister.get_data_dir();
855+
let (sender, receiver) = std::sync::mpsc::sync_channel(1);
856+
let persister = Arc::new(Persister::new(data_dir.clone()).with_graph_persistence_notifier(sender));
857+
let network_graph = nodes[0].network_graph.clone();
858+
let rapid_sync = Arc::new(RapidGossipSync::new(network_graph.clone()));
859+
let features = ChannelFeatures::empty();
860+
network_graph.add_channel_from_partial_announcement(42, 53, features, nodes[0].node.get_our_node_id(), nodes[1].node.get_our_node_id())
861+
.expect("Failed to update channel from partial announcement");
862+
let original_graph_description = network_graph.to_string();
863+
assert!(original_graph_description.contains("42: features: 0000, node_one:"));
864+
assert_eq!(network_graph.read_only().channels().len(), 1);
865+
866+
let event_handler = |_: &_| {};
867+
let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), Some(rapid_sync.clone()));
868+
869+
loop {
870+
let log_entries = nodes[0].logger.lines.lock().unwrap();
871+
let expected_log_a = "Assessing prunability of network graph".to_string();
872+
let expected_log_b = "Not pruning network graph, either due to pending rapid gossip sync or absence of a prunable graph.".to_string();
873+
if log_entries.get(&("lightning_background_processor".to_string(), expected_log_a)).is_some() &&
874+
log_entries.get(&("lightning_background_processor".to_string(), expected_log_b)).is_some() {
875+
break
876+
}
877+
}
878+
879+
let initialization_input = vec![
880+
76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
881+
79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
882+
0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
883+
187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
884+
157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
885+
88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
886+
204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
887+
181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
888+
110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
889+
76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
890+
226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
891+
0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
892+
0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
893+
];
894+
rapid_sync.update_network_graph(&initialization_input[..]).unwrap();
895+
896+
// this should have added two channels
897+
assert_eq!(network_graph.read_only().channels().len(), 3);
898+
899+
let _ = receiver
900+
.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5))
901+
.expect("Network graph not pruned within deadline");
902+
903+
background_processor.stop().unwrap();
904+
905+
// all channels should now be pruned
906+
assert_eq!(network_graph.read_only().channels().len(), 0);
907+
}
908+
794909
#[test]
795910
fn test_invoice_payer() {
796911
let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
@@ -803,7 +918,7 @@ mod tests {
803918
let router = DefaultRouter::new(Arc::clone(&nodes[0].network_graph), Arc::clone(&nodes[0].logger), random_seed_bytes);
804919
let invoice_payer = Arc::new(InvoicePayer::new(Arc::clone(&nodes[0].node), router, Arc::clone(&nodes[0].scorer), Arc::clone(&nodes[0].logger), |_: &_| {}, Retry::Attempts(2)));
805920
let event_handler = Arc::clone(&invoice_payer);
806-
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
921+
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
807922
assert!(bg_processor.stop().is_ok());
808923
}
809924
}

0 commit comments

Comments (0)