
Commit ec8e1ca (1 parent: fa02602)

Re-order async background processor tasks

Prepare for parallelization.

File tree (1 file changed: +82, -78 lines)

  • lightning-background-processor/src


lightning-background-processor/src/lib.rs (82 additions, 78 deletions)
```diff
@@ -803,6 +803,15 @@ where
 		} else {
 			false
 		};
+		match check_sleeper(&mut last_freshness_call) {
+			Some(false) => {
+				log_trace!(logger, "Calling ChannelManager's timer_tick_occurred");
+				channel_manager.get_cm().timer_tick_occurred();
+				last_freshness_call = sleeper(FRESHNESS_TIMER);
+			},
+			Some(true) => break,
+			None => {},
+		}
 		if channel_manager.get_cm().get_and_clear_needs_persistence() {
 			log_trace!(logger, "Persisting ChannelManager...");
 			kv_store
@@ -815,53 +824,6 @@ where
 				.await?;
 			log_trace!(logger, "Done persisting ChannelManager.");
 		}
-		match check_sleeper(&mut last_freshness_call) {
-			Some(false) => {
-				log_trace!(logger, "Calling ChannelManager's timer_tick_occurred");
-				channel_manager.get_cm().timer_tick_occurred();
-				last_freshness_call = sleeper(FRESHNESS_TIMER);
-			},
-			Some(true) => break,
-			None => {},
-		}
-		match check_sleeper(&mut last_onion_message_handler_call) {
-			Some(false) => {
-				if let Some(om) = &onion_messenger {
-					log_trace!(logger, "Calling OnionMessageHandler's timer_tick_occurred");
-					om.get_om().timer_tick_occurred();
-				}
-				last_onion_message_handler_call = sleeper(ONION_MESSAGE_HANDLER_TIMER);
-			},
-			Some(true) => break,
-			None => {},
-		}
-		if await_slow {
-			// On various platforms, we may be starved of CPU cycles for several reasons.
-			// E.g. on iOS, if we've been in the background, we will be entirely paused.
-			// Similarly, if we're on a desktop platform and the device has been asleep, we
-			// may not get any cycles.
-			// We detect this by checking if our max-100ms-sleep, above, ran longer than a
-			// full second, at which point we assume sockets may have been killed (they
-			// appear to be at least on some platforms, even if it has only been a second).
-			// Note that we have to take care to not get here just because user event
-			// processing was slow at the top of the loop. For example, the sample client
-			// may call Bitcoin Core RPCs during event handling, which very often takes
-			// more than a handful of seconds to complete, and shouldn't disconnect all our
-			// peers.
-			log_trace!(logger, "100ms sleep took more than a second, disconnecting peers.");
-			peer_manager.as_ref().disconnect_all_peers();
-			last_ping_call = sleeper(PING_TIMER);
-		} else {
-			match check_sleeper(&mut last_ping_call) {
-				Some(false) => {
-					log_trace!(logger, "Calling PeerManager's timer_tick_occurred");
-					peer_manager.as_ref().timer_tick_occurred();
-					last_ping_call = sleeper(PING_TIMER);
-				},
-				Some(true) => break,
-				_ => {},
-			}
-		}
 
 		// Note that we want to run a graph prune once not long after startup before
 		// falling back to our usual hourly prunes. This avoids short-lived clients never
```
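Throughout these hunks, every timer block follows the same shape: `check_sleeper` polls the task's sleeper future exactly once, yielding `Some(false)` when the timer fired, `Some(true)` when the exit condition fired, and `None` while the timer is still pending, so a block that is not yet due simply falls through. Below is a minimal sketch of such a single-poll helper; it is illustrative only, assuming a sleeper future that resolves to a `bool` exit flag as in the diff, and is not LDK's exact implementation.

```rust
use core::future::Future;
use core::pin::Pin;
use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};

// A no-op waker: we only ever poll once per loop iteration, so we never need
// to be woken when the timer becomes ready.
fn dummy_waker() -> Waker {
	fn clone(_: *const ()) -> RawWaker {
		RawWaker::new(core::ptr::null(), &VTABLE)
	}
	fn no_op(_: *const ()) {}
	static VTABLE: RawWakerVTable = RawWakerVTable::new(clone, no_op, no_op, no_op);
	unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &VTABLE)) }
}

/// Polls `fut` once without blocking: `Some(exit)` if the timer fired,
/// `None` if it is still pending.
fn check_sleeper<F: Future<Output = bool> + Unpin>(fut: &mut F) -> Option<bool> {
	let waker = dummy_waker();
	let mut cx = Context::from_waker(&waker);
	match Pin::new(fut).poll(&mut cx) {
		Poll::Ready(exit) => Some(exit),
		Poll::Pending => None,
	}
}

fn main() {
	// A future that is immediately ready with `false` ("timer fired, don't exit").
	let mut ready = Box::pin(async { false });
	assert_eq!(check_sleeper(&mut ready), Some(false));
}
```

Because no block ever awaits its own timer, each `match` is independent of its neighbors, which is what makes them safe to re-order. The remaining async hunks below apply the same move to the rebroadcast, onion-message, and ping timers.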
```diff
@@ -948,15 +910,6 @@ where
 			Some(true) => break,
 			None => {},
 		}
-		match check_sleeper(&mut last_rebroadcast_call) {
-			Some(false) => {
-				log_trace!(logger, "Rebroadcasting monitor's pending claims");
-				chain_monitor.rebroadcast_pending_claims();
-				last_rebroadcast_call = sleeper(REBROADCAST_TIMER);
-			},
-			Some(true) => break,
-			None => {},
-		}
 		match check_sleeper(&mut last_sweeper_call) {
 			Some(false) => {
 				log_trace!(logger, "Regenerating sweeper spends if necessary");
@@ -968,6 +921,57 @@ where
 			Some(true) => break,
 			None => {},
 		}
+		match check_sleeper(&mut last_onion_message_handler_call) {
+			Some(false) => {
+				if let Some(om) = &onion_messenger {
+					log_trace!(logger, "Calling OnionMessageHandler's timer_tick_occurred");
+					om.get_om().timer_tick_occurred();
+				}
+				last_onion_message_handler_call = sleeper(ONION_MESSAGE_HANDLER_TIMER);
+			},
+			Some(true) => break,
+			None => {},
+		}
+
+		// Peer manager timer tick. If we were interrupted on a mobile platform, we disconnect all peers.
+		if await_slow {
+			// On various platforms, we may be starved of CPU cycles for several reasons.
+			// E.g. on iOS, if we've been in the background, we will be entirely paused.
+			// Similarly, if we're on a desktop platform and the device has been asleep, we
+			// may not get any cycles.
+			// We detect this by checking if our max-100ms-sleep, above, ran longer than a
+			// full second, at which point we assume sockets may have been killed (they
+			// appear to be at least on some platforms, even if it has only been a second).
+			// Note that we have to take care to not get here just because user event
+			// processing was slow at the top of the loop. For example, the sample client
+			// may call Bitcoin Core RPCs during event handling, which very often takes
+			// more than a handful of seconds to complete, and shouldn't disconnect all our
+			// peers.
+			log_trace!(logger, "100ms sleep took more than a second, disconnecting peers.");
+			peer_manager.as_ref().disconnect_all_peers();
+			last_ping_call = sleeper(PING_TIMER);
+		} else {
+			match check_sleeper(&mut last_ping_call) {
+				Some(false) => {
+					log_trace!(logger, "Calling PeerManager's timer_tick_occurred");
+					peer_manager.as_ref().timer_tick_occurred();
+					last_ping_call = sleeper(PING_TIMER);
+				},
+				Some(true) => break,
+				_ => {},
+			}
+		}
+
+		// Rebroadcast pending claims.
+		match check_sleeper(&mut last_rebroadcast_call) {
+			Some(false) => {
+				log_trace!(logger, "Rebroadcasting monitor's pending claims");
+				chain_monitor.rebroadcast_pending_claims();
+				last_rebroadcast_call = sleeper(REBROADCAST_TIMER);
+			},
+			Some(true) => break,
+			None => {},
+		}
 	}
 	log_trace!(logger, "Terminating background processor.");

```
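The re-added `await_slow` branch carries the CPU-starvation heuristic its comment describes: if the nominal 100ms sleep took over a second of wall-clock time, the process was likely suspended (backgrounded on iOS, or the device slept) and the sockets may already be dead, so it disconnects all peers instead of sending pings. A compact sketch of that detection, assuming a plain wall-clock measurement around the sleep (the names here are illustrative, not LDK's):

```rust
use std::thread::sleep;
use std::time::{Duration, Instant};

fn main() {
	let start = Instant::now();
	sleep(Duration::from_millis(100)); // nominally a 100ms sleep

	// If the process was suspended mid-sleep, far more wall-clock time elapsed
	// than requested; treat that as "sockets may have been killed".
	let await_slow = start.elapsed() > Duration::from_secs(1);
	if await_slow {
		println!("100ms sleep took more than a second, disconnecting peers.");
		// peer_manager.disconnect_all_peers();  // illustrative
	}
}
```

The second half of the diff makes the matching re-order in the synchronous `BackgroundProcessor`, which gates each task on `Instant::elapsed()` rather than on a sleeper future.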
```diff
@@ -1292,6 +1296,11 @@ impl BackgroundProcessor {
 				log_trace!(logger, "Terminating background processor.");
 				break;
 			}
+			if last_freshness_call.elapsed() > FRESHNESS_TIMER {
+				log_trace!(logger, "Calling ChannelManager's timer_tick_occurred");
+				channel_manager.get_cm().timer_tick_occurred();
+				last_freshness_call = Instant::now();
+			}
 			if channel_manager.get_cm().get_and_clear_needs_persistence() {
 				log_trace!(logger, "Persisting ChannelManager...");
 				(kv_store.write(
@@ -1302,23 +1311,6 @@ impl BackgroundProcessor {
 				))?;
 				log_trace!(logger, "Done persisting ChannelManager.");
 			}
-			if last_freshness_call.elapsed() > FRESHNESS_TIMER {
-				log_trace!(logger, "Calling ChannelManager's timer_tick_occurred");
-				channel_manager.get_cm().timer_tick_occurred();
-				last_freshness_call = Instant::now();
-			}
-			if last_onion_message_handler_call.elapsed() > ONION_MESSAGE_HANDLER_TIMER {
-				if let Some(om) = &onion_messenger {
-					log_trace!(logger, "Calling OnionMessageHandler's timer_tick_occurred");
-					om.get_om().timer_tick_occurred();
-				}
-				last_onion_message_handler_call = Instant::now();
-			}
-			if last_ping_call.elapsed() > PING_TIMER {
-				log_trace!(logger, "Calling PeerManager's timer_tick_occurred");
-				peer_manager.as_ref().timer_tick_occurred();
-				last_ping_call = Instant::now();
-			}
 
 			// Note that we want to run a graph prune once not long after startup before
 			// falling back to our usual hourly prunes. This avoids short-lived clients never
```
```diff
@@ -1386,18 +1378,30 @@ impl BackgroundProcessor {
 				}
 				last_scorer_persist_call = Instant::now();
 			}
-			if last_rebroadcast_call.elapsed() > REBROADCAST_TIMER {
-				log_trace!(logger, "Rebroadcasting monitor's pending claims");
-				chain_monitor.rebroadcast_pending_claims();
-				last_rebroadcast_call = Instant::now();
-			}
 			if last_sweeper_call.elapsed() > SWEEPER_TIMER {
 				log_trace!(logger, "Regenerating sweeper spends if necessary");
 				if let Some(ref sweeper) = sweeper {
 					let _ = sweeper.regenerate_and_broadcast_spend_if_necessary();
 				}
 				last_sweeper_call = Instant::now();
 			}
+			if last_onion_message_handler_call.elapsed() > ONION_MESSAGE_HANDLER_TIMER {
+				if let Some(om) = &onion_messenger {
+					log_trace!(logger, "Calling OnionMessageHandler's timer_tick_occurred");
+					om.get_om().timer_tick_occurred();
+				}
+				last_onion_message_handler_call = Instant::now();
+			}
+			if last_ping_call.elapsed() > PING_TIMER {
+				log_trace!(logger, "Calling PeerManager's timer_tick_occurred");
+				peer_manager.as_ref().timer_tick_occurred();
+				last_ping_call = Instant::now();
+			}
+			if last_rebroadcast_call.elapsed() > REBROADCAST_TIMER {
+				log_trace!(logger, "Rebroadcasting monitor's pending claims");
+				chain_monitor.rebroadcast_pending_claims();
+				last_rebroadcast_call = Instant::now();
+			}
 		}

 		// After we exit, ensure we persist the ChannelManager one final time - this avoids
```
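Per the commit message, the point of the re-ordering is to prepare for parallelization: once each periodic task sits in its own self-contained block with no ordering dependency on its neighbors, the blocks can later be driven concurrently. As one hedged sketch of that direction (an assumption about follow-up work, not anything in this commit), independent ticks could be joined with the `futures` crate (futures = "0.3"):

```rust
// Hypothetical follow-up (assumption, not this commit): with each periodic
// task in its own block, independent ticks can be polled concurrently.

async fn freshness_tick() {
	// channel_manager.get_cm().timer_tick_occurred();
}

async fn ping_tick() {
	// peer_manager.as_ref().timer_tick_occurred();
}

async fn rebroadcast_tick() {
	// chain_monitor.rebroadcast_pending_claims();
}

fn main() {
	futures::executor::block_on(async {
		// join! drives all three futures concurrently and completes once
		// every one of them has finished, rather than running them serially.
		futures::join!(freshness_tick(), ping_tick(), rebroadcast_tick());
	});
}
```

Whether the follow-up uses `join!`, `select!`, or separately spawned tasks is not decided here; the re-ordering only removes the implicit serialization between the timer blocks.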
