@@ -19,7 +19,6 @@ use opentelemetry::{
     metrics::{Counter, Histogram, UpDownCounter},
 };
 use rand::{Rng, RngCore, distributions::Uniform};
-use rand_chacha::ChaChaRng;
 use serde::de::DeserializeOwned;
 use sqlx::{
     Acquire, Either,
@@ -195,8 +194,6 @@ struct ScheduleDefinition {
 }
 
 pub struct QueueWorker {
-    rng: ChaChaRng,
-    clock: Box<dyn Clock + Send>,
     listener: PgListener,
     registration: Worker,
     am_i_leader: bool,
@@ -278,8 +275,6 @@ impl QueueWorker {
         let cancellation_guard = cancellation_token.clone().drop_guard();
 
         Ok(Self {
-            rng,
-            clock,
             listener,
             registration,
             am_i_leader: false,
@@ -397,6 +392,9 @@ impl QueueWorker {
     async fn shutdown(&mut self) -> Result<(), QueueRunnerError> {
         tracing::info!("Shutting down worker");
 
+        let clock = self.state.clock();
+        let mut rng = self.state.rng();
+
         // Start a transaction on the existing PgListener connection
         let txn = self
             .listener
@@ -421,13 +419,13 @@ impl QueueWorker {
 
         // Wait for all the jobs to finish
         self.tracker
-            .process_jobs(&mut self.rng, &self.clock, &mut repo, true)
+            .process_jobs(&mut rng, clock, &mut repo, true)
             .await?;
 
         // Tell the other workers we're shutting down
         // This also releases the leader election lease
         repo.queue_worker()
-            .shutdown(&self.clock, &self.registration)
+            .shutdown(clock, &self.registration)
             .await?;
 
         repo.into_inner()
@@ -440,12 +438,12 @@ impl QueueWorker {
 
     #[tracing::instrument(name = "worker.wait_until_wakeup", skip_all)]
     async fn wait_until_wakeup(&mut self) -> Result<(), QueueRunnerError> {
+        let mut rng = self.state.rng();
+
         // This is to make sure we wake up every second to do the maintenance tasks
         // We add a little bit of random jitter to the duration, so that we don't get
         // fully synced workers waking up at the same time after each notification
-        let sleep_duration = self
-            .rng
-            .sample(Uniform::new(MIN_SLEEP_DURATION, MAX_SLEEP_DURATION));
+        let sleep_duration = rng.sample(Uniform::new(MIN_SLEEP_DURATION, MAX_SLEEP_DURATION));
         let wakeup_sleep = tokio::time::sleep(sleep_duration);
 
         tokio::select! {
@@ -490,7 +488,9 @@ impl QueueWorker {
     )]
     async fn tick(&mut self) -> Result<(), QueueRunnerError> {
         tracing::debug!("Tick");
-        let now = self.clock.now();
+        let clock = self.state.clock();
+        let mut rng = self.state.rng();
+        let now = clock.now();
 
         // Start a transaction on the existing PgListener connection
         let txn = self
@@ -505,25 +505,25 @@ impl QueueWorker {
         if now - self.last_heartbeat >= chrono::Duration::minutes(1) {
             tracing::info!("Sending heartbeat");
             repo.queue_worker()
-                .heartbeat(&self.clock, &self.registration)
+                .heartbeat(clock, &self.registration)
                 .await?;
             self.last_heartbeat = now;
         }
 
         // Remove any dead worker leader leases
         repo.queue_worker()
-            .remove_leader_lease_if_expired(&self.clock)
+            .remove_leader_lease_if_expired(clock)
             .await?;
 
         // Try to become (or stay) the leader
         let leader = repo
             .queue_worker()
-            .try_get_leader_lease(&self.clock, &self.registration)
+            .try_get_leader_lease(clock, &self.registration)
             .await?;
 
         // Process any job task which finished
         self.tracker
-            .process_jobs(&mut self.rng, &self.clock, &mut repo, false)
+            .process_jobs(&mut rng, clock, &mut repo, false)
             .await?;
 
         // Compute how many jobs we should fetch at most
@@ -538,7 +538,7 @@ impl QueueWorker {
         let queues = self.tracker.queues();
         let jobs = repo
             .queue_job()
-            .reserve(&self.clock, &self.registration, &queues, max_jobs_to_fetch)
+            .reserve(clock, &self.registration, &queues, max_jobs_to_fetch)
             .await?;
 
         for Job {
@@ -592,6 +592,9 @@ impl QueueWorker {
             return Err(QueueRunnerError::NotLeader);
         }
 
+        let clock = self.state.clock();
+        let mut rng = self.state.rng();
+
         // Start a transaction on the existing PgListener connection
         let txn = self
             .listener
@@ -633,7 +636,7 @@ impl QueueWorker {
         // Look at the state of schedules in the database
         let schedules_status = repo.queue_schedule().list().await?;
 
-        let now = self.clock.now();
+        let now = clock.now();
         for schedule in &self.schedules {
             // Find the schedule status from the database
             let Some(schedule_status) = schedules_status
@@ -670,8 +673,8 @@ impl QueueWorker {
 
             repo.queue_job()
                 .schedule_later(
-                    &mut self.rng,
-                    &self.clock,
+                    &mut rng,
+                    clock,
                     schedule.queue_name,
                     schedule.payload.clone(),
                     serde_json::json!({}),
@@ -684,16 +687,13 @@ impl QueueWorker {
         // We also check if the worker is dead, and if so, we shutdown all the dead
         // workers that haven't checked in the last two minutes
         repo.queue_worker()
-            .shutdown_dead_workers(&self.clock, Duration::minutes(2))
+            .shutdown_dead_workers(clock, Duration::minutes(2))
             .await?;
 
         // TODO: mark tasks those workers had as lost
 
         // Mark all the scheduled jobs as available
-        let scheduled = repo
-            .queue_job()
-            .schedule_available_jobs(&self.clock)
-            .await?;
+        let scheduled = repo.queue_job().schedule_available_jobs(clock).await?;
         match scheduled {
             0 => {}
             1 => tracing::info!("One scheduled job marked as available"),