@@ -20,6 +20,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"math"
 	"sort"
 	"sync"
 	"time"
@@ -460,6 +461,15 @@ func (m *Manager) ManageVolume(volumeID string) error {
 	stopCh := make(chan struct{})
 	m.managedVolumes[volumeID] = stopCh
 
+	// Create a context that will be cancelled when the stopCh is closed
+	ctx, cancel := context.WithCancel(context.Background())
+	go func() {
+		select {
+		case <-stopCh:
+			cancel()
+		}
+	}()
+
 	go func() {
 		// check every volume once per second
 		// TODO: optimise this to not check so often
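The stop-channel-to-context bridge added above can be exercised on its own. Below is a minimal, standalone sketch (names are illustrative, not taken from this repository) showing the pattern: closing the stop channel cancels the derived context, which is what lets the context-aware backoff loop further down abort when the volume stops being managed.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	stopCh := make(chan struct{})

	// Bridge the channel-based stop signal into a context: closing
	// stopCh cancels ctx, and anything blocked on ctx.Done() (such as
	// a context-aware backoff loop) unblocks.
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-stopCh
		cancel()
	}()

	// Simulate the caller closing the stop channel shortly after start.
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(stopCh)
	}()

	<-ctx.Done()
	fmt.Println("stopped:", ctx.Err()) // prints "stopped: context canceled"
}
```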
@@ -477,13 +487,24 @@ func (m *Manager) ManageVolume(volumeID string) error {
 		}
 
 		if meta.NextIssuanceTime == nil || m.clock.Now().After(*meta.NextIssuanceTime) {
-			log.Info("Triggering new issuance")
-			if err := m.issue(volumeID); err != nil {
-				log.Error(err, "Failed to issue certificate")
-				// retry the request in 1 second time
-				// TODO: exponentially back-off
-				continue
-			}
+			wait.ExponentialBackoffWithContext(ctx, wait.Backoff{
+				Duration: time.Second * 2,
+				Factor:   2.0,
+				Jitter:   0.5,
+				// Set this to the maximum int value to avoid resetting the exponential backoff
+				// timer early.
+				// This will mean that once the back-off hits 1 minute, we will constantly retry once
+				// per minute rather than resetting back to `Duration` (2s).
+				Steps: math.MaxInt32,
+				Cap:   time.Minute,
+			}, func() (bool, error) {
+				log.Info("Triggering new issuance")
+				if err := m.issue(volumeID); err != nil {
+					log.Error(err, "Failed to issue certificate, retrying after applying exponential backoff")
+					return false, nil
+				}
+				return true, nil
+			})
 		}
 	}
 }
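For intuition, here is the retry schedule these `wait.Backoff` parameters produce. The sketch below is a standalone approximation that doesn't call the apimachinery `wait` package at all, and it leaves out `Jitter` (which would randomly stretch each delay by up to 50%) for determinism: the delay doubles from 2s until it hits the 1-minute `Cap`, after which every retry waits the full minute. `Steps: math.MaxInt32` means the step budget is effectively never exhausted, so the backoff keeps running at the capped interval instead of returning and restarting from 2s.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Approximate the delay sequence of wait.Backoff{Duration: 2s,
	// Factor: 2.0, Cap: 1m}, with jitter left out for determinism.
	delay := 2 * time.Second
	const factor = 2.0
	const maxDelay = time.Minute

	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: wait %s\n", attempt, delay)
		// Grow the delay, clamping it at the cap once reached.
		delay = time.Duration(float64(delay) * factor)
		if delay > maxDelay {
			delay = maxDelay
		}
	}
	// Output: 2s, 4s, 8s, 16s, 32s, 1m0s, 1m0s, 1m0s
}
```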