@@ -82,6 +82,9 @@ type Options struct {
 	SignRequest SignRequestFunc
 	WriteKeypair WriteKeypairFunc
 	ReadyToRequest ReadyToRequestFunc
+
+	// BackoffConfig configures the exponential backoff applied to certificate renewal failures.
+	BackoffConfig *wait.Backoff
 }
 
 // NewManager constructs a new manager used to manage volumes containing
@@ -100,6 +103,22 @@ func NewManager(opts Options) (*Manager, error) {
 	if opts.Clock == nil {
 		opts.Clock = clock.RealClock{}
 	}
+	if opts.BackoffConfig == nil {
+		opts.BackoffConfig = &wait.Backoff{
+			// the 'base' amount of time for the backoff
+			Duration: time.Second * 30,
+			// We multiply the 'duration' by 2.0 if the attempt fails/errors
+			Factor: 2.0,
+			// Add a jitter of up to 0.5x the 'duration'
+			Jitter: 0.5,
+			// 'Steps' caps the maximum number of backoff attempts before the
+			// backoff gives up entirely. Set this to math.MaxInt32, as we never
+			// want to stop retrying until we get a successful attempt.
+			Steps: math.MaxInt32,
+			// The maximum time between calls will be 5 minutes
+			Cap: time.Minute * 5,
+		}
+	}
 	if opts.Log == nil {
 		return nil, errors.New("Log must be set")
 	}
@@ -167,6 +186,7 @@ func NewManager(opts Options) (*Manager, error) {
 
 		maxRequestsPerVolume: opts.MaxRequestsPerVolume,
 		nodeNameHash:         nodeNameHash,
+		backoffConfig:        *opts.BackoffConfig,
 	}
 
 	vols, err := opts.MetadataReader.ListVolumes()
@@ -263,6 +283,9 @@ type Manager struct {
 
 	// maximum number of CertificateRequests that should exist at any time for each volume
 	maxRequestsPerVolume int
+
+	// backoffConfig configures the exponential backoff applied to certificate renewal failures.
+	backoffConfig wait.Backoff
 }
 
 // issue will step through the entire issuance flow for a volume.
@@ -589,20 +612,7 @@ func (m *Manager) startRenewalRoutine(volumeID string) (started bool) {
 			// Instead, retry within the same iteration of the for loop and apply an exponential backoff.
 			// Because we pass ctx through to the 'wait' package, if the stopCh is closed/context is cancelled,
 			// we'll immediately stop waiting and 'continue' which will then hit the `case <-stopCh` case in the `select`.
-			if err := wait.ExponentialBackoffWithContext(ctx, wait.Backoff{
-				// 8s is the 'base' amount of time for the backoff
-				Duration: time.Second * 8,
-				// We multiple the 'duration' by 2.0 if the attempt fails/errors
-				Factor: 2.0,
-				// Add a jitter of +/- 1s (0.5 of the 'duration')
-				Jitter: 0.5,
-				// 'Steps' controls what the maximum number of backoff attempts is before we
-				// reset back to the 'base duration'. Set this to the MaxInt32, as we never want to
-				// reset this unless we get a successful attempt.
-				Steps: math.MaxInt32,
-				// The maximum time between calls will be 5 minutes
-				Cap: time.Minute * 5,
-			}, func() (bool, error) {
+			if err := wait.ExponentialBackoffWithContext(ctx, m.backoffConfig, func() (bool, error) {
 				log.Info("Triggering new issuance")
 				if err := m.issue(ctx, volumeID); err != nil {
 					log.Error(err, "Failed to issue certificate, retrying after applying exponential backoff")
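
For reference, here is a minimal sketch of how a caller could override the new default schedule via `Options`. It assumes the import path `github.com/cert-manager/csi-lib/manager` and elides the other required `Options` fields (`Log`, `MetadataReader`, `SignRequest`, and so on); only the field names come from the diff above, everything else is illustrative.

```go
package main

import (
	"math"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"

	"github.com/cert-manager/csi-lib/manager" // assumed import path
)

func main() {
	opts := manager.Options{
		// ... Log, MetadataReader, SignRequest and the other required
		// fields from the Options struct are elided here ...

		// Override the default (30s base, 5m cap) with a tighter schedule.
		BackoffConfig: &wait.Backoff{
			Duration: time.Second * 5, // 'base' wait after the first failure
			Factor:   2.0,             // double the wait after each further failure
			Jitter:   0.5,             // add up to +50% of the duration as jitter
			Steps:    math.MaxInt32,   // effectively never stop retrying
			Cap:      time.Minute,     // never wait longer than 1 minute
		},
	}

	// Leaving BackoffConfig nil instead selects the defaults shown in the
	// diff, since NewManager fills the field in before dereferencing it.
	m, err := manager.NewManager(opts)
	if err != nil {
		panic(err) // the elided required fields must be set in real use
	}
	_ = m
}
```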
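And a small standalone program, under the same assumptions, that shows the delay sequence the new defaults produce. It uses `wait.Backoff`'s exported `Step` method, which returns the next delay and advances the backoff state; exact values vary because of the jitter.

```go
package main

import (
	"fmt"
	"math"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// The new defaults from NewManager, reproduced from the diff above.
	b := wait.Backoff{
		Duration: time.Second * 30,
		Factor:   2.0,
		Jitter:   0.5,
		Steps:    math.MaxInt32,
		Cap:      time.Minute * 5,
	}

	// Prints roughly: 30s, 1m, 2m, 4m, 5m, 5m, each with up to +50%
	// jitter, staying at the 5m cap from then on.
	for i := 0; i < 6; i++ {
		fmt.Println(b.Step())
	}
}
```

In other words, failed renewals now back off from 30 seconds up to a ceiling of five minutes between attempts, rather than the previous hard-coded 8-second base.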