 	"encoding/pem"
 	"fmt"
 	"math/big"
+	"sync/atomic"
 	"testing"
 	"time"

@@ -285,6 +286,71 @@ func TestManager_ManageVolume_beginsManagingAndProceedsIfNotReady(t *testing.T)
 	}
 }

+func TestManager_ManageVolume_exponentialBackOffRetryOnIssueErrors(t *testing.T) {
+	expBackOffDuration := 100 * time.Millisecond
+	expBackOffCap := 5 * expBackOffDuration
+	expBackOffFactor := 2.0 // multiply the delay by 2.0 after each failed/errored attempt
+	expBackOffJitter := 0.0 // no jitter on the delay, so the number of retries can be calculated exactly
+	expBackOffSteps := 100  // the maximum number of backoff attempts
+	issueRenewalTimeout := expBackOffDuration
+
+	// Expected number of retries in each expBackOff cycle:
+	// ⌈log_expBackOffFactor(expBackOffCap / expBackOffDuration)⌉
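+	// (With Duration=100ms, Factor=2.0 and Cap=500ms, the delay grows 100ms -> 200ms -> 400ms;
+	// the next doubling, 800ms, would exceed the Cap, hence ⌈log2(5)⌉ = 3 retries per cycle.)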
+	var expectNumOfRetries int32 = 3 // ⌈log2(500/100)⌉
+
+	// Because startRenewalRoutine uses ticker := time.NewTicker(time.Second),
+	// 2 seconds should be enough to complete one expBackOff cycle:
+	// ticker start time (1s) + expBackOffCap (0.5s) + expectNumOfRetries (3) * issueRenewalTimeout (0.1s)
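+	// = 1.8s in total, so sleeping for 2s below should observe exactly one full cycle.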
+	expectGlobalTimeout := 2 * time.Second
+
+	var numOfRetries int32 = 0
+
+	opts := newDefaultTestOptions(t)
+	opts.RenewalBackoffConfig = &wait.Backoff{
+		Duration: expBackOffDuration,
+		Cap:      expBackOffCap,
+		Factor:   expBackOffFactor,
+		Jitter:   expBackOffJitter,
+		Steps:    expBackOffSteps,
+	}
+	opts.ReadyToRequest = func(meta metadata.Metadata) (bool, string) {
+		// ReadyToRequest will be called by issue()
+		atomic.AddInt32(&numOfRetries, 1) // runs in a goroutine, thus increment it atomically
+		return true, "" // AlwaysReadyToRequest
+	}
+	m, err := NewManager(opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	m.issueRenewalTimeout = issueRenewalTimeout
+
+	// Register a new volume with the metadata store
+	store := opts.MetadataReader.(storage.Interface)
+	meta := metadata.Metadata{
+		VolumeID:   "vol-id",
+		TargetPath: "/fake/path",
+	}
+	store.RegisterMetadata(meta)
+	// Ensure we stop managing the volume after the test
+	defer func() {
+		store.RemoveVolume(meta.VolumeID)
+		m.UnmanageVolume(meta.VolumeID)
+	}()
+
+	// Put the certificate under management
+	managed := m.ManageVolume(meta.VolumeID)
+	if !managed {
+		t.Errorf("expected management to have started, but it did not")
+	}
+
+	time.Sleep(expectGlobalTimeout)
+
+	actualNumOfRetries := atomic.LoadInt32(&numOfRetries) // read atomically
+	if actualNumOfRetries != expectNumOfRetries {
+		t.Errorf("expected %d retries, but got %d", expectNumOfRetries, actualNumOfRetries)
+	}
+}
+
 func TestManager_cleanupStaleRequests(t *testing.T) {
 	type fields struct {
 		nodeID string