Skip to content

Commit 8184bc4

Browse files
committed
TUN-8427: Fix BackoffHandler's internally shared clock structure
A clock structure was used to help support time travel in unit tests, but it is a globally shared object and is likely unsafe to share across tests. Reordering of the tests seemed to cause intermittent failures, specifically for TestWaitForBackoffFallback on Windows builds. Adjusting this to be a shim inside the BackoffHandler struct should resolve shared-object overrides in unit testing. Additionally, added reset-retries functionality inline with the ResetNow function of the BackoffHandler to align better with the expected behavior of the method. Removes the unused reconnectCredentialManager.
1 parent 2db0021 commit 8184bc4

File tree

8 files changed

+83
-335
lines changed

8 files changed

+83
-335
lines changed

retry/backoffhandler.go

Lines changed: 32 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -6,17 +6,16 @@ import (
66
"time"
77
)
88

9+
const (
10+
DefaultBaseTime time.Duration = time.Second
11+
)
12+
913
// Redeclare time functions so they can be overridden in tests.
10-
type clock struct {
14+
type Clock struct {
1115
Now func() time.Time
1216
After func(d time.Duration) <-chan time.Time
1317
}
1418

15-
var Clock = clock{
16-
Now: time.Now,
17-
After: time.After,
18-
}
19-
2019
// BackoffHandler manages exponential backoff and limits the maximum number of retries.
2120
// The base time period is 1 second, doubling with each retry.
2221
// After initial success, a grace period can be set to reset the backoff timer if
@@ -25,15 +24,26 @@ var Clock = clock{
2524
type BackoffHandler struct {
2625
// MaxRetries sets the maximum number of retries to perform. The default value
2726
// of 0 disables retry completely.
28-
MaxRetries uint
27+
maxRetries uint
2928
// RetryForever caps the exponential backoff period according to MaxRetries
3029
// but allows you to retry indefinitely.
31-
RetryForever bool
30+
retryForever bool
3231
// BaseTime sets the initial backoff period.
33-
BaseTime time.Duration
32+
baseTime time.Duration
3433

3534
retries uint
3635
resetDeadline time.Time
36+
37+
Clock Clock
38+
}
39+
40+
func NewBackoff(maxRetries uint, baseTime time.Duration, retryForever bool) BackoffHandler {
41+
return BackoffHandler{
42+
maxRetries: maxRetries,
43+
baseTime: baseTime,
44+
retryForever: retryForever,
45+
Clock: Clock{Now: time.Now, After: time.After},
46+
}
3747
}
3848

3949
func (b BackoffHandler) GetMaxBackoffDuration(ctx context.Context) (time.Duration, bool) {
@@ -44,11 +54,11 @@ func (b BackoffHandler) GetMaxBackoffDuration(ctx context.Context) (time.Duratio
4454
return time.Duration(0), false
4555
default:
4656
}
47-
if !b.resetDeadline.IsZero() && Clock.Now().After(b.resetDeadline) {
57+
if !b.resetDeadline.IsZero() && b.Clock.Now().After(b.resetDeadline) {
4858
// b.retries would be set to 0 at this point
4959
return time.Second, true
5060
}
51-
if b.retries >= b.MaxRetries && !b.RetryForever {
61+
if b.retries >= b.maxRetries && !b.retryForever {
5262
return time.Duration(0), false
5363
}
5464
maxTimeToWait := b.GetBaseTime() * 1 << (b.retries + 1)
@@ -58,20 +68,20 @@ func (b BackoffHandler) GetMaxBackoffDuration(ctx context.Context) (time.Duratio
5868
// BackoffTimer returns a channel that sends the current time when the exponential backoff timeout expires.
5969
// Returns nil if the maximum number of retries have been used.
6070
func (b *BackoffHandler) BackoffTimer() <-chan time.Time {
61-
if !b.resetDeadline.IsZero() && Clock.Now().After(b.resetDeadline) {
71+
if !b.resetDeadline.IsZero() && b.Clock.Now().After(b.resetDeadline) {
6272
b.retries = 0
6373
b.resetDeadline = time.Time{}
6474
}
65-
if b.retries >= b.MaxRetries {
66-
if !b.RetryForever {
75+
if b.retries >= b.maxRetries {
76+
if !b.retryForever {
6777
return nil
6878
}
6979
} else {
7080
b.retries++
7181
}
7282
maxTimeToWait := time.Duration(b.GetBaseTime() * 1 << (b.retries))
7383
timeToWait := time.Duration(rand.Int63n(maxTimeToWait.Nanoseconds()))
74-
return Clock.After(timeToWait)
84+
return b.Clock.After(timeToWait)
7585
}
7686

7787
// Backoff is used to wait according to exponential backoff. Returns false if the
@@ -94,16 +104,16 @@ func (b *BackoffHandler) Backoff(ctx context.Context) bool {
94104
func (b *BackoffHandler) SetGracePeriod() time.Duration {
95105
maxTimeToWait := b.GetBaseTime() * 2 << (b.retries + 1)
96106
timeToWait := time.Duration(rand.Int63n(maxTimeToWait.Nanoseconds()))
97-
b.resetDeadline = Clock.Now().Add(timeToWait)
107+
b.resetDeadline = b.Clock.Now().Add(timeToWait)
98108

99109
return timeToWait
100110
}
101111

102112
func (b BackoffHandler) GetBaseTime() time.Duration {
103-
if b.BaseTime == 0 {
104-
return time.Second
113+
if b.baseTime == 0 {
114+
return DefaultBaseTime
105115
}
106-
return b.BaseTime
116+
return b.baseTime
107117
}
108118

109119
// Retries returns the number of retries consumed so far.
@@ -112,9 +122,10 @@ func (b *BackoffHandler) Retries() int {
112122
}
113123

114124
func (b *BackoffHandler) ReachedMaxRetries() bool {
115-
return b.retries == b.MaxRetries
125+
return b.retries == b.maxRetries
116126
}
117127

118128
func (b *BackoffHandler) ResetNow() {
119-
b.resetDeadline = time.Now()
129+
b.resetDeadline = b.Clock.Now()
130+
b.retries = 0
120131
}

retry/backoffhandler_test.go

Lines changed: 14 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -13,10 +13,9 @@ func immediateTimeAfter(time.Duration) <-chan time.Time {
1313
}
1414

1515
func TestBackoffRetries(t *testing.T) {
16-
// make backoff return immediately
17-
Clock.After = immediateTimeAfter
1816
ctx := context.Background()
19-
backoff := BackoffHandler{MaxRetries: 3}
17+
// make backoff return immediately
18+
backoff := BackoffHandler{maxRetries: 3, Clock: Clock{time.Now, immediateTimeAfter}}
2019
if !backoff.Backoff(ctx) {
2120
t.Fatalf("backoff failed immediately")
2221
}
@@ -32,10 +31,10 @@ func TestBackoffRetries(t *testing.T) {
3231
}
3332

3433
func TestBackoffCancel(t *testing.T) {
35-
// prevent backoff from returning normally
36-
Clock.After = func(time.Duration) <-chan time.Time { return make(chan time.Time) }
3734
ctx, cancelFunc := context.WithCancel(context.Background())
38-
backoff := BackoffHandler{MaxRetries: 3}
35+
// prevent backoff from returning normally
36+
after := func(time.Duration) <-chan time.Time { return make(chan time.Time) }
37+
backoff := BackoffHandler{maxRetries: 3, Clock: Clock{time.Now, after}}
3938
cancelFunc()
4039
if backoff.Backoff(ctx) {
4140
t.Fatalf("backoff allowed after cancel")
@@ -46,13 +45,12 @@ func TestBackoffCancel(t *testing.T) {
4645
}
4746

4847
func TestBackoffGracePeriod(t *testing.T) {
48+
ctx := context.Background()
4949
currentTime := time.Now()
5050
// make Clock.Now return whatever we like
51-
Clock.Now = func() time.Time { return currentTime }
51+
now := func() time.Time { return currentTime }
5252
// make backoff return immediately
53-
Clock.After = immediateTimeAfter
54-
ctx := context.Background()
55-
backoff := BackoffHandler{MaxRetries: 1}
53+
backoff := BackoffHandler{maxRetries: 1, Clock: Clock{now, immediateTimeAfter}}
5654
if !backoff.Backoff(ctx) {
5755
t.Fatalf("backoff failed immediately")
5856
}
@@ -70,10 +68,9 @@ func TestBackoffGracePeriod(t *testing.T) {
7068
}
7169

7270
func TestGetMaxBackoffDurationRetries(t *testing.T) {
73-
// make backoff return immediately
74-
Clock.After = immediateTimeAfter
7571
ctx := context.Background()
76-
backoff := BackoffHandler{MaxRetries: 3}
72+
// make backoff return immediately
73+
backoff := BackoffHandler{maxRetries: 3, Clock: Clock{time.Now, immediateTimeAfter}}
7774
if _, ok := backoff.GetMaxBackoffDuration(ctx); !ok {
7875
t.Fatalf("backoff failed immediately")
7976
}
@@ -95,10 +92,9 @@ func TestGetMaxBackoffDurationRetries(t *testing.T) {
9592
}
9693

9794
func TestGetMaxBackoffDuration(t *testing.T) {
98-
// make backoff return immediately
99-
Clock.After = immediateTimeAfter
10095
ctx := context.Background()
101-
backoff := BackoffHandler{MaxRetries: 3}
96+
// make backoff return immediately
97+
backoff := BackoffHandler{maxRetries: 3, Clock: Clock{time.Now, immediateTimeAfter}}
10298
if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*2 {
10399
t.Fatalf("backoff (%s) didn't return < 2 seconds on first retry", duration)
104100
}
@@ -117,10 +113,9 @@ func TestGetMaxBackoffDuration(t *testing.T) {
117113
}
118114

119115
func TestBackoffRetryForever(t *testing.T) {
120-
// make backoff return immediately
121-
Clock.After = immediateTimeAfter
122116
ctx := context.Background()
123-
backoff := BackoffHandler{MaxRetries: 3, RetryForever: true}
117+
// make backoff return immediately
118+
backoff := BackoffHandler{maxRetries: 3, retryForever: true, Clock: Clock{time.Now, immediateTimeAfter}}
124119
if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*2 {
125120
t.Fatalf("backoff (%s) didn't return < 2 seconds on first retry", duration)
126121
}

supervisor/reconnect.go

Lines changed: 0 additions & 138 deletions
This file was deleted.

0 commit comments

Comments
 (0)