Skip to content

Commit 9dc679e

Browse files
authored
Update poolmonitor to support batch size of 1 (#1492)
Updates the poolmonitor to support a batch size of 1.

Signed-off-by: Evan Baker <[email protected]>
1 parent 637d5af commit 9dc679e

File tree

2 files changed

+133
-49
lines changed

2 files changed

+133
-49
lines changed

cns/ipampool/monitor.go

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -433,14 +433,16 @@ func (pm *Monitor) clampScaler(scaler *v1alpha.Scaler) {
433433

434434
// CalculateMinFreeIPs calculates the minimum free IP quantity based on the Scaler
435435
// in the passed NodeNetworkConfig.
436+
// Half of odd batches are rounded up!
436437
//nolint:gocritic // ignore hugeparam
437438
func CalculateMinFreeIPs(scaler v1alpha.Scaler) int64 {
438-
return int64(float64(scaler.BatchSize) * (float64(scaler.RequestThresholdPercent) / 100)) //nolint:gomnd // it's a percent
439+
return int64(float64(scaler.BatchSize)*(float64(scaler.RequestThresholdPercent)/100) + .5) //nolint:gomnd // it's a percent
439440
}
440441

441442
// CalculateMaxFreeIPs calculates the maximum free IP quantity based on the Scaler
442443
// in the passed NodeNetworkConfig.
444+
// Half of odd batches are rounded up!
443445
//nolint:gocritic // ignore hugeparam
444446
func CalculateMaxFreeIPs(scaler v1alpha.Scaler) int64 {
445-
return int64(float64(scaler.BatchSize) * (float64(scaler.ReleaseThresholdPercent) / 100)) //nolint:gomnd // it's a percent
447+
return int64(float64(scaler.BatchSize)*(float64(scaler.ReleaseThresholdPercent)/100) + .5) //nolint:gomnd // it's a percent
446448
}

cns/ipampool/monitor_test.go

Lines changed: 129 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -78,36 +78,63 @@ func initFakes(state testState) (*fakes.HTTPServiceFake, *fakes.RequestControlle
7878
}
7979

8080
func TestPoolSizeIncrease(t *testing.T) {
81-
initState := testState{
82-
batch: 10,
83-
assigned: 8,
84-
allocated: 10,
85-
requestThresholdPercent: 50,
86-
releaseThresholdPercent: 150,
87-
max: 30,
81+
tests := []struct {
82+
name string
83+
in testState
84+
want int64
85+
}{
86+
{
87+
name: "typ",
88+
in: testState{
89+
batch: 10,
90+
assigned: 8,
91+
allocated: 10,
92+
requestThresholdPercent: 50,
93+
releaseThresholdPercent: 150,
94+
max: 30,
95+
},
96+
want: 20,
97+
},
98+
{
99+
name: "starvation",
100+
in: testState{
101+
batch: 1,
102+
assigned: 10,
103+
allocated: 10,
104+
requestThresholdPercent: 50,
105+
releaseThresholdPercent: 150,
106+
max: 30,
107+
},
108+
want: 11,
109+
},
88110
}
89111

90-
fakecns, fakerc, poolmonitor := initFakes(initState)
91-
assert.NoError(t, fakerc.Reconcile(true))
112+
for _, tt := range tests {
113+
tt := tt
114+
t.Run(tt.name, func(t *testing.T) {
115+
fakecns, fakerc, poolmonitor := initFakes(tt.in)
116+
assert.NoError(t, fakerc.Reconcile(true))
92117

93-
// When poolmonitor reconcile is called, trigger increase and cache goal state
94-
assert.NoError(t, poolmonitor.reconcile(context.Background()))
118+
// When poolmonitor reconcile is called, trigger increase and cache goal state
119+
assert.NoError(t, poolmonitor.reconcile(context.Background()))
95120

96-
// ensure pool monitor has reached quorum with cns
97-
assert.Equal(t, initState.allocated+(1*initState.batch), poolmonitor.spec.RequestedIPCount)
121+
// ensure pool monitor has reached quorum with cns
122+
assert.Equal(t, tt.want, poolmonitor.spec.RequestedIPCount)
98123

99-
// request controller reconciles, carves new IPs from the test subnet and adds to CNS state
100-
assert.NoError(t, fakerc.Reconcile(true))
124+
// request controller reconciles, carves new IPs from the test subnet and adds to CNS state
125+
assert.NoError(t, fakerc.Reconcile(true))
101126

102-
// when poolmonitor reconciles again here, the IP count will be within the thresholds
103-
// so no CRD update and nothing pending
104-
assert.NoError(t, poolmonitor.reconcile(context.Background()))
127+
// when poolmonitor reconciles again here, the IP count will be within the thresholds
128+
// so no CRD update and nothing pending
129+
assert.NoError(t, poolmonitor.reconcile(context.Background()))
105130

106-
// ensure pool monitor has reached quorum with cns
107-
assert.Equal(t, initState.allocated+(1*initState.batch), poolmonitor.spec.RequestedIPCount)
131+
// ensure pool monitor has reached quorum with cns
132+
assert.Equal(t, tt.want, poolmonitor.spec.RequestedIPCount)
108133

109-
// make sure IPConfig state size reflects the new pool size
110-
assert.Len(t, fakecns.GetPodIPConfigState(), int(initState.allocated+(1*initState.batch)))
134+
// make sure IPConfig state size reflects the new pool size
135+
assert.Len(t, fakecns.GetPodIPConfigState(), int(tt.want))
136+
})
137+
}
111138
}
112139

113140
func TestPoolIncreaseDoesntChangeWhenIncreaseIsAlreadyInProgress(t *testing.T) {
@@ -237,37 +264,66 @@ func TestIncreaseWithPendingRelease(t *testing.T) {
237264
}
238265

239266
func TestPoolDecrease(t *testing.T) {
240-
initState := testState{
241-
batch: 10,
242-
allocated: 20,
243-
assigned: 15,
244-
requestThresholdPercent: 50,
245-
releaseThresholdPercent: 150,
246-
max: 30,
267+
tests := []struct {
268+
name string
269+
in testState
270+
want int64
271+
wantReleased int
272+
}{
273+
{
274+
name: "typ",
275+
in: testState{
276+
batch: 10,
277+
allocated: 20,
278+
assigned: 15,
279+
requestThresholdPercent: 50,
280+
releaseThresholdPercent: 150,
281+
max: 30,
282+
},
283+
want: 10,
284+
wantReleased: 10,
285+
},
286+
{
287+
name: "starvation",
288+
in: testState{
289+
batch: 1,
290+
allocated: 20,
291+
assigned: 19,
292+
requestThresholdPercent: 50,
293+
releaseThresholdPercent: 150,
294+
max: 30,
295+
},
296+
want: 19,
297+
wantReleased: 1,
298+
},
247299
}
300+
for _, tt := range tests {
301+
tt := tt
302+
t.Run(tt.name, func(t *testing.T) {
303+
fakecns, fakerc, poolmonitor := initFakes(tt.in)
304+
assert.NoError(t, fakerc.Reconcile(true))
248305

249-
fakecns, fakerc, poolmonitor := initFakes(initState)
250-
assert.NoError(t, fakerc.Reconcile(true))
306+
// Pool monitor does nothing, as the current number of IPs falls in the threshold
307+
assert.NoError(t, poolmonitor.reconcile(context.Background()))
251308

252-
// Pool monitor does nothing, as the current number of IPs falls in the threshold
253-
assert.NoError(t, poolmonitor.reconcile(context.Background()))
309+
// Decrease the number of allocated IPs down to 5. This should trigger a scale down
310+
assert.NoError(t, fakecns.SetNumberOfAssignedIPs(4))
254311

255-
// Decrease the number of allocated IPs down to 5. This should trigger a scale down
256-
assert.NoError(t, fakecns.SetNumberOfAssignedIPs(4))
312+
// Pool monitor will adjust the spec so the pool size will be 1 batch size smaller
313+
assert.NoError(t, poolmonitor.reconcile(context.Background()))
257314

258-
// Pool monitor will adjust the spec so the pool size will be 1 batch size smaller
259-
assert.NoError(t, poolmonitor.reconcile(context.Background()))
315+
// ensure that the adjusted spec is smaller than the initial pool size
316+
assert.Len(t, poolmonitor.spec.IPsNotInUse, tt.wantReleased)
260317

261-
// ensure that the adjusted spec is smaller than the initial pool size
262-
assert.Len(t, poolmonitor.spec.IPsNotInUse, int(initState.allocated-initState.batch))
318+
// reconcile the fake request controller
319+
assert.NoError(t, fakerc.Reconcile(true))
263320

264-
// reconcile the fake request controller
265-
assert.NoError(t, fakerc.Reconcile(true))
266-
267-
// CNS won't actually clean up the IPsNotInUse until it changes the spec for some other reason (i.e. scale up)
268-
// so instead we should just verify that the CNS state has no more PendingReleaseIPConfigs,
269-
// and that they were cleaned up.
270-
assert.Empty(t, fakecns.GetPendingReleaseIPConfigs())
321+
// CNS won't actually clean up the IPsNotInUse until it changes the spec for some other reason (i.e. scale up)
322+
// so instead we should just verify that the CNS state has no more PendingReleaseIPConfigs,
323+
// and that they were cleaned up.
324+
assert.Empty(t, fakecns.GetPendingReleaseIPConfigs())
325+
})
326+
}
271327
}
272328

273329
func TestPoolSizeDecreaseWhenDecreaseHasAlreadyBeenRequested(t *testing.T) {
@@ -504,11 +560,37 @@ func TestCalculateIPs(t *testing.T) {
504560
wantMinFree: 16,
505561
wantMaxFree: 32,
506562
},
563+
{
564+
name: "odd batch",
565+
in: v1alpha.Scaler{
566+
BatchSize: 3,
567+
RequestThresholdPercent: 50,
568+
ReleaseThresholdPercent: 150,
569+
MaxIPCount: 250,
570+
},
571+
wantMinFree: 2,
572+
wantMaxFree: 5,
573+
},
574+
{
575+
name: "starvation",
576+
in: v1alpha.Scaler{
577+
BatchSize: 1,
578+
RequestThresholdPercent: 50,
579+
ReleaseThresholdPercent: 150,
580+
MaxIPCount: 250,
581+
},
582+
wantMinFree: 1,
583+
wantMaxFree: 2,
584+
},
507585
}
508586
for _, tt := range tests {
509587
tt := tt
510-
t.Run(tt.name, func(t *testing.T) {
588+
t.Run(tt.name+" min free", func(t *testing.T) {
511589
assert.Equal(t, tt.wantMinFree, CalculateMinFreeIPs(tt.in))
590+
t.Parallel()
591+
})
592+
t.Run(tt.name+" max free", func(t *testing.T) {
593+
t.Parallel()
512594
assert.Equal(t, tt.wantMaxFree, CalculateMaxFreeIPs(tt.in))
513595
})
514596
}

0 commit comments

Comments
 (0)