Commit a0ebb28

WIP update to include unschedulable nodes
1 parent b8f910f commit a0ebb28

26 files changed: +156 −99 lines
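Every hunk shown below is one of two mechanical edits that thread a new input through the tests. First, kube_util.NewListerRegistry gains an extra lister argument in the third position, so existing call sites insert a nil there. Second, the template NodeInfoProvider's Process gains a []*apiv1.Node argument ahead of the daemon-set slice, so existing call sites insert an empty []*apiv1.Node{}. Per the WIP commit message, the new inputs presumably carry unschedulable nodes; hedged sketches of both patterns follow the first affected file and orchestrator_test.go.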

cluster-autoscaler/core/scaledown/actuation/actuator_test.go

Lines changed: 2 additions & 2 deletions

@@ -1228,7 +1228,7 @@ func runStartDeletionTest(t *testing.T, tc startDeletionTestCase, force bool) {
         t.Fatalf("Couldn't create daemonset lister")
     }
 
-    registry := kube_util.NewListerRegistry(nil, nil, podLister, pdbLister, dsLister, nil, nil, nil, nil)
+    registry := kube_util.NewListerRegistry(nil, nil, nil, podLister, pdbLister, dsLister, nil, nil, nil, nil)
     ctx, err := NewScaleTestAutoscalingContext(opts, fakeClient, registry, provider, nil, nil)
     if err != nil {
         t.Fatalf("Couldn't set up autoscaling context: %v", err)
@@ -1541,7 +1541,7 @@ func TestStartDeletionInBatchBasic(t *testing.T) {
 
     podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
     pdbLister := kube_util.NewTestPodDisruptionBudgetLister([]*policyv1.PodDisruptionBudget{})
-    registry := kube_util.NewListerRegistry(nil, nil, podLister, pdbLister, nil, nil, nil, nil, nil)
+    registry := kube_util.NewListerRegistry(nil, nil, nil, podLister, pdbLister, nil, nil, nil, nil, nil)
     ctx, err := NewScaleTestAutoscalingContext(opts, fakeClient, registry, provider, nil, nil)
     if err != nil {
         t.Fatalf("Couldn't set up autoscaling context: %v", err)

cluster-autoscaler/core/scaledown/actuation/drain_test.go

Lines changed: 1 addition & 1 deletion

@@ -139,7 +139,7 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
     provider := testprovider.NewTestCloudProviderBuilder().Build()
     provider.AddNodeGroup("ng1", 1, 10, 1)
     provider.AddNode("ng1", n1)
-    registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil)
+    registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
 
     context, err := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil, nil)
     assert.NoError(t, err)

cluster-autoscaler/core/scaledown/actuation/group_deletion_scheduler_test.go

Lines changed: 1 addition & 1 deletion

@@ -146,7 +146,7 @@ func TestScheduleDeletion(t *testing.T) {
     if err != nil {
         t.Fatalf("Couldn't create daemonset lister")
     }
-    registry := kube_util.NewListerRegistry(nil, nil, podLister, pdbLister, dsLister, nil, nil, nil, nil)
+    registry := kube_util.NewListerRegistry(nil, nil, nil, podLister, pdbLister, dsLister, nil, nil, nil, nil)
     ctx, err := NewScaleTestAutoscalingContext(opts, fakeClient, registry, provider, nil, nil)
     if err != nil {
         t.Fatalf("Couldn't set up autoscaling context: %v", err)

cluster-autoscaler/core/scaledown/actuation/softtaint_test.go

Lines changed: 2 additions & 2 deletions

@@ -67,7 +67,7 @@ func TestSoftTaintUpdate(t *testing.T) {
         MaxBulkSoftTaintCount: 1,
         MaxBulkSoftTaintTime:  3 * time.Second,
     }
-    registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil)
+    registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
 
     actx, err := test.NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil, nil)
     assert.NoError(t, err)
@@ -151,7 +151,7 @@ func TestSoftTaintTimeLimit(t *testing.T) {
         MaxBulkSoftTaintCount: 10,
         MaxBulkSoftTaintTime:  maxSoftTaintDuration,
     }
-    registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil)
+    registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
 
     actx, err := test.NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil, nil)
     assert.NoError(t, err)

cluster-autoscaler/core/scaledown/planner/controller_test.go

Lines changed: 1 addition & 1 deletion

@@ -125,7 +125,7 @@ func TestReplicasCounter(t *testing.T) {
     jobLister, _ := kube_util.NewTestJobLister([]*batchv1.Job{job, unsetJob, jobWithSucceededReplicas})
     rsLister, _ := kube_util.NewTestReplicaSetLister([]*appsv1.ReplicaSet{rs, unsetRs})
     ssLister, _ := kube_util.NewTestStatefulSetLister([]*appsv1.StatefulSet{sS})
-    listers := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, rcLister, jobLister, rsLister, ssLister)
+    listers := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, rcLister, jobLister, rsLister, ssLister)
     testCases := []struct {
         name     string
         ownerRef metav1.OwnerReference

cluster-autoscaler/core/scaledown/planner/planner_test.go

Lines changed: 1 addition & 1 deletion

@@ -484,7 +484,7 @@ func TestUpdateClusterState(t *testing.T) {
     }
     rsLister, err := kube_util.NewTestReplicaSetLister(tc.replicasSets)
     assert.NoError(t, err)
-    registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, rsLister, nil)
+    registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, rsLister, nil)
     provider := testprovider.NewTestCloudProviderBuilder().Build()
     provider.AddNodeGroup("ng1", 0, 0, 0)
     for _, node := range tc.nodes {

cluster-autoscaler/core/scaledown/unneeded/nodes_test.go

Lines changed: 1 addition & 1 deletion

@@ -197,7 +197,7 @@ func TestRemovableAt(t *testing.T) {
 
     rsLister, err := kube_util.NewTestReplicaSetLister(nil)
     assert.NoError(t, err)
-    registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, rsLister, nil)
+    registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, rsLister, nil)
     ctx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{ScaleDownSimulationTimeout: 5 * time.Minute}, &fake.Clientset{}, registry, provider, nil, nil)
     assert.NoError(t, err)

cluster-autoscaler/core/scaleup/orchestrator/async_initializer_test.go

Lines changed: 1 addition & 1 deletion

@@ -92,7 +92,7 @@ func TestNodePoolAsyncInitialization(t *testing.T) {
             },
         },
     }
-    listers := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil)
+    listers := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
     upcomingNodeGroup := provider.BuildNodeGroup("upcoming-ng", 0, 100, 0, false, true, "T1", nil)
     options := config.AutoscalingOptions{AsyncNodeGroupsEnabled: true}
     context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)

cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go

Lines changed: 20 additions & 20 deletions

@@ -977,7 +977,7 @@ func runSimpleScaleUpTest(t *testing.T, config *ScaleUpTestConfig) *ScaleUpTestR
         extraPods[i] = buildTestPod(p)
     }
     podLister := kube_util.NewTestPodLister(pods)
-    listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
+    listers := kube_util.NewListerRegistry(nil, nil, nil, podLister, nil, nil, nil, nil, nil, nil)
 
     // setup node groups
     var provider *testprovider.TestCloudProvider
@@ -1046,7 +1046,7 @@ func runSimpleScaleUpTest(t *testing.T, config *ScaleUpTestConfig) *ScaleUpTestR
     err = context.ClusterSnapshot.SetClusterState(nodes, kube_util.ScheduledPods(pods), nil)
     assert.NoError(t, err)
     nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
-        Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+        Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     assert.NoError(t, err)
     clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(options.NodeGroupDefaults), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
     clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
@@ -1135,7 +1135,7 @@ func TestScaleUpUnhealthy(t *testing.T) {
     pods := []*apiv1.Pod{p1, p2}
 
     podLister := kube_util.NewTestPodLister(pods)
-    listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
+    listers := kube_util.NewListerRegistry(nil, nil, nil, podLister, nil, nil, nil, nil, nil, nil)
 
     provider := testprovider.NewTestCloudProviderBuilder().WithOnScaleUp(func(nodeGroup string, increase int) error {
         t.Fatalf("No expansion is expected, but increased %s by %d", nodeGroup, increase)
@@ -1155,7 +1155,7 @@ func TestScaleUpUnhealthy(t *testing.T) {
     assert.NoError(t, err)
     err = context.ClusterSnapshot.SetClusterState(nodes, pods, nil)
     assert.NoError(t, err)
-    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
     clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
     p3 := BuildTestPod("p-new", 550, 0)
@@ -1181,7 +1181,7 @@ func TestBinpackingLimiter(t *testing.T) {
     nodes := []*apiv1.Node{n1, n2}
 
     podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
-    listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
+    listers := kube_util.NewListerRegistry(nil, nil, nil, podLister, nil, nil, nil, nil, nil, nil)
 
     provider := testprovider.NewTestCloudProviderBuilder().WithOnScaleUp(func(nodeGroup string, increase int) error {
         return nil
@@ -1199,7 +1199,7 @@ func TestBinpackingLimiter(t *testing.T) {
     err = context.ClusterSnapshot.SetClusterState(nodes, nil, nil)
     assert.NoError(t, err)
     nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
-        Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+        Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     assert.NoError(t, err)
 
     clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1239,7 +1239,7 @@ func TestScaleUpNoHelp(t *testing.T) {
     pods := []*apiv1.Pod{p1}
 
     podLister := kube_util.NewTestPodLister(pods)
-    listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
+    listers := kube_util.NewListerRegistry(nil, nil, nil, podLister, nil, nil, nil, nil, nil, nil)
 
     provider := testprovider.NewTestCloudProviderBuilder().WithOnScaleUp(func(nodeGroup string, increase int) error {
         t.Fatalf("No expansion is expected")
@@ -1258,7 +1258,7 @@ func TestScaleUpNoHelp(t *testing.T) {
     assert.NoError(t, err)
     err = context.ClusterSnapshot.SetClusterState(nodes, pods, nil)
     assert.NoError(t, err)
-    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
     clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
     p3 := BuildTestPod("p-new", 500, 0)
@@ -1408,12 +1408,12 @@ func TestComputeSimilarNodeGroups(t *testing.T) {
         nodeGroupSetProcessor.similarNodeGroups = append(nodeGroupSetProcessor.similarNodeGroups, provider.GetNodeGroup(ng))
     }
 
-    listers := kube_util.NewListerRegistry(nil, nil, kube_util.NewTestPodLister(nil), nil, nil, nil, nil, nil, nil)
+    listers := kube_util.NewListerRegistry(nil, nil, nil, kube_util.NewTestPodLister(nil), nil, nil, nil, nil, nil, nil)
     ctx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{BalanceSimilarNodeGroups: tc.balancingEnabled}, &fake.Clientset{}, listers, provider, nil, nil)
     assert.NoError(t, err)
     err = ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
     assert.NoError(t, err)
-    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
     assert.NoError(t, clusterState.UpdateNodes(nodes, nodeInfos, time.Now()))
 
@@ -1485,7 +1485,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {
     }
 
     podLister := kube_util.NewTestPodLister(podList)
-    listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
+    listers := kube_util.NewListerRegistry(nil, nil, nil, podLister, nil, nil, nil, nil, nil, nil)
 
     options := config.AutoscalingOptions{
         EstimatorName: estimator.BinpackingEstimatorName,
@@ -1497,7 +1497,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {
     assert.NoError(t, err)
     err = context.ClusterSnapshot.SetClusterState(nodes, podList, nil)
     assert.NoError(t, err)
-    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
     clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
     clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
 
@@ -1557,7 +1557,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
         MaxMemoryTotal: 5000 * 64 * 20,
     }
     podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
-    listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
+    listers := kube_util.NewListerRegistry(nil, nil, nil, podLister, nil, nil, nil, nil, nil, nil)
     context, err := NewScaleTestAutoscalingContext(options, fakeClient, listers, provider, nil, nil)
     assert.NoError(t, err)
 
@@ -1568,7 +1568,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
     processors.NodeGroupManager = &MockAutoprovisioningNodeGroupManager{T: t, ExtraGroups: 0}
 
     nodes := []*apiv1.Node{}
-    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
+    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
     suOrchestrator := New()
     suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})
@@ -1608,7 +1608,7 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) {
         MaxMemoryTotal: 5000 * 64 * 20,
     }
     podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
-    listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
+    listers := kube_util.NewListerRegistry(nil, nil, nil, podLister, nil, nil, nil, nil, nil, nil)
     context, err := NewScaleTestAutoscalingContext(options, fakeClient, listers, provider, nil, nil)
     assert.NoError(t, err)
 
@@ -1619,7 +1619,7 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) {
     processors.NodeGroupManager = &MockAutoprovisioningNodeGroupManager{T: t, ExtraGroups: 2}
 
     nodes := []*apiv1.Node{}
-    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
+    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
     suOrchestrator := New()
     suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})
@@ -1638,7 +1638,7 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) {
 
 func TestScaleUpToMeetNodeGroupMinSize(t *testing.T) {
     podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
-    listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
+    listers := kube_util.NewListerRegistry(nil, nil, nil, podLister, nil, nil, nil, nil, nil, nil)
     provider := testprovider.NewTestCloudProviderBuilder().WithOnScaleUp(func(nodeGroup string, increase int) error {
         assert.Equal(t, "ng1", nodeGroup)
         assert.Equal(t, 1, increase)
@@ -1673,7 +1673,7 @@ func TestScaleUpToMeetNodeGroupMinSize(t *testing.T) {
     nodes := []*apiv1.Node{n1, n2}
     err = context.ClusterSnapshot.SetClusterState(nodes, nil, nil)
     assert.NoError(t, err)
-    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
+    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
     processors := processorstest.NewTestProcessors(&context)
     clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
     clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
@@ -1756,7 +1756,7 @@ func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) {
         AsyncNodeGroupsEnabled: true,
     }
     podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
-    listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
+    listers := kube_util.NewListerRegistry(nil, nil, nil, podLister, nil, nil, nil, nil, nil, nil)
     context, err := NewScaleTestAutoscalingContext(options, fakeClient, listers, provider, nil, nil)
     assert.NoError(t, err)
 
@@ -1768,7 +1768,7 @@ func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) {
     processors.AsyncNodeGroupStateChecker = &asyncnodegroups.MockAsyncNodeGroupStateChecker{IsUpcomingNodeGroup: tc.isUpcomingMockMap}
 
     nodes := []*apiv1.Node{}
-    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
+    nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
     suOrchestrator := New()
     suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})
