
Commit fd53c0b

update node info processor to include unschedulable nodes
This change updates the `Process` function of the node info processor interface so that it can accept a second list of nodes. The second list contains all the nodes that are not in the first list. This will allow the mixed node info processor to properly detect unready and unschedulable nodes for use as templates.
1 parent b8f910f commit fd53c0b
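
For downstream implementations of the TemplateNodeInfoProvider interface, a minimal sketch of what the updated signature looks like to an implementer follows. The no-op provider type is hypothetical and the import paths are assumed from the repository layout; only the Process and CleanUp signatures themselves come from this commit.

package example

import (
	"time"

	appsv1 "k8s.io/api/apps/v1"
	apiv1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/context"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
)

// noopTemplateNodeInfoProvider is a hypothetical provider that ignores the new
// second node list; real providers can use it to pick unready or unschedulable
// nodes as template candidates.
type noopTemplateNodeInfoProvider struct{}

// Process now receives the ready nodes plus a second node list, in addition to
// the daemonsets, taint config, and current time.
func (p *noopTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, readyNodes []*apiv1.Node, allNodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, currentTime time.Time) (map[string]*framework.NodeInfo, errors.AutoscalerError) {
	return map[string]*framework.NodeInfo{}, nil
}

// CleanUp cleans up the provider's internal structures (nothing to do here).
func (p *noopTemplateNodeInfoProvider) CleanUp() {}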

File tree

10 files changed (+37, -32 lines)


cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go

Lines changed: 10 additions & 10 deletions
@@ -1046,7 +1046,7 @@ func runSimpleScaleUpTest(t *testing.T, config *ScaleUpTestConfig) *ScaleUpTestR
 err = context.ClusterSnapshot.SetClusterState(nodes, kube_util.ScheduledPods(pods), nil)
 assert.NoError(t, err)
 nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
-Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 assert.NoError(t, err)
 clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(options.NodeGroupDefaults), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
 clusterState.UpdateNodes(nodes, nodeInfos, time.Now())

@@ -1155,7 +1155,7 @@ func TestScaleUpUnhealthy(t *testing.T) {
 assert.NoError(t, err)
 err = context.ClusterSnapshot.SetClusterState(nodes, pods, nil)
 assert.NoError(t, err)
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
 clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
 p3 := BuildTestPod("p-new", 550, 0)

@@ -1199,7 +1199,7 @@ func TestBinpackingLimiter(t *testing.T) {
 err = context.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 assert.NoError(t, err)
 nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
-Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 assert.NoError(t, err)

 clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())

@@ -1258,7 +1258,7 @@ func TestScaleUpNoHelp(t *testing.T) {
 assert.NoError(t, err)
 err = context.ClusterSnapshot.SetClusterState(nodes, pods, nil)
 assert.NoError(t, err)
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
 clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
 p3 := BuildTestPod("p-new", 500, 0)

@@ -1413,7 +1413,7 @@ func TestComputeSimilarNodeGroups(t *testing.T) {
 assert.NoError(t, err)
 err = ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 assert.NoError(t, err)
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
 assert.NoError(t, clusterState.UpdateNodes(nodes, nodeInfos, time.Now()))

@@ -1497,7 +1497,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {
 assert.NoError(t, err)
 err = context.ClusterSnapshot.SetClusterState(nodes, podList, nil)
 assert.NoError(t, err)
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
 clusterState.UpdateNodes(nodes, nodeInfos, time.Now())

@@ -1568,7 +1568,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
 processors.NodeGroupManager = &MockAutoprovisioningNodeGroupManager{T: t, ExtraGroups: 0}

 nodes := []*apiv1.Node{}
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())

 suOrchestrator := New()
 suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})

@@ -1619,7 +1619,7 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) {
 processors.NodeGroupManager = &MockAutoprovisioningNodeGroupManager{T: t, ExtraGroups: 2}

 nodes := []*apiv1.Node{}
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())

 suOrchestrator := New()
 suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})

@@ -1673,7 +1673,7 @@ func TestScaleUpToMeetNodeGroupMinSize(t *testing.T) {
 nodes := []*apiv1.Node{n1, n2}
 err = context.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 assert.NoError(t, err)
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 processors := processorstest.NewTestProcessors(&context)
 clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
 clusterState.UpdateNodes(nodes, nodeInfos, time.Now())

@@ -1768,7 +1768,7 @@ func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) {
 processors.AsyncNodeGroupStateChecker = &asyncnodegroups.MockAsyncNodeGroupStateChecker{IsUpcomingNodeGroup: tc.isUpcomingMockMap}

 nodes := []*apiv1.Node{}
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())

 suOrchestrator := New()
 suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})

cluster-autoscaler/core/scaleup/resource/manager_test.go

Lines changed: 4 additions & 4 deletions
@@ -73,7 +73,7 @@ func TestDeltaForNode(t *testing.T) {
 group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
 err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 assert.NoError(t, err)
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*corev1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())

 rm := NewManager(processors.CustomResourcesProcessor)
 delta, err := rm.DeltaForNode(&ctx, nodeInfos[ng.Name], group)

@@ -116,7 +116,7 @@ func TestResourcesLeft(t *testing.T) {
 _, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
 err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 assert.NoError(t, err)
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*corev1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())

 rm := NewManager(processors.CustomResourcesProcessor)
 left, err := rm.ResourcesLeft(&ctx, nodeInfos, nodes)

@@ -169,7 +169,7 @@ func TestApplyLimits(t *testing.T) {
 group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
 err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 assert.NoError(t, err)
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*corev1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())

 rm := NewManager(processors.CustomResourcesProcessor)
 newCount, err := rm.ApplyLimits(&ctx, testCase.newNodeCount, testCase.resourcesLeft, nodeInfos[testCase.nodeGroupConfig.Name], group)

@@ -236,7 +236,7 @@ func TestResourceManagerWithGpuResource(t *testing.T) {
 nodes := []*corev1.Node{n1}
 err = context.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 assert.NoError(t, err)
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*corev1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())

 rm := NewManager(processors.CustomResourcesProcessor)

cluster-autoscaler/core/static_autoscaler.go

Lines changed: 1 addition & 1 deletion
@@ -353,7 +353,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
 return typedErr.AddPrefix("failed to initialize RemainingPdbTracker: ")
 }

-nodeInfosForGroups, autoscalerError := a.processors.TemplateNodeInfoProvider.Process(autoscalingContext, readyNodes, daemonsets, a.taintConfig, currentTime)
+nodeInfosForGroups, autoscalerError := a.processors.TemplateNodeInfoProvider.Process(autoscalingContext, readyNodes, allNodes, daemonsets, a.taintConfig, currentTime)
 if autoscalerError != nil {
 klog.Errorf("Failed to get node infos for groups: %v", autoscalerError)
 return autoscalerError.AddPrefix("failed to build node infos for node groups: ")

cluster-autoscaler/processors/nodeinfosprovider/annotation_node_info_provider.go

Lines changed: 2 additions & 2 deletions
@@ -47,8 +47,8 @@ func NewCustomAnnotationNodeInfoProvider(templateNodeInfoProvider TemplateNodeIn
 }

 // Process returns the nodeInfos set for this cluster.
-func (p *AnnotationNodeInfoProvider) Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, currentTime time.Time) (map[string]*framework.NodeInfo, errors.AutoscalerError) {
-nodeInfos, err := p.templateNodeInfoProvider.Process(ctx, nodes, daemonsets, taintConfig, currentTime)
+func (p *AnnotationNodeInfoProvider) Process(ctx *context.AutoscalingContext, readyNodes []*apiv1.Node, allNodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, currentTime time.Time) (map[string]*framework.NodeInfo, errors.AutoscalerError) {
+nodeInfos, err := p.templateNodeInfoProvider.Process(ctx, readyNodes, allNodes, daemonsets, taintConfig, currentTime)
 if err != nil {
 return nil, err
 }

cluster-autoscaler/processors/nodeinfosprovider/asg_tag_resource_node_info_provider.go

Lines changed: 2 additions & 2 deletions
@@ -40,8 +40,8 @@ func NewAsgTagResourceNodeInfoProvider(t *time.Duration, forceDaemonSets bool) *
 }

 // Process returns the nodeInfos set for this cluster.
-func (p *AsgTagResourceNodeInfoProvider) Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, currentTime time.Time) (map[string]*framework.NodeInfo, errors.AutoscalerError) {
-nodeInfos, err := p.mixedTemplateNodeInfoProvider.Process(ctx, nodes, daemonsets, taintConfig, currentTime)
+func (p *AsgTagResourceNodeInfoProvider) Process(ctx *context.AutoscalingContext, readyNodes []*apiv1.Node, allNodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, currentTime time.Time) (map[string]*framework.NodeInfo, errors.AutoscalerError) {
+nodeInfos, err := p.mixedTemplateNodeInfoProvider.Process(ctx, readyNodes, allNodes, daemonsets, taintConfig, currentTime)
 if err != nil {
 return nil, err
 }

cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor.go

Lines changed: 6 additions & 3 deletions
@@ -72,7 +72,7 @@ func (p *MixedTemplateNodeInfoProvider) CleanUp() {
 }

 // Process returns the nodeInfos set for this cluster
-func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, now time.Time) (map[string]*framework.NodeInfo, caerror.AutoscalerError) {
+func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, readyNodes []*apiv1.Node, allNodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, now time.Time) (map[string]*framework.NodeInfo, caerror.AutoscalerError) {
 // TODO(mwielgus): This returns map keyed by url, while most code (including scheduler) uses node.Name for a key.
 // TODO(mwielgus): Review error policy - sometimes we may continue with partial errors.
 result := make(map[string]*framework.NodeInfo)

@@ -103,7 +103,7 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext,
 return false, "", nil
 }

-for _, node := range nodes {
+for _, node := range readyNodes {
 // Broken nodes might have some stuff missing. Skipping.
 if !isNodeGoodTemplateCandidate(node, now) {
 continue

@@ -156,7 +156,10 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext,
 }

 // Last resort - unready/unschedulable nodes.
-for _, node := range nodes {
+// we want to check not only the ready nodes, but also ready unschedulable nodes.
+// this needs to combine readyNodes and allNodes due to filtering that occurs at
+// a higher level.
+for _, node := range append(readyNodes, allNodes...) {
 // Allowing broken nodes
 if isNodeGoodTemplateCandidate(node, now) {
 continue
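
As a side note on the new last-resort loop above, the combined iteration can be pictured with a tiny standalone sketch; the node names here are purely illustrative and stand in for the real *apiv1.Node values.

package main

import "fmt"

func main() {
	readyNodes := []string{"ready-1", "ready-2"}
	allNodes := []string{"unready-1", "unschedulable-1"}

	// append(readyNodes, allNodes...) produces one combined slice, so the
	// last-resort template search also visits nodes that were filtered out
	// of the ready list at a higher level.
	for _, node := range append(readyNodes, allNodes...) {
		fmt.Println(node)
	}
}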

cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go

Lines changed: 8 additions & 8 deletions
@@ -95,7 +95,7 @@ func TestGetNodeInfosForGroups(t *testing.T) {
 ListerRegistry: registry,
 },
 }
-res, err := NewMixedTemplateNodeInfoProvider(&cacheTtl, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+res, err := NewMixedTemplateNodeInfoProvider(&cacheTtl, false).Process(&ctx, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 assert.NoError(t, err)
 assert.Equal(t, 6, len(res))
 info, found := res["ng1"]

@@ -125,7 +125,7 @@ func TestGetNodeInfosForGroups(t *testing.T) {
 ListerRegistry: registry,
 },
 }
-res, err = NewMixedTemplateNodeInfoProvider(&cacheTtl, false).Process(&ctx, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+res, err = NewMixedTemplateNodeInfoProvider(&cacheTtl, false).Process(&ctx, []*apiv1.Node{}, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 assert.NoError(t, err)
 assert.Equal(t, 0, len(res))
 }

@@ -184,7 +184,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
 },
 }
 niProcessor := NewMixedTemplateNodeInfoProvider(&cacheTtl, false)
-res, err := niProcessor.Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+res, err := niProcessor.Process(&ctx, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 assert.NoError(t, err)
 // Check results
 assert.Equal(t, 4, len(res))

@@ -218,7 +218,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
 assert.Equal(t, "ng3", lastDeletedGroup)

 // Check cache with all nodes removed
-res, err = niProcessor.Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+res, err = niProcessor.Process(&ctx, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 assert.NoError(t, err)
 // Check results
 assert.Equal(t, 2, len(res))

@@ -239,7 +239,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
 // Fill cache manually
 infoNg4Node6 := framework.NewTestNodeInfo(ready6.DeepCopy())
 niProcessor.nodeInfoCache = map[string]cacheItem{"ng4": {NodeInfo: infoNg4Node6, added: now}}
-res, err = niProcessor.Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+res, err = niProcessor.Process(&ctx, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 // Check if cache was used
 assert.NoError(t, err)
 assert.Equal(t, 2, len(res))

@@ -285,7 +285,7 @@ func TestGetNodeInfosCacheExpired(t *testing.T) {
 provider.AddNode("ng1", ready1)

 assert.Equal(t, 2, len(niProcessor1.nodeInfoCache))
-_, err = niProcessor1.Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+_, err = niProcessor1.Process(&ctx, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 assert.NoError(t, err)
 assert.Equal(t, 1, len(niProcessor1.nodeInfoCache))

@@ -296,7 +296,7 @@ func TestGetNodeInfosCacheExpired(t *testing.T) {
 "ng2": {NodeInfo: tni, added: now.Add(-2 * time.Second)},
 }
 assert.Equal(t, 2, len(niProcessor2.nodeInfoCache))
-_, err = niProcessor1.Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+_, err = niProcessor1.Process(&ctx, nodes, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 assert.NoError(t, err)
 assert.Equal(t, 2, len(niProcessor2.nodeInfoCache))

@@ -319,7 +319,7 @@ func TestProcessHandlesTemplateNodeInfoErrors(t *testing.T) {
 ClusterSnapshot: testsnapshot.NewTestSnapshotOrDie(t),
 }

-res, err := NewMixedTemplateNodeInfoProvider(&cacheTtl, false).Process(&ctx, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+res, err := NewMixedTemplateNodeInfoProvider(&cacheTtl, false).Process(&ctx, []*apiv1.Node{}, []*apiv1.Node{}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)

 // Should not fail despite ng1 error - continues processing
 assert.NoError(t, err)

cluster-autoscaler/processors/nodeinfosprovider/node_info_provider_processor.go

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ import (
 // TemplateNodeInfoProvider is provides the initial nodeInfos set.
 type TemplateNodeInfoProvider interface {
 // Process returns a map of nodeInfos for node groups.
-Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, currentTime time.Time) (map[string]*framework.NodeInfo, errors.AutoscalerError)
+Process(ctx *context.AutoscalingContext, readyNodes []*apiv1.Node, allNodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, currentTime time.Time) (map[string]*framework.NodeInfo, errors.AutoscalerError)
 // CleanUp cleans up processor's internal structures.
 CleanUp()
 }
