@@ -151,13 +151,13 @@ func (g *genericScheduler) Schedule(ctx context.Context, prof *profile.Profile,
 	}
 
 	startPredicateEvalTime := time.Now()
-	filteredNodes, filteredNodesStatuses, err := g.findNodesThatFitPod(ctx, prof, state, pod)
+	feasibleNodes, filteredNodesStatuses, err := g.findNodesThatFitPod(ctx, prof, state, pod)
 	if err != nil {
 		return result, err
 	}
 	trace.Step("Computing predicates done")
 
-	if len(filteredNodes) == 0 {
+	if len(feasibleNodes) == 0 {
 		return result, &FitError{
 			Pod:         pod,
 			NumAllNodes: g.nodeInfoSnapshot.NumNodes(),
@@ -170,16 +170,16 @@ func (g *genericScheduler) Schedule(ctx context.Context, prof *profile.Profile,
 
 	startPriorityEvalTime := time.Now()
 	// When only one node after predicate, just use it.
-	if len(filteredNodes) == 1 {
+	if len(feasibleNodes) == 1 {
 		metrics.DeprecatedSchedulingAlgorithmPriorityEvaluationSecondsDuration.Observe(metrics.SinceInSeconds(startPriorityEvalTime))
 		return ScheduleResult{
-			SuggestedHost:  filteredNodes[0].Name,
+			SuggestedHost:  feasibleNodes[0].Name,
 			EvaluatedNodes: 1 + len(filteredNodesStatuses),
 			FeasibleNodes:  1,
 		}, nil
 	}
 
-	priorityList, err := g.prioritizeNodes(ctx, prof, state, pod, filteredNodes)
+	priorityList, err := g.prioritizeNodes(ctx, prof, state, pod, feasibleNodes)
 	if err != nil {
 		return result, err
 	}
@@ -192,8 +192,8 @@ func (g *genericScheduler) Schedule(ctx context.Context, prof *profile.Profile,
 
 	return ScheduleResult{
 		SuggestedHost:  host,
-		EvaluatedNodes: len(filteredNodes) + len(filteredNodesStatuses),
-		FeasibleNodes:  len(filteredNodes),
+		EvaluatedNodes: len(feasibleNodes) + len(filteredNodesStatuses),
+		FeasibleNodes:  len(feasibleNodes),
 	}, err
 }
 
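Reviewer note on the accounting above: every node the cycle actually examined ends up in exactly one of two buckets, the feasible list or the per-node status map, so their sum is the evaluated-node count. A toy illustration of that invariant with invented names, not code from this PR:

package main

import "fmt"

func main() {
	// Two nodes passed all filters; one was rejected with a recorded reason.
	feasibleNodes := []string{"node-a", "node-b"}
	filteredNodesStatuses := map[string]string{
		"node-c": "Unschedulable: insufficient cpu",
	}
	// Mirrors EvaluatedNodes: len(feasibleNodes) + len(filteredNodesStatuses).
	fmt.Println(len(feasibleNodes) + len(filteredNodesStatuses)) // 3
}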
@@ -253,23 +253,37 @@ func (g *genericScheduler) numFeasibleNodesToFind(numAllNodes int32) (numNodes i
 // Filters the nodes to find the ones that fit the pod based on the framework
 // filter plugins and filter extenders.
 func (g *genericScheduler) findNodesThatFitPod(ctx context.Context, prof *profile.Profile, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, framework.NodeToStatusMap, error) {
+	filteredNodesStatuses := make(framework.NodeToStatusMap)
+
 	// Run "prefilter" plugins.
 	s := prof.RunPreFilterPlugins(ctx, state, pod)
 	if !s.IsSuccess() {
-		return nil, nil, s.AsError()
+		if !s.IsUnschedulable() {
+			return nil, nil, s.AsError()
+		}
+		// All nodes will have the same status. Some non trivial refactoring is
+		// needed to avoid this copy.
+		allNodes, err := g.nodeInfoSnapshot.NodeInfos().List()
+		if err != nil {
+			return nil, nil, err
+		}
+		for _, n := range allNodes {
+			filteredNodesStatuses[n.Node().Name] = s
+		}
+		return nil, filteredNodesStatuses, nil
+
 	}
 
-	filteredNodesStatuses := make(framework.NodeToStatusMap)
-	filtered, err := g.findNodesThatPassFilters(ctx, prof, state, pod, filteredNodesStatuses)
+	feasibleNodes, err := g.findNodesThatPassFilters(ctx, prof, state, pod, filteredNodesStatuses)
 	if err != nil {
 		return nil, nil, err
 	}
 
-	filtered, err = g.findNodesThatPassExtenders(pod, filtered, filteredNodesStatuses)
+	feasibleNodes, err = g.findNodesThatPassExtenders(pod, feasibleNodes, filteredNodesStatuses)
 	if err != nil {
 		return nil, nil, err
 	}
-	return filtered, filteredNodesStatuses, nil
+	return feasibleNodes, filteredNodesStatuses, nil
 }
 
 // findNodesThatPassFilters finds the nodes that fit the filter plugins.
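The behavioral change in this hunk: a PreFilter result of Unschedulable is no longer surfaced as a scheduler error; instead the same status is copied to every node, so the pod fails with an ordinary FitError. A minimal sketch, not part of this PR, of a hypothetical plugin that would take the new path, assuming the v1alpha1 plugin interfaces this file is built against:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// denyAll is a hypothetical PreFilter plugin used only for illustration.
type denyAll struct{}

func (d *denyAll) Name() string { return "DenyAll" }

// Returning Unschedulable (rather than an error status) now means
// findNodesThatFitPod fans this status out to every node and the pod
// gets a FitError instead of aborting the scheduling cycle.
func (d *denyAll) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod) *framework.Status {
	return framework.NewStatus(framework.Unschedulable, "denied by example plugin")
}

func (d *denyAll) PreFilterExtensions() framework.PreFilterExtensions { return nil }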
@@ -281,22 +295,22 @@ func (g *genericScheduler) findNodesThatPassFilters(ctx context.Context, prof *p
 
 	numNodesToFind := g.numFeasibleNodesToFind(int32(len(allNodes)))
 
-	// Create filtered list with enough space to avoid growing it
+	// Create feasible list with enough space to avoid growing it
 	// and allow assigning.
-	filtered := make([]*v1.Node, numNodesToFind)
+	feasibleNodes := make([]*v1.Node, numNodesToFind)
 
 	if !prof.HasFilterPlugins() {
 		length := len(allNodes)
-		for i := range filtered {
-			filtered[i] = allNodes[(g.nextStartNodeIndex+i)%length].Node()
+		for i := range feasibleNodes {
+			feasibleNodes[i] = allNodes[(g.nextStartNodeIndex+i)%length].Node()
 		}
-		g.nextStartNodeIndex = (g.nextStartNodeIndex + len(filtered)) % length
-		return filtered, nil
+		g.nextStartNodeIndex = (g.nextStartNodeIndex + len(feasibleNodes)) % length
+		return feasibleNodes, nil
 	}
 
 	errCh := parallelize.NewErrorChannel()
 	var statusesLock sync.Mutex
-	var filteredLen int32
+	var feasibleNodesLen int32
 	ctx, cancel := context.WithCancel(ctx)
 	checkNode := func(i int) {
 		// We check the nodes starting from where we left off in the previous scheduling cycle,
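The no-filter-plugins fast path above keeps the round-robin scan: each cycle starts where the previous one stopped, so successive pods do not always land on the first nodes in the list. A standalone sketch of the same modulo arithmetic, with invented values:

package main

import "fmt"

func main() {
	nodes := []string{"n1", "n2", "n3", "n4", "n5"}
	nextStartNodeIndex := 3 // where the previous cycle left off
	numNodesToFind := 3

	window := make([]string, numNodesToFind)
	for i := range window {
		window[i] = nodes[(nextStartNodeIndex+i)%len(nodes)]
	}
	nextStartNodeIndex = (nextStartNodeIndex + len(window)) % len(nodes)

	fmt.Println(window, nextStartNodeIndex) // [n4 n5 n1] 1
}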
@@ -308,12 +322,12 @@ func (g *genericScheduler) findNodesThatPassFilters(ctx context.Context, prof *p
 			return
 		}
 		if fits {
-			length := atomic.AddInt32(&filteredLen, 1)
+			length := atomic.AddInt32(&feasibleNodesLen, 1)
 			if length > numNodesToFind {
 				cancel()
-				atomic.AddInt32(&filteredLen, -1)
+				atomic.AddInt32(&feasibleNodesLen, -1)
 			} else {
-				filtered[length-1] = nodeInfo.Node()
+				feasibleNodes[length-1] = nodeInfo.Node()
 			}
 		} else {
 			statusesLock.Lock()
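The renamed counter keeps the same lock-free pattern: a worker atomically claims a slot, and a claim past numNodesToFind is handed back while the search is cancelled. A standalone sketch of that claim-then-rollback idiom, with illustrative names:

package main

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	const limit = 3
	results := make([]int, limit)
	var n int32
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if ctx.Err() != nil {
				return // search already cancelled, skip the work
			}
			length := atomic.AddInt32(&n, 1) // claim slot length-1
			if length > limit {
				cancel()                // enough results found
				atomic.AddInt32(&n, -1) // hand the over-claimed slot back
			} else {
				results[length-1] = i * i
			}
		}(i)
	}
	wg.Wait()
	fmt.Println(results[:n]) // at most limit entries, order nondeterministic
}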
@@ -336,26 +350,26 @@ func (g *genericScheduler) findNodesThatPassFilters(ctx context.Context, prof *p
 	// Stops searching for more nodes once the configured number of feasible nodes
 	// are found.
 	parallelize.Until(ctx, len(allNodes), checkNode)
-	processedNodes := int(filteredLen) + len(statuses)
+	processedNodes := int(feasibleNodesLen) + len(statuses)
 	g.nextStartNodeIndex = (g.nextStartNodeIndex + processedNodes) % len(allNodes)
 
-	filtered = filtered[:filteredLen]
+	feasibleNodes = feasibleNodes[:feasibleNodesLen]
 	if err := errCh.ReceiveError(); err != nil {
 		statusCode = framework.Error
 		return nil, err
 	}
-	return filtered, nil
+	return feasibleNodes, nil
 }
 
-func (g *genericScheduler) findNodesThatPassExtenders(pod *v1.Pod, filtered []*v1.Node, statuses framework.NodeToStatusMap) ([]*v1.Node, error) {
+func (g *genericScheduler) findNodesThatPassExtenders(pod *v1.Pod, feasibleNodes []*v1.Node, statuses framework.NodeToStatusMap) ([]*v1.Node, error) {
 	for _, extender := range g.extenders {
-		if len(filtered) == 0 {
+		if len(feasibleNodes) == 0 {
 			break
 		}
 		if !extender.IsInterested(pod) {
 			continue
 		}
-		filteredList, failedMap, err := extender.Filter(pod, filtered)
+		feasibleList, failedMap, err := extender.Filter(pod, feasibleNodes)
 		if err != nil {
 			if extender.IsIgnorable() {
 				klog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set",
@@ -372,9 +386,9 @@ func (g *genericScheduler) findNodesThatPassExtenders(pod *v1.Pod, filtered []*v
 				statuses[failedNodeName].AppendReason(failedMsg)
 			}
 		}
-		filtered = filteredList
+		feasibleNodes = feasibleList
 	}
-	return filtered, nil
+	return feasibleNodes, nil
 }
 
 // addNominatedPods adds pods with equal or greater priority which are nominated
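For context on the extender loop renamed here: each extender only sees the survivors of the previous one, and an empty list short-circuits the chain. A reduced sketch of that narrowing, with function-valued stand-ins for the extenders:

package main

import "fmt"

type filterFunc func([]string) []string

func main() {
	feasibleNodes := []string{"n1", "n2", "n3", "n4"}
	extenders := []filterFunc{
		func(ns []string) []string { return ns[:3] }, // hypothetical extender 1
		func(ns []string) []string { return ns[1:] }, // hypothetical extender 2
	}
	for _, filter := range extenders {
		if len(feasibleNodes) == 0 {
			break // nothing left for later extenders to narrow
		}
		feasibleNodes = filter(feasibleNodes)
	}
	fmt.Println(feasibleNodes) // [n2 n3]
}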