@@ -154,13 +154,13 @@ func (g *genericScheduler) Schedule(ctx context.Context, prof *profile.Profile,
 	}
 
 	startPredicateEvalTime := time.Now()
-	filteredNodes, filteredNodesStatuses, err := g.findNodesThatFitPod(ctx, prof, state, pod)
+	feasibleNodes, filteredNodesStatuses, err := g.findNodesThatFitPod(ctx, prof, state, pod)
 	if err != nil {
 		return result, err
 	}
 	trace.Step("Computing predicates done")
 
-	if len(filteredNodes) == 0 {
+	if len(feasibleNodes) == 0 {
 		return result, &FitError{
 			Pod:         pod,
 			NumAllNodes: g.nodeInfoSnapshot.NumNodes(),
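The `FitError` built above (the hunk cuts off mid-literal) is what folds the per-node status map into the familiar "0/N nodes are available" message. A minimal sketch of that aggregation; field names beyond `Pod` and `NumAllNodes` are assumptions for illustration, not the scheduler's real type:

```go
package main

import "fmt"

// Sketch only: a FitError-like type that aggregates per-node failure
// reasons into a single error string. The Statuses field shape is an
// assumed simplification of the real NodeToStatusMap.
type fitError struct {
	PodName     string
	NumAllNodes int
	Statuses    map[string]string // node name -> failure reason (assumed shape)
}

func (f *fitError) Error() string {
	// Group identical reasons so the message stays short in big clusters.
	counts := make(map[string]int)
	for _, reason := range f.Statuses {
		counts[reason]++
	}
	msg := fmt.Sprintf("pod %q: 0/%d nodes are available:", f.PodName, f.NumAllNodes)
	for reason, n := range counts {
		msg += fmt.Sprintf(" %d node(s) %s.", n, reason)
	}
	return msg
}

func main() {
	err := &fitError{
		PodName:     "web-0",
		NumAllNodes: 3,
		Statuses: map[string]string{
			"node-a": "had insufficient memory",
			"node-b": "had insufficient memory",
			"node-c": "had a taint the pod didn't tolerate",
		},
	}
	fmt.Println(err.Error())
}
```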
@@ -173,16 +173,16 @@ func (g *genericScheduler) Schedule(ctx context.Context, prof *profile.Profile,
 
 	startPriorityEvalTime := time.Now()
 	// When only one node after predicate, just use it.
-	if len(filteredNodes) == 1 {
+	if len(feasibleNodes) == 1 {
 		metrics.DeprecatedSchedulingAlgorithmPriorityEvaluationSecondsDuration.Observe(metrics.SinceInSeconds(startPriorityEvalTime))
 		return ScheduleResult{
-			SuggestedHost:  filteredNodes[0].Name,
+			SuggestedHost:  feasibleNodes[0].Name,
 			EvaluatedNodes: 1 + len(filteredNodesStatuses),
 			FeasibleNodes:  1,
 		}, nil
 	}
 
-	priorityList, err := g.prioritizeNodes(ctx, prof, state, pod, filteredNodes)
+	priorityList, err := g.prioritizeNodes(ctx, prof, state, pod, feasibleNodes)
 	if err != nil {
 		return result, err
 	}
@@ -195,8 +195,8 @@ func (g *genericScheduler) Schedule(ctx context.Context, prof *profile.Profile,
 
 	return ScheduleResult{
 		SuggestedHost:  host,
-		EvaluatedNodes: len(filteredNodes) + len(filteredNodesStatuses),
-		FeasibleNodes:  len(filteredNodes),
+		EvaluatedNodes: len(feasibleNodes) + len(filteredNodesStatuses),
+		FeasibleNodes:  len(feasibleNodes),
 	}, err
 }
 
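The next hunk touches `findNodesThatFitPod`, whose search budget comes from `numFeasibleNodesToFind` (visible in the hunk header): in large clusters the scheduler stops filtering after finding a percentage of nodes rather than checking all of them, which is why `EvaluatedNodes` above is the sum of feasible nodes and failed statuses rather than the cluster size. A runnable sketch of that adaptive sizing; the formula and the 100-node / 5% floors are assumptions recalled from the scheduler of this era, not shown in this diff:

```go
package main

import "fmt"

// Assumed defaults; not part of this diff.
const (
	minFeasibleNodesToFind           = 100
	minFeasibleNodesPercentageToFind = 5
)

// numFeasibleNodesToFind sketches how the scheduler caps the filter search:
// small clusters are searched fully; big ones only until a shrinking
// percentage of nodes has been found feasible.
func numFeasibleNodesToFind(numAllNodes, percentageOfNodesToScore int32) int32 {
	if numAllNodes < minFeasibleNodesToFind || percentageOfNodesToScore >= 100 {
		return numAllNodes
	}
	adaptivePercentage := percentageOfNodesToScore
	if adaptivePercentage <= 0 {
		// Default: start at 50% and shrink as the cluster grows.
		adaptivePercentage = 50 - numAllNodes/125
		if adaptivePercentage < minFeasibleNodesPercentageToFind {
			adaptivePercentage = minFeasibleNodesPercentageToFind
		}
	}
	numNodes := numAllNodes * adaptivePercentage / 100
	if numNodes < minFeasibleNodesToFind {
		return minFeasibleNodesToFind
	}
	return numNodes
}

func main() {
	for _, n := range []int32{50, 500, 5000} {
		fmt.Printf("%d nodes -> search stops after %d feasible\n", n, numFeasibleNodesToFind(n, 0))
	}
}
```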
@@ -256,23 +256,37 @@ func (g *genericScheduler) numFeasibleNodesToFind(numAllNodes int32) (numNodes i
 // Filters the nodes to find the ones that fit the pod based on the framework
 // filter plugins and filter extenders.
 func (g *genericScheduler) findNodesThatFitPod(ctx context.Context, prof *profile.Profile, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, framework.NodeToStatusMap, error) {
+	filteredNodesStatuses := make(framework.NodeToStatusMap)
+
 	// Run "prefilter" plugins.
 	s := prof.RunPreFilterPlugins(ctx, state, pod)
 	if !s.IsSuccess() {
-		return nil, nil, s.AsError()
+		if !s.IsUnschedulable() {
+			return nil, nil, s.AsError()
+		}
+		// All nodes will have the same status. Some non trivial refactoring is
+		// needed to avoid this copy.
+		allNodes, err := g.nodeInfoSnapshot.NodeInfos().List()
+		if err != nil {
+			return nil, nil, err
+		}
+		for _, n := range allNodes {
+			filteredNodesStatuses[n.Node().Name] = s
+		}
+		return nil, filteredNodesStatuses, nil
+
 	}
 
-	filteredNodesStatuses := make(framework.NodeToStatusMap)
-	filtered, err := g.findNodesThatPassFilters(ctx, prof, state, pod, filteredNodesStatuses)
+	feasibleNodes, err := g.findNodesThatPassFilters(ctx, prof, state, pod, filteredNodesStatuses)
 	if err != nil {
 		return nil, nil, err
 	}
 
-	filtered, err = g.findNodesThatPassExtenders(pod, filtered, filteredNodesStatuses)
+	feasibleNodes, err = g.findNodesThatPassExtenders(pod, feasibleNodes, filteredNodesStatuses)
 	if err != nil {
 		return nil, nil, err
 	}
-	return filtered, filteredNodesStatuses, nil
+	return feasibleNodes, filteredNodesStatuses, nil
 }
 
 // findNodesThatPassFilters finds the nodes that fit the filter plugins.
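The new branch distinguishes an internal error (`!s.IsUnschedulable()`) from a legitimate "this pod fits nowhere" verdict. Because PreFilter plugins run once per pod rather than once per node, an unschedulable result applies uniformly, which is why the same status is copied to every node. A simplified, self-contained sketch of the status semantics and the fan-out; the real `framework.Status` has more codes and methods:

```go
package main

import "fmt"

// Simplified status codes mirroring the semantics the branch relies on.
type Code int

const (
	Success Code = iota
	Error         // internal problem: abort the cycle with an error
	Unschedulable // valid verdict: the pod cannot fit anywhere
)

type Status struct {
	code   Code
	reason string
}

// A nil status means success, matching the framework convention.
func (s *Status) IsSuccess() bool       { return s == nil || s.code == Success }
func (s *Status) IsUnschedulable() bool { return s != nil && s.code == Unschedulable }

// fanOut mimics the copy loop in findNodesThatFitPod: every node in the
// snapshot receives the same pod-level status.
func fanOut(nodes []string, s *Status) map[string]*Status {
	statuses := make(map[string]*Status, len(nodes))
	for _, n := range nodes {
		statuses[n] = s
	}
	return statuses
}

func main() {
	s := &Status{code: Unschedulable, reason: "pod exceeds quota"}
	for node, st := range fanOut([]string{"node-a", "node-b"}, s) {
		fmt.Printf("%s: unschedulable=%v (%s)\n", node, st.IsUnschedulable(), st.reason)
	}
}
```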
@@ -284,22 +298,22 @@ func (g *genericScheduler) findNodesThatPassFilters(ctx context.Context, prof *p
 
 	numNodesToFind := g.numFeasibleNodesToFind(int32(len(allNodes)))
 
-	// Create filtered list with enough space to avoid growing it
+	// Create feasible list with enough space to avoid growing it
 	// and allow assigning.
-	filtered := make([]*v1.Node, numNodesToFind)
+	feasibleNodes := make([]*v1.Node, numNodesToFind)
 
 	if !prof.HasFilterPlugins() {
 		length := len(allNodes)
-		for i := range filtered {
-			filtered[i] = allNodes[(g.nextStartNodeIndex+i)%length].Node()
+		for i := range feasibleNodes {
+			feasibleNodes[i] = allNodes[(g.nextStartNodeIndex+i)%length].Node()
 		}
-		g.nextStartNodeIndex = (g.nextStartNodeIndex + len(filtered)) % length
-		return filtered, nil
+		g.nextStartNodeIndex = (g.nextStartNodeIndex + len(feasibleNodes)) % length
+		return feasibleNodes, nil
 	}
 
 	errCh := parallelize.NewErrorChannel()
 	var statusesLock sync.Mutex
-	var filteredLen int32
+	var feasibleNodesLen int32
 	ctx, cancel := context.WithCancel(ctx)
 	checkNode := func(i int) {
 		// We check the nodes starting from where we left off in the previous scheduling cycle,
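The fast path above also advances `g.nextStartNodeIndex`, so each scheduling cycle starts sampling where the previous one stopped instead of always at node 0; combined with the early stop, this spreads pods across the whole cluster. A standalone sketch of that rotating-window pattern:

```go
package main

import "fmt"

// sampler sketches the rotating window: each call takes the next `count`
// items starting where the previous call stopped, wrapping with modulo
// arithmetic so every item is eventually visited.
type sampler struct {
	next int // plays the role of g.nextStartNodeIndex
}

func (s *sampler) take(items []string, count int) []string {
	out := make([]string, count)
	length := len(items)
	for i := range out {
		out[i] = items[(s.next+i)%length]
	}
	// Remember where we stopped for the next cycle.
	s.next = (s.next + count) % length
	return out
}

func main() {
	nodes := []string{"n1", "n2", "n3", "n4", "n5"}
	s := &sampler{}
	fmt.Println(s.take(nodes, 3)) // [n1 n2 n3]
	fmt.Println(s.take(nodes, 3)) // [n4 n5 n1] -- wrapped around
}
```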
@@ -311,12 +325,12 @@ func (g *genericScheduler) findNodesThatPassFilters(ctx context.Context, prof *p
 			return
 		}
 		if fits {
-			length := atomic.AddInt32(&filteredLen, 1)
+			length := atomic.AddInt32(&feasibleNodesLen, 1)
 			if length > numNodesToFind {
 				cancel()
-				atomic.AddInt32(&filteredLen, -1)
+				atomic.AddInt32(&feasibleNodesLen, -1)
 			} else {
-				filtered[length-1] = nodeInfo.Node()
+				feasibleNodes[length-1] = nodeInfo.Node()
 			}
 		} else {
 			statusesLock.Lock()
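The renamed `feasibleNodesLen` counter drives the early stop: a goroutine whose atomic increment overshoots `numNodesToFind` cancels the shared context and rolls its increment back, while every successful claimant owns slot `length-1` exclusively, so no lock is needed for the result slice. A self-contained sketch of the same pattern; the real code bounds worker count via `parallelize.Until`, whereas this sketch spawns one goroutine per item for brevity:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
)

// findFirstN collects up to `target` items satisfying pred, stopping the
// parallel search early once the target is reached.
func findFirstN(items []int, target int32, pred func(int) bool) []int {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	results := make([]int, target)
	var found int32
	var wg sync.WaitGroup

	for _, item := range items {
		wg.Add(1)
		go func(item int) {
			defer wg.Done()
			select {
			case <-ctx.Done(): // search already satisfied
				return
			default:
			}
			if pred(item) {
				n := atomic.AddInt32(&found, 1)
				if n > target {
					cancel()
					atomic.AddInt32(&found, -1) // roll back the overshoot
				} else {
					results[n-1] = item // slot n-1 is claimed exclusively
				}
			}
		}(item)
	}
	wg.Wait()
	return results[:atomic.LoadInt32(&found)]
}

func main() {
	nums := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	// e.g. [2 4 6]; order depends on goroutine scheduling.
	fmt.Println(findFirstN(nums, 3, func(n int) bool { return n%2 == 0 }))
}
```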
@@ -339,26 +353,26 @@ func (g *genericScheduler) findNodesThatPassFilters(ctx context.Context, prof *p
 	// Stops searching for more nodes once the configured number of feasible nodes
 	// are found.
 	parallelize.Until(ctx, len(allNodes), checkNode)
-	processedNodes := int(filteredLen) + len(statuses)
+	processedNodes := int(feasibleNodesLen) + len(statuses)
 	g.nextStartNodeIndex = (g.nextStartNodeIndex + processedNodes) % len(allNodes)
 
-	filtered = filtered[:filteredLen]
+	feasibleNodes = feasibleNodes[:feasibleNodesLen]
 	if err := errCh.ReceiveError(); err != nil {
 		statusCode = framework.Error
 		return nil, err
 	}
-	return filtered, nil
+	return feasibleNodes, nil
 }
 
-func (g *genericScheduler) findNodesThatPassExtenders(pod *v1.Pod, filtered []*v1.Node, statuses framework.NodeToStatusMap) ([]*v1.Node, error) {
+func (g *genericScheduler) findNodesThatPassExtenders(pod *v1.Pod, feasibleNodes []*v1.Node, statuses framework.NodeToStatusMap) ([]*v1.Node, error) {
 	for _, extender := range g.extenders {
-		if len(filtered) == 0 {
+		if len(feasibleNodes) == 0 {
 			break
 		}
 		if !extender.IsInterested(pod) {
 			continue
 		}
-		filteredList, failedMap, err := extender.Filter(pod, filtered)
+		feasibleList, failedMap, err := extender.Filter(pod, feasibleNodes)
 		if err != nil {
 			if extender.IsIgnorable() {
 				klog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set",
@@ -375,9 +389,9 @@ func (g *genericScheduler) findNodesThatPassExtenders(pod *v1.Pod, filtered []*v
 				statuses[failedNodeName].AppendReason(failedMsg)
 			}
 		}
-		filtered = filteredList
+		feasibleNodes = feasibleList
 	}
-	return filtered, nil
+	return feasibleNodes, nil
 }
 
 // addNominatedPods adds pods with equal or greater priority which are nominated
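Finally, `findNodesThatPassExtenders` chains scheduler extenders: each one filters the survivors of the previous, an empty list short-circuits the loop, and an extender marked ignorable is skipped on error rather than failing the whole cycle. A self-contained sketch of that contract, using an illustrative extender type rather than the real interface:

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// extender is an illustrative stand-in for the scheduler's extender
// interface: a name, an ignorable flag, and a filter step.
type extender struct {
	name      string
	ignorable bool
	filter    func(nodes []string) ([]string, error)
}

// runExtenders mimics the loop above: each stage consumes the previous
// stage's survivors; ignorable failures are logged and skipped.
func runExtenders(extenders []extender, nodes []string) ([]string, error) {
	for _, e := range extenders {
		if len(nodes) == 0 {
			break // nothing left to filter
		}
		filtered, err := e.filter(nodes)
		if err != nil {
			if e.ignorable {
				log.Printf("skipping extender %s: %v", e.name, err)
				continue
			}
			return nil, err
		}
		nodes = filtered
	}
	return nodes, nil
}

func main() {
	chain := []extender{
		{name: "gpu-checker", filter: func(ns []string) ([]string, error) { return ns[:1], nil }},
		{name: "flaky", ignorable: true, filter: func(ns []string) ([]string, error) { return nil, errors.New("timeout") }},
	}
	out, err := runExtenders(chain, []string{"n1", "n2", "n3"})
	fmt.Println(out, err) // [n1] <nil> -- the flaky extender was skipped
}
```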