@@ -66,15 +66,10 @@ const (
minFeasibleNodesPercentageToFind = 5
)

- // FailedPredicateMap declares a map[string][]algorithm.PredicateFailureReason type.
- type FailedPredicateMap map[string][]predicates.PredicateFailureReason
-
// FitError describes a fit error of a pod.
type FitError struct {
- Pod *v1.Pod
- NumAllNodes int
- // TODO(Huang-Wei): remove 'FailedPredicates'
- FailedPredicates FailedPredicateMap
+ Pod *v1.Pod
+ NumAllNodes int
FilteredNodesStatuses framework.NodeToStatusMap
}

@@ -89,20 +84,14 @@ const (
// Error returns detailed information of why the pod failed to fit on each node
func (f *FitError) Error() string {
reasons := make(map[string]int)
- for _, predicates := range f.FailedPredicates {
- for _, pred := range predicates {
- reasons[pred.GetReason()]++
- }
- }
-
for _, status := range f.FilteredNodesStatuses {
for _, reason := range status.Reasons() {
reasons[reason]++
}
}

sortReasonsHistogram := func() []string {
- reasonStrings := []string{}
+ var reasonStrings []string
for k, v := range reasons {
reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k))
}
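
With FailedPredicateMap gone, Error() builds its failure histogram from the framework statuses alone. The following is a minimal, self-contained sketch of that aggregation; the Status and NodeToStatusMap types here are simplified stand-ins for the real framework types, not the scheduler's own code:

package main

import (
    "fmt"
    "sort"
    "strings"
)

// Status is a stand-in for framework.Status; only Reasons() is modeled here.
type Status struct{ reasons []string }

func (s *Status) Reasons() []string { return s.reasons }

// NodeToStatusMap mirrors framework.NodeToStatusMap: node name -> filter status.
type NodeToStatusMap map[string]*Status

// histogram counts how many nodes reported each failure reason, mirroring the
// loop that FitError.Error() now runs over FilteredNodesStatuses.
func histogram(statuses NodeToStatusMap) string {
    reasons := make(map[string]int)
    for _, status := range statuses {
        for _, reason := range status.Reasons() {
            reasons[reason]++
        }
    }
    var reasonStrings []string
    for k, v := range reasons {
        reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k))
    }
    sort.Strings(reasonStrings)
    return strings.Join(reasonStrings, ", ")
}

func main() {
    statuses := NodeToStatusMap{
        "node-1": {reasons: []string{"Insufficient cpu"}},
        "node-2": {reasons: []string{"Insufficient cpu", "node(s) had taints"}},
    }
    fmt.Println("0/3 nodes are available:", histogram(statuses)+".")
}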
@@ -210,7 +199,7 @@ func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleS
trace.Step("Running prefilter plugins done")

startPredicateEvalTime := time.Now()
- filteredNodes, failedPredicateMap, filteredNodesStatuses, err := g.findNodesThatFit(ctx, state, pod)
+ filteredNodes, filteredNodesStatuses, err := g.findNodesThatFit(ctx, state, pod)
if err != nil {
return result, err
}
@@ -226,7 +215,6 @@ func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleS
return result, &FitError{
Pod: pod,
NumAllNodes: len(g.nodeInfoSnapshot.NodeInfoList),
- FailedPredicates: failedPredicateMap,
FilteredNodesStatuses: filteredNodesStatuses,
}
}
@@ -243,7 +231,7 @@ func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleS
metrics.DeprecatedSchedulingAlgorithmPriorityEvaluationDuration.Observe(metrics.SinceInMicroseconds(startPriorityEvalTime))
return ScheduleResult{
SuggestedHost: filteredNodes[0].Name,
- EvaluatedNodes: 1 + len(failedPredicateMap) + len(filteredNodesStatuses),
+ EvaluatedNodes: 1 + len(filteredNodesStatuses),
FeasibleNodes: 1,
}, nil
}
@@ -264,7 +252,7 @@ func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleS

return ScheduleResult{
SuggestedHost: host,
- EvaluatedNodes: len(filteredNodes) + len(failedPredicateMap) + len(filteredNodesStatuses),
+ EvaluatedNodes: len(filteredNodes) + len(filteredNodesStatuses),
FeasibleNodes: len(filteredNodes),
}, err
}
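
Note that the EvaluatedNodes accounting stays consistent after dropping the len(failedPredicateMap) term: every node that used to be recorded in failedPredicateMap now gets an entry in filteredNodesStatuses instead (filter-plugin failures already did, and extender failures are converted in a later hunk), so the sum of feasible nodes and status-map entries still covers every node that was examined.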
@@ -471,10 +459,8 @@ func (g *genericScheduler) numFeasibleNodesToFind(numAllNodes int32) (numNodes i

// Filters the nodes to find the ones that fit based on the given predicate functions
// Each node is passed through the predicate functions to determine if it is a fit
- // TODO(Huang-Wei): remove 'FailedPredicateMap' from the return parameters.
- func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, FailedPredicateMap, framework.NodeToStatusMap, error) {
+ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, framework.NodeToStatusMap, error) {
var filtered []*v1.Node
- failedPredicateMap := FailedPredicateMap{}
filteredNodesStatuses := framework.NodeToStatusMap{}

if !g.framework.HasFilterPlugins() {
@@ -497,7 +483,7 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
// We check the nodes starting from where we left off in the previous scheduling cycle,
// this is to make sure all nodes have the same chance of being examined across pods.
nodeInfo := g.nodeInfoSnapshot.NodeInfoList[(g.nextStartNodeIndex+i)%allNodes]
- fits, _, status, err := g.podFitsOnNode(ctx, state, pod, nodeInfo)
+ fits, status, err := g.podFitsOnNode(ctx, state, pod, nodeInfo)
if err != nil {
errCh.SendErrorWithCancel(err, cancel)
return
@@ -522,12 +508,12 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
// Stops searching for more nodes once the configured number of feasible nodes
// are found.
workqueue.ParallelizeUntil(ctx, 16, allNodes, checkNode)
- processedNodes := int(filteredLen) + len(filteredNodesStatuses) + len(failedPredicateMap)
+ processedNodes := int(filteredLen) + len(filteredNodesStatuses)
g.nextStartNodeIndex = (g.nextStartNodeIndex + processedNodes) % allNodes

filtered = filtered[:filteredLen]
if err := errCh.ReceiveError(); err != nil {
- return []*v1.Node{}, FailedPredicateMap{}, framework.NodeToStatusMap{}, err
+ return []*v1.Node{}, framework.NodeToStatusMap{}, err
}
}

@@ -544,23 +530,23 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
continue
}

- return []*v1.Node{}, FailedPredicateMap{}, framework.NodeToStatusMap{}, err
+ return []*v1.Node{}, framework.NodeToStatusMap{}, err
}

- // TODO(Huang-Wei): refactor this to fill 'filteredNodesStatuses' instead of 'failedPredicateMap'.
for failedNodeName, failedMsg := range failedMap {
- if _, found := failedPredicateMap[failedNodeName]; !found {
- failedPredicateMap[failedNodeName] = []predicates.PredicateFailureReason{}
+ if _, found := filteredNodesStatuses[failedNodeName]; !found {
+ filteredNodesStatuses[failedNodeName] = framework.NewStatus(framework.Unschedulable, failedMsg)
+ } else {
+ filteredNodesStatuses[failedNodeName].AppendReason(failedMsg)
}
- failedPredicateMap[failedNodeName] = append(failedPredicateMap[failedNodeName], predicates.NewPredicateFailureError(extender.Name(), failedMsg))
}
filtered = filteredList
if len(filtered) == 0 {
break
}
}
}
- return filtered, failedPredicateMap, filteredNodesStatuses, nil
+ return filtered, filteredNodesStatuses, nil
}

// addNominatedPods adds pods with equal or greater priority which are nominated
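
The extender path above now folds its failures into the same NodeToStatusMap the filter plugins use: the first failure seen for a node creates an Unschedulable status, and any further failure messages are appended as extra reasons. Here is a compact, self-contained sketch of that merge pattern, again with simplified stand-in types rather than the real framework package:

package main

import "fmt"

// Code and Status are stand-ins for framework.Code / framework.Status.
type Code int

const Unschedulable Code = 2

type Status struct {
    code    Code
    reasons []string
}

func NewStatus(code Code, reasons ...string) *Status { return &Status{code: code, reasons: reasons} }
func (s *Status) AppendReason(reason string)         { s.reasons = append(s.reasons, reason) }

type NodeToStatusMap map[string]*Status

// mergeExtenderFailures mirrors the loop above: one status per node, with each
// extender failure message appended as an additional reason.
func mergeExtenderFailures(statuses NodeToStatusMap, failedMap map[string]string) {
    for failedNodeName, failedMsg := range failedMap {
        if _, found := statuses[failedNodeName]; !found {
            statuses[failedNodeName] = NewStatus(Unschedulable, failedMsg)
        } else {
            statuses[failedNodeName].AppendReason(failedMsg)
        }
    }
}

func main() {
    statuses := NodeToStatusMap{"node-1": NewStatus(Unschedulable, "Insufficient cpu")}
    mergeExtenderFailures(statuses, map[string]string{
        "node-1": "extender: gpu quota exceeded",
        "node-2": "extender: zone mismatch",
    })
    for node, st := range statuses {
        fmt.Println(node, st.reasons)
    }
}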
@@ -607,8 +593,7 @@ func (g *genericScheduler) podFitsOnNode(
state *framework.CycleState,
pod *v1.Pod,
info *schedulernodeinfo.NodeInfo,
- ) (bool, []predicates.PredicateFailureReason, *framework.Status, error) {
- var failedPredicates []predicates.PredicateFailureReason
+ ) (bool, *framework.Status, error) {
var status *framework.Status

podsAdded := false
@@ -637,19 +622,19 @@ func (g *genericScheduler) podFitsOnNode(
var err error
podsAdded, stateToUse, nodeInfoToUse, err = g.addNominatedPods(ctx, pod, state, info)
if err != nil {
- return false, []predicates.PredicateFailureReason{}, nil, err
+ return false, nil, err
}
- } else if !podsAdded || len(failedPredicates) != 0 || !status.IsSuccess() {
+ } else if !podsAdded || !status.IsSuccess() {
break
}

status = g.framework.RunFilterPlugins(ctx, stateToUse, pod, nodeInfoToUse)
if !status.IsSuccess() && !status.IsUnschedulable() {
- return false, failedPredicates, status, status.AsError()
+ return false, status, status.AsError()
}
}

- return len(failedPredicates) == 0 && status.IsSuccess(), failedPredicates, status, nil
+ return status.IsSuccess(), status, nil
}

// prioritizeNodes prioritizes the nodes by running the score plugins,
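
After this change podFitsOnNode's verdict is carried entirely by the returned *framework.Status: a status that is neither success nor plain unschedulable is treated as an internal error, and otherwise "fits" is simply status.IsSuccess(). A rough sketch of that return contract, using a stand-in status type with just the methods exercised above:

package main

import (
    "errors"
    "fmt"
)

// Status is a stand-in for framework.Status with only the calls used here.
type Status struct {
    code    int // 0 = Success, 1 = Unschedulable, 2 = Error
    reasons []string
}

func (s *Status) IsSuccess() bool       { return s == nil || s.code == 0 }
func (s *Status) IsUnschedulable() bool { return s != nil && s.code == 1 }
func (s *Status) AsError() error {
    if s.IsSuccess() {
        return nil
    }
    return errors.New(fmt.Sprint(s.reasons))
}

// evaluate mirrors the tail of podFitsOnNode: a non-success, non-unschedulable
// status is an internal error; otherwise fitting is exactly status.IsSuccess().
func evaluate(status *Status) (bool, *Status, error) {
    if !status.IsSuccess() && !status.IsUnschedulable() {
        return false, status, status.AsError()
    }
    return status.IsSuccess(), status, nil
}

func main() {
    for _, s := range []*Status{
        {code: 0},
        {code: 1, reasons: []string{"too many pods"}},
        {code: 2, reasons: []string{"plugin returned an internal error"}},
    } {
        fits, _, err := evaluate(s)
        fmt.Println(fits, err)
    }
}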
@@ -1012,7 +997,7 @@ func (g *genericScheduler) selectVictimsOnNode(
// inter-pod affinity to one or more victims, but we have decided not to
// support this case for performance reasons. Having affinity to lower
// priority pods is not a recommended configuration anyway.
- if fits, _, _, err := g.podFitsOnNode(ctx, state, pod, nodeInfo); !fits {
+ if fits, _, err := g.podFitsOnNode(ctx, state, pod, nodeInfo); !fits {
if err != nil {
klog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err)
}
@@ -1030,7 +1015,7 @@ func (g *genericScheduler) selectVictimsOnNode(
if err := addPod(p); err != nil {
return false, err
}
- fits, _, _, _ := g.podFitsOnNode(ctx, state, pod, nodeInfo)
+ fits, _, _ := g.podFitsOnNode(ctx, state, pod, nodeInfo)
if !fits {
if err := removePod(p); err != nil {
return false, err
@@ -1061,22 +1046,15 @@ func (g *genericScheduler) selectVictimsOnNode(

// nodesWherePreemptionMightHelp returns a list of nodes with failed predicates
// that may be satisfied by removing pods from the node.
func nodesWherePreemptionMightHelp(nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, fitErr *FitError) []*v1.Node {
- potentialNodes := []*v1.Node{}
+ var potentialNodes []*v1.Node
for name, node := range nodeNameToInfo {
+ // We rely on the status reported by each plugin - 'Unschedulable' or 'UnschedulableAndUnresolvable'
+ // to determine whether preemption may help or not on the node.
if fitErr.FilteredNodesStatuses[name].Code() == framework.UnschedulableAndUnresolvable {
continue
}
- failedPredicates := fitErr.FailedPredicates[name]
-
- // If we assume that scheduler looks at all nodes and populates the failedPredicateMap
- // (which is the case today), the !found case should never happen, but we'd prefer
- // to rely less on such assumptions in the code when checking does not impose
- // significant overhead.
- // Also, we currently assume all failures returned by extender as resolvable.
- if !predicates.UnresolvablePredicateExists(failedPredicates) {
- klog.V(3).Infof("Node %v is a potential node for preemption.", name)
- potentialNodes = append(potentialNodes, node.Node())
- }
+ klog.V(3).Infof("Node %v is a potential node for preemption.", name)
+ potentialNodes = append(potentialNodes, node.Node())
}
return potentialNodes
}
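
With per-node statuses in place, preemption candidacy reduces to a single code check: a node whose filter status is UnschedulableAndUnresolvable is skipped, and every other failed node remains a potential preemption target. A small self-contained sketch of that filter (stand-in types again; the real function walks nodeNameToInfo and looks the status up in fitErr.FilteredNodesStatuses):

package main

import "fmt"

// Code values stand in for framework.Code; only the one we branch on matters.
type Code int

const (
    Unschedulable Code = iota + 1
    UnschedulableAndUnresolvable
)

type Status struct{ code Code }

// Code mirrors framework.Status.Code(); a nil status means the node passed filtering.
func (s *Status) Code() Code {
    if s == nil {
        return 0
    }
    return s.code
}

type NodeToStatusMap map[string]*Status

// preemptionCandidates keeps every failed node whose status is not
// UnschedulableAndUnresolvable, mirroring nodesWherePreemptionMightHelp.
func preemptionCandidates(statuses NodeToStatusMap) []string {
    var potential []string
    for name, status := range statuses {
        if status.Code() == UnschedulableAndUnresolvable {
            continue
        }
        potential = append(potential, name)
    }
    return potential
}

func main() {
    statuses := NodeToStatusMap{
        "node-1": {code: Unschedulable},                // preemption might help here
        "node-2": {code: UnschedulableAndUnresolvable}, // preemption cannot resolve this failure
    }
    fmt.Println(preemptionCandidates(statuses)) // [node-1]
}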