@@ -105,7 +105,7 @@ type ScheduleAlgorithm interface {
 	// the pod by preempting lower priority pods if possible.
 	// It returns the node where preemption happened, a list of preempted pods, a
 	// list of pods whose nominated node name should be removed, and error if any.
-	Preempt(context.Context, *profile.Profile, *framework.CycleState, *v1.Pod, error) (selectedNode *v1.Node, preemptedPods []*v1.Pod, cleanupNominatedPods []*v1.Pod, err error)
+	Preempt(context.Context, *profile.Profile, *framework.CycleState, *v1.Pod, error) (selectedNode string, preemptedPods []*v1.Pod, cleanupNominatedPods []*v1.Pod, err error)
 	// Extenders returns a slice of extender config. This is exposed for
 	// testing.
 	Extenders() []SchedulerExtender
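
For readers skimming the interface change: the selected node is now identified by its name, with the empty string standing in for the old nil *v1.Node. A minimal, self-contained sketch of that convention (simplified stand-in types, not the scheduler's real ones):

package main

import "fmt"

// pod is a simplified stand-in for *v1.Pod, used only to show the shape of
// the new return values.
type pod struct{ name string }

// preempt mirrors the new signature shape: it returns the chosen node's
// *name* (a string) instead of a *v1.Node, and "" now plays the role that a
// nil pointer used to play.
func preempt(pending pod, nodes map[string][]pod) (selectedNode string, victims []pod) {
	for name, running := range nodes {
		if len(running) > 0 {
			return name, running // pretend these lower-priority pods must be evicted
		}
	}
	return "", nil // no node can be made schedulable by preemption
}

func main() {
	nodes := map[string][]pod{"node-a": {{name: "low-prio"}}, "node-b": {}}
	name, victims := preempt(pod{name: "high-prio"}, nodes)
	if name == "" {
		fmt.Println("no preemption candidate")
		return
	}
	fmt.Printf("preempt %d pod(s) on %s\n", len(victims), name)
}
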
@@ -251,74 +251,74 @@ func (g *genericScheduler) selectHost(nodeScoreList framework.NodeScoreList) (st
 // other pods with the same priority. The nominated pod prevents other pods from
 // using the nominated resources and the nominated pod could take a long time
 // before it is retried after many other pending pods.
-func (g *genericScheduler) Preempt(ctx context.Context, prof *profile.Profile, state *framework.CycleState, pod *v1.Pod, scheduleErr error) (*v1.Node, []*v1.Pod, []*v1.Pod, error) {
+func (g *genericScheduler) Preempt(ctx context.Context, prof *profile.Profile, state *framework.CycleState, pod *v1.Pod, scheduleErr error) (string, []*v1.Pod, []*v1.Pod, error) {
 	// Scheduler may return various types of errors. Consider preemption only if
 	// the error is of type FitError.
 	fitError, ok := scheduleErr.(*FitError)
 	if !ok || fitError == nil {
-		return nil, nil, nil, nil
+		return "", nil, nil, nil
 	}
 	if !podEligibleToPreemptOthers(pod, g.nodeInfoSnapshot.NodeInfos()) {
 		klog.V(5).Infof("Pod %v/%v is not eligible for more preemption.", pod.Namespace, pod.Name)
-		return nil, nil, nil, nil
+		return "", nil, nil, nil
 	}
 	allNodes, err := g.nodeInfoSnapshot.NodeInfos().List()
 	if err != nil {
-		return nil, nil, nil, err
+		return "", nil, nil, err
 	}
 	if len(allNodes) == 0 {
-		return nil, nil, nil, ErrNoNodesAvailable
+		return "", nil, nil, ErrNoNodesAvailable
 	}
 	potentialNodes := nodesWherePreemptionMightHelp(allNodes, fitError)
 	if len(potentialNodes) == 0 {
 		klog.V(3).Infof("Preemption will not help schedule pod %v/%v on any node.", pod.Namespace, pod.Name)
 		// In this case, we should clean-up any existing nominated node name of the pod.
-		return nil, nil, []*v1.Pod{pod}, nil
+		return "", nil, []*v1.Pod{pod}, nil
 	}
 	var pdbs []*policy.PodDisruptionBudget
 	if g.pdbLister != nil {
 		pdbs, err = g.pdbLister.List(labels.Everything())
 		if err != nil {
-			return nil, nil, nil, err
+			return "", nil, nil, err
 		}
 	}
-	nodeToVictims, err := g.selectNodesForPreemption(ctx, prof, state, pod, potentialNodes, pdbs)
+	nodeNameToVictims, err := g.selectNodesForPreemption(ctx, prof, state, pod, potentialNodes, pdbs)
 	if err != nil {
-		return nil, nil, nil, err
+		return "", nil, nil, err
 	}
 
-	// We will only check nodeToVictims with extenders that support preemption.
+	// We will only check nodeNameToVictims with extenders that support preemption.
 	// Extenders which do not support preemption may later prevent preemptor from being scheduled on the nominated
 	// node. In that case, scheduler will find a different host for the preemptor in subsequent scheduling cycles.
-	nodeToVictims, err = g.processPreemptionWithExtenders(pod, nodeToVictims)
+	nodeNameToVictims, err = g.processPreemptionWithExtenders(pod, nodeNameToVictims)
 	if err != nil {
-		return nil, nil, nil, err
+		return "", nil, nil, err
 	}
 
-	candidateNode := pickOneNodeForPreemption(nodeToVictims)
-	if candidateNode == nil {
-		return nil, nil, nil, nil
+	candidateNode := pickOneNodeForPreemption(nodeNameToVictims)
+	if len(candidateNode) == 0 {
+		return "", nil, nil, nil
 	}
 
 	// Lower priority pods nominated to run on this node, may no longer fit on
 	// this node. So, we should remove their nomination. Removing their
 	// nomination updates these pods and moves them to the active queue. It
 	// lets scheduler find another place for them.
-	nominatedPods := g.getLowerPriorityNominatedPods(pod, candidateNode.Name)
-	return candidateNode, nodeToVictims[candidateNode].Pods, nominatedPods, nil
+	nominatedPods := g.getLowerPriorityNominatedPods(pod, candidateNode)
+	return candidateNode, nodeNameToVictims[candidateNode].Pods, nominatedPods, nil
 }
 
 // processPreemptionWithExtenders processes preemption with extenders
 func (g *genericScheduler) processPreemptionWithExtenders(
 	pod *v1.Pod,
-	nodeToVictims map[*v1.Node]*extenderv1.Victims,
-) (map[*v1.Node]*extenderv1.Victims, error) {
-	if len(nodeToVictims) > 0 {
+	nodeNameToVictims map[string]*extenderv1.Victims,
+) (map[string]*extenderv1.Victims, error) {
+	if len(nodeNameToVictims) > 0 {
 		for _, extender := range g.extenders {
 			if extender.SupportsPreemption() && extender.IsInterested(pod) {
-				newNodeToVictims, err := extender.ProcessPreemption(
+				newNodeNameToVictims, err := extender.ProcessPreemption(
 					pod,
-					nodeToVictims,
+					nodeNameToVictims,
 					g.nodeInfoSnapshot.NodeInfos(),
 				)
 				if err != nil {
@@ -330,19 +330,19 @@ func (g *genericScheduler) processPreemptionWithExtenders(
 					return nil, err
 				}
 
-				// Replace nodeToVictims with new result after preemption. So the
+				// Replace nodeNameToVictims with new result after preemption. So the
 				// rest of extenders can continue use it as parameter.
-				nodeToVictims = newNodeToVictims
+				nodeNameToVictims = newNodeNameToVictims
 
 				// If node list becomes empty, no preemption can happen regardless of other extenders.
-				if len(nodeToVictims) == 0 {
+				if len(nodeNameToVictims) == 0 {
 					break
 				}
 			}
 		}
 	}
 
-	return nodeToVictims, nil
+	return nodeNameToVictims, nil
 }
 
 // getLowerPriorityNominatedPods returns pods whose priority is smaller than the
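
The victims map is now keyed by node name, and each preemption-aware extender gets a chance to shrink it before a winner is picked. A rough, self-contained sketch of that filtering chain (a simplified victims type and plain callbacks standing in for the SchedulerExtender interface, not the real API):

package main

import "fmt"

// victims is a simplified stand-in for extenderv1.Victims.
type victims struct{ pods []string }

// filterFunc stands in for an extender's ProcessPreemption call: it may
// remove candidate nodes (or victims) it does not agree with.
type filterFunc func(map[string]*victims) (map[string]*victims, error)

// processWithExtenders mirrors the loop above: each extender sees the result
// of the previous one, and an empty map ends the chain early.
func processWithExtenders(m map[string]*victims, extenders []filterFunc) (map[string]*victims, error) {
	for _, ext := range extenders {
		var err error
		if m, err = ext(m); err != nil {
			return nil, err
		}
		if len(m) == 0 {
			break // no node left; no later extender can change that
		}
	}
	return m, nil
}

func main() {
	candidates := map[string]*victims{
		"node-a": {pods: []string{"p1"}},
		"node-b": {pods: []string{"p2", "p3"}},
	}
	dropNodeB := func(in map[string]*victims) (map[string]*victims, error) {
		delete(in, "node-b") // this extender vetoes preemption on node-b
		return in, nil
	}
	out, _ := processWithExtenders(candidates, []filterFunc{dropNodeB})
	fmt.Println(len(out), "candidate node(s) remain") // 1 candidate node(s) remain
}
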
@@ -719,12 +719,12 @@ func (g *genericScheduler) prioritizeNodes(
 // 6. If there are still ties, the first such node is picked (sort of randomly).
 // The 'minNodes1' and 'minNodes2' are being reused here to save the memory
 // allocation and garbage collection time.
-func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*extenderv1.Victims) *v1.Node {
+func pickOneNodeForPreemption(nodesToVictims map[string]*extenderv1.Victims) string {
 	if len(nodesToVictims) == 0 {
-		return nil
+		return ""
 	}
 	minNumPDBViolatingPods := int64(math.MaxInt32)
-	var minNodes1 []*v1.Node
+	var minNodes1 []string
 	lenNodes1 := 0
 	for node, victims := range nodesToVictims {
 		if len(victims.Pods) == 0 {
@@ -752,7 +752,7 @@ func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*extenderv1.Victims) *
 	// There are more than one node with minimum number PDB violating pods. Find
 	// the one with minimum highest priority victim.
 	minHighestPriority := int32(math.MaxInt32)
-	var minNodes2 = make([]*v1.Node, lenNodes1)
+	var minNodes2 = make([]string, lenNodes1)
 	lenNodes2 := 0
 	for i := 0; i < lenNodes1; i++ {
 		node := minNodes1[i]
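
The tie-breaking in pickOneNodeForPreemption now narrows a []string instead of a []*v1.Node, but the shape is unchanged: keep every node tied on one criterion, then re-rank only the survivors on the next. A compressed, self-contained sketch of the first two rounds (a simplified candidate struct in place of extenderv1.Victims; the real function continues with further rounds before falling back to "first node wins"):

package main

import (
	"fmt"
	"math"
)

// candidate is a simplified stand-in for extenderv1.Victims, keeping just the
// two fields the first two tie-breaking rounds look at.
type candidate struct {
	numPDBViolations int64
	highestPriority  int32
}

// pickOne keeps every node tied on the first criterion (fewest PDB-violating
// victims), then breaks the tie on the lowest "highest victim priority",
// returning "" when there is nothing to choose from.
func pickOne(nodes map[string]candidate) string {
	if len(nodes) == 0 {
		return ""
	}
	minViolations := int64(math.MaxInt64)
	var round1 []string
	for name, c := range nodes {
		if c.numPDBViolations < minViolations {
			minViolations = c.numPDBViolations
			round1 = round1[:0] // a strictly better node resets the survivor list
		}
		if c.numPDBViolations == minViolations {
			round1 = append(round1, name)
		}
	}
	minHighest := int32(math.MaxInt32)
	best := ""
	for _, name := range round1 {
		if p := nodes[name].highestPriority; p < minHighest {
			minHighest = p
			best = name
		}
	}
	return best
}

func main() {
	fmt.Println(pickOne(map[string]candidate{
		"node-a": {numPDBViolations: 0, highestPriority: 100},
		"node-b": {numPDBViolations: 0, highestPriority: 10}, // least-important victims win
		"node-c": {numPDBViolations: 2, highestPriority: 5},
	})) // node-b
}
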
@@ -855,8 +855,8 @@ func (g *genericScheduler) selectNodesForPreemption(
 	pod *v1.Pod,
 	potentialNodes []*framework.NodeInfo,
 	pdbs []*policy.PodDisruptionBudget,
-) (map[*v1.Node]*extenderv1.Victims, error) {
-	nodeToVictims := map[*v1.Node]*extenderv1.Victims{}
+) (map[string]*extenderv1.Victims, error) {
+	nodeNameToVictims := map[string]*extenderv1.Victims{}
 	var resultLock sync.Mutex
 
 	checkNode := func(i int) {
@@ -869,12 +869,12 @@ func (g *genericScheduler) selectNodesForPreemption(
 				Pods:             pods,
 				NumPDBViolations: int64(numPDBViolations),
 			}
-			nodeToVictims[potentialNodes[i].Node()] = &victims
+			nodeNameToVictims[potentialNodes[i].Node().Name] = &victims
 			resultLock.Unlock()
 		}
 	}
 	parallelize.Until(ctx, len(potentialNodes), checkNode)
-	return nodeToVictims, nil
+	return nodeNameToVictims, nil
 }
 
 // filterPodsWithPDBViolation groups the given "pods" into two groups of "violatingPods"
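
selectNodesForPreemption still dry-runs every potential node in parallel; only the map key changes, from the *v1.Node pointer to the node's name. A small self-contained sketch of that fan-out pattern, with a plain sync.WaitGroup standing in for parallelize.Until and a fake per-node result in place of the real victim selection:

package main

import (
	"fmt"
	"sync"
)

// victims is a simplified stand-in for extenderv1.Victims.
type victims struct{ pods []string }

func main() {
	potentialNodes := []string{"node-a", "node-b", "node-c"}
	nodeNameToVictims := map[string]*victims{}
	var resultLock sync.Mutex

	// checkNode mirrors the closure above: compute the per-node result in
	// parallel, then take the lock only to record it under the node *name*.
	checkNode := func(i int) {
		name := potentialNodes[i]
		v := &victims{pods: []string{"victim-on-" + name}} // pretend dry-run result
		resultLock.Lock()
		nodeNameToVictims[name] = v
		resultLock.Unlock()
	}

	// A plain WaitGroup stands in for parallelize.Until here.
	var wg sync.WaitGroup
	for i := range potentialNodes {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			checkNode(i)
		}(i)
	}
	wg.Wait()
	fmt.Println(len(nodeNameToVictims), "nodes evaluated") // 3 nodes evaluated
}
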