@@ -28,14 +28,14 @@ import (
 	"time"

 	"k8s.io/klog"
+	"k8s.io/kubernetes/pkg/scheduler/internal/parallelize"

 	v1 "k8s.io/api/core/v1"
 	policy "k8s.io/api/policy/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	policylisters "k8s.io/client-go/listers/policy/v1beta1"
-	"k8s.io/client-go/util/workqueue"
 	extenderv1 "k8s.io/kube-scheduler/extender/v1"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
@@ -479,7 +479,7 @@ func (g *genericScheduler) findNodesThatPassFilters(ctx context.Context, prof *p

 	// Stops searching for more nodes once the configured number of feasible nodes
 	// are found.
-	workqueue.ParallelizeUntil(ctx, 16, len(allNodes), checkNode)
+	parallelize.Until(ctx, len(allNodes), checkNode)
 	processedNodes := int(filteredLen) + len(statuses)
 	g.nextStartNodeIndex = (g.nextStartNodeIndex + processedNodes) % len(allNodes)
@@ -870,7 +870,7 @@ func (g *genericScheduler) selectNodesForPreemption(
 			resultLock.Unlock()
 		}
 	}
-	workqueue.ParallelizeUntil(context.TODO(), 16, len(potentialNodes), checkNode)
+	parallelize.Until(ctx, len(potentialNodes), checkNode)
 	return nodeToVictims, nil
 }
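
The change is mechanical at each call site: the hardcoded worker count of 16 disappears behind the new package, and the preemption path also switches from context.TODO() to the caller's ctx, so cancelling the scheduling context now stops the parallel node evaluation as well. For reference, below is a minimal sketch of what pkg/scheduler/internal/parallelize could contain, inferred from the import path and the call sites above; the constant name and comments are assumptions, not the committed code.

// Package parallelize centralizes the scheduler's parallelism setting so that
// call sites no longer hardcode a worker count (sketch; names are assumed).
package parallelize

import (
	"context"

	"k8s.io/client-go/util/workqueue"
)

// parallelism is the fixed number of worker goroutines used for scheduler
// operations; previously this value was hardcoded at every call site.
const parallelism = 16

// Until runs doWorkPiece for every index in [0, pieces) across a fixed pool
// of workers, stopping early if ctx is cancelled. It delegates to client-go's
// workqueue.ParallelizeUntil with the shared parallelism value.
func Until(ctx context.Context, pieces int, doWorkPiece workqueue.DoWorkPieceFunc) {
	workqueue.ParallelizeUntil(ctx, parallelism, pieces, doWorkPiece)
}

A call site then reads parallelize.Until(ctx, len(allNodes), checkNode), where checkNode is a func(int) closure indexing into the shared slice, matching workqueue's DoWorkPieceFunc signature.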