Commit 0bcbc3b
Merge pull request kubernetes#124003 from carlory/scheduler-rm-non-csi-limit
kube-scheduler remove non-csi volumelimit plugins
2 parents 9fe0662 + cba2b3f commit 0bcbc3b

9 files changed: +52 -1735 lines changed

pkg/scheduler/framework/plugins/names/names.go

Lines changed: 0 additions & 4 deletions
@@ -30,10 +30,6 @@ const (
 	NodeResourcesFit = "NodeResourcesFit"
 	NodeUnschedulable = "NodeUnschedulable"
 	NodeVolumeLimits = "NodeVolumeLimits"
-	AzureDiskLimits = "AzureDiskLimits"
-	CinderLimits = "CinderLimits"
-	EBSLimits = "EBSLimits"
-	GCEPDLimits = "GCEPDLimits"
 	PodTopologySpread = "PodTopologySpread"
 	SchedulingGates = "SchedulingGates"
 	TaintToleration = "TaintToleration"
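Note: scheduler configurations that still enable these plugin names by hand will likely need to drop them. A minimal, hypothetical Go sketch (not part of this PR; all names below other than the four plugin names are made up) that flags the removed names in a profile's filter plugin list:

package main

import "fmt"

// removedVolumeLimitPlugins lists the filter plugin names deleted in this commit.
var removedVolumeLimitPlugins = map[string]bool{
	"AzureDiskLimits": true,
	"CinderLimits":    true,
	"EBSLimits":       true,
	"GCEPDLimits":     true,
}

// removedPluginsIn returns the subset of the given plugin names that no longer exist.
func removedPluginsIn(filterPlugins []string) []string {
	var hits []string
	for _, name := range filterPlugins {
		if removedVolumeLimitPlugins[name] {
			hits = append(hits, name)
		}
	}
	return hits
}

func main() {
	profile := []string{"NodeVolumeLimits", "EBSLimits", "PodTopologySpread"}
	fmt.Println(removedPluginsIn(profile)) // [EBSLimits]
}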

pkg/scheduler/framework/plugins/nodevolumelimits/csi.go

Lines changed: 31 additions & 26 deletions
@@ -35,7 +35,11 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
 	"k8s.io/kubernetes/pkg/scheduler/util"
-	volumeutil "k8s.io/kubernetes/pkg/volume/util"
+)
+
+const (
+	// ErrReasonMaxVolumeCountExceeded is used for MaxVolumeCount predicate error.
+	ErrReasonMaxVolumeCountExceeded = "node(s) exceed max volume count"
 )
 
 // InTreeToCSITranslator contains methods required to check migratable status
@@ -173,7 +177,6 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v
 
 	logger := klog.FromContext(ctx)
 
-	// If CSINode doesn't exist, the predicate may read the limits from Node object
 	csiNode, err := pl.csiNodeLister.Get(node.Name)
 	if err != nil {
 		// TODO: return the error once CSINode is created by default (2 releases)
@@ -195,7 +198,7 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v
 	}
 
 	// If the node doesn't have volume limits, the predicate will always be true
-	nodeVolumeLimits := getVolumeLimits(nodeInfo, csiNode)
+	nodeVolumeLimits := getVolumeLimits(csiNode)
 	if len(nodeVolumeLimits) == 0 {
 		return nil
 	}
@@ -208,22 +211,23 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v
 	}
 
 	attachedVolumeCount := map[string]int{}
-	for volumeUniqueName, volumeLimitKey := range attachedVolumes {
+	for volumeUniqueName, driverName := range attachedVolumes {
 		// Don't count single volume used in multiple pods more than once
 		delete(newVolumes, volumeUniqueName)
-		attachedVolumeCount[volumeLimitKey]++
+		attachedVolumeCount[driverName]++
 	}
 
+	// Count the new volumes count per driver
 	newVolumeCount := map[string]int{}
-	for _, volumeLimitKey := range newVolumes {
-		newVolumeCount[volumeLimitKey]++
+	for _, driverName := range newVolumes {
+		newVolumeCount[driverName]++
 	}
 
-	for volumeLimitKey, count := range newVolumeCount {
-		maxVolumeLimit, ok := nodeVolumeLimits[v1.ResourceName(volumeLimitKey)]
+	for driverName, count := range newVolumeCount {
+		maxVolumeLimit, ok := nodeVolumeLimits[driverName]
 		if ok {
-			currentVolumeCount := attachedVolumeCount[volumeLimitKey]
-			logger.V(5).Info("Found plugin volume limits", "node", node.Name, "volumeLimitKey", volumeLimitKey,
+			currentVolumeCount := attachedVolumeCount[driverName]
+			logger.V(5).Info("Found plugin volume limits", "node", node.Name, "driverName", driverName,
 				"maxLimits", maxVolumeLimit, "currentVolumeCount", currentVolumeCount, "newVolumeCount", count,
 				"pod", klog.KObj(pod))
 			if currentVolumeCount+count > int(maxVolumeLimit) {
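For context, a minimal standalone sketch of the per-driver counting this hunk performs, assuming maps keyed by plain CSI driver names as in the diff. Variable and function names here are illustrative, not the plugin's own:

package main

import "fmt"

// exceedsLimits reports whether adding the requested volumes would push any
// driver past its per-node attach limit. Both maps go from a unique volume
// name ("driverName/volumeHandle") to the driver name.
func exceedsLimits(attached, requested map[string]string, limits map[string]int64) bool {
	attachedCount := map[string]int{}
	for uniqueName, driver := range attached {
		// A volume already attached to the node is not counted as new.
		delete(requested, uniqueName)
		attachedCount[driver]++
	}
	newCount := map[string]int{}
	for _, driver := range requested {
		newCount[driver]++
	}
	for driver, count := range newCount {
		if limit, ok := limits[driver]; ok && attachedCount[driver]+count > int(limit) {
			return true
		}
	}
	return false
}

func main() {
	attached := map[string]string{"ebs.csi.aws.com/vol-1": "ebs.csi.aws.com"}
	requested := map[string]string{"ebs.csi.aws.com/vol-2": "ebs.csi.aws.com"}
	limits := map[string]int64{"ebs.csi.aws.com": 1}
	fmt.Println(exceedsLimits(attached, requested, limits)) // true
}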
@@ -235,6 +239,9 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v
 	return nil
 }
 
+// filterAttachableVolumes filters the attachable volumes from the pod and adds them to the result map.
+// The result map is a map of volumeUniqueName to driver name. The volumeUniqueName is a unique name for
+// the volume in the format of "driverName/volumeHandle". And driver name is the CSI driver name.
 func (pl *CSILimits) filterAttachableVolumes(
 	logger klog.Logger, pod *v1.Pod, csiNode *storagev1.CSINode, newPod bool, result map[string]string) error {
 	for _, vol := range pod.Spec.Volumes {
@@ -297,8 +304,7 @@ func (pl *CSILimits) filterAttachableVolumes(
 		}
 
 		volumeUniqueName := fmt.Sprintf("%s/%s", driverName, volumeHandle)
-		volumeLimitKey := volumeutil.GetCSIAttachLimitKey(driverName)
-		result[volumeUniqueName] = volumeLimitKey
+		result[volumeUniqueName] = driverName
 	}
 	return nil
 }
@@ -339,8 +345,7 @@ func (pl *CSILimits) checkAttachableInlineVolume(logger klog.Logger, vol *v1.Vol
 		return nil
 	}
 	volumeUniqueName := fmt.Sprintf("%s/%s", driverName, translatedPV.Spec.PersistentVolumeSource.CSI.VolumeHandle)
-	volumeLimitKey := volumeutil.GetCSIAttachLimitKey(driverName)
-	result[volumeUniqueName] = volumeLimitKey
+	result[volumeUniqueName] = driverName
 	return nil
 }
 
@@ -460,17 +465,17 @@ func NewCSI(_ context.Context, _ runtime.Object, handle framework.Handle, fts fe
 	}, nil
 }
 
-func getVolumeLimits(nodeInfo *framework.NodeInfo, csiNode *storagev1.CSINode) map[v1.ResourceName]int64 {
-	// TODO: stop getting values from Node object in v1.18
-	nodeVolumeLimits := volumeLimits(nodeInfo)
-	if csiNode != nil {
-		for i := range csiNode.Spec.Drivers {
-			d := csiNode.Spec.Drivers[i]
-			if d.Allocatable != nil && d.Allocatable.Count != nil {
-				// TODO: drop GetCSIAttachLimitKey once we don't get values from Node object (v1.18)
-				k := v1.ResourceName(volumeutil.GetCSIAttachLimitKey(d.Name))
-				nodeVolumeLimits[k] = int64(*d.Allocatable.Count)
-			}
+// getVolumeLimits reads the volume limits from CSINode object and returns a map of volume limits.
+// The key is the driver name and the value is the maximum number of volumes that can be attached to the node.
+// If a key is not found in the map, it means there is no limit for the driver on the node.
+func getVolumeLimits(csiNode *storagev1.CSINode) map[string]int64 {
+	nodeVolumeLimits := make(map[string]int64)
+	if csiNode == nil {
+		return nodeVolumeLimits
+	}
+	for _, d := range csiNode.Spec.Drivers {
+		if d.Allocatable != nil && d.Allocatable.Count != nil {
+			nodeVolumeLimits[d.Name] = int64(*d.Allocatable.Count)
 		}
 	}
 	return nodeVolumeLimits
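As a quick illustration of the data the reworked getVolumeLimits consumes, here is a hedged, self-contained sketch that builds a CSINode with one driver limit and reads it back. The helper name is illustrative; only the k8s.io/api types are real:

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
)

// limitsFromCSINode mirrors the new behavior above: limits come only from
// CSINode.Spec.Drivers[].Allocatable.Count, never from the Node object.
func limitsFromCSINode(csiNode *storagev1.CSINode) map[string]int64 {
	limits := make(map[string]int64)
	if csiNode == nil {
		return limits
	}
	for _, d := range csiNode.Spec.Drivers {
		if d.Allocatable != nil && d.Allocatable.Count != nil {
			limits[d.Name] = int64(*d.Allocatable.Count)
		}
	}
	return limits
}

func main() {
	count := int32(8)
	csiNode := &storagev1.CSINode{
		Spec: storagev1.CSINodeSpec{
			Drivers: []storagev1.CSINodeDriver{
				{Name: "ebs.csi.aws.com", Allocatable: &storagev1.VolumeNodeResources{Count: &count}},
				{Name: "other.csi.example.com"}, // no Allocatable => no limit reported
			},
		},
	}
	fmt.Println(limitsFromCSINode(csiNode)) // map[ebs.csi.aws.com:8]
}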

pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go

Lines changed: 20 additions & 46 deletions
@@ -26,7 +26,6 @@ import (
 	"github.com/google/go-cmp/cmp"
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -35,7 +34,6 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
-	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/test/utils/ktesting"
 	"k8s.io/utils/ptr"
 )
@@ -51,22 +49,6 @@ var (
 	scName = "csi-sc"
 )
 
-// getVolumeLimitKey returns a ResourceName by filter type
-func getVolumeLimitKey(filterType string) v1.ResourceName {
-	switch filterType {
-	case ebsVolumeFilterType:
-		return v1.ResourceName(volumeutil.EBSVolumeLimitKey)
-	case gcePDVolumeFilterType:
-		return v1.ResourceName(volumeutil.GCEVolumeLimitKey)
-	case azureDiskVolumeFilterType:
-		return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
-	case cinderVolumeFilterType:
-		return v1.ResourceName(volumeutil.CinderVolumeLimitKey)
-	default:
-		return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
-	}
-}
-
 func TestCSILimits(t *testing.T) {
 	runningPod := st.MakePod().PVC("csi-ebs.csi.aws.com-3").Obj()
 	pendingVolumePod := st.MakePod().PVC("csi-4").Obj()
@@ -297,7 +279,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 4,
 			driverNames: []string{ebsCSIDriverName},
 			test: "fits when node volume limit >= new pods CSI volume",
-			limitSource: "node",
+			limitSource: "csinode",
 		},
 		{
 			newPod: csiEBSOneVolPod,
@@ -306,7 +288,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName},
 			test: "doesn't when node volume limit <= pods CSI volume",
-			limitSource: "node",
+			limitSource: "csinode",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
@@ -326,7 +308,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName},
 			test: "count pending PVCs towards volume limit <= pods CSI volume",
-			limitSource: "node",
+			limitSource: "csinode",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		// two same pending PVCs should be counted as 1
@@ -337,7 +319,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 4,
 			driverNames: []string{ebsCSIDriverName},
 			test: "count multiple pending pvcs towards volume limit >= pods CSI volume",
-			limitSource: "node",
+			limitSource: "csinode",
 		},
 		// should count PVCs with invalid PV name but valid SC
 		{
@@ -347,7 +329,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName},
 			test: "should count PVCs with invalid PV name but valid SC",
-			limitSource: "node",
+			limitSource: "csinode",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		// don't count a volume which has storageclass missing
@@ -358,7 +340,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName},
 			test: "don't count pvcs with missing SC towards volume limit",
-			limitSource: "node",
+			limitSource: "csinode",
 		},
 		// don't count multiple volume types
 		{
@@ -368,7 +350,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName, gceCSIDriverName},
 			test: "count pvcs with the same type towards volume limit",
-			limitSource: "node",
+			limitSource: "csinode",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
@@ -378,7 +360,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName, gceCSIDriverName},
 			test: "don't count pvcs with different type towards volume limit",
-			limitSource: "node",
+			limitSource: "csinode",
 		},
 		// Tests for in-tree volume migration
 		{
@@ -396,10 +378,8 @@ func TestCSILimits(t *testing.T) {
 			newPod: inTreeInlineVolPod,
 			existingPods: []*v1.Pod{inTreeTwoVolPod},
 			filterName: "csi",
-			maxVols: 2,
 			driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
 			migrationEnabled: true,
-			limitSource: "node",
 			test: "nil csi node",
 		},
 		{
@@ -494,6 +474,7 @@ func TestCSILimits(t *testing.T) {
 			filterName: "csi",
 			ephemeralEnabled: true,
 			driverNames: []string{ebsCSIDriverName},
+			limitSource: "csinode-with-no-limit",
 			test: "ephemeral volume missing",
 			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `looking up PVC test/abc-xyz: persistentvolumeclaims "abc-xyz" not found`),
 		},
@@ -503,6 +484,7 @@ func TestCSILimits(t *testing.T) {
 			ephemeralEnabled: true,
 			extraClaims: []v1.PersistentVolumeClaim{*conflictingClaim},
 			driverNames: []string{ebsCSIDriverName},
+			limitSource: "csinode-with-no-limit",
 			test: "ephemeral volume not owned",
 			wantStatus: framework.AsStatus(errors.New("PVC test/abc-xyz was not created for pod test/abc (pod is not owner)")),
 		},
@@ -512,6 +494,7 @@ func TestCSILimits(t *testing.T) {
 			ephemeralEnabled: true,
 			extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim},
 			driverNames: []string{ebsCSIDriverName},
+			limitSource: "csinode-with-no-limit",
 			test: "ephemeral volume unbound",
 		},
 		{
@@ -522,7 +505,7 @@ func TestCSILimits(t *testing.T) {
 			driverNames: []string{ebsCSIDriverName},
 			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
 			maxVols: 2,
-			limitSource: "node",
+			limitSource: "csinode",
 			test: "ephemeral doesn't when node volume limit <= pods CSI volume",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
@@ -534,7 +517,7 @@ func TestCSILimits(t *testing.T) {
 			driverNames: []string{ebsCSIDriverName},
 			existingPods: []*v1.Pod{runningPod, ephemeralTwoVolumePod},
 			maxVols: 2,
-			limitSource: "node",
+			limitSource: "csinode",
 			test: "ephemeral doesn't when node volume limit <= pods ephemeral CSI volume",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
@@ -546,7 +529,7 @@ func TestCSILimits(t *testing.T) {
 			driverNames: []string{ebsCSIDriverName},
 			existingPods: []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod},
 			maxVols: 3,
-			limitSource: "node",
+			limitSource: "csinode",
 			test: "persistent doesn't when node volume limit <= pods ephemeral CSI volume + persistent volume, ephemeral disabled",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
@@ -558,7 +541,7 @@ func TestCSILimits(t *testing.T) {
 			driverNames: []string{ebsCSIDriverName},
 			existingPods: []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod},
 			maxVols: 3,
-			limitSource: "node",
+			limitSource: "csinode",
 			test: "persistent doesn't when node volume limit <= pods ephemeral CSI volume + persistent volume",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
@@ -569,7 +552,8 @@ func TestCSILimits(t *testing.T) {
 			extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim},
 			driverNames: []string{ebsCSIDriverName},
 			existingPods: []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod},
-			maxVols: 4,
+			maxVols: 5,
+			limitSource: "csinode",
 			test: "persistent okay when node volume limit > pods ephemeral CSI volume + persistent volume",
 		},
 		{
@@ -578,7 +562,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName},
 			test: "skip Filter when the pod only uses secrets and configmaps",
-			limitSource: "node",
+			limitSource: "csinode",
 			wantPreFilterStatus: framework.NewStatus(framework.Skip),
 		},
 		{
@@ -587,13 +571,14 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName},
 			test: "don't skip Filter when the pod has pvcs",
-			limitSource: "node",
+			limitSource: "csinode",
 		},
 		{
 			newPod: ephemeralPodWithConfigmapAndSecret,
 			filterName: "csi",
 			ephemeralEnabled: true,
 			driverNames: []string{ebsCSIDriverName},
+			limitSource: "csinode-with-no-limit",
 			test: "don't skip Filter when the pod has ephemeral volumes",
 			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `looking up PVC test/abc-xyz: persistentvolumeclaims "abc-xyz" not found`),
 		},
@@ -898,12 +883,6 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
 	}
 	var csiNode *storagev1.CSINode
 
-	addLimitToNode := func() {
-		for _, driver := range driverNames {
-			node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(int64(limit), resource.DecimalSI)
-		}
-	}
-
 	initCSINode := func() {
 		csiNode = &storagev1.CSINode{
 			ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
@@ -930,13 +909,8 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
 	}
 
 	switch limitSource {
-	case "node":
-		addLimitToNode()
 	case "csinode":
 		addDriversCSINode(true)
-	case "both":
-		addLimitToNode()
-		addDriversCSINode(true)
 	case "csinode-with-no-limit":
 		addDriversCSINode(false)
 	case "no-csi-driver":
