
Commit cba2b3f

kube-scheduler remove non-csi volumelimit plugins
1 parent 00236ae commit cba2b3f


9 files changed: +52 additions, -1735 deletions


pkg/scheduler/framework/plugins/names/names.go

Lines changed: 0 additions & 4 deletions
@@ -30,10 +30,6 @@ const (
     NodeResourcesFit = "NodeResourcesFit"
     NodeUnschedulable = "NodeUnschedulable"
     NodeVolumeLimits = "NodeVolumeLimits"
-    AzureDiskLimits = "AzureDiskLimits"
-    CinderLimits = "CinderLimits"
-    EBSLimits = "EBSLimits"
-    GCEPDLimits = "GCEPDLimits"
     PodTopologySpread = "PodTopologySpread"
     SchedulingGates = "SchedulingGates"
     TaintToleration = "TaintToleration"

pkg/scheduler/framework/plugins/nodevolumelimits/csi.go

Lines changed: 31 additions & 26 deletions
@@ -35,7 +35,11 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
     "k8s.io/kubernetes/pkg/scheduler/util"
-    volumeutil "k8s.io/kubernetes/pkg/volume/util"
+)
+
+const (
+    // ErrReasonMaxVolumeCountExceeded is used for MaxVolumeCount predicate error.
+    ErrReasonMaxVolumeCountExceeded = "node(s) exceed max volume count"
 )
 
 // InTreeToCSITranslator contains methods required to check migratable status
@@ -141,7 +145,6 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v
 
     logger := klog.FromContext(ctx)
 
-    // If CSINode doesn't exist, the predicate may read the limits from Node object
     csiNode, err := pl.csiNodeLister.Get(node.Name)
     if err != nil {
         // TODO: return the error once CSINode is created by default (2 releases)
@@ -163,7 +166,7 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v
     }
 
     // If the node doesn't have volume limits, the predicate will always be true
-    nodeVolumeLimits := getVolumeLimits(nodeInfo, csiNode)
+    nodeVolumeLimits := getVolumeLimits(csiNode)
     if len(nodeVolumeLimits) == 0 {
         return nil
     }
@@ -176,22 +179,23 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v
     }
 
     attachedVolumeCount := map[string]int{}
-    for volumeUniqueName, volumeLimitKey := range attachedVolumes {
+    for volumeUniqueName, driverName := range attachedVolumes {
         // Don't count single volume used in multiple pods more than once
         delete(newVolumes, volumeUniqueName)
-        attachedVolumeCount[volumeLimitKey]++
+        attachedVolumeCount[driverName]++
     }
 
+    // Count the new volumes count per driver
     newVolumeCount := map[string]int{}
-    for _, volumeLimitKey := range newVolumes {
-        newVolumeCount[volumeLimitKey]++
+    for _, driverName := range newVolumes {
+        newVolumeCount[driverName]++
     }
 
-    for volumeLimitKey, count := range newVolumeCount {
-        maxVolumeLimit, ok := nodeVolumeLimits[v1.ResourceName(volumeLimitKey)]
+    for driverName, count := range newVolumeCount {
+        maxVolumeLimit, ok := nodeVolumeLimits[driverName]
         if ok {
-            currentVolumeCount := attachedVolumeCount[volumeLimitKey]
-            logger.V(5).Info("Found plugin volume limits", "node", node.Name, "volumeLimitKey", volumeLimitKey,
+            currentVolumeCount := attachedVolumeCount[driverName]
+            logger.V(5).Info("Found plugin volume limits", "node", node.Name, "driverName", driverName,
                 "maxLimits", maxVolumeLimit, "currentVolumeCount", currentVolumeCount, "newVolumeCount", count,
                 "pod", klog.KObj(pod))
             if currentVolumeCount+count > int(maxVolumeLimit) {
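The accounting in this hunk is now keyed by the CSI driver name end to end. As a rough sketch (not code from this commit; the driver name and the numbers are invented), the Filter check reduces to:

    nodeVolumeLimits := map[string]int64{"ebs.csi.aws.com": 25}  // from CSINode allocatable counts
    attachedVolumeCount := map[string]int{"ebs.csi.aws.com": 24} // volumes already attached on the node
    newVolumeCount := map[string]int{"ebs.csi.aws.com": 2}       // volumes the incoming pod would add

    for driverName, count := range newVolumeCount {
        if limit, ok := nodeVolumeLimits[driverName]; ok {
            if attachedVolumeCount[driverName]+count > int(limit) {
                // 24 + 2 > 25: the pod is rejected with ErrReasonMaxVolumeCountExceeded
            }
        }
    }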
@@ -203,6 +207,9 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v
     return nil
 }
 
+// filterAttachableVolumes filters the attachable volumes from the pod and adds them to the result map.
+// The result map is a map of volumeUniqueName to driver name. The volumeUniqueName is a unique name for
+// the volume in the format of "driverName/volumeHandle". And driver name is the CSI driver name.
 func (pl *CSILimits) filterAttachableVolumes(
     logger klog.Logger, pod *v1.Pod, csiNode *storagev1.CSINode, newPod bool, result map[string]string) error {
     for _, vol := range pod.Spec.Volumes {
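Per the new doc comment, the keys are "driverName/volumeHandle" strings and the values are bare driver names. A hypothetical result map (values invented for illustration) might look like:

    result := map[string]string{
        "ebs.csi.aws.com/vol-0a12b34c":  "ebs.csi.aws.com",
        "pd.csi.storage.gke.io/pd-abc1": "pd.csi.storage.gke.io",
    }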
@@ -265,8 +272,7 @@ func (pl *CSILimits) filterAttachableVolumes(
         }
 
         volumeUniqueName := fmt.Sprintf("%s/%s", driverName, volumeHandle)
-        volumeLimitKey := volumeutil.GetCSIAttachLimitKey(driverName)
-        result[volumeUniqueName] = volumeLimitKey
+        result[volumeUniqueName] = driverName
     }
     return nil
 }
@@ -307,8 +313,7 @@ func (pl *CSILimits) checkAttachableInlineVolume(logger klog.Logger, vol *v1.Vol
         return nil
     }
     volumeUniqueName := fmt.Sprintf("%s/%s", driverName, translatedPV.Spec.PersistentVolumeSource.CSI.VolumeHandle)
-    volumeLimitKey := volumeutil.GetCSIAttachLimitKey(driverName)
-    result[volumeUniqueName] = volumeLimitKey
+    result[volumeUniqueName] = driverName
     return nil
 }
 
@@ -428,17 +433,17 @@ func NewCSI(_ context.Context, _ runtime.Object, handle framework.Handle, fts fe
     }, nil
 }
 
-func getVolumeLimits(nodeInfo *framework.NodeInfo, csiNode *storagev1.CSINode) map[v1.ResourceName]int64 {
-    // TODO: stop getting values from Node object in v1.18
-    nodeVolumeLimits := volumeLimits(nodeInfo)
-    if csiNode != nil {
-        for i := range csiNode.Spec.Drivers {
-            d := csiNode.Spec.Drivers[i]
-            if d.Allocatable != nil && d.Allocatable.Count != nil {
-                // TODO: drop GetCSIAttachLimitKey once we don't get values from Node object (v1.18)
-                k := v1.ResourceName(volumeutil.GetCSIAttachLimitKey(d.Name))
-                nodeVolumeLimits[k] = int64(*d.Allocatable.Count)
-            }
+// getVolumeLimits reads the volume limits from CSINode object and returns a map of volume limits.
+// The key is the driver name and the value is the maximum number of volumes that can be attached to the node.
+// If a key is not found in the map, it means there is no limit for the driver on the node.
+func getVolumeLimits(csiNode *storagev1.CSINode) map[string]int64 {
+    nodeVolumeLimits := make(map[string]int64)
+    if csiNode == nil {
+        return nodeVolumeLimits
+    }
+    for _, d := range csiNode.Spec.Drivers {
+        if d.Allocatable != nil && d.Allocatable.Count != nil {
+            nodeVolumeLimits[d.Name] = int64(*d.Allocatable.Count)
         }
     }
     return nodeVolumeLimits
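With the Node-object fallback gone, CSINode is the only source of attach limits for this plugin. A minimal, self-contained sketch of what the new getVolumeLimits consumes and produces (illustrative only, using the upstream storage/v1 types; the driver names and counts are made up):

    package main

    import (
        "fmt"

        storagev1 "k8s.io/api/storage/v1"
        "k8s.io/utils/ptr"
    )

    func main() {
        csiNode := &storagev1.CSINode{
            Spec: storagev1.CSINodeSpec{
                Drivers: []storagev1.CSINodeDriver{
                    // Driver that advertises an attach limit.
                    {Name: "ebs.csi.aws.com", Allocatable: &storagev1.VolumeNodeResources{Count: ptr.To(int32(25))}},
                    // Driver without Allocatable.Count: it gets no entry, i.e. no limit is enforced for it.
                    {Name: "pd.csi.storage.gke.io"},
                },
            },
        }

        // Same shape of logic as the new getVolumeLimits: driver name -> max attachable volumes.
        limits := make(map[string]int64)
        for _, d := range csiNode.Spec.Drivers {
            if d.Allocatable != nil && d.Allocatable.Count != nil {
                limits[d.Name] = int64(*d.Allocatable.Count)
            }
        }
        fmt.Println(limits) // map[ebs.csi.aws.com:25]
    }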

pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go

Lines changed: 20 additions & 46 deletions
@@ -26,7 +26,6 @@ import (
     "github.com/google/go-cmp/cmp"
     v1 "k8s.io/api/core/v1"
     storagev1 "k8s.io/api/storage/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/rand"
     "k8s.io/apimachinery/pkg/util/sets"
@@ -35,7 +34,6 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/framework"
     st "k8s.io/kubernetes/pkg/scheduler/testing"
     tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
-    volumeutil "k8s.io/kubernetes/pkg/volume/util"
     "k8s.io/kubernetes/test/utils/ktesting"
     "k8s.io/utils/ptr"
 )
@@ -51,22 +49,6 @@ var (
     scName = "csi-sc"
 )
 
-// getVolumeLimitKey returns a ResourceName by filter type
-func getVolumeLimitKey(filterType string) v1.ResourceName {
-    switch filterType {
-    case ebsVolumeFilterType:
-        return v1.ResourceName(volumeutil.EBSVolumeLimitKey)
-    case gcePDVolumeFilterType:
-        return v1.ResourceName(volumeutil.GCEVolumeLimitKey)
-    case azureDiskVolumeFilterType:
-        return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
-    case cinderVolumeFilterType:
-        return v1.ResourceName(volumeutil.CinderVolumeLimitKey)
-    default:
-        return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
-    }
-}
-
 func TestCSILimits(t *testing.T) {
     runningPod := st.MakePod().PVC("csi-ebs.csi.aws.com-3").Obj()
     pendingVolumePod := st.MakePod().PVC("csi-4").Obj()
@@ -297,7 +279,7 @@ func TestCSILimits(t *testing.T) {
             maxVols: 4,
             driverNames: []string{ebsCSIDriverName},
             test: "fits when node volume limit >= new pods CSI volume",
-            limitSource: "node",
+            limitSource: "csinode",
         },
         {
             newPod: csiEBSOneVolPod,
@@ -306,7 +288,7 @@ func TestCSILimits(t *testing.T) {
             maxVols: 2,
             driverNames: []string{ebsCSIDriverName},
             test: "doesn't when node volume limit <= pods CSI volume",
-            limitSource: "node",
+            limitSource: "csinode",
             wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
         },
         {
@@ -326,7 +308,7 @@ func TestCSILimits(t *testing.T) {
             maxVols: 2,
             driverNames: []string{ebsCSIDriverName},
             test: "count pending PVCs towards volume limit <= pods CSI volume",
-            limitSource: "node",
+            limitSource: "csinode",
             wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
         },
         // two same pending PVCs should be counted as 1
@@ -337,7 +319,7 @@ func TestCSILimits(t *testing.T) {
             maxVols: 4,
             driverNames: []string{ebsCSIDriverName},
             test: "count multiple pending pvcs towards volume limit >= pods CSI volume",
-            limitSource: "node",
+            limitSource: "csinode",
         },
         // should count PVCs with invalid PV name but valid SC
         {
@@ -347,7 +329,7 @@ func TestCSILimits(t *testing.T) {
             maxVols: 2,
             driverNames: []string{ebsCSIDriverName},
             test: "should count PVCs with invalid PV name but valid SC",
-            limitSource: "node",
+            limitSource: "csinode",
             wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
         },
         // don't count a volume which has storageclass missing
@@ -358,7 +340,7 @@ func TestCSILimits(t *testing.T) {
             maxVols: 2,
             driverNames: []string{ebsCSIDriverName},
             test: "don't count pvcs with missing SC towards volume limit",
-            limitSource: "node",
+            limitSource: "csinode",
         },
         // don't count multiple volume types
         {
@@ -368,7 +350,7 @@ func TestCSILimits(t *testing.T) {
             maxVols: 2,
             driverNames: []string{ebsCSIDriverName, gceCSIDriverName},
             test: "count pvcs with the same type towards volume limit",
-            limitSource: "node",
+            limitSource: "csinode",
             wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
         },
         {
@@ -378,7 +360,7 @@ func TestCSILimits(t *testing.T) {
             maxVols: 2,
             driverNames: []string{ebsCSIDriverName, gceCSIDriverName},
             test: "don't count pvcs with different type towards volume limit",
-            limitSource: "node",
+            limitSource: "csinode",
         },
         // Tests for in-tree volume migration
         {
@@ -396,10 +378,8 @@ func TestCSILimits(t *testing.T) {
             newPod: inTreeInlineVolPod,
             existingPods: []*v1.Pod{inTreeTwoVolPod},
             filterName: "csi",
-            maxVols: 2,
             driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
             migrationEnabled: true,
-            limitSource: "node",
             test: "nil csi node",
         },
         {
@@ -494,6 +474,7 @@ func TestCSILimits(t *testing.T) {
             filterName: "csi",
             ephemeralEnabled: true,
             driverNames: []string{ebsCSIDriverName},
+            limitSource: "csinode-with-no-limit",
             test: "ephemeral volume missing",
             wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `looking up PVC test/abc-xyz: persistentvolumeclaims "abc-xyz" not found`),
         },
@@ -503,6 +484,7 @@ func TestCSILimits(t *testing.T) {
             ephemeralEnabled: true,
             extraClaims: []v1.PersistentVolumeClaim{*conflictingClaim},
             driverNames: []string{ebsCSIDriverName},
+            limitSource: "csinode-with-no-limit",
             test: "ephemeral volume not owned",
             wantStatus: framework.AsStatus(errors.New("PVC test/abc-xyz was not created for pod test/abc (pod is not owner)")),
         },
@@ -512,6 +494,7 @@ func TestCSILimits(t *testing.T) {
             ephemeralEnabled: true,
             extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim},
             driverNames: []string{ebsCSIDriverName},
+            limitSource: "csinode-with-no-limit",
             test: "ephemeral volume unbound",
         },
         {
@@ -522,7 +505,7 @@ func TestCSILimits(t *testing.T) {
             driverNames: []string{ebsCSIDriverName},
             existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
             maxVols: 2,
-            limitSource: "node",
+            limitSource: "csinode",
             test: "ephemeral doesn't when node volume limit <= pods CSI volume",
             wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
         },
@@ -534,7 +517,7 @@ func TestCSILimits(t *testing.T) {
             driverNames: []string{ebsCSIDriverName},
             existingPods: []*v1.Pod{runningPod, ephemeralTwoVolumePod},
             maxVols: 2,
-            limitSource: "node",
+            limitSource: "csinode",
             test: "ephemeral doesn't when node volume limit <= pods ephemeral CSI volume",
             wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
         },
@@ -546,7 +529,7 @@ func TestCSILimits(t *testing.T) {
             driverNames: []string{ebsCSIDriverName},
             existingPods: []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod},
             maxVols: 3,
-            limitSource: "node",
+            limitSource: "csinode",
             test: "persistent doesn't when node volume limit <= pods ephemeral CSI volume + persistent volume, ephemeral disabled",
             wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
         },
@@ -558,7 +541,7 @@ func TestCSILimits(t *testing.T) {
             driverNames: []string{ebsCSIDriverName},
             existingPods: []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod},
             maxVols: 3,
-            limitSource: "node",
+            limitSource: "csinode",
             test: "persistent doesn't when node volume limit <= pods ephemeral CSI volume + persistent volume",
             wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
         },
@@ -569,7 +552,8 @@ func TestCSILimits(t *testing.T) {
             extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim},
             driverNames: []string{ebsCSIDriverName},
             existingPods: []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod},
-            maxVols: 4,
+            maxVols: 5,
+            limitSource: "csinode",
             test: "persistent okay when node volume limit > pods ephemeral CSI volume + persistent volume",
         },
         {
@@ -578,7 +562,7 @@ func TestCSILimits(t *testing.T) {
             maxVols: 2,
             driverNames: []string{ebsCSIDriverName},
             test: "skip Filter when the pod only uses secrets and configmaps",
-            limitSource: "node",
+            limitSource: "csinode",
             wantPreFilterStatus: framework.NewStatus(framework.Skip),
         },
         {
@@ -587,13 +571,14 @@ func TestCSILimits(t *testing.T) {
             maxVols: 2,
             driverNames: []string{ebsCSIDriverName},
             test: "don't skip Filter when the pod has pvcs",
-            limitSource: "node",
+            limitSource: "csinode",
         },
         {
             newPod: ephemeralPodWithConfigmapAndSecret,
             filterName: "csi",
             ephemeralEnabled: true,
             driverNames: []string{ebsCSIDriverName},
+            limitSource: "csinode-with-no-limit",
             test: "don't skip Filter when the pod has ephemeral volumes",
             wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `looking up PVC test/abc-xyz: persistentvolumeclaims "abc-xyz" not found`),
         },
@@ -821,12 +806,6 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
     }
     var csiNode *storagev1.CSINode
 
-    addLimitToNode := func() {
-        for _, driver := range driverNames {
-            node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(int64(limit), resource.DecimalSI)
-        }
-    }
-
     initCSINode := func() {
         csiNode = &storagev1.CSINode{
             ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
@@ -853,13 +832,8 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
     }
 
     switch limitSource {
-    case "node":
-        addLimitToNode()
     case "csinode":
         addDriversCSINode(true)
-    case "both":
-        addLimitToNode()
-        addDriversCSINode(true)
     case "csinode-with-no-limit":
         addDriversCSINode(false)
     case "no-csi-driver":
