@@ -26,7 +26,6 @@ import (
 	"github.com/google/go-cmp/cmp"
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -35,7 +34,6 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
-	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/test/utils/ktesting"
 	"k8s.io/utils/ptr"
 )
@@ -51,22 +49,6 @@
 	scName = "csi-sc"
 )
 
-// getVolumeLimitKey returns a ResourceName by filter type
-func getVolumeLimitKey(filterType string) v1.ResourceName {
-	switch filterType {
-	case ebsVolumeFilterType:
-		return v1.ResourceName(volumeutil.EBSVolumeLimitKey)
-	case gcePDVolumeFilterType:
-		return v1.ResourceName(volumeutil.GCEVolumeLimitKey)
-	case azureDiskVolumeFilterType:
-		return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
-	case cinderVolumeFilterType:
-		return v1.ResourceName(volumeutil.CinderVolumeLimitKey)
-	default:
-		return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
-	}
-}
-
 func TestCSILimits(t *testing.T) {
 	runningPod := st.MakePod().PVC("csi-ebs.csi.aws.com-3").Obj()
 	pendingVolumePod := st.MakePod().PVC("csi-4").Obj()
@@ -297,7 +279,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 4,
 			driverNames: []string{ebsCSIDriverName},
 			test: "fits when node volume limit >= new pods CSI volume",
-			limitSource: "node",
+			limitSource: "csinode",
 		},
 		{
 			newPod: csiEBSOneVolPod,
@@ -306,7 +288,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName},
 			test: "doesn't when node volume limit <= pods CSI volume",
-			limitSource: "node",
+			limitSource: "csinode",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
@@ -326,7 +308,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName},
 			test: "count pending PVCs towards volume limit <= pods CSI volume",
-			limitSource: "node",
+			limitSource: "csinode",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		// two same pending PVCs should be counted as 1
@@ -337,7 +319,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 4,
 			driverNames: []string{ebsCSIDriverName},
 			test: "count multiple pending pvcs towards volume limit >= pods CSI volume",
-			limitSource: "node",
+			limitSource: "csinode",
 		},
 		// should count PVCs with invalid PV name but valid SC
 		{
@@ -347,7 +329,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName},
 			test: "should count PVCs with invalid PV name but valid SC",
-			limitSource: "node",
+			limitSource: "csinode",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		// don't count a volume which has storageclass missing
@@ -358,7 +340,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName},
 			test: "don't count pvcs with missing SC towards volume limit",
-			limitSource: "node",
+			limitSource: "csinode",
 		},
 		// don't count multiple volume types
 		{
@@ -368,7 +350,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName, gceCSIDriverName},
 			test: "count pvcs with the same type towards volume limit",
-			limitSource: "node",
+			limitSource: "csinode",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
@@ -378,7 +360,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName, gceCSIDriverName},
 			test: "don't count pvcs with different type towards volume limit",
-			limitSource: "node",
+			limitSource: "csinode",
 		},
 		// Tests for in-tree volume migration
 		{
@@ -396,10 +378,8 @@ func TestCSILimits(t *testing.T) {
 			newPod: inTreeInlineVolPod,
 			existingPods: []*v1.Pod{inTreeTwoVolPod},
 			filterName: "csi",
-			maxVols: 2,
 			driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
 			migrationEnabled: true,
-			limitSource: "node",
 			test: "nil csi node",
 		},
 		{
@@ -494,6 +474,7 @@ func TestCSILimits(t *testing.T) {
 			filterName: "csi",
 			ephemeralEnabled: true,
 			driverNames: []string{ebsCSIDriverName},
+			limitSource: "csinode-with-no-limit",
 			test: "ephemeral volume missing",
 			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `looking up PVC test/abc-xyz: persistentvolumeclaims "abc-xyz" not found`),
 		},
@@ -503,6 +484,7 @@ func TestCSILimits(t *testing.T) {
 			ephemeralEnabled: true,
 			extraClaims: []v1.PersistentVolumeClaim{*conflictingClaim},
 			driverNames: []string{ebsCSIDriverName},
+			limitSource: "csinode-with-no-limit",
 			test: "ephemeral volume not owned",
 			wantStatus: framework.AsStatus(errors.New("PVC test/abc-xyz was not created for pod test/abc (pod is not owner)")),
 		},
@@ -512,6 +494,7 @@ func TestCSILimits(t *testing.T) {
 			ephemeralEnabled: true,
 			extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim},
 			driverNames: []string{ebsCSIDriverName},
+			limitSource: "csinode-with-no-limit",
 			test: "ephemeral volume unbound",
 		},
 		{
@@ -522,7 +505,7 @@ func TestCSILimits(t *testing.T) {
 			driverNames: []string{ebsCSIDriverName},
 			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
 			maxVols: 2,
-			limitSource: "node",
+			limitSource: "csinode",
 			test: "ephemeral doesn't when node volume limit <= pods CSI volume",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
@@ -534,7 +517,7 @@ func TestCSILimits(t *testing.T) {
 			driverNames: []string{ebsCSIDriverName},
 			existingPods: []*v1.Pod{runningPod, ephemeralTwoVolumePod},
 			maxVols: 2,
-			limitSource: "node",
+			limitSource: "csinode",
 			test: "ephemeral doesn't when node volume limit <= pods ephemeral CSI volume",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
@@ -546,7 +529,7 @@ func TestCSILimits(t *testing.T) {
 			driverNames: []string{ebsCSIDriverName},
 			existingPods: []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod},
 			maxVols: 3,
-			limitSource: "node",
+			limitSource: "csinode",
 			test: "persistent doesn't when node volume limit <= pods ephemeral CSI volume + persistent volume, ephemeral disabled",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
@@ -558,7 +541,7 @@ func TestCSILimits(t *testing.T) {
 			driverNames: []string{ebsCSIDriverName},
 			existingPods: []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod},
 			maxVols: 3,
-			limitSource: "node",
+			limitSource: "csinode",
 			test: "persistent doesn't when node volume limit <= pods ephemeral CSI volume + persistent volume",
 			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
@@ -569,7 +552,8 @@ func TestCSILimits(t *testing.T) {
 			extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim},
 			driverNames: []string{ebsCSIDriverName},
 			existingPods: []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod},
-			maxVols: 4,
+			maxVols: 5,
+			limitSource: "csinode",
 			test: "persistent okay when node volume limit > pods ephemeral CSI volume + persistent volume",
 		},
 		{
@@ -578,7 +562,7 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName},
 			test: "skip Filter when the pod only uses secrets and configmaps",
-			limitSource: "node",
+			limitSource: "csinode",
 			wantPreFilterStatus: framework.NewStatus(framework.Skip),
 		},
 		{
@@ -587,13 +571,14 @@ func TestCSILimits(t *testing.T) {
 			maxVols: 2,
 			driverNames: []string{ebsCSIDriverName},
 			test: "don't skip Filter when the pod has pvcs",
-			limitSource: "node",
+			limitSource: "csinode",
 		},
 		{
 			newPod: ephemeralPodWithConfigmapAndSecret,
 			filterName: "csi",
 			ephemeralEnabled: true,
 			driverNames: []string{ebsCSIDriverName},
+			limitSource: "csinode-with-no-limit",
 			test: "don't skip Filter when the pod has ephemeral volumes",
 			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `looking up PVC test/abc-xyz: persistentvolumeclaims "abc-xyz" not found`),
 		},
@@ -898,12 +883,6 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
 	}
 	var csiNode *storagev1.CSINode
 
-	addLimitToNode := func() {
-		for _, driver := range driverNames {
-			node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(int64(limit), resource.DecimalSI)
-		}
-	}
-
 	initCSINode := func() {
 		csiNode = &storagev1.CSINode{
 			ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
@@ -930,13 +909,8 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
 	}
 
 	switch limitSource {
-	case "node":
-		addLimitToNode()
 	case "csinode":
 		addDriversCSINode(true)
-	case "both":
-		addLimitToNode()
-		addDriversCSINode(true)
 	case "csinode-with-no-limit":
 		addDriversCSINode(false)
 	case "no-csi-driver":