@@ -24,6 +24,7 @@ import (
	"time"

	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
@@ -39,6 +40,7 @@ import (
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
@@ -86,6 +88,13 @@ func (t *volumeLimitsTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInf
}

func (t *volumeLimitsTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
+	if pattern.VolType != storageframework.DynamicPV {
+		e2eskipper.Skipf("Suite %q does not support %v", t.tsInfo.Name, pattern.VolType)
+	}
+	dInfo := driver.GetDriverInfo()
+	if !dInfo.Capabilities[storageframework.CapVolumeLimits] {
+		e2eskipper.Skipf("Driver %s does not support volume limits", dInfo.Name)
+	}
}

func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
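Note on the hunk above: moving the DynamicPV and CapVolumeLimits checks into SkipUnsupportedTests lets the framework skip the whole suite up front, instead of skipping inside each It body after the test namespace has already been set up. As a rough sketch of the underlying mechanism (e2eskipper.Skipf reduces to ginkgo.Skip, to the best of my understanding), a standalone Ginkgo test can skip early like this; supportsVolumeLimits is a made-up stand-in for the driver capability lookup:

```go
package example

import (
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestSkipEarly(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "skip-early example")
}

// supportsVolumeLimits is a hypothetical stand-in for the driver capability lookup.
var supportsVolumeLimits = false

var _ = ginkgo.Describe("volume limits", func() {
	ginkgo.BeforeEach(func() {
		// Skipping before the spec body runs avoids creating any test resources,
		// which is the same effect SkipUnsupportedTests has for a whole suite.
		if !supportsVolumeLimits {
			ginkgo.Skip("driver does not support volume limits")
		}
	})

	ginkgo.It("should support volume limits", func() {
		// Runs only when the capability is reported.
	})
})
```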
@@ -104,17 +113,23 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
		podNames []string

		// All created PVs, incl. the one in resource
-		pvNames sets.String
+		pvNames sets.Set[string]
	}
	var (
		l local
+
+		dDriver storageframework.DynamicPVTestDriver
	)

	// Beware that it also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewFrameworkWithCustomTimeouts("volumelimits", storageframework.GetDriverTimeouts(driver))
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

+	ginkgo.BeforeEach(func() {
+		dDriver = driver.(storageframework.DynamicPVTestDriver)
+	})
+
	// This checks that CSIMaxVolumeLimitChecker works as expected.
	// A randomly chosen node should be able to handle as many CSI volumes as
	// it claims to handle in CSINode.Spec.Drivers[x].Allocatable.
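The pvNames field above moves from the deprecated sets.String to the generic sets.Set[string] in k8s.io/apimachinery/pkg/util/sets. A minimal, self-contained sketch of the generic API (the pv-* names are purely illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// sets.New[string] replaces sets.NewString; the method set is analogous.
	pvNames := sets.New[string]("pv-1", "pv-2")
	pvNames.Insert("pv-3")

	fmt.Println(pvNames.Len())          // 3
	fmt.Println(pvNames.Has("pv-2"))    // true
	fmt.Println(pvNames.UnsortedList()) // all three names, in no particular order
}
```

Behaviour is unchanged; the switch appears to be a mechanical migration off the non-generic set types.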
@@ -125,14 +140,6 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
	// BEWARE: the test may create lot of volumes and it's really slow.
	f.It("should support volume limits", f.WithSerial(), func(ctx context.Context) {
		driverInfo := driver.GetDriverInfo()
-		if !driverInfo.Capabilities[storageframework.CapVolumeLimits] {
-			ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name))
-		}
-		var dDriver storageframework.DynamicPVTestDriver
-		if dDriver = driver.(storageframework.DynamicPVTestDriver); dDriver == nil {
-			framework.Failf("Test driver does not provide dynamically created volumes")
-		}
-
		l.ns = f.Namespace
		l.cs = f.ClientSet

@@ -150,7 +157,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
		framework.Logf("Selected node %s", nodeName)

		ginkgo.By("Checking node limits")
-		limit, err := getNodeLimits(ctx, l.cs, l.config, nodeName, driverInfo)
+		limit, err := getNodeLimits(ctx, l.cs, l.config, nodeName, dDriver)
		framework.ExpectNoError(err)

		framework.Logf("Node %s can handle %d volumes of driver %s", nodeName, limit, driverInfo.Name)
@@ -204,8 +211,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
		}

		ginkgo.By("Waiting for all PVCs to get Bound")
-		l.pvNames, err = waitForAllPVCsBound(ctx, l.cs, testSlowMultiplier*f.Timeouts.PVBound, l.ns.Name, l.pvcNames)
-		framework.ExpectNoError(err)
+		l.pvNames = waitForAllPVCsBound(ctx, l.cs, testSlowMultiplier*f.Timeouts.PVBound, l.ns.Name, l.pvcNames)

		ginkgo.By("Waiting for the pod(s) running")
		for _, podName := range l.podNames {
@@ -232,7 +238,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
				}
			}
			if pod.Status.Phase != v1.PodPending {
-				return true, fmt.Errorf("Expected pod to be in phase Pending, but got phase: %v", pod.Status.Phase)
+				return true, fmt.Errorf("expected pod to be in phase Pending, but got phase: %v", pod.Status.Phase)
			}
			return false, nil
		})
@@ -265,15 +271,15 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,

		for _, nodeName := range nodeNames {
			ginkgo.By("Checking csinode limits")
-			_, err := getNodeLimits(ctx, l.cs, l.config, nodeName, driverInfo)
+			_, err := getNodeLimits(ctx, l.cs, l.config, nodeName, dDriver)
			if err != nil {
				framework.Failf("Expected volume limits to be set, error: %v", err)
			}
		}
	})
}

-func cleanupTest(ctx context.Context, cs clientset.Interface, ns string, podNames, pvcNames []string, pvNames sets.String, timeout time.Duration) error {
+func cleanupTest(ctx context.Context, cs clientset.Interface, ns string, podNames, pvcNames []string, pvNames sets.Set[string], timeout time.Duration) error {
	var cleanupErrors []string
	for _, podName := range podNames {
		err := cs.CoreV1().Pods(ns).Delete(ctx, podName, metav1.DeleteOptions{})
@@ -290,7 +296,7 @@ func cleanupTest(ctx context.Context, cs clientset.Interface, ns string, podName
	// Wait for the PVs to be deleted. It includes also pod and PVC deletion because of PVC protection.
	// We use PVs to make sure that the test does not leave orphan PVs when a CSI driver is destroyed
	// just after the test ends.
-	err := wait.Poll(5*time.Second, timeout, func() (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, 5*time.Second, timeout, false, func(ctx context.Context) (bool, error) {
		existing := 0
		for _, pvName := range pvNames.UnsortedList() {
			_, err := cs.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
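For context on the hunk above: wait.Poll is deprecated in favor of the context-aware wait.PollUntilContextTimeout; the extra boolean selects whether the condition is tried once before the first sleep (false here, true in the getCSINodeLimits change further down). A small standalone sketch with made-up interval and timeout values:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()
	ready := time.Now().Add(600 * time.Millisecond)

	// Poll every 200ms, give up after 2s; the poll also stops if ctx is cancelled.
	// The fourth argument ("immediate") controls whether the condition runs once
	// before the first sleep.
	err := wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, 2*time.Second, false,
		func(ctx context.Context) (bool, error) {
			done := time.Now().After(ready)
			fmt.Println("polled, done =", done)
			return done, nil
		})
	fmt.Println("result:", err)
}
```

Threading the test context through means the wait is abandoned promptly if the surrounding test is cancelled or times out.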
@@ -320,48 +326,45 @@ func cleanupTest(ctx context.Context, cs clientset.Interface, ns string, podName
}

// waitForAllPVCsBound waits until the given PVCs are all bound. It then returns the bound PVC names as a set.
-func waitForAllPVCsBound(ctx context.Context, cs clientset.Interface, timeout time.Duration, ns string, pvcNames []string) (sets.String, error) {
-	pvNames := sets.NewString()
-	err := wait.Poll(5*time.Second, timeout, func() (bool, error) {
+func waitForAllPVCsBound(ctx context.Context, cs clientset.Interface, timeout time.Duration, ns string, pvcNames []string) sets.Set[string] {
+	pvNames := sets.New[string]()
+	gomega.Eventually(ctx, func() (int, error) {
		unbound := 0
		for _, pvcName := range pvcNames {
			pvc, err := cs.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{})
			if err != nil {
-				return false, err
+				gomega.StopTrying("failed to fetch PVCs").Wrap(err).Now()
			}
			if pvc.Status.Phase != v1.ClaimBound {
				unbound++
			} else {
				pvNames.Insert(pvc.Spec.VolumeName)
			}
		}
-		if unbound > 0 {
-			framework.Logf("%d/%d of PVCs are Bound", pvNames.Len(), len(pvcNames))
-			return false, nil
-		}
-		return true, nil
-	})
-	if err != nil {
-		return nil, fmt.Errorf("error waiting for all PVCs to be bound: %w", err)
-	}
-	return pvNames, nil
+		framework.Logf("%d/%d of PVCs are Bound", pvNames.Len(), len(pvcNames))
+		return unbound, nil
+	}).WithPolling(5*time.Second).WithTimeout(timeout).Should(gomega.BeZero(), "error waiting for all PVCs to be bound")
+	return pvNames
}

-func getNodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) {
-	if len(driverInfo.InTreePluginName) == 0 {
-		return getCSINodeLimits(ctx, cs, config, nodeName, driverInfo)
+func getNodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driver storageframework.DynamicPVTestDriver) (int, error) {
+	driverInfo := driver.GetDriverInfo()
+	if len(driverInfo.InTreePluginName) > 0 {
+		return getInTreeNodeLimits(ctx, cs, nodeName, driverInfo.InTreePluginName)
	}
-	return getInTreeNodeLimits(ctx, cs, nodeName, driverInfo)
+
+	sc := driver.GetDynamicProvisionStorageClass(ctx, config, "")
+	return getCSINodeLimits(ctx, cs, config, nodeName, sc.Provisioner)
}

-func getInTreeNodeLimits(ctx context.Context, cs clientset.Interface, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) {
+func getInTreeNodeLimits(ctx context.Context, cs clientset.Interface, nodeName, driverName string) (int, error) {
	node, err := cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}

	var allocatableKey string
-	switch driverInfo.InTreePluginName {
+	switch driverName {
	case migrationplugins.AWSEBSInTreePluginName:
		allocatableKey = volumeutil.EBSVolumeLimitKey
	case migrationplugins.GCEPDInTreePluginName:
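The waitForAllPVCsBound rewrite above replaces a hand-rolled wait.Poll loop with gomega.Eventually: the context goes in as the first argument, WithPolling/WithTimeout set the cadence, Should(gomega.BeZero()) is the exit condition, and gomega.StopTrying aborts on unrecoverable errors. A rough standalone sketch of the same pattern, with a fake counter standing in for the PVC lookups:

```go
package example

import (
	"context"
	"sync/atomic"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

// TestEventuallyPolling mimics the waitForAllPVCsBound pattern: poll a function
// until the number of "unbound" items drops to zero, or the timeout expires.
func TestEventuallyPolling(t *testing.T) {
	g := gomega.NewWithT(t)

	var unbound atomic.Int32
	unbound.Store(3)
	go func() {
		// Simulate PVCs becoming Bound over time.
		for unbound.Load() > 0 {
			time.Sleep(50 * time.Millisecond)
			unbound.Add(-1)
		}
	}()

	ctx := context.Background()
	g.Eventually(ctx, func() (int, error) {
		// A non-nil error fails this attempt and retries; calling
		// gomega.StopTrying("...").Wrap(err).Now() instead would abort polling entirely.
		return int(unbound.Load()), nil
	}).WithPolling(20 * time.Millisecond).WithTimeout(2 * time.Second).
		Should(gomega.BeZero(), "items never became bound")
}
```

One practical difference from the old loop: Eventually fails the surrounding spec itself on timeout, which is why waitForAllPVCsBound no longer returns an error.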
@@ -371,47 +374,47 @@ func getInTreeNodeLimits(ctx context.Context, cs clientset.Interface, nodeName s
	case migrationplugins.AzureDiskInTreePluginName:
		allocatableKey = volumeutil.AzureVolumeLimitKey
	default:
-		return 0, fmt.Errorf("Unknown in-tree volume plugin name: %s", driverInfo.InTreePluginName)
+		return 0, fmt.Errorf("unknown in-tree volume plugin name: %s", driverName)
	}

	limit, ok := node.Status.Allocatable[v1.ResourceName(allocatableKey)]
	if !ok {
-		return 0, fmt.Errorf("Node %s does not contain status.allocatable[%s] for volume plugin %s", nodeName, allocatableKey, driverInfo.InTreePluginName)
+		return 0, fmt.Errorf("node %s does not contain status.allocatable[%s] for volume plugin %s", nodeName, allocatableKey, driverName)
	}
	return int(limit.Value()), nil
}

-func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) {
+func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName, driverName string) (int, error) {
	// Retry with a timeout, the driver might just have been installed and kubelet takes a while to publish everything.
	var limit int
-	err := wait.PollImmediate(2*time.Second, csiNodeInfoTimeout, func() (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, 2*time.Second, csiNodeInfoTimeout, true, func(ctx context.Context) (bool, error) {
		csiNode, err := cs.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{})
		if err != nil {
			framework.Logf("%s", err)
			return false, nil
		}
		var csiDriver *storagev1.CSINodeDriver
		for i, c := range csiNode.Spec.Drivers {
-			if c.Name == driverInfo.Name || c.Name == config.GetUniqueDriverName() {
+			if c.Name == driverName || c.Name == config.GetUniqueDriverName() {
				csiDriver = &csiNode.Spec.Drivers[i]
				break
			}
		}
		if csiDriver == nil {
-			framework.Logf("CSINodeInfo does not have driver %s yet", driverInfo.Name)
+			framework.Logf("CSINodeInfo does not have driver %s yet", driverName)
			return false, nil
		}
		if csiDriver.Allocatable == nil {
-			return false, fmt.Errorf("CSINodeInfo does not have Allocatable for driver %s", driverInfo.Name)
+			return false, fmt.Errorf("CSINodeInfo does not have Allocatable for driver %s", driverName)
		}
		if csiDriver.Allocatable.Count == nil {
-			return false, fmt.Errorf("CSINodeInfo does not have Allocatable.Count for driver %s", driverInfo.Name)
+			return false, fmt.Errorf("CSINodeInfo does not have Allocatable.Count for driver %s", driverName)
		}
		limit = int(*csiDriver.Allocatable.Count)
		return true, nil
	})
	if err != nil {
-		return 0, fmt.Errorf("could not get CSINode limit for driver %s: %w", driverInfo.Name, err)
+		return 0, fmt.Errorf("could not get CSINode limit for driver %s: %w", driverName, err)
	}
	return limit, nil
}