Commit 3ab0c84

Merge pull request kubernetes#126978 from huww98/e2e-storage-multi-name
e2e/storage: get driver name from storage class
2 parents 856475e + d620b10 commit 3ab0c84
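
In short, the suite now derives the CSI driver name from the StorageClass it provisions with (its Provisioner field) instead of from the static DriverInfo name; the branch name suggests this is aimed at drivers deployed under more than one name, and for CSI-provisioned volumes the provisioner string is the name that appears in CSINode.Spec.Drivers. A minimal sketch of the same lookup outside the e2e framework follows; the package, helper name, clientset, and StorageClass name are illustrative assumptions, not code from this commit.

// Sketch only: resolve a CSI driver name from the StorageClass used for
// dynamic provisioning. StorageClass.Provisioner matches the driver name
// published in CSINode.Spec.Drivers[].Name for CSI volumes.
package scutil

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// DriverNameFromStorageClass is a hypothetical helper: cs is any configured
// clientset, scName the StorageClass used by the test.
func DriverNameFromStorageClass(ctx context.Context, cs kubernetes.Interface, scName string) (string, error) {
	sc, err := cs.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("getting StorageClass %s: %w", scName, err)
	}
	return sc.Provisioner, nil
}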

1 file changed (+48 −45)

test/e2e/storage/testsuites/volumelimits.go

Lines changed: 48 additions & 45 deletions

@@ -24,6 +24,7 @@ import (
 	"time"
 
 	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
 
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
@@ -39,6 +40,7 @@ import (
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
 	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
 	admissionapi "k8s.io/pod-security-admission/api"
@@ -86,6 +88,13 @@ func (t *volumeLimitsTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInf
 }
 
 func (t *volumeLimitsTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
+	if pattern.VolType != storageframework.DynamicPV {
+		e2eskipper.Skipf("Suite %q does not support %v", t.tsInfo.Name, pattern.VolType)
+	}
+	dInfo := driver.GetDriverInfo()
+	if !dInfo.Capabilities[storageframework.CapVolumeLimits] {
+		e2eskipper.Skipf("Driver %s does not support volume limits", dInfo.Name)
+	}
 }
 
 func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
@@ -104,17 +113,23 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 		podNames []string
 
 		// All created PVs, incl. the one in resource
-		pvNames sets.String
+		pvNames sets.Set[string]
 	}
 	var (
 		l local
+
+		dDriver storageframework.DynamicPVTestDriver
 	)
 
 	// Beware that it also registers an AfterEach which renders f unusable. Any code using
 	// f must run inside an It or Context callback.
 	f := framework.NewFrameworkWithCustomTimeouts("volumelimits", storageframework.GetDriverTimeouts(driver))
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
+	ginkgo.BeforeEach(func() {
+		dDriver = driver.(storageframework.DynamicPVTestDriver)
+	})
+
 	// This checks that CSIMaxVolumeLimitChecker works as expected.
 	// A randomly chosen node should be able to handle as many CSI volumes as
 	// it claims to handle in CSINode.Spec.Drivers[x].Allocatable.
@@ -125,14 +140,6 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 	// BEWARE: the test may create lot of volumes and it's really slow.
 	f.It("should support volume limits", f.WithSerial(), func(ctx context.Context) {
 		driverInfo := driver.GetDriverInfo()
-		if !driverInfo.Capabilities[storageframework.CapVolumeLimits] {
-			ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name))
-		}
-		var dDriver storageframework.DynamicPVTestDriver
-		if dDriver = driver.(storageframework.DynamicPVTestDriver); dDriver == nil {
-			framework.Failf("Test driver does not provide dynamically created volumes")
-		}
-
 		l.ns = f.Namespace
 		l.cs = f.ClientSet
 
@@ -150,7 +157,7 @@
 		framework.Logf("Selected node %s", nodeName)
 
 		ginkgo.By("Checking node limits")
-		limit, err := getNodeLimits(ctx, l.cs, l.config, nodeName, driverInfo)
+		limit, err := getNodeLimits(ctx, l.cs, l.config, nodeName, dDriver)
 		framework.ExpectNoError(err)
 
 		framework.Logf("Node %s can handle %d volumes of driver %s", nodeName, limit, driverInfo.Name)
@@ -204,8 +211,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 		}
 
 		ginkgo.By("Waiting for all PVCs to get Bound")
-		l.pvNames, err = waitForAllPVCsBound(ctx, l.cs, testSlowMultiplier*f.Timeouts.PVBound, l.ns.Name, l.pvcNames)
-		framework.ExpectNoError(err)
+		l.pvNames = waitForAllPVCsBound(ctx, l.cs, testSlowMultiplier*f.Timeouts.PVBound, l.ns.Name, l.pvcNames)
 
 		ginkgo.By("Waiting for the pod(s) running")
 		for _, podName := range l.podNames {
@@ -232,7 +238,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 				}
 			}
 			if pod.Status.Phase != v1.PodPending {
-				return true, fmt.Errorf("Expected pod to be in phase Pending, but got phase: %v", pod.Status.Phase)
+				return true, fmt.Errorf("expected pod to be in phase Pending, but got phase: %v", pod.Status.Phase)
 			}
 			return false, nil
 		})
@@ -265,15 +271,15 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 
 		for _, nodeName := range nodeNames {
 			ginkgo.By("Checking csinode limits")
-			_, err := getNodeLimits(ctx, l.cs, l.config, nodeName, driverInfo)
+			_, err := getNodeLimits(ctx, l.cs, l.config, nodeName, dDriver)
 			if err != nil {
 				framework.Failf("Expected volume limits to be set, error: %v", err)
 			}
 		}
 	})
 }
 
-func cleanupTest(ctx context.Context, cs clientset.Interface, ns string, podNames, pvcNames []string, pvNames sets.String, timeout time.Duration) error {
+func cleanupTest(ctx context.Context, cs clientset.Interface, ns string, podNames, pvcNames []string, pvNames sets.Set[string], timeout time.Duration) error {
 	var cleanupErrors []string
 	for _, podName := range podNames {
 		err := cs.CoreV1().Pods(ns).Delete(ctx, podName, metav1.DeleteOptions{})
@@ -290,7 +296,7 @@ func cleanupTest(ctx context.Context, cs clientset.Interface, ns string, podName
 	// Wait for the PVs to be deleted. It includes also pod and PVC deletion because of PVC protection.
 	// We use PVs to make sure that the test does not leave orphan PVs when a CSI driver is destroyed
 	// just after the test ends.
-	err := wait.Poll(5*time.Second, timeout, func() (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, 5*time.Second, timeout, false, func(ctx context.Context) (bool, error) {
 		existing := 0
 		for _, pvName := range pvNames.UnsortedList() {
 			_, err := cs.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
@@ -320,48 +326,45 @@ func cleanupTest(ctx context.Context, cs clientset.Interface, ns string, podName
 }
 
 // waitForAllPVCsBound waits until the given PVCs are all bound. It then returns the bound PVC names as a set.
-func waitForAllPVCsBound(ctx context.Context, cs clientset.Interface, timeout time.Duration, ns string, pvcNames []string) (sets.String, error) {
-	pvNames := sets.NewString()
-	err := wait.Poll(5*time.Second, timeout, func() (bool, error) {
+func waitForAllPVCsBound(ctx context.Context, cs clientset.Interface, timeout time.Duration, ns string, pvcNames []string) sets.Set[string] {
+	pvNames := sets.New[string]()
+	gomega.Eventually(ctx, func() (int, error) {
		unbound := 0
 		for _, pvcName := range pvcNames {
 			pvc, err := cs.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{})
 			if err != nil {
-				return false, err
+				gomega.StopTrying("failed to fetch PVCs").Wrap(err).Now()
 			}
 			if pvc.Status.Phase != v1.ClaimBound {
 				unbound++
 			} else {
 				pvNames.Insert(pvc.Spec.VolumeName)
 			}
 		}
-		if unbound > 0 {
-			framework.Logf("%d/%d of PVCs are Bound", pvNames.Len(), len(pvcNames))
-			return false, nil
-		}
-		return true, nil
-	})
-	if err != nil {
-		return nil, fmt.Errorf("error waiting for all PVCs to be bound: %w", err)
-	}
-	return pvNames, nil
+		framework.Logf("%d/%d of PVCs are Bound", pvNames.Len(), len(pvcNames))
+		return unbound, nil
+	}).WithPolling(5*time.Second).WithTimeout(timeout).Should(gomega.BeZero(), "error waiting for all PVCs to be bound")
+	return pvNames
 }
 
-func getNodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) {
-	if len(driverInfo.InTreePluginName) == 0 {
-		return getCSINodeLimits(ctx, cs, config, nodeName, driverInfo)
+func getNodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driver storageframework.DynamicPVTestDriver) (int, error) {
+	driverInfo := driver.GetDriverInfo()
+	if len(driverInfo.InTreePluginName) > 0 {
+		return getInTreeNodeLimits(ctx, cs, nodeName, driverInfo.InTreePluginName)
 	}
-	return getInTreeNodeLimits(ctx, cs, nodeName, driverInfo)
+
+	sc := driver.GetDynamicProvisionStorageClass(ctx, config, "")
+	return getCSINodeLimits(ctx, cs, config, nodeName, sc.Provisioner)
 }
 
-func getInTreeNodeLimits(ctx context.Context, cs clientset.Interface, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) {
+func getInTreeNodeLimits(ctx context.Context, cs clientset.Interface, nodeName, driverName string) (int, error) {
 	node, err := cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 	if err != nil {
 		return 0, err
 	}
 
 	var allocatableKey string
-	switch driverInfo.InTreePluginName {
+	switch driverName {
 	case migrationplugins.AWSEBSInTreePluginName:
 		allocatableKey = volumeutil.EBSVolumeLimitKey
 	case migrationplugins.GCEPDInTreePluginName:
@@ -371,47 +374,47 @@ func getInTreeNodeLimits(ctx context.Context, cs clientset.Interface, nodeName s
 	case migrationplugins.AzureDiskInTreePluginName:
 		allocatableKey = volumeutil.AzureVolumeLimitKey
 	default:
-		return 0, fmt.Errorf("Unknown in-tree volume plugin name: %s", driverInfo.InTreePluginName)
+		return 0, fmt.Errorf("unknown in-tree volume plugin name: %s", driverName)
 	}
 
 	limit, ok := node.Status.Allocatable[v1.ResourceName(allocatableKey)]
 	if !ok {
-		return 0, fmt.Errorf("Node %s does not contain status.allocatable[%s] for volume plugin %s", nodeName, allocatableKey, driverInfo.InTreePluginName)
+		return 0, fmt.Errorf("node %s does not contain status.allocatable[%s] for volume plugin %s", nodeName, allocatableKey, driverName)
 	}
 	return int(limit.Value()), nil
 }
 
-func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) {
+func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName, driverName string) (int, error) {
 	// Retry with a timeout, the driver might just have been installed and kubelet takes a while to publish everything.
 	var limit int
-	err := wait.PollImmediate(2*time.Second, csiNodeInfoTimeout, func() (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, 2*time.Second, csiNodeInfoTimeout, true, func(ctx context.Context) (bool, error) {
 		csiNode, err := cs.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{})
 		if err != nil {
 			framework.Logf("%s", err)
 			return false, nil
 		}
 		var csiDriver *storagev1.CSINodeDriver
 		for i, c := range csiNode.Spec.Drivers {
-			if c.Name == driverInfo.Name || c.Name == config.GetUniqueDriverName() {
+			if c.Name == driverName || c.Name == config.GetUniqueDriverName() {
 				csiDriver = &csiNode.Spec.Drivers[i]
 				break
 			}
 		}
 		if csiDriver == nil {
-			framework.Logf("CSINodeInfo does not have driver %s yet", driverInfo.Name)
+			framework.Logf("CSINodeInfo does not have driver %s yet", driverName)
 			return false, nil
 		}
 		if csiDriver.Allocatable == nil {
-			return false, fmt.Errorf("CSINodeInfo does not have Allocatable for driver %s", driverInfo.Name)
+			return false, fmt.Errorf("CSINodeInfo does not have Allocatable for driver %s", driverName)
 		}
 		if csiDriver.Allocatable.Count == nil {
-			return false, fmt.Errorf("CSINodeInfo does not have Allocatable.Count for driver %s", driverInfo.Name)
+			return false, fmt.Errorf("CSINodeInfo does not have Allocatable.Count for driver %s", driverName)
 		}
 		limit = int(*csiDriver.Allocatable.Count)
 		return true, nil
 	})
 	if err != nil {
-		return 0, fmt.Errorf("could not get CSINode limit for driver %s: %w", driverInfo.Name, err)
+		return 0, fmt.Errorf("could not get CSINode limit for driver %s: %w", driverName, err)
 	}
 	return limit, nil
 }
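
Apart from the driver-name change, the diff also moves the remaining polling from the deprecated wait.Poll / wait.PollImmediate helpers to the context-aware wait.PollUntilContextTimeout, and rewrites waitForAllPVCsBound on top of gomega.Eventually. A self-contained sketch of the wait call shape is below; the function, interval, and timeout values are illustrative, not the ones used in this file.

// Sketch of context-aware polling with k8s.io/apimachinery/pkg/util/wait.
package pollsketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitUntilReady polls check every 2 seconds for up to one minute.
// The fourth argument selects immediate evaluation: true mirrors the old
// wait.PollImmediate (condition runs before the first interval elapses),
// false mirrors the old wait.Poll.
func waitUntilReady(ctx context.Context, check func(context.Context) (bool, error)) error {
	return wait.PollUntilContextTimeout(ctx, 2*time.Second, time.Minute, true, check)
}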
