Commit ef42d82

Merge pull request kubernetes#75796 from msau42/fix-e2e-multizone
Switch PD tests to use delayed binding
2 parents: aad83d1 + 634be03

File tree: 8 files changed, +166 -107 lines

test/e2e/framework/pv_util.go

Lines changed: 13 additions & 1 deletion
@@ -21,7 +21,7 @@ import (
 	"time"
 
 	. "github.com/onsi/ginkgo"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -1007,3 +1007,15 @@ func CreatePVSource(zone string) (*v1.PersistentVolumeSource, error) {
 func DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
 	return TestContext.CloudConfig.Provider.DeletePVSource(pvSource)
 }
+
+func GetBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
+	// Get new copy of the claim
+	claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the bound PV
+	pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
+	return pv, err
+}
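
The new framework.GetBoundPV helper resolves a claim to the PersistentVolume it is bound to, so later checks no longer need the PV threaded through their signatures. A minimal sketch of a caller, assuming an already-bound claim and an e2e clientset; the function and variable names below are illustrative and not part of this commit:

package example

import (
	. "github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// checkBoundPV looks up the PV behind a bound claim and verifies that the
// PV's ClaimRef points back at that claim.
func checkBoundPV(cs clientset.Interface, claim *v1.PersistentVolumeClaim) {
	pv, err := framework.GetBoundPV(cs, claim)
	Expect(err).NotTo(HaveOccurred())
	Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.Name))
	Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.Namespace))
}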

test/e2e/storage/csi_volumes.go

Lines changed: 1 addition & 1 deletion
@@ -153,7 +153,7 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela
 	if delayBinding {
 		test.TestBindingWaitForFirstConsumer(nodeSelector, true /* expect unschedulable */)
 	} else {
-		test.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+		test.PvCheck = func(claim *v1.PersistentVolumeClaim) {
 			// Ensure that a pod cannot be scheduled in an unsuitable zone.
 			pod := testsuites.StartInPodWithVolume(cs, namespace, claim.Name, "pvc-tester-unschedulable", "sleep 100000",
 				framework.NodeSelection{Selector: nodeSelector})

test/e2e/storage/drivers/csi.go

Lines changed: 2 additions & 1 deletion
@@ -381,8 +381,9 @@ func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerT
 	if fsType != "" {
 		parameters["csi.storage.k8s.io/fstype"] = fsType
 	}
+	delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
 
-	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
+	return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
 }
 
 func (g *gcePDCSIDriver) GetClaimSize() string {

test/e2e/storage/drivers/in_tree.go

Lines changed: 2 additions & 1 deletion
@@ -1186,8 +1186,9 @@ func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTest
 	}
 	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
+	delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
 
-	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
+	return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
 }
 
 func (h *gcePdDriver) GetClaimSize() string {
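
Both the CSI and in-tree GCE PD drivers now request a StorageClass with VolumeBindingWaitForFirstConsumer, so provisioning waits until a pod using the claim is scheduled and the disk can be created in that pod's zone. A sketch of the kind of class this presumably yields; the name, provisioner, and parameters are placeholders rather than values taken from the commit:

package example

import (
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// delayedBindingClass builds a StorageClass whose claims stay Pending until a
// consuming pod is scheduled; only then is the volume provisioned, using the
// pod's placement as a topology hint.
func delayedBindingClass() *storagev1.StorageClass {
	mode := storagev1.VolumeBindingWaitForFirstConsumer
	return &storagev1.StorageClass{
		ObjectMeta:        metav1.ObjectMeta{GenerateName: "example-sc-"}, // placeholder name
		Provisioner:       "kubernetes.io/gce-pd",                         // placeholder provisioner
		Parameters:        map[string]string{"type": "pd-standard"},       // placeholder parameters
		VolumeBindingMode: &mode,
	}
}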

test/e2e/storage/regional_pd.go

Lines changed: 10 additions & 9 deletions
@@ -108,14 +108,15 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
 			},
 			ClaimSize:    repdMinSize,
 			ExpectedSize: repdMinSize,
-			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-				var err error
-				err = checkGCEPD(volume, "pd-standard")
+			PvCheck: func(claim *v1.PersistentVolumeClaim) {
+				volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
+				Expect(volume).NotTo(BeNil())
+
+				err := checkGCEPD(volume, "pd-standard")
 				Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
 				err = verifyZonesInPV(volume, sets.NewString(cloudZones...), true /* match */)
 				Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV")
 
-				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
 			},
 		},
 		{
@@ -128,16 +129,16 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
 			},
 			ClaimSize:    repdMinSize,
 			ExpectedSize: repdMinSize,
-			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-				var err error
-				err = checkGCEPD(volume, "pd-standard")
+			PvCheck: func(claim *v1.PersistentVolumeClaim) {
+				volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
+				Expect(volume).NotTo(BeNil())
+
+				err := checkGCEPD(volume, "pd-standard")
 				Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
 				zones, err := framework.GetClusterZones(c)
 				Expect(err).NotTo(HaveOccurred(), "GetClusterZones")
 				err = verifyZonesInPV(volume, zones, false /* match */)
 				Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV")
-
-				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
 			},
 		},
 	}

test/e2e/storage/testsuites/base.go

Lines changed: 17 additions & 5 deletions
@@ -25,7 +25,7 @@ import (
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -325,14 +325,19 @@ func createVolumeSourceWithPVCPVFromDynamicProvisionSC(
 	pvc, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
-	Expect(err).NotTo(HaveOccurred())
+	if !isDelayedBinding(sc) {
+		err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
+		Expect(err).NotTo(HaveOccurred())
+	}
 
 	pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
 
-	pv, err := cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred())
+	var pv *v1.PersistentVolume
+	if !isDelayedBinding(sc) {
+		pv, err = cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred())
+	}
 
 	volSource := &v1.VolumeSource{
 		PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
@@ -343,6 +348,13 @@ func createVolumeSourceWithPVCPVFromDynamicProvisionSC(
 	return volSource, pv, pvc
 }
 
+func isDelayedBinding(sc *storagev1.StorageClass) bool {
+	if sc.VolumeBindingMode != nil {
+		return *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer
+	}
+	return false
+}
+
 func getClaim(claimSize string, ns string) *v1.PersistentVolumeClaim {
 	claim := v1.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
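
With delayed binding the claim is expected to stay Pending until a pod consumes it, which is why the helper above now skips both the wait for the Bound phase and the PV lookup when isDelayedBinding reports WaitForFirstConsumer. The same gate, written as a standalone sketch using the framework polling constants seen elsewhere in this diff; the helper name is hypothetical:

package example

import (
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForBoundUnlessDelayed waits for the claim to become Bound only when the
// StorageClass binds immediately; with WaitForFirstConsumer, binding and
// provisioning happen after a pod that uses the claim is scheduled.
func waitForBoundUnlessDelayed(cs clientset.Interface, sc *storagev1.StorageClass, pvc *v1.PersistentVolumeClaim) error {
	if sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
		return nil // the claim legitimately stays Pending for now
	}
	return framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
}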

test/e2e/storage/testsuites/provisioning.go

Lines changed: 80 additions & 64 deletions
@@ -23,8 +23,9 @@ import (
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	storage "k8s.io/api/storage/v1"
+	storagev1 "k8s.io/api/storage/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -50,7 +51,7 @@ type StorageClassTest struct {
 	DelayBinding bool
 	ClaimSize string
 	ExpectedSize string
-	PvCheck func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume)
+	PvCheck func(claim *v1.PersistentVolumeClaim)
 	VolumeMode *v1.PersistentVolumeMode
 	AllowVolumeExpansion bool
 }
@@ -146,6 +147,9 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
 		init()
 		defer cleanup()
 
+		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
+			PVWriteReadSingleNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
+		}
 		l.testCase.TestDynamicProvisioning()
 	})
 
@@ -158,6 +162,9 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
 		defer cleanup()
 
 		l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
+		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
+			PVWriteReadSingleNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
+		}
 		l.testCase.TestDynamicProvisioning()
 	})
 
@@ -178,23 +185,9 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
 		if len(nodes.Items) <= 1 {
 			framework.Skipf("need more than one node - skipping")
 		}
-		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-			PVMultiNodeCheck(l.cs, claim, volume, framework.NodeSelection{Name: l.config.ClientNodeName})
-		}
-		l.testCase.TestDynamicProvisioning()
-	})
-
-	It("should create and delete block persistent volumes", func() {
-		if !dInfo.Capabilities[CapBlock] {
-			framework.Skipf("Driver %q does not support BlockVolume - skipping", dInfo.Name)
+		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
+			PVMultiNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
 		}
-
-		init()
-		defer cleanup()
-
-		block := v1.PersistentVolumeBlock
-		l.testCase.VolumeMode = &block
-		l.pvc.Spec.VolumeMode = &block
 		l.testCase.TestDynamicProvisioning()
 	})
 
@@ -217,7 +210,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
 		defer cleanupFunc()
 
 		l.pvc.Spec.DataSource = dataSource
-		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
 			By("checking whether the created volume has the pre-populated data")
 			command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
 			RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, framework.NodeSelection{Name: l.config.ClientNodeName})
@@ -261,16 +254,39 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 			framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
 		}
 	}()
-	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
-	Expect(err).NotTo(HaveOccurred())
 
-	By("checking the claim")
-	// Get new copy of the claim
-	claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
+	// Run the checker
+	if t.PvCheck != nil {
+		t.PvCheck(claim)
+	}
+
+	pv := t.checkProvisioning(client, claim, class)
+
+	By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
+	framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
+
+	// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
+	// Retain, there's no use waiting because the PV won't be auto-deleted and
+	// it's expected for the caller to do it.) Technically, the first few delete
+	// attempts may fail, as the volume is still attached to a node because
+	// kubelet is slowly cleaning up the previous pod, however it should succeed
+	// in a couple of minutes. Wait 20 minutes to recover from random cloud
+	// hiccups.
+	if pv != nil && pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
+		By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
+		framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
+	}
+
+	return pv
+}
+
+// checkProvisioning verifies that the claim is bound and has the correct properities
+func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
+	err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
 	Expect(err).NotTo(HaveOccurred())
 
-	// Get the bound PV
-	pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
+	By("checking the claim")
+	pv, err := framework.GetBoundPV(client, claim)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Check sizes
@@ -284,8 +300,20 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 
 	// Check PV properties
 	By("checking the PV")
-	expectedAccessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
-	Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
+
+	// Every access mode in PV should be in PVC
+	Expect(pv.Spec.AccessModes).NotTo(BeZero())
+	for _, pvMode := range pv.Spec.AccessModes {
+		found := false
+		for _, pvcMode := range claim.Spec.AccessModes {
+			if pvMode == pvcMode {
+				found = true
+				break
+			}
+		}
+		Expect(found).To(BeTrue())
+	}
+
 	Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
 	Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))
 	if class == nil {
@@ -294,35 +322,15 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 		Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(*class.ReclaimPolicy))
 		Expect(pv.Spec.MountOptions).To(Equal(class.MountOptions))
 	}
-	if t.VolumeMode != nil {
+	if claim.Spec.VolumeMode != nil {
 		Expect(pv.Spec.VolumeMode).NotTo(BeNil())
-		Expect(*pv.Spec.VolumeMode).To(Equal(*t.VolumeMode))
-	}
-
-	// Run the checker
-	if t.PvCheck != nil {
-		t.PvCheck(claim, pv)
-	}
-
-	By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
-	framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
-
-	// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
-	// Retain, there's no use waiting because the PV won't be auto-deleted and
-	// it's expected for the caller to do it.) Technically, the first few delete
-	// attempts may fail, as the volume is still attached to a node because
-	// kubelet is slowly cleaning up the previous pod, however it should succeed
-	// in a couple of minutes. Wait 20 minutes to recover from random cloud
-	// hiccups.
-	if pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
-		By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
-		framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
+		Expect(*pv.Spec.VolumeMode).To(Equal(*claim.Spec.VolumeMode))
 	}
-
 	return pv
 }
 
-// PVWriteReadSingleNodeCheck checks that a PV retains data on a single node.
+// PVWriteReadSingleNodeCheck checks that a PV retains data on a single node
+// and returns the PV.
 //
 // It starts two pods:
 // - The first pod writes 'hello word' to the /mnt/test (= the volume) on one node.
@@ -335,16 +343,9 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 // persistent across pods.
 //
 // This is a common test that can be called from a StorageClassTest.PvCheck.
-func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node framework.NodeSelection) {
-	By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
+func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) *v1.PersistentVolume {
+	By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
 	command := "echo 'hello world' > /mnt/test/data"
-	// We give the first pod the secondary responsibility of checking the volume has
-	// been mounted with the PV's mount options, if the PV was provisioned with any
-	for _, option := range volume.Spec.MountOptions {
-		// Get entry, get mount options at 6th word, replace brackets with commas
-		command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
-	}
-	command += " || (mount | grep 'on /mnt/test'; false)"
 	pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
 	defer func() {
 		// pod might be nil now.
@@ -357,9 +358,24 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
 	StopPod(client, pod)
 	pod = nil // Don't stop twice.
 
-	By(fmt.Sprintf("checking the created volume is readable and retains data on the same node %q", actualNodeName))
+	// Get a new copy of the PV
+	volume, err := framework.GetBoundPV(client, claim)
+	Expect(err).NotTo(HaveOccurred())
+
+	By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))
 	command = "grep 'hello world' /mnt/test/data"
+
+	// We give the second pod the additional responsibility of checking the volume has
+	// been mounted with the PV's mount options, if the PV was provisioned with any
+	for _, option := range volume.Spec.MountOptions {
+		// Get entry, get mount options at 6th word, replace brackets with commas
+		command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
+	}
+	command += " || (mount | grep 'on /mnt/test'; false)"
+
 	RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, framework.NodeSelection{Name: actualNodeName})
+
+	return volume
 }
 
 // PVMultiNodeCheck checks that a PV retains data when moved between nodes.
@@ -376,7 +392,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
 // persistent across pods and across nodes.
 //
 // This is a common test that can be called from a StorageClassTest.PvCheck.
-func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node framework.NodeSelection) {
+func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) {
 	Expect(node.Name).To(Equal(""), "this test only works when not locked onto a single node")
 
 	var pod *v1.Pod
@@ -385,7 +401,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
 		StopPod(client, pod)
 	}()
 
-	By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
+	By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
 	command := "echo 'hello world' > /mnt/test/data"
 	pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
 	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
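
Taken together, PvCheck now receives only the claim: TestDynamicProvisioning runs the checker first and only afterwards validates the bound PV in checkProvisioning, which is what makes delayed binding workable (the PV may not exist until the checker's pod has been scheduled). A sketch of wiring a test to the new signature, in the style of the regional_pd.go change above; the clientset variable and sizes are illustrative:

package example

import (
	. "github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// newPDTest returns a StorageClassTest whose checker writes and reads data in
// a pod (which triggers binding under WaitForFirstConsumer) and then inspects
// the PV returned by PVWriteReadSingleNodeCheck.
func newPDTest(cs clientset.Interface) testsuites.StorageClassTest {
	return testsuites.StorageClassTest{
		ClaimSize:    "5Gi",
		ExpectedSize: "5Gi",
		PvCheck: func(claim *v1.PersistentVolumeClaim) {
			volume := testsuites.PVWriteReadSingleNodeCheck(cs, claim, framework.NodeSelection{})
			Expect(volume).NotTo(BeNil())
			// Further PV assertions (zone labels, disk type, ...) would go here.
		},
	}
}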
