@@ -23,8 +23,9 @@ import (
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	storage "k8s.io/api/storage/v1"
+	storagev1 "k8s.io/api/storage/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -50,7 +51,7 @@ type StorageClassTest struct {
 	DelayBinding         bool
 	ClaimSize            string
 	ExpectedSize         string
-	PvCheck              func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume)
+	PvCheck              func(claim *v1.PersistentVolumeClaim)
 	VolumeMode           *v1.PersistentVolumeMode
 	AllowVolumeExpansion bool
 }
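The signature change above means a PvCheck no longer receives the bound PV; a check that needs the PV must fetch it from the claim, as later hunks do with framework.GetBoundPV. A minimal sketch of the new wiring, with stand-in types replacing the Kubernetes API objects (Claim, PV, and getBoundPV below are illustrative only, not part of this change):

package main

import "fmt"

type Claim struct{ Name string }

type PV struct{ Name string }

type StorageClassTest struct {
	PvCheck func(claim *Claim) // no longer receives a *PV
}

// getBoundPV stands in for framework.GetBoundPV: checks now look the
// PV up from the claim instead of trusting a passed-in copy.
func getBoundPV(claim *Claim) *PV { return &PV{Name: "pv-for-" + claim.Name} }

func main() {
	t := StorageClassTest{
		PvCheck: func(claim *Claim) {
			pv := getBoundPV(claim)
			fmt.Printf("checking claim %s bound to %s\n", claim.Name, pv.Name)
		},
	}
	t.PvCheck(&Claim{Name: "pvc-1"})
}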
@@ -146,6 +147,9 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
 		init()
 		defer cleanup()
 
+		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
+			PVWriteReadSingleNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
+		}
 		l.testCase.TestDynamicProvisioning()
 	})
 
@@ -158,6 +162,9 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
158
162
defer cleanup ()
159
163
160
164
l .testCase .Class .MountOptions = dInfo .SupportedMountOption .Union (dInfo .RequiredMountOption ).List ()
165
+ l .testCase .PvCheck = func (claim * v1.PersistentVolumeClaim ) {
166
+ PVWriteReadSingleNodeCheck (l .cs , claim , framework.NodeSelection {Name : l .config .ClientNodeName })
167
+ }
161
168
l .testCase .TestDynamicProvisioning ()
162
169
})
163
170
@@ -178,23 +185,9 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
 		if len(nodes.Items) <= 1 {
 			framework.Skipf("need more than one node - skipping")
 		}
-		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-			PVMultiNodeCheck(l.cs, claim, volume, framework.NodeSelection{Name: l.config.ClientNodeName})
-		}
-		l.testCase.TestDynamicProvisioning()
-	})
-
-	It("should create and delete block persistent volumes", func() {
-		if !dInfo.Capabilities[CapBlock] {
-			framework.Skipf("Driver %q does not support BlockVolume - skipping", dInfo.Name)
+		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
+			PVMultiNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
 		}
-
-		init()
-		defer cleanup()
-
-		block := v1.PersistentVolumeBlock
-		l.testCase.VolumeMode = &block
-		l.pvc.Spec.VolumeMode = &block
 		l.testCase.TestDynamicProvisioning()
 	})
 
@@ -217,7 +210,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
 			defer cleanupFunc()
 
 			l.pvc.Spec.DataSource = dataSource
-			l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+			l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
 				By("checking whether the created volume has the pre-populated data")
 				command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
 				RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, framework.NodeSelection{Name: l.config.ClientNodeName})
@@ -261,16 +254,39 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 			framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
 		}
 	}()
-	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
-	Expect(err).NotTo(HaveOccurred())
 
-	By("checking the claim")
-	// Get new copy of the claim
-	claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
+	// Run the checker
+	if t.PvCheck != nil {
+		t.PvCheck(claim)
+	}
+
+	pv := t.checkProvisioning(client, claim, class)
+
+	By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
+	framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
+
+	// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
+	// Retain, there's no use waiting because the PV won't be auto-deleted and
+	// it's expected for the caller to do it.) Technically, the first few delete
+	// attempts may fail, as the volume is still attached to a node because
+	// kubelet is slowly cleaning up the previous pod, however it should succeed
+	// in a couple of minutes. Wait 20 minutes to recover from random cloud
+	// hiccups.
+	if pv != nil && pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
+		By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
+		framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
+	}
+
+	return pv
+}
+
+// checkProvisioning verifies that the claim is bound and has the correct properties
+func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
+	err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
 	Expect(err).NotTo(HaveOccurred())
 
-	// Get the bound PV
-	pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
+	By("checking the claim")
+	pv, err := framework.GetBoundPV(client, claim)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Check sizes
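The deletion wait added above follows a standard poll-until-gone pattern with a 5-second interval and a generous 20-minute timeout. A stdlib-only sketch of that pattern; the isDeleted probe is a hypothetical stand-in for the framework's PV lookup, not the real framework.WaitForPersistentVolumeDeleted:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForDeleted polls isDeleted until it reports true or the timeout
// expires, mirroring the interval/timeout shape used in the diff
// (5*time.Second interval, 20*time.Minute timeout).
func waitForDeleted(isDeleted func() (bool, error), interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		gone, err := isDeleted()
		if err != nil {
			return err
		}
		if gone {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("timed out waiting for deletion")
}

func main() {
	attempts := 0
	// Fake probe: reports "deleted" after three polls, like a volume that
	// detaches slowly while kubelet cleans up the previous pod.
	err := waitForDeleted(func() (bool, error) {
		attempts++
		return attempts >= 3, nil
	}, 10*time.Millisecond, time.Second)
	fmt.Println(err) // <nil>
}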
@@ -284,8 +300,20 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 
 	// Check PV properties
 	By("checking the PV")
-	expectedAccessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
-	Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
+
+	// Every access mode in PV should be in PVC
+	Expect(pv.Spec.AccessModes).NotTo(BeZero())
+	for _, pvMode := range pv.Spec.AccessModes {
+		found := false
+		for _, pvcMode := range claim.Spec.AccessModes {
+			if pvMode == pvcMode {
+				found = true
+				break
+			}
+		}
+		Expect(found).To(BeTrue())
+	}
+
 	Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
 	Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))
 	if class == nil {
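The nested loop introduced above asserts that every access mode on the provisioned PV was actually requested by the claim. The same property, extracted as a standalone helper for illustration (plain strings stand in for v1.PersistentVolumeAccessMode; this helper is not part of the change):

package main

import "fmt"

// accessModesSubset reports whether every mode in pvModes also appears in
// pvcModes, the per-mode property the test asserts with Expect(found).
func accessModesSubset(pvModes, pvcModes []string) bool {
	for _, pvMode := range pvModes {
		found := false
		for _, pvcMode := range pvcModes {
			if pvMode == pvcMode {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(accessModesSubset([]string{"ReadWriteOnce"}, []string{"ReadWriteOnce", "ReadOnlyMany"})) // true
	fmt.Println(accessModesSubset([]string{"ReadWriteMany"}, []string{"ReadWriteOnce"}))                 // false
}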
@@ -294,35 +322,15 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 		Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(*class.ReclaimPolicy))
 		Expect(pv.Spec.MountOptions).To(Equal(class.MountOptions))
 	}
-	if t.VolumeMode != nil {
+	if claim.Spec.VolumeMode != nil {
 		Expect(pv.Spec.VolumeMode).NotTo(BeNil())
-		Expect(*pv.Spec.VolumeMode).To(Equal(*t.VolumeMode))
-	}
-
-	// Run the checker
-	if t.PvCheck != nil {
-		t.PvCheck(claim, pv)
-	}
-
-	By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
-	framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
-
-	// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
-	// Retain, there's no use waiting because the PV won't be auto-deleted and
-	// it's expected for the caller to do it.) Technically, the first few delete
-	// attempts may fail, as the volume is still attached to a node because
-	// kubelet is slowly cleaning up the previous pod, however it should succeed
-	// in a couple of minutes. Wait 20 minutes to recover from random cloud
-	// hiccups.
-	if pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
-		By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
-		framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
+		Expect(*pv.Spec.VolumeMode).To(Equal(*claim.Spec.VolumeMode))
 	}
-
 	return pv
 }
 
-// PVWriteReadSingleNodeCheck checks that a PV retains data on a single node.
+// PVWriteReadSingleNodeCheck checks that a PV retains data on a single node
+// and returns the PV.
 //
 // It starts two pods:
 // - The first pod writes 'hello world' to the /mnt/test (= the volume) on one node.
@@ -335,16 +343,9 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 // persistent across pods.
 //
 // This is a common test that can be called from a StorageClassTest.PvCheck.
-func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node framework.NodeSelection) {
-	By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
+func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) *v1.PersistentVolume {
+	By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
 	command := "echo 'hello world' > /mnt/test/data"
-	// We give the first pod the secondary responsibility of checking the volume has
-	// been mounted with the PV's mount options, if the PV was provisioned with any
-	for _, option := range volume.Spec.MountOptions {
-		// Get entry, get mount options at 6th word, replace brackets with commas
-		command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
-	}
-	command += " || (mount | grep 'on /mnt/test'; false)"
 	pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
 	defer func() {
 		// pod might be nil now.
@@ -357,9 +358,24 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
 	StopPod(client, pod)
 	pod = nil // Don't stop twice.
 
-	By(fmt.Sprintf("checking the created volume is readable and retains data on the same node %q", actualNodeName))
+	// Get a new copy of the PV
+	volume, err := framework.GetBoundPV(client, claim)
+	Expect(err).NotTo(HaveOccurred())
+
+	By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))
 	command = "grep 'hello world' /mnt/test/data"
+
+	// We give the second pod the additional responsibility of checking the volume has
+	// been mounted with the PV's mount options, if the PV was provisioned with any
+	for _, option := range volume.Spec.MountOptions {
+		// Get entry, get mount options at 6th word, replace brackets with commas
+		command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
+	}
+	command += " || (mount | grep 'on /mnt/test'; false)"
+
 	RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, framework.NodeSelection{Name: actualNodeName})
+
+	return volume
 }
 
 // PVMultiNodeCheck checks that a PV retains data when moved between nodes.
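To make the generated shell command concrete: assuming a hypothetical PV provisioned with MountOptions = []string{"noatime"}, the string-building loop above produces the reader pod's command as follows (a runnable reproduction of the same logic, not code from this change):

package main

import "fmt"

func main() {
	command := "grep 'hello world' /mnt/test/data"
	// Same construction as the loop above: each mount option must appear in
	// the 6th field of the matching `mount` entry, with the surrounding
	// brackets normalized to commas so ",noatime," matches whole options only.
	for _, option := range []string{"noatime"} {
		command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
	}
	// On any failure, dump the mount entry for easier debugging.
	command += " || (mount | grep 'on /mnt/test'; false)"
	fmt.Println(command)
}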
@@ -376,7 +392,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
 // persistent across pods and across nodes.
 //
 // This is a common test that can be called from a StorageClassTest.PvCheck.
-func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node framework.NodeSelection) {
+func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) {
 	Expect(node.Name).To(Equal(""), "this test only works when not locked onto a single node")
 
 	var pod *v1.Pod
@@ -385,7 +401,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
 		StopPod(client, pod)
 	}()
 
-	By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
+	By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
 	command := "echo 'hello world' > /mnt/test/data"
 	pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
 	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))