@@ -323,7 +323,35 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 
 		// Test access to the volume from pods on different node
 		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */)
+			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */, false /* readOnly */)
+	})
+
+	// This tests below configuration:
+	//          [pod1] [pod2]
+	//              [   node1   ]
+	//                   \      /     <- same volume mode (read only)
+	//                   [volume1]
+	ginkgo.It("should concurrently access the single read-only volume from pods on the same node", func() {
+		init()
+		defer cleanup()
+
+		numPods := 2
+
+		if !l.driver.GetDriverInfo().Capabilities[CapMultiPODs] {
+			e2eskipper.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
+		}
+
+		// Create volume
+		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
+		resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
+		l.resources = append(l.resources, resource)
+
+		// Initialize the volume with a filesystem - it's going to be mounted as read-only below.
+		initializeVolume(l.cs, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection)
+
+		// Test access to the volume from pods on a single node
+		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
+			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */, true /* readOnly */)
 	})
 
 	// This tests below configuration:
@@ -365,7 +393,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 
 		// Test access to the volume from pods on different node
 		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-			l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */)
+			l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */, false /* readOnly */)
 	})
 }
 
@@ -443,7 +471,8 @@ func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs cli
 // pod deletion doesn't affect. Pods are deployed on the same node or different nodes depending on requiresSameNode.
 // Read/write check are done across pod, by check reading both what pod{n-1} and pod{n} wrote from pod{n}.
 func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Interface, ns string,
-	node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool) {
+	node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool,
+	readOnly bool) {
 
 	var pods []*v1.Pod
 
@@ -456,6 +485,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 			PVCs:          []*v1.PersistentVolumeClaim{pvc},
 			SeLinuxLabel:  e2epv.SELinuxLabel,
 			NodeSelection: node,
+			PVCsReadOnly:  readOnly,
 		}
 		pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
 		defer func() {
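
Note: `PVCsReadOnly` is the only new knob in this hunk; the diff sets it but does not show how `e2epod` consumes it. Below is a minimal sketch of the core/v1 volume source such a flag presumably translates to. The struct fields are real core API; the mapping from `PVCsReadOnly` to `ReadOnly` is an assumption, not shown in this change.

    package main

    import (
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    )

    // readOnlyPVCVolume builds the pod-spec volume for a PVC mounted read-only.
    // Assumption: e2epod.Config.PVCsReadOnly flips ReadOnly below for each
    // PVC-backed volume of the generated test pod.
    func readOnlyPVCVolume(claimName string) v1.Volume {
    	return v1.Volume{
    		Name: "volume1",
    		VolumeSource: v1.VolumeSource{
    			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
    				ClaimName: claimName,
    				ReadOnly:  true, // kubelet mounts the volume read-only inside the pod
    			},
    		},
    	}
    }

    func main() {
    	fmt.Printf("%+v\n", readOnlyPVCVolume("pvc-1"))
    }
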
@@ -484,6 +514,11 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
 		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)
 
+		if readOnly {
+			ginkgo.By("Skipping volume content checks, volume is read-only")
+			continue
+		}
+
 		if i != 0 {
 			ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1))
 			// For 1st pod, no one has written data yet, so pass the read check
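
Note: both check loops (the initial checks above and the recheck pass after pod deletion below) skip the data read/write steps with the same `continue`. A standalone sketch of why those steps cannot work on a read-only mount follows: any write attempt is rejected with EROFS. The path and the probe are illustrative only, not part of the suite.

    package main

    import (
    	"errors"
    	"fmt"
    	"os"
    	"syscall"
    )

    func main() {
    	// Try to create a file on a mount the kubelet attached read-only.
    	_, err := os.OpenFile("/mnt/volume1/probe", os.O_CREATE|os.O_WRONLY, 0644)
    	if errors.Is(err, syscall.EROFS) {
    		fmt.Println("write rejected: filesystem is mounted read-only")
    	}
    }
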
@@ -515,6 +550,11 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		ginkgo.By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
 		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, "/mnt/volume1")
 
+		if readOnly {
+			ginkgo.By("Skipping volume content checks, volume is read-only")
+			continue
+		}
+
 		if i == 0 {
 			// This time there should be data that last pod wrote, for 1st pod
 			ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that last pod write works properly", index))
@@ -586,3 +626,26 @@ func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, nodes *v1.N
 
 	return nil
 }
+
+// initializeVolume creates a filesystem on given volume, so it can be used as read-only later
+func initializeVolume(cs clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
+	if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {
+		// Block volumes do not need to be initialized.
+		return
+	}
+
+	ginkgo.By(fmt.Sprintf("Initializing a filesystem on PVC %s", pvc.Name))
+	// Just create a pod with the volume as read-write. Kubernetes will create a filesystem there
+	// if it does not exist yet.
+	podConfig := e2epod.Config{
+		NS:            ns,
+		PVCs:          []*v1.PersistentVolumeClaim{pvc},
+		SeLinuxLabel:  e2epv.SELinuxLabel,
+		NodeSelection: node,
+	}
+	pod, err := e2epod.CreateSecPod(cs, &podConfig, framework.PodStartTimeout)
+	defer func() {
+		framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
+	}()
+	framework.ExpectNoError(err)
+}
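
Note: `initializeVolume` relies on the kubelet formatting an empty filesystem volume on its first read-write mount; if the very first consumer mounted the volume read-only, there would be no filesystem to mount and the pod would not start. Below is a sketch of the "formatting" pod the helper effectively creates. The real pod comes from `e2epod.CreateSecPod`; the image, command, and names here are illustrative assumptions.

    package main

    import (
    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // formatterPod mounts the PVC read-write once so the kubelet creates a
    // filesystem on the backing volume if one does not exist yet.
    func formatterPod(ns, claimName string) *v1.Pod {
    	return &v1.Pod{
    		ObjectMeta: metav1.ObjectMeta{GenerateName: "volume-init-", Namespace: ns},
    		Spec: v1.PodSpec{
    			RestartPolicy: v1.RestartPolicyNever,
    			Containers: []v1.Container{{
    				Name:    "init",
    				Image:   "busybox",
    				Command: []string{"sh", "-c", "sync"},
    				VolumeMounts: []v1.VolumeMount{{
    					Name:      "volume1",
    					MountPath: "/mnt/volume1", // read-write mount; kubelet runs mkfs here if needed
    				}},
    			}},
    			Volumes: []v1.Volume{{
    				Name: "volume1",
    				VolumeSource: v1.VolumeSource{
    					PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
    						ClaimName: claimName, // ReadOnly left false on purpose
    					},
    				},
    			}},
    		},
    	}
    }

    func main() {
    	_ = formatterPod("default", "pvc-1")
    }
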