@@ -322,7 +322,35 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 
 		// Test access to the volume from pods on different node
 		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */)
+			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */, false /* readOnly */)
+	})
+
+	// This tests below configuration:
+	//          [pod1] [pod2]
+	//              [   node1   ]
+	//                   \      /     <- same volume mode (read only)
+	//                  [volume1]
+	ginkgo.It("should concurrently access the single read-only volume from pods on the same node", func() {
+		init()
+		defer cleanup()
+
+		numPods := 2
+
+		if !l.driver.GetDriverInfo().Capabilities[CapMultiPODs] {
+			e2eskipper.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
+		}
+
+		// Create volume
+		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
+		resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
+		l.resources = append(l.resources, resource)
+
+		// Initialize the volume with a filesystem - it's going to be mounted as read-only below.
+		initializeVolume(l.cs, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection)
+
+		// Test access to the volume from pods on a single node
+		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
+			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */, true /* readOnly */)
 	})
 
 	// This tests below configuration:
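Note: the new test is gated on the CapMultiPODs capability checked above. A minimal sketch of how a driver would opt in, assuming the DriverInfo, Capability, and CapMultiPODs names from this testsuites package; the receiver type hypotheticalDriver and the driver name are illustrative only, not part of this patch.

// Sketch: a driver advertising the capability that gates the new test.
func (d *hypotheticalDriver) GetDriverInfo() *DriverInfo {
	return &DriverInfo{
		Name: "hypothetical.example.com", // illustrative driver name
		Capabilities: map[Capability]bool{
			CapMultiPODs: true, // several pods may use one volume concurrently
		},
	}
}
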
@@ -364,7 +392,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 
 		// Test access to the volume from pods on different node
 		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-			l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */)
+			l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */, false /* readOnly */)
 	})
 }
 
@@ -442,7 +470,8 @@ func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs cli
 // pod deletion doesn't affect. Pods are deployed on the same node or different nodes depending on requiresSameNode.
 // Read/write check are done across pod, by check reading both what pod{n-1} and pod{n} wrote from pod{n}.
 func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Interface, ns string,
-	node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool) {
+	node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool,
+	readOnly bool) {
 
 	var pods []*v1.Pod
 
@@ -455,6 +484,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 			PVCs:          []*v1.PersistentVolumeClaim{pvc},
 			SeLinuxLabel:  e2epv.SELinuxLabel,
 			NodeSelection: node,
+			PVCsReadOnly:  readOnly,
 		}
 		pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
 		defer func() {
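Note: the new PVCsReadOnly flag on e2epod.Config presumably makes the generated pod reference the claim read-only. A sketch of the pod-spec effect this is expected to have, using the standard core/v1 API; this is an assumption about e2epod.Config internals, not a copy of them.

// Sketch: the PVC volume source a read-only pod would carry.
vol := v1.Volume{
	Name: "volume1",
	VolumeSource: v1.VolumeSource{
		PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
			ClaimName: pvc.Name,
			ReadOnly:  true, // kubelet mounts the volume read-only in the pod
		},
	},
}
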
@@ -483,6 +513,11 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
 		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)
 
+		if readOnly {
+			ginkgo.By("Skipping volume content checks, volume is read-only")
+			continue
+		}
+
 		if i != 0 {
 			ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1))
 			// For 1st pod, no one has written data yet, so pass the read check
@@ -514,6 +549,11 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		ginkgo.By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
 		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, "/mnt/volume1")
 
+		if readOnly {
+			ginkgo.By("Skipping volume content checks, volume is read-only")
+			continue
+		}
+
 		if i == 0 {
 			// This time there should be data that last pod wrote, for 1st pod
 			ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that last pod write works properly", index))
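Note: in both loops the patch skips the content checks outright rather than asserting that writes fail on the read-only mount. A hypothetical stricter variant is sketched below; utils.VerifyExecInPodFail's existence and exact signature are assumed from this repo's storage utils, not taken from this diff.

// Hypothetical alternative to the plain `continue` above: prove the mount
// is actually read-only by expecting a write to fail.
if readOnly {
	ginkgo.By(fmt.Sprintf("Checking that pod%d cannot write to the read-only volume", index))
	utils.VerifyExecInPodFail(pod, "touch /mnt/volume1/probe", 1) // signature assumed
	continue
}
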
@@ -585,3 +625,26 @@ func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, nodes *v1.N
 
 	return nil
 }
+
+// initializeVolume creates a filesystem on given volume, so it can be used as read-only later
+func initializeVolume(cs clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
+	if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {
+		// Block volumes do not need to be initialized.
+		return
+	}
+
+	ginkgo.By(fmt.Sprintf("Initializing a filesystem on PVC %s", pvc.Name))
+	// Just create a pod with the volume as read-write. Kubernetes will create a filesystem there
+	// if it does not exist yet.
+	podConfig := e2epod.Config{
+		NS:            ns,
+		PVCs:          []*v1.PersistentVolumeClaim{pvc},
+		SeLinuxLabel:  e2epv.SELinuxLabel,
+		NodeSelection: node,
+	}
+	pod, err := e2epod.CreateSecPod(cs, &podConfig, framework.PodStartTimeout)
+	defer func() {
+		framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
+	}()
+	framework.ExpectNoError(err)
+}
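Note: initializeVolume relies on the behavior called out in its own comment - mounting a filesystem volume read-write makes Kubernetes format it if no filesystem exists yet, so a throwaway read-write pod is enough and nothing needs to be written explicitly. A usage sketch mirroring the new test above, assuming the same l/resource locals from DefineTests; numPods is arbitrary here.

// Usage sketch: format first via a throwaway read-write pod, then
// exercise the volume read-only from concurrent pods on one node.
initializeVolume(l.cs, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection)
TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
	l.config.ClientNodeSelection, resource.Pvc, 2 /* numPods */, true /* sameNode */, true /* readOnly */)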