
Commit 62b7516

Add e2e test for read-only volume used by multiple pods
1 parent b93e9d9 commit 62b7516

File tree

test/e2e/framework/pod/create.go
test/e2e/storage/testsuites/multivolume.go

2 files changed (+68, -4)

test/e2e/framework/pod/create.go

Lines changed: 2 additions & 1 deletion
@@ -38,6 +38,7 @@ var (
 type Config struct {
 	NS                  string
 	PVCs                []*v1.PersistentVolumeClaim
+	PVCsReadOnly        bool
 	InlineVolumeSources []*v1.VolumeSource
 	IsPrivileged        bool
 	Command             string
@@ -224,7 +225,7 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
 			volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
 		}
 
-		volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
+		volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: podConfig.PVCsReadOnly}}}
 		volumeIndex++
 	}
 	for _, src := range podConfig.InlineVolumeSources {
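
As a usage note: with the new field, an e2e test can ask for all of its PVCs to be mounted read-only through the same Config it already builds; MakeSecPod propagates the flag into each generated volume. A minimal sketch, assuming the framework import paths of this era and a hypothetical helper name (createReadOnlyPod is illustrative, not part of the commit):

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// createReadOnlyPod is a hypothetical caller of the new flag: it starts a pod
// whose PVC-backed volume is mounted read-only via podConfig.PVCsReadOnly.
func createReadOnlyPod(cs clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) (*v1.Pod, error) {
	podConfig := e2epod.Config{
		NS:            ns,
		PVCs:          []*v1.PersistentVolumeClaim{pvc},
		SeLinuxLabel:  e2epv.SELinuxLabel,
		NodeSelection: node,
		PVCsReadOnly:  true, // MakeSecPod copies this into each volume's ReadOnly field
	}
	return e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
}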

test/e2e/storage/testsuites/multivolume.go

Lines changed: 66 additions & 3 deletions
@@ -323,7 +323,35 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter

 		// Test access to the volume from pods on different node
 		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */)
+			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */, false /* readOnly */)
+	})
+
+	// This tests below configuration:
+	//          [pod1] [pod2]
+	//          [   node1   ]
+	//             \      /     <- same volume mode (read only)
+	//             [volume1]
+	ginkgo.It("should concurrently access the single read-only volume from pods on the same node", func() {
+		init()
+		defer cleanup()
+
+		numPods := 2
+
+		if !l.driver.GetDriverInfo().Capabilities[CapMultiPODs] {
+			e2eskipper.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
+		}
+
+		// Create volume
+		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
+		resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
+		l.resources = append(l.resources, resource)
+
+		// Initialize the volume with a filesystem - it's going to be mounted as read-only below.
+		initializeVolume(l.cs, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection)
+
+		// Test access to the volume from pods on a single node
+		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
+			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */, true /* readOnly */)
 	})

 	// This tests below configuration:
@@ -365,7 +393,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter

 		// Test access to the volume from pods on different node
 		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-			l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */)
+			l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */, false /* readOnly */)
 	})
 }

@@ -443,7 +471,8 @@ func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs cli
 // pod deletion doesn't affect. Pods are deployed on the same node or different nodes depending on requiresSameNode.
 // Read/write check are done across pod, by check reading both what pod{n-1} and pod{n} wrote from pod{n}.
 func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Interface, ns string,
-	node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool) {
+	node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool,
+	readOnly bool) {

 	var pods []*v1.Pod

@@ -456,6 +485,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 			PVCs:          []*v1.PersistentVolumeClaim{pvc},
 			SeLinuxLabel:  e2epv.SELinuxLabel,
 			NodeSelection: node,
+			PVCsReadOnly:  readOnly,
 		}
 		pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
 		defer func() {
@@ -484,6 +514,11 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
 		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)

+		if readOnly {
+			ginkgo.By("Skipping volume content checks, volume is read-only")
+			continue
+		}
+
 		if i != 0 {
 			ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1))
 			// For 1st pod, no one has written data yet, so pass the read check
@@ -515,6 +550,11 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		ginkgo.By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
 		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, "/mnt/volume1")

+		if readOnly {
+			ginkgo.By("Skipping volume content checks, volume is read-only")
+			continue
+		}
+
 		if i == 0 {
 			// This time there should be data that last pod wrote, for 1st pod
 			ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that last pod write works properly", index))
@@ -586,3 +626,26 @@ func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, nodes *v1.N

 	return nil
 }
+
+// initializeVolume creates a filesystem on given volume, so it can be used as read-only later
+func initializeVolume(cs clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
+	if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {
+		// Block volumes do not need to be initialized.
+		return
+	}
+
+	ginkgo.By(fmt.Sprintf("Initializing a filesystem on PVC %s", pvc.Name))
+	// Just create a pod with the volume as read-write. Kubernetes will create a filesystem there
+	// if it does not exist yet.
+	podConfig := e2epod.Config{
+		NS:            ns,
+		PVCs:          []*v1.PersistentVolumeClaim{pvc},
+		SeLinuxLabel:  e2epv.SELinuxLabel,
+		NodeSelection: node,
+	}
+	pod, err := e2epod.CreateSecPod(cs, &podConfig, framework.PodStartTimeout)
+	defer func() {
+		framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
+	}()
+	framework.ExpectNoError(err)
+}
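
One ordering detail the new test depends on: a filesystem volume that has never been mounted read-write has no filesystem yet, and kubelet only formats a volume when mounting it read-write, so a first mount that is read-only would fail. That is why the read-write initializeVolume pass runs before the read-only pods start. A fragment restating the intended order, reusing the identifiers from the test above:

// Sketch of the required ordering for read-only filesystem volumes:
// 1) mount read-write once so kubelet creates the filesystem,
initializeVolume(l.cs, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection)
// 2) only then start the concurrent pods with readOnly=true.
TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
	l.config.ClientNodeSelection, resource.Pvc, 2 /* numPods */, true /* sameNode */, true /* readOnly */)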
