Commit f2bf2ab
e2e/storage: disable caching when writing/reading to block PVCs
By passing "oflag=nocache" and "iflag=direct", caching should be disabled while writing/reading with "dd" to a block device. The TestConcurrentAccessToSingleVolume() test is known to fail with certain storage backends (like Ceph RBD) when caching is enabled.

The default BusyBox image used for testing does not support the required "dd" options, so run the test with a Debian image instead of BusyBox.
1 parent 00b6d7c commit f2bf2ab
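For illustration, with the new flags the checks end up running "dd" pipelines of roughly the following shape inside the test pod. This is a sketch assembled from the diff below, not verbatim test code; the path and size are example values:

package main

import "fmt"

func main() {
	path, byteLen := "/mnt/volume1", 512 // one full sector, required for direct I/O

	// Read check: "iflag=direct" bypasses the page cache, so the bytes come
	// from the storage backend rather than from a possibly stale cache.
	fmt.Printf("dd if=%s iflag=direct bs=%d count=1 | sha256sum\n", path, byteLen)

	// Write check: "oflag=nocache" asks dd to drop cached data for the
	// written range, so a concurrently reading pod does not see stale data.
	fmt.Printf("echo <base64-data> | base64 -d | dd of=%s oflag=nocache bs=%d count=1\n", path, byteLen)
}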

2 files changed: +49 −18 lines

test/e2e/storage/testsuites/multivolume.go

Lines changed: 19 additions & 9 deletions
@@ -35,6 +35,7 @@ import (
 	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 )

 type multiVolumeTestSuite struct {
@@ -423,14 +424,14 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n
 		if readSeedBase > 0 {
 			ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
-			utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, readSeedBase+int64(i))
+			utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, readSeedBase+int64(i))
 		}

 		ginkgo.By(fmt.Sprintf("Checking if write to the volume%d works properly", index))
-		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
+		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, writeSeedBase+int64(i))

 		ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
-		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, writeSeedBase+int64(i))
 	}

 	pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -481,6 +482,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		ginkgo.By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node))
 		podConfig := e2epod.Config{
 			NS:            ns,
+			ImageID:       imageutils.DebianIptables,
 			PVCs:          []*v1.PersistentVolumeClaim{pvc},
 			SeLinuxLabel:  e2epv.SELinuxLabel,
 			NodeSelection: node,
@@ -506,6 +508,14 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int

 	var seed int64
 	byteLen := 64
+	directIO := false
+	// direct IO is needed for Block-mode PVs
+	if *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {
+		// byteLen should be the size of a sector to enable direct I/O
+		byteLen = 512
+		directIO = true
+	}
+
 	path := "/mnt/volume1"
 	// Check if volume can be accessed from each pod
 	for i, pod := range pods {
@@ -521,17 +531,17 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		if i != 0 {
 			ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1))
 			// For 1st pod, no one has written data yet, so pass the read check
-			utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+			utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
 		}

 		// Update the seed and check if write/read works properly
 		seed = time.Now().UTC().UnixNano()

 		ginkgo.By(fmt.Sprintf("Checking if write to the volume in pod%d works properly", index))
-		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)

 		ginkgo.By(fmt.Sprintf("Checking if read from the volume in pod%d works properly", index))
-		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
 	}

 	// Delete the last pod and remove from slice of pods
@@ -560,16 +570,16 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		} else {
 			ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that pod%d write works properly", index, index-1))
 		}
-		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)

 		// Update the seed and check if write/read works properly
 		seed = time.Now().UTC().UnixNano()

 		ginkgo.By(fmt.Sprintf("Rechecking if write to the volume in pod%d works properly", index))
-		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)

 		ginkgo.By(fmt.Sprintf("Rechecking if read from the volume in pod%d works properly", index))
-		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
 	}
 }
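Condensed from the hunks above, the per-pod flow now decides on direct I/O once and threads it through every check. A sketch, not the verbatim test body; f, pvc and pods come from the surrounding test:

	byteLen := 64
	directIO := false
	if *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {
		// Direct I/O on a block device must transfer whole sectors,
		// so grow the payload from 64 bytes to one 512-byte sector.
		byteLen = 512
		directIO = true
	}

	for _, pod := range pods {
		seed := time.Now().UTC().UnixNano()
		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, directIO, "/mnt/volume1", byteLen, seed)
		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, "/mnt/volume1", byteLen, seed)
	}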

test/e2e/storage/utils/utils.go

Lines changed: 30 additions & 9 deletions
@@ -241,13 +241,13 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
 	seed := time.Now().UTC().UnixNano()

 	ginkgo.By("Writing to the volume.")
-	CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)
+	CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)

 	ginkgo.By("Restarting kubelet")
 	KubeletCommand(KRestart, c, clientPod)

 	ginkgo.By("Testing that written file is accessible.")
-	CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)
+	CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)

 	framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, path)
 }
@@ -259,13 +259,13 @@ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Frame
 	seed := time.Now().UTC().UnixNano()

 	ginkgo.By("Writing to the volume.")
-	CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)
+	CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)

 	ginkgo.By("Restarting kubelet")
 	KubeletCommand(KRestart, c, clientPod)

 	ginkgo.By("Testing that written pv is accessible.")
-	CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)
+	CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)

 	framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
 }
@@ -656,33 +656,54 @@ func genBinDataFromSeed(len int, seed int64) []byte {
 }

 // CheckReadFromPath validate that file can be properly read.
-func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
+//
+// Note: directIO does not work with (default) BusyBox Pods. A requirement for
+// directIO to function correctly, is to read whole sector(s) for Block-mode
+// PVCs (normally a sector is 512 bytes), or memory pages for files (commonly
+// 4096 bytes).
+func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, directIO bool, path string, len int, seed int64) {
 	var pathForVolMode string
+	var iflag string
+
 	if volMode == v1.PersistentVolumeBlock {
 		pathForVolMode = path
 	} else {
 		pathForVolMode = filepath.Join(path, "file1.txt")
 	}

+	if directIO {
+		iflag = "iflag=direct"
+	}
+
 	sum := sha256.Sum256(genBinDataFromSeed(len, seed))

-	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum", pathForVolMode, len))
-	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, len, sum))
+	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len))
+	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum))
 }

 // CheckWriteToPath that file can be properly written.
-func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
+//
+// Note: nocache does not work with (default) BusyBox Pods. To read without
+// caching, enable directIO with CheckReadFromPath and check the hints about
+// the len requirements.
+func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, nocache bool, path string, len int, seed int64) {
 	var pathForVolMode string
+	var oflag string
+
 	if volMode == v1.PersistentVolumeBlock {
 		pathForVolMode = path
 	} else {
 		pathForVolMode = filepath.Join(path, "file1.txt")
 	}

+	if nocache {
+		oflag = "oflag=nocache"
+	}
+
 	encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed))

 	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
-	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s bs=%d count=1", encoded, pathForVolMode, len))
+	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
 }

 // findMountPoints returns all mount points on given node under specified directory.
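Callers that want the old cached behavior simply pass false for the new parameter; for Block-mode PVCs, per the doc comments above, len must cover whole 512-byte sectors before direct I/O is enabled. A usage sketch (clientPod and blockPod are hypothetical pods assumed to exist in the calling test):

	seed := time.Now().UTC().UnixNano()

	// Filesystem-mode volume: cached I/O, same behavior as before this change.
	CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, "/mnt/volume1", 64, seed)
	CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, "/mnt/volume1", 64, seed)

	// Block-mode volume: write with oflag=nocache, read back with iflag=direct,
	// using one full 512-byte sector as the transfer size.
	CheckWriteToPath(f, blockPod, v1.PersistentVolumeBlock, true, "/mnt/volume1", 512, seed)
	CheckReadFromPath(f, blockPod, v1.PersistentVolumeBlock, true, "/mnt/volume1", 512, seed)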
