
Commit 9e70d6f (2 parents: 2da917d + 9a9c216)

Merge pull request kubernetes#91738 from jsafrane/fix-iscsi-metadata

iscsi: don't write json medata file when the volume is already mounted.

File tree: 3 files changed, 84 additions & 4 deletions

- pkg/volume/iscsi/iscsi_util.go
- test/e2e/framework/pod/create.go
- test/e2e/storage/testsuites/multivolume.go


pkg/volume/iscsi/iscsi_util.go

Lines changed: 16 additions & 0 deletions
```diff
@@ -491,6 +491,22 @@ func (util *ISCSIUtil) persistISCSI(b iscsiDiskMounter) error {
 		klog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath)
 		return err
 	}
+
+	if b.volumeMode == v1.PersistentVolumeFilesystem {
+		notMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
+		if err != nil {
+			return err
+		}
+		if !notMnt {
+			// The volume is already mounted, therefore the previous WaitForAttach must have
+			// persisted the volume metadata. In addition, the metadata is actually *inside*
+			// globalPDPath and we can't write it here, because it was shadowed by the volume
+			// mount.
+			klog.V(4).Infof("Skipping persistISCSI, the volume is already mounted at %s", globalPDPath)
+			return nil
+		}
+	}
+
 	// Persist iscsi disk config to json file for DetachDisk path
 	return util.persistISCSIFile(*(b.iscsiDisk), globalPDPath)
 }
```
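This guard is the whole fix: persistISCSI now bails out when globalPDPath is already a mount point, because the JSON metadata written by the earlier WaitForAttach sits inside that directory and a repeated write would land on (and be hidden by) the mounted volume. Below is a self-contained sketch of the same pattern, not the Kubernetes implementation: the helper names (likelyNotMountPoint, persistMetadata), the paths, and the vol_data.json file name are illustrative; the st_dev comparison mirrors roughly what IsLikelyNotMountPoint does on Linux.

```go
// Sketch: skip writing a metadata file into a directory that is already a mount point.
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

// likelyNotMountPoint reports whether dir appears NOT to be a mount point.
func likelyNotMountPoint(dir string) (bool, error) {
	st, err := os.Stat(dir)
	if err != nil {
		return true, err
	}
	parent, err := os.Stat(filepath.Dir(dir))
	if err != nil {
		return true, err
	}
	// A directory on the same device as its parent is very likely not a mount point.
	return st.Sys().(*syscall.Stat_t).Dev == parent.Sys().(*syscall.Stat_t).Dev, nil
}

// persistMetadata mimics the guarded write: if dir is already mounted, the earlier
// attach must have written the file, and writing now would only end up inside
// (and be shadowed by) the mounted volume, so the write is skipped.
func persistMetadata(dir string, cfg map[string]string) error {
	notMnt, err := likelyNotMountPoint(dir)
	if err != nil {
		return err
	}
	if !notMnt {
		fmt.Printf("skipping persist, %s is already mounted\n", dir)
		return nil
	}
	data, err := json.Marshal(cfg)
	if err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(dir, "vol_data.json"), data, 0600)
}

func main() {
	dir := "/tmp/example-global-pd-path" // stands in for globalPDPath
	if err := os.MkdirAll(dir, 0750); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	cfg := map[string]string{"iqn": "iqn.2003-01.example.com:target"}
	if err := persistMetadata(dir, cfg); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```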

test/e2e/framework/pod/create.go

Lines changed: 2 additions & 1 deletion
```diff
@@ -38,6 +38,7 @@ var (
 type Config struct {
 	NS                  string
 	PVCs                []*v1.PersistentVolumeClaim
+	PVCsReadOnly        bool
 	InlineVolumeSources []*v1.VolumeSource
 	IsPrivileged        bool
 	Command             string
@@ -224,7 +225,7 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
 			volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
 		}
 
-		volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
+		volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: podConfig.PVCsReadOnly}}}
 		volumeIndex++
 	}
 	for _, src := range podConfig.InlineVolumeSources {
```
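The new PVCsReadOnly knob simply flows into the ReadOnly field of the generated PersistentVolumeClaimVolumeSource. A small, self-contained sketch of what that produces (the claim name and volume name are illustrative):

```go
// Sketch: the volume source MakeSecPod builds when Config.PVCsReadOnly is true.
package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	readOnly := true // what podConfig.PVCsReadOnly feeds into
	vol := v1.Volume{
		Name: "volume1",
		VolumeSource: v1.VolumeSource{
			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
				ClaimName: "example-claim",
				ReadOnly:  readOnly,
			},
		},
	}
	out, err := json.MarshalIndent(vol, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints the PVC volume source with "readOnly": true, which is what makes
	// kubelet mount the claim read-only inside the pod.
	fmt.Println(string(out))
}
```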

test/e2e/storage/testsuites/multivolume.go

Lines changed: 66 additions & 3 deletions
```diff
@@ -322,7 +322,35 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 
 		// Test access to the volume from pods on different node
 		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */)
+			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */, false /* readOnly */)
+	})
+
+	// This tests below configuration:
+	//          [pod1] [pod2]
+	//          [    node1   ]
+	//             \      /     <- same volume mode (read only)
+	//             [volume1]
+	ginkgo.It("should concurrently access the single read-only volume from pods on the same node", func() {
+		init()
+		defer cleanup()
+
+		numPods := 2
+
+		if !l.driver.GetDriverInfo().Capabilities[CapMultiPODs] {
+			e2eskipper.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
+		}
+
+		// Create volume
+		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
+		resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
+		l.resources = append(l.resources, resource)
+
+		// Initialize the volume with a filesystem - it's going to be mounted as read-only below.
+		initializeVolume(l.cs, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection)
+
+		// Test access to the volume from pods on a single node
+		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
+			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */, true /* readOnly */)
 	})
 
 	// This tests below configuration:
@@ -364,7 +392,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 
 		// Test access to the volume from pods on different node
 		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-			l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */)
+			l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */, false /* readOnly */)
 	})
 }
 
@@ -442,7 +470,8 @@ func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs cli
 // pod deletion doesn't affect. Pods are deployed on the same node or different nodes depending on requiresSameNode.
 // Read/write check are done across pod, by check reading both what pod{n-1} and pod{n} wrote from pod{n}.
 func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Interface, ns string,
-	node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool) {
+	node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool,
+	readOnly bool) {
 
 	var pods []*v1.Pod
 
@@ -455,6 +484,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 			PVCs:          []*v1.PersistentVolumeClaim{pvc},
 			SeLinuxLabel:  e2epv.SELinuxLabel,
 			NodeSelection: node,
+			PVCsReadOnly:  readOnly,
 		}
 		pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
 		defer func() {
@@ -483,6 +513,11 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
 		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)
 
+		if readOnly {
+			ginkgo.By("Skipping volume content checks, volume is read-only")
+			continue
+		}
+
 		if i != 0 {
 			ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1))
 			// For 1st pod, no one has written data yet, so pass the read check
@@ -514,6 +549,11 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		ginkgo.By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
 		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, "/mnt/volume1")
 
+		if readOnly {
+			ginkgo.By("Skipping volume content checks, volume is read-only")
+			continue
+		}
+
 		if i == 0 {
 			// This time there should be data that last pod wrote, for 1st pod
 			ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that last pod write works properly", index))
@@ -585,3 +625,26 @@ func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, nodes *v1.N
 
 	return nil
 }
+
+// initializeVolume creates a filesystem on given volume, so it can be used as read-only later
+func initializeVolume(cs clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
+	if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {
+		// Block volumes do not need to be initialized.
+		return
+	}
+
+	ginkgo.By(fmt.Sprintf("Initializing a filesystem on PVC %s", pvc.Name))
+	// Just create a pod with the volume as read-write. Kubernetes will create a filesystem there
+	// if it does not exist yet.
+	podConfig := e2epod.Config{
+		NS:            ns,
+		PVCs:          []*v1.PersistentVolumeClaim{pvc},
+		SeLinuxLabel:  e2epv.SELinuxLabel,
+		NodeSelection: node,
+	}
+	pod, err := e2epod.CreateSecPod(cs, &podConfig, framework.PodStartTimeout)
+	defer func() {
+		framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
+	}()
+	framework.ExpectNoError(err)
+}
```
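The readOnly branch in TestConcurrentAccessToSingleVolume deliberately skips the read/write content checks, since any write into a read-only mount fails. As a rough illustration of that failure mode, here is a Linux-only sketch that needs root; the bind mount merely stands in for kubelet's read-only volume mount and the /tmp paths are made up, so this is not the test framework's code.

```go
// Sketch: writes into a read-only mount fail with "read-only file system".
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	src, dst := "/tmp/rovol-src", "/tmp/rovol-dst"
	for _, d := range []string{src, dst} {
		if err := os.MkdirAll(d, 0755); err != nil {
			panic(err)
		}
	}
	// Bind mount src onto dst, then remount the bind read-only
	// (MS_RDONLY on the initial bind mount is ignored by the kernel).
	if err := unix.Mount(src, dst, "", unix.MS_BIND, ""); err != nil {
		panic(err)
	}
	defer unix.Unmount(dst, 0)
	if err := unix.Mount("", dst, "", unix.MS_BIND|unix.MS_REMOUNT|unix.MS_RDONLY, ""); err != nil {
		panic(err)
	}

	_, err := os.Create(dst + "/probe")
	fmt.Println("write into read-only mount:", err) // expect: read-only file system
}
```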
