From 4a7320f2641671f034b7dffcb9868e57d144cd96 Mon Sep 17 00:00:00 2001
From: andyzhangx
Date: Sat, 3 Aug 2024 06:44:05 +0000
Subject: [PATCH] fix: respect ReadOnly AccessMode in volume mount

fix test: add e2e test

fix

fix

fix append

fix logging

fix
---
 pkg/blob/blob.go                      |  9 ++++
 pkg/blob/blob_test.go                 | 57 +++++++++++++++++++++++++
 pkg/blob/nodeserver.go                | 11 ++++-
 test/e2e/dynamic_provisioning_test.go | 61 ++++++++++++++++++++++++++-
 test/e2e/testsuites/specs.go          |  6 +++
 test/e2e/testsuites/testsuites.go     |  5 +++
 6 files changed, 147 insertions(+), 2 deletions(-)

diff --git a/pkg/blob/blob.go b/pkg/blob/blob.go
index 165ffc12c..bf60fed70 100644
--- a/pkg/blob/blob.go
+++ b/pkg/blob/blob.go
@@ -1099,3 +1099,12 @@ func isSupportedFSGroupChangePolicy(policy string) bool {
 	}
 	return false
 }
+
+func isReadOnlyFromCapability(vc *csi.VolumeCapability) bool {
+	if vc.GetAccessMode() == nil {
+		return false
+	}
+	mode := vc.GetAccessMode().GetMode()
+	return (mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
+		mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY)
+}
diff --git a/pkg/blob/blob_test.go b/pkg/blob/blob_test.go
index b78727760..95d797de5 100644
--- a/pkg/blob/blob_test.go
+++ b/pkg/blob/blob_test.go
@@ -29,6 +29,7 @@ import (
 	"time"
 
 	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
+	"github.com/container-storage-interface/spec/lib/go/csi"
 	"github.com/stretchr/testify/assert"
 	"go.uber.org/mock/gomock"
 	"golang.org/x/sync/errgroup"
@@ -1822,3 +1823,59 @@ func TestIsSupportedFSGroupChangePolicy(t *testing.T) {
 		}
 	}
 }
+
+func TestIsReadOnlyFromCapability(t *testing.T) {
+	testCases := []struct {
+		name           string
+		vc             *csi.VolumeCapability
+		expectedResult bool
+	}{
+		{
+			name:           "false with empty capabilities",
+			vc:             &csi.VolumeCapability{},
+			expectedResult: false,
+		},
+		{
+			name: "false with capability that has no access mode",
+			vc: &csi.VolumeCapability{
+				AccessType: &csi.VolumeCapability_Mount{
+					Mount: &csi.VolumeCapability_MountVolume{},
+				},
+			},
+		},
+		{
+			name: "false with SINGLE_NODE_WRITER capabilities",
+			vc: &csi.VolumeCapability{
+				AccessMode: &csi.VolumeCapability_AccessMode{
+					Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
+				},
+			},
+			expectedResult: false,
+		},
+		{
+			name: "true with MULTI_NODE_READER_ONLY capabilities",
+			vc: &csi.VolumeCapability{
+				AccessMode: &csi.VolumeCapability_AccessMode{
+					Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
+				},
+			},
+			expectedResult: true,
+		},
+		{
+			name: "true with SINGLE_NODE_READER_ONLY capabilities",
+			vc: &csi.VolumeCapability{
+				AccessMode: &csi.VolumeCapability_AccessMode{
+					Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
+				},
+			},
+			expectedResult: true,
+		},
+	}
+
+	for _, test := range testCases {
+		result := isReadOnlyFromCapability(test.vc)
+		if result != test.expectedResult {
+			t.Errorf("case(%s): isReadOnlyFromCapability returned with %v, not equal to %v", test.name, result, test.expectedResult)
+		}
+	}
+}
diff --git a/pkg/blob/nodeserver.go b/pkg/blob/nodeserver.go
index 38078fd40..80706649a 100644
--- a/pkg/blob/nodeserver.go
+++ b/pkg/blob/nodeserver.go
@@ -340,6 +340,15 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe
 		serverAddress = fmt.Sprintf("%s.blob.%s", accountName, storageEndpointSuffix)
 	}
 
+	if isReadOnlyFromCapability(volumeCapability) {
+		if isNFSProtocol(protocol) {
+			mountFlags = util.JoinMountOptions(mountFlags, []string{"ro"})
+		} else {
+			mountFlags = util.JoinMountOptions(mountFlags, []string{"-o ro"})
[]string{"-o ro"}) + } + klog.V(2).Infof("CSI volume is read-only, mounting with extra option ro") + } + if isNFSProtocol(protocol) { klog.V(2).Infof("target %v\nprotocol %v\n\nvolumeId %v\nmountflags %v\nserverAddress %v", targetPath, protocol, volumeID, mountFlags, serverAddress) @@ -412,7 +421,7 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe args = args + " " + opt } - klog.V(2).Infof("target %v\nprotocol %v\n\nvolumeId %v\nountflags %v mountOptions %v volumeMountGroup %s\nargs %v\nserverAddress %v", + klog.V(2).Infof("target %v protocol %v volumeId %v\nmountflags %v\nmountOptions %v volumeMountGroup %s\nargs %v\nserverAddress %v", targetPath, protocol, volumeID, mountFlags, mountOptions, volumeMountGroup, args, serverAddress) authEnv = append(authEnv, "AZURE_STORAGE_ACCOUNT="+accountName, "AZURE_STORAGE_BLOB_ENDPOINT="+serverAddress) diff --git a/test/e2e/dynamic_provisioning_test.go b/test/e2e/dynamic_provisioning_test.go index e7cc768ed..3544368cb 100644 --- a/test/e2e/dynamic_provisioning_test.go +++ b/test/e2e/dynamic_provisioning_test.go @@ -205,7 +205,6 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Dynamic Provisioning", func() { Cmd: "touch /mnt/test-1/data", Volumes: []testsuites.VolumeDetails{ { - FSType: "ext4", ClaimSize: "10Gi", VolumeMount: testsuites.VolumeMountDetails{ NameGenerate: "test-volume-", @@ -227,6 +226,66 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Dynamic Provisioning", func() { test.Run(ctx, cs, ns) }) + ginkgo.It("should create a blobfuse volume on demand and mount it as readOnly when volume access mode is readonly", func(ctx ginkgo.SpecContext) { + pods := []testsuites.PodDetails{ + { + Cmd: "touch /mnt/test-1/data", + Volumes: []testsuites.VolumeDetails{ + { + ClaimSize: "10Gi", + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}, + }, + }, + }, + } + test := testsuites.DynamicallyProvisionedReadOnlyVolumeTest{ + CSIDriver: testDriver, + Pods: pods, + StorageClassParameters: map[string]string{"skuName": "Standard_GRS"}, + } + if isAzureStackCloud { + test.StorageClassParameters = map[string]string{"skuName": "Standard_LRS"} + } + test.Run(ctx, cs, ns) + }) + + ginkgo.It("should create a nfs volume on demand and mount it as readOnly when volume access mode is readonly", func(ctx ginkgo.SpecContext) { + if isAzureStackCloud { + ginkgo.Skip("test case is not available for Azure Stack") + } + pods := []testsuites.PodDetails{ + { + Cmd: "touch /mnt/test-1/data", + Volumes: []testsuites.VolumeDetails{ + { + ClaimSize: "10Gi", + MountOptions: []string{ + "nconnect=8", + }, + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}, + }, + }, + }, + } + test := testsuites.DynamicallyProvisionedReadOnlyVolumeTest{ + CSIDriver: testDriver, + Pods: pods, + StorageClassParameters: map[string]string{ + "skuName": "Premium_LRS", + "protocol": "nfs", + }, + } + test.Run(ctx, cs, ns) + }) + ginkgo.It("should create multiple PV objects, bind to PVCs and attach all to different pods on the same node", func(ctx ginkgo.SpecContext) { pods := []testsuites.PodDetails{ { diff --git a/test/e2e/testsuites/specs.go b/test/e2e/testsuites/specs.go index fe997b802..af0f496ab 100644 --- a/test/e2e/testsuites/specs.go +++ b/test/e2e/testsuites/specs.go @@ -55,6 +55,7 @@ type VolumeDetails struct { 
 	StorageClass       *storagev1.StorageClass
 	NodeStageSecretRef string
 	Attrib             map[string]string
+	AccessModes        []v1.PersistentVolumeAccessMode
 }
 
 type VolumeMode int
@@ -196,6 +197,11 @@ func (volume *VolumeDetails) SetupDynamicPersistentVolumeClaim(ctx context.Conte
 	} else {
 		tpvc = NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, volume.VolumeMode, &createdStorageClass)
 	}
+
+	if len(volume.AccessModes) > 0 {
+		ginkgo.By("setting up the PVC with access modes")
+		tpvc.AccessModes = volume.AccessModes
+	}
 	tpvc.Create(ctx)
 	cleanupFuncs = append(cleanupFuncs, tpvc.Cleanup)
 	// PV will not be ready until PVC is used in a pod when volumeBindingMode: WaitForFirstConsumer
diff --git a/test/e2e/testsuites/testsuites.go b/test/e2e/testsuites/testsuites.go
index ed94145df..70b649f86 100644
--- a/test/e2e/testsuites/testsuites.go
+++ b/test/e2e/testsuites/testsuites.go
@@ -123,6 +123,7 @@ type TestPersistentVolumeClaim struct {
 	persistentVolumeClaim          *v1.PersistentVolumeClaim
 	requestedPersistentVolumeClaim *v1.PersistentVolumeClaim
 	dataSource                     *v1.TypedLocalObjectReference
+	AccessModes                    []v1.PersistentVolumeAccessMode
 }
 
 func NewTestPersistentVolumeClaim(c clientset.Interface, ns *v1.Namespace, claimSize string, volumeMode VolumeMode, sc *storagev1.StorageClass) *TestPersistentVolumeClaim {
@@ -163,6 +164,10 @@ func (t *TestPersistentVolumeClaim) Create(ctx context.Context) {
 		storageClassName = t.storageClass.Name
 	}
 	t.requestedPersistentVolumeClaim = generatePVC(t.namespace.Name, storageClassName, t.claimSize, t.volumeMode, t.dataSource)
+	if len(t.AccessModes) > 0 {
+		ginkgo.By("setting access modes")
+		t.requestedPersistentVolumeClaim.Spec.AccessModes = t.AccessModes
+	}
 	t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Create(ctx, t.requestedPersistentVolumeClaim, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 }
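
Reviewer note: the snippet below is a minimal standalone sketch of the behavior this patch adds, kept out of the diff so the patch still applies cleanly. It mirrors isReadOnlyFromCapability from pkg/blob/blob.go; extraMountFlag is a hypothetical helper (not in the patch) that inlines a simplified stand-in for the NFS/blobfuse branching in NodeStageVolume, where the real code calls isNFSProtocol and util.JoinMountOptions.

package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// Mirrors pkg/blob/blob.go: a capability is read-only when its access
// mode is one of the two READER_ONLY modes defined by the CSI spec.
func isReadOnlyFromCapability(vc *csi.VolumeCapability) bool {
	if vc.GetAccessMode() == nil {
		return false
	}
	mode := vc.GetAccessMode().GetMode()
	return mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
		mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY
}

// extraMountFlag is a simplified, illustrative stand-in for the branching
// added to NodeStageVolume: NFS mounts get a plain "ro" option while
// blobfuse mounts get "-o ro".
func extraMountFlag(vc *csi.VolumeCapability, isNFS bool) string {
	if !isReadOnlyFromCapability(vc) {
		return ""
	}
	if isNFS {
		return "ro"
	}
	return "-o ro"
}

func main() {
	roCap := &csi.VolumeCapability{
		AccessMode: &csi.VolumeCapability_AccessMode{
			Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
		},
	}
	rwCap := &csi.VolumeCapability{
		AccessMode: &csi.VolumeCapability_AccessMode{
			Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
		},
	}
	fmt.Printf("nfs ro:      %q\n", extraMountFlag(roCap, true))  // "ro"
	fmt.Printf("blobfuse ro: %q\n", extraMountFlag(roCap, false)) // "-o ro"
	fmt.Printf("nfs rw:      %q\n", extraMountFlag(rwCap, true))  // ""
}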
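
For context, this is roughly the PVC that the new e2e cases end up creating once the AccessModes plumbing in specs.go/testsuites.go takes effect. Names here are placeholders, and the field layout assumes k8s.io/api v0.29+, where the PVC resources field is VolumeResourceRequirements (older modules use ResourceRequirements instead).

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Placeholder storage class name; the e2e suite creates its own.
	scName := "blob-fuse"
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "pvc-blob-"},
		Spec: v1.PersistentVolumeClaimSpec{
			// The VolumeDetails.AccessModes value flows into the PVC
			// spec here; ReadOnlyMany is what makes NodeStageVolume
			// add the ro mount option on the node.
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
			StorageClassName: &scName,
			// VolumeResourceRequirements on k8s.io/api v0.29+.
			Resources: v1.VolumeResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse("10Gi"),
				},
			},
		},
	}
	fmt.Println(pvc.Spec.AccessModes, pvc.Spec.Resources.Requests.Storage())
}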