From b6296ee31fddb0fd4c38a21c4ab66d0a1a1129e1 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 21 Mar 2025 16:43:18 +0000 Subject: [PATCH 1/5] cinder-csi-plugin: Don't set segment with empty topology value This is actually handled by csi-provisioner, but it's arguably incorrect. Signed-off-by: Stephen Finucane --- pkg/csi/cinder/controllerserver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go index 5ba13bb0b7..4cc3576aa6 100644 --- a/pkg/csi/cinder/controllerserver.go +++ b/pkg/csi/cinder/controllerserver.go @@ -1082,7 +1082,7 @@ func getCreateVolumeResponse(vol *volumes.Volume, volCtx map[string]string, igno if accessibleTopologyReq != nil { accessibleTopology = accessibleTopologyReq.GetPreferred() } - } else { + } else if vol.AvailabilityZone != "" { accessibleTopology = []*csi.Topology{ { Segments: map[string]string{topologyKey: vol.AvailabilityZone}, From 1bc40b52522030cb9157d6970cb44eac29d85921 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 25 Mar 2025 13:14:06 +0000 Subject: [PATCH 2/5] tests: Don't use global mocks Calling '.On' with the same expected arguments multiple times will only result in a single mock: the first one. We should avoid doing this. We also remove some useless comments that simply duplicate what the calls already say. Signed-off-by: Stephen Finucane --- pkg/csi/cinder/controllerserver_test.go | 161 ++++++++++++------------ pkg/csi/cinder/nodeserver_test.go | 75 ++++++----- 2 files changed, 118 insertions(+), 118 deletions(-) diff --git a/pkg/csi/cinder/controllerserver_test.go b/pkg/csi/cinder/controllerserver_test.go index fc5adb227b..dd1647e1c5 100644 --- a/pkg/csi/cinder/controllerserver_test.go +++ b/pkg/csi/cinder/controllerserver_test.go @@ -31,40 +31,39 @@ import ( cpoerrors "k8s.io/cloud-provider-openstack/pkg/util/errors" ) -var ( - fakeCs *controllerServer - fakeCsMultipleClouds *controllerServer - osmock *openstack.OpenStackMock - osmockRegionX *openstack.OpenStackMock -) +func fakeControllerServer() (*controllerServer, *openstack.OpenStackMock) { + osmock := new(openstack.OpenStackMock) -// Init Controller Server -func init() { - if fakeCs == nil { - osmock = new(openstack.OpenStackMock) - osmockRegionX = new(openstack.OpenStackMock) + d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster, WithTopology: true}) - d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster, WithTopology: true}) + cs := NewControllerServer(d, map[string]openstack.IOpenStack{ + "": osmock, + }) + return cs, osmock +} - fakeCs = NewControllerServer(d, map[string]openstack.IOpenStack{ - "": osmock, - }) - fakeCsMultipleClouds = NewControllerServer(d, map[string]openstack.IOpenStack{ - "": osmock, - "region-x": osmockRegionX, - }) - } +func fakeControllerServerWithMultipleRegions() (*controllerServer, *openstack.OpenStackMock, *openstack.OpenStackMock) { + osmock := new(openstack.OpenStackMock) + osmockAlt := new(openstack.OpenStackMock) + + d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster, WithTopology: true}) + + cs := NewControllerServer(d, map[string]openstack.IOpenStack{ + "": osmock, + "region-x": osmockAlt, + }) + return cs, osmock, osmockAlt } // Test CreateVolume func TestCreateVolume(t *testing.T) { + fakeCs, osmock := fakeControllerServer() + // mock OpenStack properties := map[string]string{cinderCSIClusterIDKey: FakeCluster} - // CreateVolume(name string, size int, vtype, availability 
string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (string, string, int, error) osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, FakeAvailability, "", "", "", properties).Return(&FakeVol, nil) - osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) - // Init assert + assert := assert.New(t) // Fake request @@ -103,15 +102,15 @@ func TestCreateVolume(t *testing.T) { // Test CreateVolume fails with quota exceeded error func TestCreateVolumeQuotaError(t *testing.T) { + fakeCs, osmock := fakeControllerServer() + errorVolume := "errorVolume" // mock OpenStack properties := map[string]string{cinderCSIClusterIDKey: FakeCluster} - // CreateVolume(name string, size int, vtype, availability string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (string, string, int, error) osmock.On("CreateVolume", errorVolume, mock.AnythingOfType("int"), FakeVolType, FakeAvailability, "", "", "", properties).Return(&volumes.Volume{}, cpoerrors.ErrQuotaExceeded) - osmock.On("GetVolumesByName", errorVolume).Return(FakeVolListEmpty, nil) - // Init assert + assert := assert.New(t) // Fake request @@ -150,14 +149,13 @@ func TestCreateVolumeQuotaError(t *testing.T) { // Test CreateVolume with additional param func TestCreateVolumeWithParam(t *testing.T) { + fakeCs, osmock := fakeControllerServer() + // mock OpenStack properties := map[string]string{cinderCSIClusterIDKey: FakeCluster} - // CreateVolume(name string, size int, vtype, availability string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (string, string, int, error) - // Vol type and availability comes from CreateVolumeRequest.Parameters osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), "dummyVolType", "cinder", "", "", "", properties).Return(&FakeVol, nil) - osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) - // Init assert + assert := assert.New(t) // Fake request @@ -200,6 +198,8 @@ func TestCreateVolumeWithParam(t *testing.T) { } func TestCreateVolumeWithExtraMetadata(t *testing.T) { + fakeCs, osmock := fakeControllerServer() + // mock OpenStack properties := map[string]string{ cinderCSIClusterIDKey: FakeCluster, @@ -207,9 +207,7 @@ func TestCreateVolumeWithExtraMetadata(t *testing.T) { sharedcsi.PvcNameKey: FakePVCName, sharedcsi.PvcNamespaceKey: FakePVCNamespace, } - // CreateVolume(name string, size int, vtype, availability string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (string, string, int, error) osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, FakeAvailability, "", "", "", properties).Return(&FakeVol, nil) - osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) // Fake request @@ -245,12 +243,12 @@ func TestCreateVolumeWithExtraMetadata(t *testing.T) { } func TestCreateVolumeFromSnapshot(t *testing.T) { + fakeCs, osmock := fakeControllerServer() + properties := map[string]string{cinderCSIClusterIDKey: FakeCluster} - // CreateVolume(name string, size int, vtype, availability string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (string, string, int, error) osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, "", FakeSnapshotID, "", "", properties).Return(&FakeVolFromSnapshot, nil) osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) - // Init assert assert := 
assert.New(t) src := &csi.VolumeContentSource{ @@ -291,12 +289,12 @@ func TestCreateVolumeFromSnapshot(t *testing.T) { } func TestCreateVolumeFromSourceVolume(t *testing.T) { + fakeCs, osmock := fakeControllerServer() + properties := map[string]string{cinderCSIClusterIDKey: FakeCluster} - // CreateVolume(name string, size int, vtype, availability string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (string, string, int, error) osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, "", "", FakeVolID, "", properties).Return(&FakeVolFromSourceVolume, nil) osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) - // Init assert assert := assert.New(t) volsrc := &csi.VolumeContentSource{ @@ -336,12 +334,14 @@ assert.Equal(FakeVolID, actualRes.Volume.ContentSource.GetVolume().VolumeId) } -// Test CreateVolumeDuplicate +// Test CreateVolume when a volume with the given name already exists func TestCreateVolumeDuplicate(t *testing.T) { - // Init assert - assert := assert.New(t) + fakeCs, osmock := fakeControllerServer() osmock.On("GetVolumesByName", "fake-duplicate").Return(FakeVolList, nil) + osmock.On("GetBlockStorageOpts").Return(openstack.BlockStorageOpts{}) + + assert := assert.New(t) // Fake request fakeReq := &csi.CreateVolumeRequest{ @@ -370,10 +371,10 @@ func TestCreateVolumeDuplicate(t *testing.T) { // Test DeleteVolume func TestDeleteVolume(t *testing.T) { - // DeleteVolume(volumeID string) error + fakeCs, osmock := fakeControllerServer() + osmock.On("DeleteVolume", FakeVolID).Return(nil) - // Init assert assert := assert.New(t) // Fake request @@ -396,14 +397,12 @@ // Test ControllerPublishVolume func TestControllerPublishVolume(t *testing.T) { - // AttachVolume(instanceID, volumeID string) (string, error) + fakeCs, osmock := fakeControllerServer() + osmock.On("AttachVolume", FakeNodeID, FakeVolID).Return(FakeVolID, nil) - // WaitDiskAttached(instanceID string, volumeID string) error osmock.On("WaitDiskAttached", FakeNodeID, FakeVolID).Return(nil) - // GetAttachmentDiskPath(instanceID, volumeID string) (string, error) osmock.On("GetAttachmentDiskPath", FakeNodeID, FakeVolID).Return(FakeDevicePath, nil) - // Init assert assert := assert.New(t) // Fake request @@ -437,12 +436,11 @@ // Test ControllerUnpublishVolume func TestControllerUnpublishVolume(t *testing.T) { - // DetachVolume(instanceID, volumeID string) error + fakeCs, osmock := fakeControllerServer() + osmock.On("DetachVolume", FakeNodeID, FakeVolID).Return(nil) - // WaitDiskDetached(instanceID string, volumeID string) error osmock.On("WaitDiskDetached", FakeNodeID, FakeVolID).Return(nil) - // Init assert assert := assert.New(t) // Fake request @@ -493,9 +491,10 @@ } func TestListVolumes(t *testing.T) { + fakeCs, osmock := fakeControllerServer() + osmock.On("ListVolumes", 2, FakeVolID).Return(FakeVolListMultiple, "", nil) - // Init assert assert := assert.New(t) fakeReq := &csi.ListVolumesRequest{MaxEntries: 2, StartingToken: FakeVolID} @@ -536,6 +535,9 @@ type ListVolumesTestResult struct { } func TestGlobalListVolumesMultipleClouds(t *testing.T) { + fakeCs, osmock := fakeControllerServer() + fakeCsMulti, osmockMulti, osmockMultiAlt :=
fakeControllerServerWithMultipleRegions() + tests := []*ListVolumesTest{ { name: "Single cloud, no volume", @@ -601,11 +603,11 @@ func TestGlobalListVolumesMultipleClouds(t *testing.T) { maxEntries: 0, volumeSet: map[string]ListVolumeTestOSMock{ "": { - mockCloud: osmock, + mockCloud: osmockMulti, mockVolumesRes: []volumes.Volume{}, }, "region-x": { - mockCloud: osmockRegionX, + mockCloud: osmockMultiAlt, mockVolumesRes: []volumes.Volume{}, }, }, @@ -620,11 +622,11 @@ func TestGlobalListVolumesMultipleClouds(t *testing.T) { maxEntries: 0, volumeSet: map[string]ListVolumeTestOSMock{ "": { - mockCloud: osmock, + mockCloud: osmockMulti, mockVolumesRes: []volumes.Volume{}, }, "region-x": { - mockCloud: osmockRegionX, + mockCloud: osmockMultiAlt, mockVolumesRes: []volumes.Volume{}, }, }, @@ -637,7 +639,7 @@ func TestGlobalListVolumesMultipleClouds(t *testing.T) { maxEntries: 0, volumeSet: map[string]ListVolumeTestOSMock{ "": { - mockCloud: osmock, + mockCloud: osmockMulti, mockVolumesRes: []volumes.Volume{ {ID: "vol1"}, {ID: "vol2"}, @@ -646,7 +648,7 @@ func TestGlobalListVolumesMultipleClouds(t *testing.T) { }, }, "region-x": { - mockCloud: osmockRegionX, + mockCloud: osmockMultiAlt, mockVolumesRes: []volumes.Volume{ {ID: "vol5"}, {ID: "vol6"}, @@ -670,7 +672,7 @@ func TestGlobalListVolumesMultipleClouds(t *testing.T) { startingToken: ":region-x", volumeSet: map[string]ListVolumeTestOSMock{ "": { - mockCloud: osmock, + mockCloud: osmockMulti, mockVolumesRes: []volumes.Volume{ {ID: "vol1"}, {ID: "vol2"}, @@ -679,7 +681,7 @@ func TestGlobalListVolumesMultipleClouds(t *testing.T) { }, }, "region-x": { - mockCloud: osmockRegionX, + mockCloud: osmockMultiAlt, mockVolumesRes: []volumes.Volume{ {ID: "vol5"}, {ID: "vol6"}, @@ -701,11 +703,11 @@ func TestGlobalListVolumesMultipleClouds(t *testing.T) { maxEntries: 2, volumeSet: map[string]ListVolumeTestOSMock{ "": { - mockCloud: osmock, + mockCloud: osmockMulti, mockVolumesRes: []volumes.Volume{}, }, "region-x": { - mockCloud: osmockRegionX, + mockCloud: osmockMultiAlt, mockVolumesRes: []volumes.Volume{}, }, }, @@ -719,14 +721,14 @@ func TestGlobalListVolumesMultipleClouds(t *testing.T) { maxEntries: 2, volumeSet: map[string]ListVolumeTestOSMock{ "": { - mockCloud: osmock, + mockCloud: osmockMulti, mockVolumesRes: []volumes.Volume{ {ID: "vol1"}, {ID: "vol2"}, }, }, "region-x": { - mockCloud: osmockRegionX, + mockCloud: osmockMultiAlt, mockVolumesRes: []volumes.Volume{ {ID: "vol3"}, {ID: "vol4"}, @@ -748,14 +750,14 @@ func TestGlobalListVolumesMultipleClouds(t *testing.T) { startingToken: ":region-x", volumeSet: map[string]ListVolumeTestOSMock{ "": { - mockCloud: osmock, + mockCloud: osmockMulti, mockVolumesRes: []volumes.Volume{ {ID: "vol1"}, {ID: "vol2"}, }, }, "region-x": { - mockCloud: osmockRegionX, + mockCloud: osmockMultiAlt, mockVolumesRes: []volumes.Volume{ {ID: "vol3"}, {ID: "vol4"}, @@ -777,14 +779,14 @@ func TestGlobalListVolumesMultipleClouds(t *testing.T) { startingToken: "vol4:region-x", volumeSet: map[string]ListVolumeTestOSMock{ "": { - mockCloud: osmock, + mockCloud: osmockMulti, mockVolumesRes: []volumes.Volume{ {ID: "vol1"}, {ID: "vol2"}, }, }, "region-x": { - mockCloud: osmockRegionX, + mockCloud: osmockMultiAlt, mockTokenReq: "vol4", mockVolumesRes: []volumes.Volume{ {ID: "vol5"}, @@ -801,7 +803,6 @@ func TestGlobalListVolumesMultipleClouds(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - // Init assert assert := assert.New(t) // Setup Mock for _, volumeSet := range test.volumeSet { @@ -834,7 
+835,7 @@ func TestGlobalListVolumesMultipleClouds(t *testing.T) { // Invoke ListVolumes cs := fakeCs if len(test.volumeSet) > 1 { - cs = fakeCsMultipleClouds + cs = fakeCsMulti } actualRes, err := cs.ListVolumes(FakeCtx, fakeReq) if err != nil { @@ -848,12 +849,14 @@ func TestGlobalListVolumesMultipleClouds(t *testing.T) { // Test CreateSnapshot func TestCreateSnapshot(t *testing.T) { + fakeCs, osmock := fakeControllerServer() + osmock.On("CreateSnapshot", FakeSnapshotName, FakeVolID, map[string]string{cinderCSIClusterIDKey: "cluster"}).Return(&FakeSnapshotRes, nil) osmock.On("ListSnapshots", map[string]string{"Name": FakeSnapshotName}).Return(FakeSnapshotListEmpty, "", nil) osmock.On("WaitSnapshotReady", FakeSnapshotID).Return(FakeSnapshotRes.Status, nil) osmock.On("ListBackups", map[string]string{"Name": FakeSnapshotName}).Return(FakeBackupListEmpty, nil) osmock.On("GetSnapshotByID", FakeVolID).Return(&FakeSnapshotRes, nil) - // Init assert + assert := assert.New(t) // Fake request @@ -877,6 +880,8 @@ func TestCreateSnapshot(t *testing.T) { // Test CreateSnapshot with extra metadata func TestCreateSnapshotWithExtraMetadata(t *testing.T) { + fakeCs, osmock := fakeControllerServer() + properties := map[string]string{ cinderCSIClusterIDKey: FakeCluster, sharedcsi.VolSnapshotNameKey: FakeSnapshotName, @@ -887,9 +892,8 @@ func TestCreateSnapshotWithExtraMetadata(t *testing.T) { osmock.On("CreateSnapshot", FakeSnapshotName, FakeVolID, properties).Return(&FakeSnapshotRes, nil) osmock.On("ListSnapshots", map[string]string{"Name": FakeSnapshotName}).Return(FakeSnapshotListEmpty, "", nil) - osmock.On("WaitSnapshotReady", FakeSnapshotID).Return(nil) + osmock.On("WaitSnapshotReady", FakeSnapshotID).Return(FakeSnapshotRes.Status, nil) - // Init assert assert := assert.New(t) // Fake request @@ -918,11 +922,11 @@ func TestCreateSnapshotWithExtraMetadata(t *testing.T) { // Test DeleteSnapshot func TestDeleteSnapshot(t *testing.T) { - // DeleteSnapshot(volumeID string) error + fakeCs, osmock := fakeControllerServer() + osmock.On("DeleteSnapshot", FakeSnapshotID).Return(nil) osmock.On("DeleteBackup", FakeSnapshotID).Return(nil) - // Init assert assert := assert.New(t) // Fake request @@ -944,7 +948,10 @@ func TestDeleteSnapshot(t *testing.T) { } func TestListSnapshots(t *testing.T) { + fakeCs, osmock := fakeControllerServer() + osmock.On("ListSnapshots", map[string]string{"Limit": "1", "Marker": FakeVolID, "Status": "available"}).Return(FakeSnapshotsRes, "", nil) + assert := assert.New(t) fakeReq := &csi.ListSnapshotsRequest{MaxEntries: 1, StartingToken: FakeVolID} @@ -959,14 +966,12 @@ func TestListSnapshots(t *testing.T) { } func TestControllerExpandVolume(t *testing.T) { + fakeCs, osmock := fakeControllerServer() + tState := []string{"available", "in-use"} - // ExpandVolume(volumeID string, status string, size int) osmock.On("ExpandVolume", FakeVolID, openstack.VolumeAvailableStatus, 5).Return(nil) - - // WaitVolumeTargetStatus(volumeID string, tState []string) error osmock.On("WaitVolumeTargetStatus", FakeVolID, tState).Return(nil) - // Init assert assert := assert.New(t) // Fake request @@ -994,10 +999,10 @@ func TestControllerExpandVolume(t *testing.T) { } func TestValidateVolumeCapabilities(t *testing.T) { - // GetVolume(volumeID string) + fakeCs, osmock := fakeControllerServer() + osmock.On("GetVolume", FakeVolID).Return(FakeVol1) - // Init assert assert := assert.New(t) // fake req diff --git a/pkg/csi/cinder/nodeserver_test.go b/pkg/csi/cinder/nodeserver_test.go index 4a461817c2..8e8713e05d 
100644 --- a/pkg/csi/cinder/nodeserver_test.go +++ b/pkg/csi/cinder/nodeserver_test.go @@ -32,46 +32,38 @@ import ( "k8s.io/cloud-provider-openstack/pkg/util/mount" ) -var fakeNs *nodeServer -var mmock *mount.MountMock -var metamock *metadata.MetadataMock -var omock *openstack.OpenStackMock +func fakeNodeServer() (*nodeServer, *openstack.OpenStackMock, *mount.MountMock, *metadata.MetadataMock) { + d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster, WithTopology: true}) -// Init Node Server -func init() { - if fakeNs == nil { - - d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster, WithTopology: true}) + osmock := new(openstack.OpenStackMock) + openstack.OsInstances = map[string]openstack.IOpenStack{ + "": osmock, + } - // mock MountMock - mmock = new(mount.MountMock) - mount.MInstance = mmock + mmock := new(mount.MountMock) + mount.MInstance = mmock - metamock = new(metadata.MetadataMock) - metadata.MetadataService = metamock + metamock := new(metadata.MetadataMock) + metadata.MetadataService = metamock - omock = new(openstack.OpenStackMock) - openstack.OsInstances = map[string]openstack.IOpenStack{ - "": omock, - } + opts := openstack.BlockStorageOpts{ + RescanOnResize: false, + NodeVolumeAttachLimit: maxVolumesPerNode, + } - opts := openstack.BlockStorageOpts{ - RescanOnResize: false, - NodeVolumeAttachLimit: maxVolumesPerNode, - } + fakeNs := NewNodeServer(d, mount.MInstance, metadata.MetadataService, opts, map[string]string{}) - fakeNs = NewNodeServer(d, mount.MInstance, metadata.MetadataService, opts, map[string]string{}) - } + return fakeNs, osmock, mmock, metamock } // Test NodeGetInfo func TestNodeGetInfo(t *testing.T) { + fakeNs, omock, _, metamock := fakeNodeServer() metamock.On("GetInstanceID").Return(FakeNodeID, nil) metamock.On("GetAvailabilityZone").Return(FakeAvailability, nil) omock.On("GetMaxVolumeLimit").Return(FakeMaxVolume) - // Init assert assert := assert.New(t) // Expected Result @@ -84,7 +76,7 @@ func TestNodeGetInfo(t *testing.T) { // Fake request fakeReq := &csi.NodeGetInfoRequest{} - // Invoke NodeGetId + // Invoke NodeGetInfo actualRes, err := fakeNs.NodeGetInfo(FakeCtx, fakeReq) if err != nil { t.Errorf("failed to NodeGetInfo: %v", err) @@ -96,11 +88,12 @@ func TestNodeGetInfo(t *testing.T) { // Test NodePublishVolume func TestNodePublishVolume(t *testing.T) { + fakeNs, omock, mmock, _ := fakeNodeServer() mmock.On("ScanForAttach", FakeDevicePath).Return(nil) mmock.On("IsLikelyNotMountPointAttach", FakeTargetPath).Return(true, nil) omock.On("GetVolume", FakeVolID).Return(FakeVol, nil) - // Init assert + assert := assert.New(t) // Expected Result @@ -134,13 +127,13 @@ func TestNodePublishVolume(t *testing.T) { } func TestNodePublishVolumeEphemeral(t *testing.T) { + fakeNs, omock, _, _ := fakeNodeServer() properties := map[string]string{cinderCSIClusterIDKey: FakeCluster} fvolName := fmt.Sprintf("ephemeral-%s", FakeVolID) omock.On("CreateVolume", fvolName, 2, "test", "nova", "", "", "", properties).Return(&FakeVol, nil) - // Init assert assert := assert.New(t) // Expected Result @@ -170,12 +163,12 @@ func TestNodePublishVolumeEphemeral(t *testing.T) { // Test NodeStageVolume func TestNodeStageVolume(t *testing.T) { + fakeNs, omock, mmock, _ := fakeNodeServer() mmock.On("GetDevicePath", FakeVolID).Return(FakeDevicePath, nil) mmock.On("IsLikelyNotMountPointAttach", FakeStagingTargetPath).Return(true, nil) omock.On("GetVolume", FakeVolID).Return(FakeVol, nil) - // Init assert assert := assert.New(t) // Expected Result @@ 
-208,8 +201,10 @@ func TestNodeStageVolume(t *testing.T) { } func TestNodeStageVolumeBlock(t *testing.T) { + fakeNs, _, mmock, _ := fakeNodeServer() + + mmock.On("GetDevicePath", FakeVolID).Return(FakeDevicePath, nil) - // Init assert assert := assert.New(t) // Expected Result @@ -243,11 +238,11 @@ func TestNodeStageVolumeBlock(t *testing.T) { // Test NodeUnpublishVolume func TestNodeUnpublishVolume(t *testing.T) { + fakeNs, omock, mmock, _ := fakeNodeServer() mmock.On("UnmountPath", FakeTargetPath).Return(nil) omock.On("GetVolume", FakeVolID).Return(FakeVol, nil) - // Init assert assert := assert.New(t) // Expected Result @@ -271,11 +266,11 @@ func TestNodeUnpublishVolume(t *testing.T) { // Test NodeUnstageVolume func TestNodeUnstageVolume(t *testing.T) { + fakeNs, omock, mmock, _ := fakeNodeServer() mmock.On("UnmountPath", FakeStagingTargetPath).Return(nil) omock.On("GetVolume", FakeVolID).Return(FakeVol, nil) - // Init assert assert := assert.New(t) // Expected Result @@ -298,8 +293,8 @@ func TestNodeUnstageVolume(t *testing.T) { } func TestNodeExpandVolume(t *testing.T) { + fakeNs, _, _, _ := fakeNodeServer() - // Init assert assert := assert.New(t) // setup for test @@ -332,14 +327,16 @@ func TestNodeExpandVolume(t *testing.T) { } func TestNodeGetVolumeStatsBlock(t *testing.T) { + fakeNs, _, mmock, _ := fakeNodeServer() + + tempDir := os.TempDir() + volumePath := filepath.Join(tempDir, FakeTargetPath) + + mmock.On("GetDeviceStats", volumePath).Return(FakeBlockDeviceStats, nil) - // Init assert assert := assert.New(t) - mmock.ExpectedCalls = nil // setup for test - tempDir := os.TempDir() - volumePath := filepath.Join(tempDir, FakeTargetPath) err := os.MkdirAll(volumePath, 0750) if err != nil { t.Fatalf("Failed to set up volumepath: %v", err) @@ -352,7 +349,6 @@ func TestNodeGetVolumeStatsBlock(t *testing.T) { VolumePath: volumePath, } - mmock.On("GetDeviceStats", volumePath).Return(FakeBlockDeviceStats, nil) expectedBlockRes := &csi.NodeGetVolumeStatsResponse{ Usage: []*csi.VolumeUsage{ {Total: FakeBlockDeviceStats.TotalBytes, Unit: csi.VolumeUsage_BYTES}, @@ -367,10 +363,9 @@ func TestNodeGetVolumeStatsBlock(t *testing.T) { } func TestNodeGetVolumeStatsFs(t *testing.T) { + fakeNs, _, mmock, _ := fakeNodeServer() - // Init assert assert := assert.New(t) - mmock.ExpectedCalls = nil // setup for test tempDir := os.TempDir() From 1bc5b09020989bd14bbc5d90b041f6c62676be69 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 25 Mar 2025 11:50:35 +0000 Subject: [PATCH 3/5] cinder-csi-plugin: Add tests for ignore-volume-az Signed-off-by: Stephen Finucane --- pkg/csi/cinder/controllerserver_test.go | 66 ++++++++++++++++++++++ pkg/csi/cinder/openstack/openstack_mock.go | 3 +- 2 files changed, 68 insertions(+), 1 deletion(-) diff --git a/pkg/csi/cinder/controllerserver_test.go b/pkg/csi/cinder/controllerserver_test.go index dd1647e1c5..26b8da6782 100644 --- a/pkg/csi/cinder/controllerserver_test.go +++ b/pkg/csi/cinder/controllerserver_test.go @@ -63,6 +63,7 @@ func TestCreateVolume(t *testing.T) { properties := map[string]string{cinderCSIClusterIDKey: FakeCluster} osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, FakeAvailability, "", "", "", properties).Return(&FakeVol, nil) osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) + osmock.On("GetBlockStorageOpts").Return(openstack.BlockStorageOpts{}) assert := assert.New(t) @@ -110,6 +111,7 @@ func TestCreateVolumeQuotaError(t *testing.T) { properties := 
map[string]string{cinderCSIClusterIDKey: FakeCluster} osmock.On("CreateVolume", errorVolume, mock.AnythingOfType("int"), FakeVolType, FakeAvailability, "", "", "", properties).Return(&volumes.Volume{}, cpoerrors.ErrQuotaExceeded) osmock.On("GetVolumesByName", errorVolume).Return(FakeVolListEmpty, nil) + osmock.On("GetBlockStorageOpts").Return(openstack.BlockStorageOpts{}) assert := assert.New(t) @@ -155,6 +157,7 @@ func TestCreateVolumeWithParam(t *testing.T) { properties := map[string]string{cinderCSIClusterIDKey: FakeCluster} osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), "dummyVolType", "cinder", "", "", "", properties).Return(&FakeVol, nil) osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) + osmock.On("GetBlockStorageOpts").Return(openstack.BlockStorageOpts{}) assert := assert.New(t) @@ -197,6 +200,66 @@ func TestCreateVolumeWithParam(t *testing.T) { assert.Equal(FakeAvailability, actualRes.Volume.AccessibleTopology[0].GetSegments()[topologyKey]) } +// Test CreateVolume with ignore-volume-az option +func TestCreateVolumeWithIgnoreVolumeAZ(t *testing.T) { + fakeCs, osmock := fakeControllerServer() + + properties := map[string]string{cinderCSIClusterIDKey: FakeCluster} + osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, "cinder", "", "", "", properties).Return(&FakeVol, nil) + osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) + osmock.On("GetBlockStorageOpts").Return(openstack.BlockStorageOpts{IgnoreVolumeAZ: true}) + + assert := assert.New(t) + + // Fake request + fakeReq := &csi.CreateVolumeRequest{ + Name: FakeVolName, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + + Parameters: map[string]string{ + "availability": "cinder", + }, + + AccessibilityRequirements: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{topologyKey: "bar"}, + }, + { + Segments: map[string]string{topologyKey: "foo"}, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{topologyKey: "foo"}, + }, + { + Segments: map[string]string{topologyKey: "bar"}, + }, + }, + }, + } + + // Invoke CreateVolume + actualRes, err := fakeCs.CreateVolume(FakeCtx, fakeReq) + if err != nil { + t.Errorf("failed to CreateVolume: %v", err) + } + + // Assert + assert.NotNil(actualRes.Volume) + assert.NotNil(actualRes.Volume.CapacityBytes) + assert.NotEqual(0, len(actualRes.Volume.VolumeId), "Volume Id is nil") + assert.NotNil(actualRes.Volume.AccessibleTopology) + assert.Equal("foo", actualRes.Volume.AccessibleTopology[0].GetSegments()[topologyKey]) +} + func TestCreateVolumeWithExtraMetadata(t *testing.T) { fakeCs, osmock := fakeControllerServer() @@ -209,6 +272,7 @@ func TestCreateVolumeWithExtraMetadata(t *testing.T) { } osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, FakeAvailability, "", "", "", properties).Return(&FakeVol, nil) osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) + osmock.On("GetBlockStorageOpts").Return(openstack.BlockStorageOpts{}) // Fake request fakeReq := &csi.CreateVolumeRequest{ @@ -248,6 +312,7 @@ func TestCreateVolumeFromSnapshot(t *testing.T) { properties := map[string]string{cinderCSIClusterIDKey: FakeCluster} osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, "", FakeSnapshotID, "", "", properties).Return(&FakeVolFromSnapshot, nil) 
osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) + osmock.On("GetBlockStorageOpts").Return(openstack.BlockStorageOpts{}) assert := assert.New(t) @@ -294,6 +359,7 @@ func TestCreateVolumeFromSourceVolume(t *testing.T) { properties := map[string]string{cinderCSIClusterIDKey: FakeCluster} osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, "", "", FakeVolID, "", properties).Return(&FakeVolFromSourceVolume, nil) osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) + osmock.On("GetBlockStorageOpts").Return(openstack.BlockStorageOpts{}) assert := assert.New(t) diff --git a/pkg/csi/cinder/openstack/openstack_mock.go b/pkg/csi/cinder/openstack/openstack_mock.go index 87b5d76cd5..8849b1bf53 100644 --- a/pkg/csi/cinder/openstack/openstack_mock.go +++ b/pkg/csi/cinder/openstack/openstack_mock.go @@ -521,7 +521,8 @@ func (_m *OpenStackMock) GetMetadataOpts() metadata.Opts { // GetBlockStorageOpts provides a mock function to return BlockStorageOpts func (_m *OpenStackMock) GetBlockStorageOpts() BlockStorageOpts { - return BlockStorageOpts{} + args := _m.Called() + return args.Get(0).(BlockStorageOpts) } // ResolveVolumeListToUUIDs provides a mock function to return volume UUIDs From 558663f0cc02a882c87db10dc218a082a32f8c44 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 25 Mar 2025 13:58:25 +0000 Subject: [PATCH 4/5] cinder-csi-plugin: Add tests for --with-topology=false This highlights an issue (IMO) is how we handle parameter generation. Signed-off-by: Stephen Finucane --- pkg/csi/cinder/controllerserver_test.go | 74 +++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/pkg/csi/cinder/controllerserver_test.go b/pkg/csi/cinder/controllerserver_test.go index 26b8da6782..8ad4e05763 100644 --- a/pkg/csi/cinder/controllerserver_test.go +++ b/pkg/csi/cinder/controllerserver_test.go @@ -260,6 +260,80 @@ func TestCreateVolumeWithIgnoreVolumeAZ(t *testing.T) { assert.Equal("foo", actualRes.Volume.AccessibleTopology[0].GetSegments()[topologyKey]) } +// Test CreateVolume with --with-topology=false flag +func TestCreateVolumeWithTopologyDisabled(t *testing.T) { + assert := assert.New(t) + + tests := []struct { + name string + volumeParams map[string]string + expectedVolumeAZ string + expectedCSITopology string + }{ + { + name: "empty paramters", + volumeParams: nil, + expectedVolumeAZ: "", + // FIXME(stephenfin): This should be unset + expectedCSITopology: "nova", + }, + { + name: "availability parameter", + volumeParams: map[string]string{ + "availability": "cinder", + }, + // FIXME(stephenfin): This should be "cinder" per the parameter + expectedVolumeAZ: "", + // ...and this should be unset + expectedCSITopology: "nova", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeCs, osmock := fakeControllerServer() + fakeCs.Driver.withTopology = false + + properties := map[string]string{cinderCSIClusterIDKey: FakeCluster} + osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, tt.expectedVolumeAZ, "", "", "", properties).Return(&FakeVol, nil) + osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) + osmock.On("GetBlockStorageOpts").Return(openstack.BlockStorageOpts{}) + + // Fake request + fakeReq := &csi.CreateVolumeRequest{ + Name: FakeVolName, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + + Parameters: 
tt.volumeParams, + + AccessibilityRequirements: nil, + } + + // Invoke CreateVolume + actualRes, err := fakeCs.CreateVolume(FakeCtx, fakeReq) + if err != nil { + t.Errorf("failed to CreateVolume: %v", err) + } + + // Assert + assert.NotNil(actualRes.Volume) + assert.NotNil(actualRes.Volume.CapacityBytes) + assert.NotEqual(0, len(actualRes.Volume.VolumeId), "Volume Id is nil") + if tt.expectedCSITopology == "" { + assert.Nil(actualRes.Volume.AccessibleTopology) + } else { + assert.GreaterOrEqual(len(actualRes.Volume.AccessibleTopology), 1) + assert.Equal(tt.expectedCSITopology, actualRes.Volume.AccessibleTopology[0].GetSegments()[topologyKey]) + } + }) + } +} + func TestCreateVolumeWithExtraMetadata(t *testing.T) { fakeCs, osmock := fakeControllerServer() From 03149e5992008962cd6459a60fe710d2073c734a Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 21 Mar 2025 16:39:06 +0000 Subject: [PATCH 5/5] cinder-csi-plugin: Group topology generation Group everything together rather than having it spread out as it is currently. A breakdown of the logic, to ensure nothing has changed: 1. Determine what the OpenStack Cinder volume's AZ will be: a. If the `availability` parameter is set on the Storage Class, the cinder volume will use this as the AZ. b. Otherwise, the cinder volume will use: an AZ extracted from one of the preferred CSI Topologies; an AZ extracted from one of the requisite (available) Topologies; or no AZ. 2. Determine what the Kubernetes CSI Volume's topology will be: a. If the `ignore-volume-az` option is set, the CSI Volume will use the preferred CSI Volume Topologies, if any, else None. [*] b. Otherwise, the CSI Volume will generate a Volume Topology from the Cinder Volume's AZ. [*] This practically means that `ignore-volume-az` is only useful when the availability parameter is set on the Storage Class. This in turn means the CSI Volume Topology is junk as it has no bearing on the "real" topology constraint on the volume.
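In rough Go terms, the combined behaviour is (an illustrative sketch condensed from the getTopology helper and CreateVolume changes below, not a literal copy of either):

    // (1) AZ passed to Cinder at volume-create time; only derived when
    // the driver runs with topology support enabled
    var volAvailability string
    if cs.Driver.withTopology {
        if volParams["availability"] != "" {
            volAvailability = volParams["availability"]
        } else if accessibleTopologyReq != nil {
            // preferred topologies first, then requisite, else empty
            volAvailability = sharedcsi.GetAZFromTopology(topologyKey, accessibleTopologyReq)
        }
    }

    // (2) topology reported on the resulting CSI volume
    var accessibleTopology []*csi.Topology
    if ignoreVolumeAZ {
        if accessibleTopologyReq != nil {
            accessibleTopology = accessibleTopologyReq.GetPreferred()
        }
    } else if vol.AvailabilityZone != "" {
        accessibleTopology = []*csi.Topology{
            {Segments: map[string]string{topologyKey: vol.AvailabilityZone}},
        }
    }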
Signed-off-by: Stephen Finucane --- pkg/csi/cinder/controllerserver.go | 67 +++++++++++++++++------------- 1 file changed, 37 insertions(+), 30 deletions(-) diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go index 4cc3576aa6..045cb7bcec 100644 --- a/pkg/csi/cinder/controllerserver.go +++ b/pkg/csi/cinder/controllerserver.go @@ -86,22 +86,22 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol // Volume Type volType := volParams["type"] + // Volume AZ + + accessibleTopologyReq := req.GetAccessibilityRequirements() + ignoreVolumeAZ := cloud.GetBlockStorageOpts().IgnoreVolumeAZ + + // First check if volAvailability is already specified, if not get preferred from Topology + // Required, in case vol AZ is different from node AZ var volAvailability string if cs.Driver.withTopology { - // First check if volAvailability is already specified, if not get preferred from Topology - // Required, incase vol AZ is different from node AZ - volAvailability = volParams["availability"] - if volAvailability == "" { - accessibleTopologyReq := req.GetAccessibilityRequirements() - // Check from Topology - if accessibleTopologyReq != nil { - volAvailability = sharedcsi.GetAZFromTopology(topologyKey, accessibleTopologyReq) - } + if volParams["availability"] != "" { + volAvailability = volParams["availability"] + } else if accessibleTopologyReq != nil { + volAvailability = sharedcsi.GetAZFromTopology(topologyKey, accessibleTopologyReq) } } - ignoreVolumeAZ := cloud.GetBlockStorageOpts().IgnoreVolumeAZ - // get the PVC annotation pvcAnnotations := sharedcsi.GetPVCAnnotations(cs.Driver.pvcLister, volParams) for k, v := range pvcAnnotations { @@ -120,8 +120,11 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol return nil, status.Error(codes.AlreadyExists, "Volume Already exists with same name and different capacity") } klog.V(4).Infof("Volume %s already exists in Availability Zone: %s of size %d GiB", vols[0].ID, vols[0].AvailabilityZone, vols[0].Size) - return getCreateVolumeResponse(&vols[0], nil, ignoreVolumeAZ, req.GetAccessibilityRequirements()), nil - } else if len(vols) > 1 { + accessibleTopology := getTopology(&vols[0], accessibleTopologyReq, ignoreVolumeAZ) + return getCreateVolumeResponse(&vols[0], nil, accessibleTopology), nil + } + + if len(vols) > 1 { klog.V(3).Infof("found multiple existing volumes with selected name (%s) during create", volName) return nil, status.Error(codes.Internal, "Multiple volumes reported by Cinder with same name") } @@ -247,7 +250,9 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol klog.V(4).Infof("CreateVolume: Successfully created volume %s in Availability Zone: %s of size %d GiB", vol.ID, vol.AvailabilityZone, vol.Size) - return getCreateVolumeResponse(vol, volCtx, ignoreVolumeAZ, req.GetAccessibilityRequirements()), nil + accessibleTopology := getTopology(vol, accessibleTopologyReq, ignoreVolumeAZ) + + return getCreateVolumeResponse(vol, volCtx, accessibleTopology), nil } func (d *controllerServer) ControllerModifyVolume(ctx context.Context, req *csi.ControllerModifyVolumeRequest) (*csi.ControllerModifyVolumeResponse, error) { @@ -1035,7 +1040,24 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi }, nil } -func getCreateVolumeResponse(vol *volumes.Volume, volCtx map[string]string, ignoreVolumeAZ bool, accessibleTopologyReq *csi.TopologyRequirement) *csi.CreateVolumeResponse { +func getTopology(vol 
*volumes.Volume, topologyReq *csi.TopologyRequirement, ignoreVolumeAZ bool) []*csi.Topology { + var accessibleTopology []*csi.Topology + if ignoreVolumeAZ { + if topologyReq != nil { + accessibleTopology = topologyReq.GetPreferred() + } + } else if vol.AvailabilityZone != "" { + // NOTE(stephenfin): We retrieve the AZ from the created volume rather than + // using the value we provided in our request since these can differ due to + // Cinder's '[DEFAULT] allow_availability_zone_fallback' option. + accessibleTopology = []*csi.Topology{ + {Segments: map[string]string{topologyKey: vol.AvailabilityZone}}, + } + } + return accessibleTopology +} + +func getCreateVolumeResponse(vol *volumes.Volume, volCtx map[string]string, accessibleTopology []*csi.Topology) *csi.CreateVolumeResponse { var volsrc *csi.VolumeContentSource volCnx := map[string]string{} @@ -1075,21 +1097,6 @@ func getCreateVolumeResponse(vol *volumes.Volume, volCtx map[string]string, igno } } - var accessibleTopology []*csi.Topology - // If ignore-volume-az is true , dont set the accessible topology to volume az, - // use from preferred topologies instead. - if ignoreVolumeAZ { - if accessibleTopologyReq != nil { - accessibleTopology = accessibleTopologyReq.GetPreferred() - } - } else if vol.AvailabilityZone != "" { - accessibleTopology = []*csi.Topology{ - { - Segments: map[string]string{topologyKey: vol.AvailabilityZone}, - }, - } - } - resp := &csi.CreateVolumeResponse{ Volume: &csi.Volume{ VolumeId: vol.ID,