|
| 1 | +/* |
| 2 | +Copyright 2021-2025 The Kubernetes Authors. |
| 3 | +
|
| 4 | +Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | +you may not use this file except in compliance with the License. |
| 6 | +You may obtain a copy of the License at |
| 7 | +
|
| 8 | + http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | +
|
| 10 | +Unless required by applicable law or agreed to in writing, software |
| 11 | +distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | +See the License for the specific language governing permissions and |
| 14 | +limitations under the License. |
| 15 | +*/ |
| 16 | + |
| 17 | +package provisioning |
| 18 | + |
| 19 | +import ( |
| 20 | + "context" |
| 21 | + "fmt" |
| 22 | + "os" |
| 23 | + "strconv" |
| 24 | + "strings" |
| 25 | + "time" |
| 26 | + |
| 27 | + "github.com/onsi/ginkgo/v2" |
| 28 | + "github.com/onsi/gomega" |
| 29 | + "github.com/vmware/govmomi/find" |
| 30 | + "github.com/vmware/govmomi/object" |
| 31 | + "github.com/vmware/govmomi/vim25/types" |
| 32 | + v1 "k8s.io/api/core/v1" |
| 33 | + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
| 34 | + clientset "k8s.io/client-go/kubernetes" |
| 35 | + "k8s.io/kubernetes/test/e2e/framework" |
| 36 | + fnodes "k8s.io/kubernetes/test/e2e/framework/node" |
| 37 | + fpod "k8s.io/kubernetes/test/e2e/framework/pod" |
| 38 | + fpv "k8s.io/kubernetes/test/e2e/framework/pv" |
| 39 | + admissionapi "k8s.io/pod-security-admission/api" |
| 40 | + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/bootstrap" |
| 41 | + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/config" |
| 42 | + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/constants" |
| 43 | + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/env" |
| 44 | + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/k8testutil" |
| 45 | + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/vcutil" |
| 46 | +) |
| 47 | + |
// e2eTestConfig holds the shared e2e test configuration (vCenter client,
// testbed info, cluster flavor); populated in each spec's BeforeEach via
// bootstrap.Bootstrap().
var e2eTestConfig *config.E2eTestConfig
| 49 | + |
| 50 | +var _ = ginkgo.Describe("Basic Static Provisioning", func() { |
| 51 | + f := framework.NewDefaultFramework("e2e-csistaticprovision") |
| 52 | + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged |
| 53 | + framework.TestContext.DeleteNamespace = true |
| 54 | + var ( |
| 55 | + client clientset.Interface |
| 56 | + namespace string |
| 57 | + fcdID string |
| 58 | + pv *v1.PersistentVolume |
| 59 | + pvc *v1.PersistentVolumeClaim |
| 60 | + defaultDatacenter *object.Datacenter |
| 61 | + defaultDatastore *object.Datastore |
| 62 | + deleteFCDRequired bool |
| 63 | + pandoraSyncWaitTime int |
| 64 | + err error |
| 65 | + datastoreURL string |
| 66 | + storagePolicyName string |
| 67 | + isVsanHealthServiceStopped bool |
| 68 | + isSPSserviceStopped bool |
| 69 | + ctx context.Context |
| 70 | + ) |
| 71 | + |
| 72 | + ginkgo.BeforeEach(func() { |
| 73 | + e2eTestConfig = bootstrap.Bootstrap() |
| 74 | + client = f.ClientSet |
| 75 | + namespace = vcutil.GetNamespaceToRunTests(f, e2eTestConfig) |
| 76 | + var cancel context.CancelFunc |
| 77 | + ctx, cancel = context.WithCancel(context.Background()) |
| 78 | + defer cancel() |
| 79 | + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) |
| 80 | + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") |
| 81 | + storagePolicyName = env.GetAndExpectStringEnvVar(constants.EnvStoragePolicyNameForSharedDatastores) |
| 82 | + if !(len(nodeList.Items) > 0) { |
| 83 | + framework.Failf("Unable to find ready and schedulable Node") |
| 84 | + } |
| 85 | + |
| 86 | + if os.Getenv(constants.EnvPandoraSyncWaitTime) != "" { |
| 87 | + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(constants.EnvPandoraSyncWaitTime)) |
| 88 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 89 | + } else { |
| 90 | + pandoraSyncWaitTime = constants.DefaultPandoraSyncWaitTime |
| 91 | + } |
| 92 | + deleteFCDRequired = false |
| 93 | + isVsanHealthServiceStopped = false |
| 94 | + isSPSserviceStopped = false |
| 95 | + var datacenters []string |
| 96 | + datastoreURL = env.GetAndExpectStringEnvVar(constants.EnvSharedDatastoreURL) |
| 97 | + var fullSyncWaitTime int |
| 98 | + finder := find.NewFinder(e2eTestConfig.VcClient.Client, false) |
| 99 | + cfg, err := config.GetConfig() |
| 100 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 101 | + dcList := strings.Split(cfg.Global.Datacenters, ",") |
| 102 | + for _, dc := range dcList { |
| 103 | + dcName := strings.TrimSpace(dc) |
| 104 | + if dcName != "" { |
| 105 | + datacenters = append(datacenters, dcName) |
| 106 | + } |
| 107 | + } |
| 108 | + for _, dc := range datacenters { |
| 109 | + defaultDatacenter, err = finder.Datacenter(ctx, dc) |
| 110 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 111 | + finder.SetDatacenter(defaultDatacenter) |
| 112 | + defaultDatastore, err = k8testutil.GetDatastoreByURL(ctx, datastoreURL, defaultDatacenter) |
| 113 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 114 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 115 | + } |
| 116 | + if e2eTestConfig.TestInput.ClusterFlavor.GuestCluster { |
| 117 | + // Get a config to talk to the apiserver |
| 118 | + restConfig := vcutil.GetRestConfigClient(e2eTestConfig) |
| 119 | + _, svNamespace := k8testutil.GetSvcClientAndNamespace() |
| 120 | + k8testutil.SetStoragePolicyQuota(ctx, restConfig, storagePolicyName, svNamespace, constants.RqLimit) |
| 121 | + } |
| 122 | + |
| 123 | + if os.Getenv(constants.EnvFullSyncWaitTime) != "" { |
| 124 | + fullSyncWaitTime, err = strconv.Atoi(os.Getenv(constants.EnvFullSyncWaitTime)) |
| 125 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 126 | + if fullSyncWaitTime <= 0 || fullSyncWaitTime > constants.DefaultFullSyncWaitTime { |
| 127 | + framework.Failf("The FullSync Wait time %v is not set correctly", fullSyncWaitTime) |
| 128 | + } |
| 129 | + } |
| 130 | + }) |
| 131 | + |
| 132 | + ginkgo.AfterEach(func() { |
| 133 | + ctx, cancel := context.WithCancel(context.Background()) |
| 134 | + defer cancel() |
| 135 | + ginkgo.By("Performing test cleanup") |
| 136 | + if deleteFCDRequired { |
| 137 | + ginkgo.By(fmt.Sprintf("Deleting FCD: %s", fcdID)) |
| 138 | + |
| 139 | + err := vcutil.DeleteFCD(ctx, fcdID, e2eTestConfig, defaultDatastore.Reference()) |
| 140 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 141 | + } |
| 142 | + if pvc != nil { |
| 143 | + framework.ExpectNoError(fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace), |
| 144 | + "Failed to delete PVC", pvc.Name) |
| 145 | + } |
| 146 | + |
| 147 | + if pv != nil { |
| 148 | + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, |
| 149 | + client, pv.Name, constants.Poll, constants.PollTimeoutShort)) |
| 150 | + framework.ExpectNoError(vcutil.WaitForCNSVolumeToBeDeleted(e2eTestConfig, pv.Spec.CSI.VolumeHandle)) |
| 151 | + } |
| 152 | + |
| 153 | + if isVsanHealthServiceStopped { |
| 154 | + ginkgo.By(fmt.Sprintln("Starting vsan-health on the vCenter host")) |
| 155 | + err = vcutil.InvokeVCenterServiceControl( |
| 156 | + &e2eTestConfig.TestInput.TestBedInfo, ctx, |
| 157 | + constants.StartOperation, constants.VsanhealthServiceName, |
| 158 | + e2eTestConfig.TestInput.TestBedInfo.VcAddress) |
| 159 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 160 | + ginkgo.By( |
| 161 | + fmt.Sprintf("Sleeping for %v seconds to allow vsan-health to come up again", |
| 162 | + constants.VsanHealthServiceWaitTime)) |
| 163 | + time.Sleep(time.Duration(constants.VsanHealthServiceWaitTime) * time.Second) |
| 164 | + } |
| 165 | + |
| 166 | + if isSPSserviceStopped { |
| 167 | + ginkgo.By(fmt.Sprintln("Starting sps on the vCenter host")) |
| 168 | + err = vcutil.InvokeVCenterServiceControl( |
| 169 | + &e2eTestConfig.TestInput.TestBedInfo, ctx, |
| 170 | + constants.StartOperation, constants.SpsServiceName, e2eTestConfig.TestInput.TestBedInfo.VcAddress) |
| 171 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 172 | + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow sps to come up again", constants.VsanHealthServiceWaitTime)) |
| 173 | + time.Sleep(time.Duration(constants.VsanHealthServiceWaitTime) * time.Second) |
| 174 | + } |
| 175 | + |
| 176 | + if e2eTestConfig.TestInput.ClusterFlavor.GuestCluster { |
| 177 | + svcClient, svNamespace := k8testutil.GetSvcClientAndNamespace() |
| 178 | + k8testutil.SetResourceQuota(svcClient, svNamespace, constants.RqLimit) |
| 179 | + k8testutil.DumpSvcNsEventsOnTestFailure(svcClient, svNamespace) |
| 180 | + } |
| 181 | + if e2eTestConfig.TestInput.ClusterFlavor.SupervisorCluster { |
| 182 | + k8testutil.DumpSvcNsEventsOnTestFailure(client, namespace) |
| 183 | + } |
| 184 | + }) |
| 185 | + |
| 186 | + // This test verifies the static provisioning workflow. |
| 187 | + // |
| 188 | + // Test Steps: |
| 189 | + // 1. Create FCD and wait for fcd to allow syncing with pandora. |
| 190 | + // 2. Create PV Spec with volumeID set to FCDID created in Step-1, and |
| 191 | + // PersistentVolumeReclaimPolicy is set to Delete. |
| 192 | + // 3. Create PVC with the storage request set to PV's storage capacity. |
| 193 | + // 4. Wait for PV and PVC to bound. |
| 194 | + // 5. Create a POD. |
| 195 | + // 6. Verify volume is attached to the node and volume is accessible in the pod. |
| 196 | + // 7. Verify container volume metadata is present in CNS cache. |
| 197 | + // 8. Delete POD. |
| 198 | + // 9. Verify volume is detached from the node. |
| 199 | + // 10. Delete PVC. |
| 200 | + // 11. Verify PV is deleted automatically. |
| 201 | + ginkgo.It("[csi-block-vanilla] [csi-block-vanilla-parallelized] Verify basic static provisioning "+ |
| 202 | + "workflow", func() { |
| 203 | + var err error |
| 204 | + |
| 205 | + ctx, cancel := context.WithCancel(context.Background()) |
| 206 | + defer cancel() |
| 207 | + |
| 208 | + ginkgo.By("Creating FCD Disk") |
| 209 | + fcdID, err := vcutil.CreateFCD(ctx, |
| 210 | + e2eTestConfig, "BasicStaticFCD", constants.DiskSizeInMb, defaultDatastore.Reference()) |
| 211 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 212 | + deleteFCDRequired = true |
| 213 | + |
| 214 | + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora", |
| 215 | + pandoraSyncWaitTime, fcdID)) |
| 216 | + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) |
| 217 | + |
| 218 | + // Creating label for PV. |
| 219 | + // PVC will use this label as Selector to find PV. |
| 220 | + staticPVLabels := make(map[string]string) |
| 221 | + staticPVLabels["fcd-id"] = fcdID |
| 222 | + |
| 223 | + ginkgo.By("Creating the PV") |
| 224 | + pv = k8testutil.GetPersistentVolumeSpec(fcdID, v1.PersistentVolumeReclaimDelete, staticPVLabels, constants.Ext4FSType) |
| 225 | + pv, err = client.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) |
| 226 | + if err != nil { |
| 227 | + return |
| 228 | + } |
| 229 | + err = vcutil.WaitForCNSVolumeToBeCreated(e2eTestConfig, pv.Spec.CSI.VolumeHandle) |
| 230 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 231 | + |
| 232 | + ginkgo.By("Creating the PVC") |
| 233 | + pvc = k8testutil.GetPersistentVolumeClaimSpec(namespace, staticPVLabels, pv.Name) |
| 234 | + pvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvc, metav1.CreateOptions{}) |
| 235 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 236 | + |
| 237 | + // Wait for PV and PVC to Bind. |
| 238 | + framework.ExpectNoError(fpv.WaitOnPVandPVC(ctx, client, f.Timeouts, namespace, pv, pvc)) |
| 239 | + |
| 240 | + // Set deleteFCDRequired to false. |
| 241 | + // After PV, PVC is in the bind state, Deleting PVC should delete |
| 242 | + // container volume. So no need to delete FCD directly using vSphere |
| 243 | + // API call. |
| 244 | + deleteFCDRequired = false |
| 245 | + |
| 246 | + ginkgo.By("Verifying CNS entry is present in cache") |
| 247 | + _, err = vcutil.QueryCNSVolumeWithResult(e2eTestConfig, pv.Spec.CSI.VolumeHandle) |
| 248 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 249 | + |
| 250 | + ginkgo.By("Creating the Pod") |
| 251 | + var pvclaims []*v1.PersistentVolumeClaim |
| 252 | + pvclaims = append(pvclaims, pvc) |
| 253 | + pod, err := k8testutil.CreatePod(ctx, e2eTestConfig, client, namespace, nil, pvclaims, false, "") |
| 254 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 255 | + |
| 256 | + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) |
| 257 | + vmUUID := k8testutil.GetNodeUUID(ctx, client, pod.Spec.NodeName) |
| 258 | + isDiskAttached, err := vcutil.IsVolumeAttachedToVM(client, e2eTestConfig, pv.Spec.CSI.VolumeHandle, vmUUID) |
| 259 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 260 | + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached") |
| 261 | + |
| 262 | + ginkgo.By("Verify container volume metadata is present in CNS cache") |
| 263 | + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolume with VolumeID: %s", pv.Spec.CSI.VolumeHandle)) |
| 264 | + _, err = vcutil.QueryCNSVolumeWithResult(e2eTestConfig, pv.Spec.CSI.VolumeHandle) |
| 265 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 266 | + |
| 267 | + labels := []types.KeyValue{{Key: "fcd-id", Value: fcdID}} |
| 268 | + ginkgo.By("Verify container volume metadata is matching the one in CNS cache") |
| 269 | + err = vcutil.VerifyVolumeMetadataInCNS(pv.Spec.CSI.VolumeHandle, e2eTestConfig, |
| 270 | + pvc.Name, pv.ObjectMeta.Name, pod.Name, labels...) |
| 271 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 272 | + |
| 273 | + ginkgo.By("Deleting the Pod") |
| 274 | + framework.ExpectNoError(fpod.DeletePodWithWait(ctx, client, pod), "Failed to delete pod", pod.Name) |
| 275 | + |
| 276 | + ginkgo.By(fmt.Sprintf("Verify volume is detached from the node: %s", pod.Spec.NodeName)) |
| 277 | + isDiskDetached, err := vcutil.WaitForVolumeDetachedFromNode(e2eTestConfig, |
| 278 | + client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) |
| 279 | + gomega.Expect(err).NotTo(gomega.HaveOccurred()) |
| 280 | + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), "Volume is not detached from the node") |
| 281 | + |
| 282 | + ginkgo.By("Deleting the PV Claim") |
| 283 | + framework.ExpectNoError(fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace), |
| 284 | + "Failed to delete PVC", pvc.Name) |
| 285 | + pvc = nil |
| 286 | + |
| 287 | + ginkgo.By("Verify PV should be deleted automatically") |
| 288 | + framework.ExpectNoError( |
| 289 | + fpv.WaitForPersistentVolumeDeleted(ctx, |
| 290 | + client, pv.Name, constants.Poll, constants.PollTimeout)) |
| 291 | + pv = nil |
| 292 | + }) |
| 293 | + |
| 294 | +}) |
0 commit comments