diff --git a/tests/e2e/csi_snapshot_basic.go b/tests/e2e/csi_snapshot_basic.go index bbd6bf1e29..9f469dae24 100644 --- a/tests/e2e/csi_snapshot_basic.go +++ b/tests/e2e/csi_snapshot_basic.go @@ -75,6 +75,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { scName string volHandle string isQuotaValidationSupported bool + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -84,13 +85,17 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { bootstrap() client = f.ClientSet namespace = getNamespaceToRunTests(f) + + var err error + var nodeList *v1.NodeList + adminClient, client = initializeClusterClientsByUserRoles(client) scParameters = make(map[string]string) // reading shared datastoreurl and shared storage policy datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) // fetching node list and checking node status - nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") @@ -200,14 +205,12 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if guestCluster && svcClient != nil && svcNamespace != "" { + if guestCluster { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() framework.Logf("Collecting supervisor PVC events before performing PV/PVC cleanup") eventList, err := svcClient.CoreV1().Events(svcNamespace).List(ctx, metav1.ListOptions{}) - if err != nil { - framework.Logf("Failed to list events in namespace %q: %v", svcNamespace, err) - return - } - + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, item := range eventList.Items { framework.Logf("%q", item.Message) } @@ -223,6 +226,14 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { if 
isVsanHealthServiceStopped { startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) } + + if supervisorCluster { + dumpSvcNsEventsOnTestFailure(client, namespace) + } + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + } }) /* @@ -244,9 +255,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 13. Cleanup: Delete PVC, SC (validate they are removed) */ - ginkgo.It("[cf-wcp] [cf-vanilla-block][block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot]"+ - "[ef-vks-snapshot] Verify snapshot dynamic provisioning workflow", ginkgo.Label(p0, block, tkg, - vanilla, wcp, snapshot, stable, vc90), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] [supervisor-snapshot] Verify snapshot dynamic provisioning "+ + "workflow", ginkgo.Label(p0, block, tkg, vanilla, wcp, snapshot, stable, vc90), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -359,7 +369,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volume snapshot as the policy is delete 10. Cleanup the pvc */ - ginkgo.It("[block-vanilla-snapshot][cf-vanilla-block] Verify snapshot static provisioning through K8s "+ + ginkgo.It("[block-vanilla-snapshot] Verify snapshot static provisioning through K8s "+ "API workflow", ginkgo.Label(p0, block, vanilla, snapshot, stable, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) @@ -520,7 +530,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 11. The snapshot that was created via CNS in step-2 should be deleted as part of k8s snapshot delete 12. 
Delete the pvc */ - ginkgo.It("[ef-vanilla-block][block-vanilla-snapshot] Verify snapshot static provisioning "+ + ginkgo.It("[block-vanilla-snapshot] Verify snapshot static provisioning "+ "via CNS", ginkgo.Label(p0, block, vanilla, snapshot, stable, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) @@ -661,7 +671,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 12. Delete volume snapshot content 2 13. Cleanup the pvc, volume snapshot class and storage class */ - ginkgo.It("[block-vanilla-snapshot][cf-vanilla-block] Verify snapshot static provisioning with "+ + ginkgo.It("[block-vanilla-snapshot] Verify snapshot static provisioning with "+ "deletion policy Retain", ginkgo.Label(p0, block, vanilla, snapshot, stable, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) @@ -722,11 +732,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { snapshotCreated := true defer func() { - if snapshotCreated { - framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) - } - if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, @@ -736,6 +741,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + } }() ginkgo.By("Verify volume snapshot is created") @@ -842,7 +852,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 9. Query the Snasphot from CNS side using the volumeId 10. 
Cleanup the snapshot and delete the volume */ - ginkgo.It("[ef-vanilla-block][block-vanilla-snapshot] Verify snapshot static provisioning with deletion "+ + ginkgo.It("[block-vanilla-snapshot] Verify snapshot static provisioning with deletion "+ "policy Retain - test2", ginkgo.Label(p0, block, vanilla, snapshot, stable, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) @@ -989,7 +999,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 10. Run cleanup: Delete snapshots, restored-volumes, pods */ - ginkgo.It("[block-vanilla-snapshot][cf-vanilla-block] Volume restore using snapshot a dynamic snapshot b "+ + ginkgo.It("[block-vanilla-snapshot] Volume restore using snapshot a dynamic snapshot b "+ "pre-provisioned snapshot", ginkgo.Label(p0, block, vanilla, snapshot, stable, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) @@ -1050,11 +1060,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { snapshotCreated := true defer func() { - if snapshotCreated { - framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) - } - if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, @@ -1064,6 +1069,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + } }() ginkgo.By("Verify volume snapshot is created") @@ -1239,11 +1249,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { snapshotContentCreated3 := true defer func() { - if snapshotCreated3 { - framework.Logf("Deleting volume 
snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) - } - if snapshotContentCreated3 { framework.Logf("Deleting volume snapshot content") deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, @@ -1253,6 +1258,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + + if snapshotCreated3 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + } }() ginkgo.By("Verify volume snapshot is created") @@ -1330,9 +1340,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 16. Query the snapshot from CNS side - it shouldn't be available 17. Delete SC and VolumeSnapshotClass */ - ginkgo.It("[cf-vks][cf-vanilla-block][block-vanilla-snapshot][tkg-snapshot]"+ - "Volume snapshot creation and restoration workflow with xfs "+ - "filesystem", ginkgo.Label(p0, block, vanilla, tkg, snapshot, stable, vc80), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Volume snapshot creation and restoration workflow "+ + "with xfs filesystem", ginkgo.Label(p0, block, vanilla, tkg, snapshot, stable, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1428,6 +1437,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -1437,11 +1451,6 @@ var _ = 
ginkgo.Describe("Volume Snapshot Basic Test", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() ginkgo.By("Restore snapshot to new PVC") @@ -1528,9 +1537,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { while the source pvc was created usig thin provisioned psp-operatorlicy 6. cleanup spbm policies, sc's, pvc's */ - ginkgo.It("[cf-wcp] [cf-vanilla-block][block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot]"+ - "[ef-vks-snapshot] Volume restore using snapshot on a different storageclass", ginkgo.Label(p0, block, - vanilla, wcp, snapshot, tkg, stable, vc90), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] [supervisor-snapshot] Volume "+ + "restore using snapshot on a different "+ + "storageclass", ginkgo.Label(p0, block, vanilla, wcp, snapshot, tkg, stable, vc90), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1667,10 +1676,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 8. Ensure the pvc with source as snapshot creates successfully and is bound 9. 
Cleanup the snapshot, pvcs and ns */ - ginkgo.It("[block-vanilla-snapshot][cf-vanilla-block][tkg-snapshot]"+ - "[ef-vks-snapshot-f] Delete the namespace hosting the pvcs and volume-snapshots "+ - "and recover the data using snapshot-content", ginkgo.Label(p0, block, vanilla, - snapshot, tkg, stable, vc80), func() { + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Delete the namespace hosting the pvcs and "+ + "volume-snapshots and recover the data "+ + "using snapshot-content", ginkgo.Label(p0, block, vanilla, snapshot, tkg, stable, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1891,7 +1899,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim2, err := fpv.CreatePVC(ctx, client, namespace2Name, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle @@ -1959,9 +1967,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { designed to return success even though it cannot find a snapshot in the backend) */ - ginkgo.It("[ef-vanilla-block][cf-wcp][block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot]"+ - "[ef-vks-snapshot] Delete a non-existent snapshot", ginkgo.Label(p0, block, vanilla, - wcp, snapshot, tkg, negative, vc90), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] Delete a non-existent "+ + "snapshot", ginkgo.Label(p0, block, vanilla, wcp, snapshot, tkg, negative, vc90), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -2042,9 +2049,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 3. 
Validate the fields after snapshot creation succeeds (snapshotClass, retentionPolicy) */ - ginkgo.It("[cf-wcp][cf-vanilla-block][block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot]"+ - "[ef-vks-snapshot] Create snapshots using default VolumeSnapshotClass", ginkgo.Label(p0, block, - vanilla, snapshot, wcp, tkg, vc90), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] Create snapshots using default "+ + "VolumeSnapshotClass", ginkgo.Label(p0, block, vanilla, snapshot, wcp, tkg, vc90), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -2144,9 +2150,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 4. Create with exact size and ensure it succeeds */ - ginkgo.It("[ef-vanilla-block][cf-wcp][block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot]"+ - "Create Volume from snapshot with different size", ginkgo.Label(p1, block, - vanilla, snapshot, tkg, wcp, stable, negative, vc90), func() { + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] Create Volume from snapshot with "+ + "different size", ginkgo.Label(p1, block, vanilla, snapshot, tkg, wcp, stable, negative, vc90), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -2166,7 +2171,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { if storageclass.AllowVolumeExpansion == nil || *storageclass.AllowVolumeExpansion != allowExpansion { storageclass.AllowVolumeExpansion = &allowExpansion storageclass.Parameters = scParameters - storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } @@ -2227,7 +2232,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Expecting the volume bound 
to fail") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, + _, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionShortTimeout) gomega.Expect(err).To(gomega.HaveOccurred()) pvc2Deleted := false @@ -2278,10 +2283,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 10. Verify if the new pod attaches to the PV created in step-8 11. Cleanup the sts and the snapshot + pv that was left behind in step-7 */ - ginkgo.It("[ef-f-vanilla-block][ef-f-wcp-snapshot][block-vanilla-snapshot][tkg-snapshot]"+ - "[supervisor-snapshot][ef-vks-snapshot-f] Snapshot workflow for "+ - "statefulsets", ginkgo.Label(p0, block, vanilla, snapshot, wcp, - tkg, vc80), func() { + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] Snapshot workflow for "+ + "statefulsets", ginkgo.Label(p0, block, vanilla, snapshot, wcp, tkg, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() quota := make(map[string]*resource.Quantity) @@ -2321,7 +2324,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { replicas := *(statefulset.Spec.Replicas) defer func() { ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() // Waiting for pods status to be Ready @@ -2537,9 +2540,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 5. Expect VolumeFailedDelete error with an appropriate err-msg 6. 
Run cleanup - delete the snapshots and then delete pv */ - ginkgo.It("[ef-vanilla-block][ef-wcp-snapshot][block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot]"+ "Volume deletion with existing snapshots", ginkgo.Label(p0, block, vanilla, snapshot, - tkg, wcp, stable, negative, vc80), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] Volume deletion with "+ "existing snapshots", ginkgo.Label(p0, block, vanilla, snapshot, tkg, wcp, stable, negative, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -2587,6 +2589,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -2596,11 +2603,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() ginkgo.By("Delete PVC before deleting the snapshot") @@ -2608,7 +2610,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).To(gomega.HaveOccurred()) ginkgo.By("Get PV and check the PV is still not deleted") - _, err = client.CoreV1().PersistentVolumes().Get(ctx, persistentvolumes[0].Name, metav1.GetOptions{}) + if vanillaCluster { + _, err = client.CoreV1().PersistentVolumes().Get(ctx, persistentvolumes[0].Name, metav1.GetOptions{}) + } else { + _,
err = adminClient.CoreV1().PersistentVolumes().Get(ctx, persistentvolumes[0].Name, metav1.GetOptions{}) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Delete dynamic volume snapshot") @@ -2632,8 +2638,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 4. create a pre-provisioned snapshot (which uses VolumeSnapshotContent as source) using the VSC from step(3) 5. Ensure this provisioning fails with appropriate error: SnapshotContentMismatch error */ - ginkgo.It("[ef-vanilla-block][block-vanilla-snapshot][tkg-snapshot][ef-vks-snapshot] Create a pre-provisioned "+ - "snapshot using VolumeSnapshotContent as source", ginkgo.Label(p1, block, vanilla, snapshot, tkg, stable, + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Create a pre-provisioned snapshot using "+ + "VolumeSnapshotContent as source", ginkgo.Label(p1, block, vanilla, snapshot, tkg, stable, negative, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) @@ -2681,6 +2687,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -2690,11 +2701,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Create a volume snapshot2") @@ -2733,8 +2739,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic 
Test", func() { (note the snapshotHandle its pointing to has been deleted) 9. Volume Create should fail with an appropriate error on k8s side */ - ginkgo.It("[ef-vanilla-block][block-vanilla-snapshot] Pre-provisioned snapshot using "+ - "incorrect/non-existing static snapshot", ginkgo.Label(p0, block, vanilla, snapshot, negative, vc80), func() { + ginkgo.It("[block-vanilla-snapshot] Pre-provisioned snapshot using incorrect/non-existing "+ + "static snapshot", ginkgo.Label(p0, block, vanilla, snapshot, negative, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -2890,7 +2896,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 7. Validate the pvc is Bound 8. Cleanup the snapshot and pvc */ - ginkgo.It("[ef-vanilla-block][block-vanilla-snapshot] Create a volume from a snapshot that is still not "+ + ginkgo.It("[block-vanilla-snapshot] Create a volume from a snapshot that is still not "+ "ready-to-use", ginkgo.Label(p0, block, vanilla, snapshot, stable, negative, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) @@ -3037,9 +3043,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 7. 
Cleanup dep-1 pv snapshots and pvs, delete dep-2 */ - ginkgo.It("[ef-f-wcp-snapshot][cf-vanilla-block][block-vanilla-snapshot] [tkg-snapshot]"+ - "[supervisor-snapshot] [ef-vks-snapshot] Snapshot workflow for deployments", ginkgo.Label(p0, block, - vanilla, tkg, snapshot, wcp, stable, vc80), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] Snapshot workflow for "+ + "deployments", ginkgo.Label(p0, block, vanilla, tkg, snapshot, wcp, stable, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -3125,7 +3130,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolume2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, + persistentvolume2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolume2[0].Spec.CSI.VolumeHandle @@ -3184,9 +3189,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 6. Run resize and it should succeed 7. 
Cleanup the pvc */ - ginkgo.It("[ef-wcp-snapshot][cf-vanilla-block][block-vanilla-snapshot] [tkg-snapshot]"+ - "[supervisor-snapshot] Verify offline resize of a volume having snapshots", ginkgo.Label(p0, block, - vanilla, tkg, snapshot, stable, wcp, negative, vc90), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] Volume offline resize of a volume "+ + "having snapshots", ginkgo.Label(p0, block, vanilla, tkg, snapshot, stable, wcp, negative, vc90), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -3206,7 +3210,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { if storageclass.AllowVolumeExpansion == nil || *storageclass.AllowVolumeExpansion != allowExpansion { storageclass.AllowVolumeExpansion = &allowExpansion storageclass.Parameters = scParameters - storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } @@ -3284,6 +3288,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, newDiskSize, true) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -3293,11 +3302,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, 
snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Expanding current pvc before deleting volume snapshot") @@ -3365,9 +3369,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 6. Run resize and it should succeed 7. Cleanup the pvc */ - ginkgo.It("[ef-vanilla-block][ef-wcp-snapshot][block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot]"+ - "Volume online resize of a volume having snapshots", ginkgo.Label(p0, block, vanilla, - tkg, snapshot, stable, negative, vc90), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] Volume online resize of a volume having "+ + "snapshots", ginkgo.Label(p0, block, vanilla, tkg, snapshot, stable, negative, vc90), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -3495,6 +3498,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, newDiskSize, true) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -3504,11 +3512,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Modify the PVC spec to enable online volume expansion when " + @@ -3603,8 +3606,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 8. 
cleanup the snapshots, restore-pvc and source-pvc */ - ginkgo.It("[pq-f-wcp-neg-snpt][block-vanilla-snapshot][supervisor-snapshot] Snapshot restore "+ - "while the Host is Down", ginkgo.Label(p2, block, vanilla, snapshot, disruptive, vc80), func() { + ginkgo.It("[block-vanilla-snapshot][supervisor-snapshot] Snapshot restore while the Host "+ + "is Down", ginkgo.Label(p2, block, vanilla, snapshot, disruptive, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var totalQuotaUsedBefore, pvc_storagePolicyQuotaBefore, pvc_storagePolicyUsageBefore *resource.Quantity @@ -3666,6 +3669,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -3675,12 +3683,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() snapshotSize := getAggregatedSnapshotCapacityInMb(e2eVSphere, volHandle) @@ -3714,7 +3716,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle @@ -3777,9 +3779,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 9. Delete both deployments and. the pvcs */ - ginkgo.It("[pq-wcp-neg-snpt][block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] VC reboot "+ - "with deployment pvcs having snapshot", ginkgo.Label(p1, block, vanilla, tkg, snapshot, disruptive, - negative, flaky, vc90), func() { + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] VC reboot with deployment pvcs "+ + "having snapshot", ginkgo.Label(p1, block, vanilla, tkg, snapshot, disruptive, negative, flaky, vc90), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -3811,7 +3812,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } ginkgo.By("Expect claim to provision volume successfully") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + _, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { for _, pvclaim := range pvclaims { @@ -3958,7 +3959,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { restoredpvclaims = append(restoredpvclaims, pvclaim2) - persistentvolume2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, + persistentvolume2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout*2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolume2[0].Spec.CSI.VolumeHandle @@ -4258,9 +4259,10 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 7. Verify pvcs all are in Bound state. 8. Cleanup all the snapshots and the pvc. 
*/ - ginkgo.It("[ef-f-vanilla-block][ef-f-wcp-snapshot][block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot]"+ - "[ef-vks-snapshot-f] Multi-master and snapshot workflow", ginkgo.Label(p1, block, vanilla, - tkg, snapshot, vc80), func() { + + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] Multi-master and "+ + "snapshot workflow", ginkgo.Label(p1, block, vanilla, tkg, snapshot, vc80), func() { + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -4483,7 +4485,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } framework.Logf("Waiting for PVCs to come to bound state") - persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle @@ -4529,7 +4531,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 5. Validate creation of additional snapshots beyond the configured max-snapshots per volume fails - check error returned */ - ginkgo.It("[ef-f-vanilla-block][block-vanilla-snapshot] Max Snapshots per volume test", ginkgo.Label(p1, block, + ginkgo.It("[block-vanilla-snapshot] Max Snapshots per volume test", ginkgo.Label(p1, block, vanilla, snapshot, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) @@ -4745,7 +4747,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 3. 
Expected behavior: resize operation should succeed and the snapshot creation should succeed after resize completes */ - ginkgo.It("[ef-wcp-snapshot][block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] Volume "+ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] Volume "+ "snapshot creation when resize is in progress", ginkgo.Label(p2, block, vanilla, snapshot, vc80), func() { @@ -4843,6 +4845,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { snapshotContentCreated, snapshotId, _, _ := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, newDiskSize, true) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -4852,10 +4859,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Waiting for file system resize to finish") @@ -4893,12 +4896,12 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 2. Create Snapshot class and take a snapshot of the volume 3. 
Cleanup of snapshot, pvc and sc */ - ginkgo.It("[pq-f-wcp-snpt][block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot]"+ - "[ef-vks-snapshot-f] Volume provision and snapshot creation/restore on VVOL "+ - "Datastore", ginkgo.Label(p0, block, vanilla, snapshot, tkg, vc80), func() { + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] Volume provision and "+ + "snapshot creation/restore on VVOL Datastore", ginkgo.Label(p0, block, vanilla, snapshot, + tkg, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - invokeSnapshotOperationsOnSharedDatastore(client, ctx, namespace, scParameters, snapc, "VVOL", + invokeSnapshotOperationsOnSharedDatastore(client, adminClient, ctx, namespace, scParameters, snapc, "VVOL", pandoraSyncWaitTime) }) @@ -4909,13 +4912,12 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 2. Create Snapshot class and take a snapshot of the volume 3. Cleanup of snapshot, pvc and sc */ - - ginkgo.It("[ef-f-vanilla-block][pq-wcp-snpt][block-vanilla-snapshot][tkg-snapshot]"+ - "[supervisor-snapshot][ef-vks-snapshot-f] Volume provision and snapshot creation/restore "+ - "on VMFS Datastore", ginkgo.Label(p0, block, vanilla, snapshot, tkg, vc80), func() { + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] [supervisor-snapshot] Volume provision and "+ + "snapshot creation/restore on VMFS Datastore", ginkgo.Label(p0, block, vanilla, snapshot, + tkg, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - invokeSnapshotOperationsOnSharedDatastore(client, ctx, namespace, scParameters, snapc, "VMFS", pandoraSyncWaitTime) + invokeSnapshotOperationsOnSharedDatastore(client, adminClient, ctx, namespace, scParameters, snapc, "VMFS", pandoraSyncWaitTime) }) /* @@ -4924,12 +4926,12 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 2. Create Snapshot class and take a snapshot of the volume 3. 
Cleanup of snapshot, pvc and sc */ - ginkgo.It("[ef-vanilla-block][ef-f-wcp-snapshot][block-vanilla-snapshot][tkg-snapshot] [supervisor-snapshot]"+ - "[ef-vks-snapshot-f] Volume provision and snapshot creation/restore on "+ - "NFS Datastore", ginkgo.Label(p0, block, vanilla, snapshot, tkg, vc80), func() { + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] [supervisor-snapshot] Volume provision and "+ + "snapshot creation/restore on NFS Datastore", ginkgo.Label(p0, block, vanilla, snapshot, + tkg, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - invokeSnapshotOperationsOnSharedDatastore(client, ctx, namespace, scParameters, snapc, "NFS", pandoraSyncWaitTime) + invokeSnapshotOperationsOnSharedDatastore(client, adminClient, ctx, namespace, scParameters, snapc, "NFS", pandoraSyncWaitTime) }) /* @@ -4938,12 +4940,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 2. Create Snapshot class and take a snapshot of the volume 3. Cleanup of snapshot, pvc and sc */ - ginkgo.It("[pq-f-wcp-snpt] [tkg-snapshot][ef-vks-snapshot-f][supervisor-snapshot] Volume provision "+ - "and snapshot creation/restore on VSAN2 Datastore", ginkgo.Label(p0, snapshot, tkg, - newTest, vc80), func() { + ginkgo.It("[tkg-snapshot] [supervisor-snapshot] Volume provision and snapshot creation/restore on "+ + "VSAN2 Datastore", ginkgo.Label(p0, snapshot, tkg, newTest, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - invokeSnapshotOperationsOnSharedDatastore(client, ctx, namespace, scParameters, snapc, "VSAN", pandoraSyncWaitTime) + invokeSnapshotOperationsOnSharedDatastore(client, adminClient, ctx, namespace, scParameters, snapc, "VSAN", pandoraSyncWaitTime) }) /* @@ -4968,9 +4969,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 4. Volume restore 5. 
snapshot create/delete workflow */ - ginkgo.It("[ef-vanilla-block][ef-wcp-snapshot][block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot]"+ - "[ef-vks-snapshot] Scale-up creation of snapshots across multiple volumes", ginkgo.Label(p1, block, vanilla, - snapshot, tkg, stable, vc80), func() { + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] Scale-up creation of snapshots "+ + "across multiple volumes", ginkgo.Label(p1, block, vanilla, snapshot, tkg, stable, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -5020,7 +5020,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + persistentvolumes, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -5057,7 +5057,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } ginkgo.By("Wait for the PVC to be bound") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims2, framework.ClaimProvisionTimeout) + _, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims2, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for i := 0; i < volumeOpsScale; i++ { @@ -5112,8 +5112,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 5. Perform cleanup */ - ginkgo.It("[tkg-snapshot][ef-vks-snapshot] Verify pre-provisioned static snapshot "+ - "workflow", ginkgo.Label(p0, snapshot, tkg, vc80), func() { + ginkgo.It("[tkg-snapshot] Verify pre-provisioned static snapshot workflow", ginkgo.Label(p0, snapshot, + tkg, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -5225,8 +5225,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 10. 
Run cleanup: Delete snapshots, restored-volumes, pods */ - ginkgo.It("[tkg-snapshot][ef-vks-snapshot-f] Volume restore using dynamic and pre-provisioned "+ - "snapshot on guest cluster", ginkgo.Label(p0, snapshot, tkg, flaky, vc80), func() { + ginkgo.It("[tkg-snapshot] Volume restore using dynamic and pre-provisioned snapshot on "+ + "guest cluster", ginkgo.Label(p0, snapshot, tkg, flaky, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -5266,6 +5266,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -5275,12 +5280,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() ginkgo.By("Restore PVC using dynamic volume snapshot") @@ -5412,8 +5411,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { (note the snapshotHandle its pointing to has been deleted) 8. 
Volume Create should fail with an appropriate error on k8s side */ - ginkgo.It("[tkg-snapshot][ef-vks-snapshot] Restore volume using non-existing static "+ - "snapshot", ginkgo.Label(p0, snapshot, tkg, vc80), func() { + ginkgo.It("[tkg-snapshot] Restore volume using non-existing static snapshot", ginkgo.Label(p0, snapshot, tkg, + vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -5453,6 +5452,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -5462,11 +5466,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() framework.Logf("Get volume snapshot handle from Supervisor Cluster") @@ -5511,7 +5510,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, + _, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionShortTimeout) gomega.Expect(err).To(gomega.HaveOccurred()) @@ -5541,8 +5540,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 7. Validate the pvc is Bound 8. 
Cleanup the snapshot and pvc */ - ginkgo.It("[tkg-snapshot][ef-vks-snapshot-f] Restore volume from a static snapshot "+ - "that is still not ready-to-use", ginkgo.Label(p0, snapshot, tkg, vc80), func() { + ginkgo.It("[tkg-snapshot] Restore volume from a static snapshot that is still not "+ + "ready-to-use", ginkgo.Label(p0, snapshot, tkg, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -5580,6 +5579,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -5589,10 +5593,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() framework.Logf("Get volume snapshot handle from Supervisor Cluster") @@ -5623,8 +5623,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { staticSnapshotContent.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Snapshotcontent name is %s", staticSnapshotContent.ObjectMeta.Name) - staticSnapshotContent, err = waitForVolumeSnapshotContentReadyToUse(*snapc, ctx, staticSnapshotContent.Name) - if err != nil && !*staticSnapshotContent.Status.ReadyToUse { + if !*staticSnapshotContent.Status.ReadyToUse { framework.Logf("VolumeSnapshotContent is not ready to use") } @@ -5643,7 +5642,7 @@ var _ = 
ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle @@ -5713,8 +5712,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 9. Run cleanup: Delete snapshots, restored-volumes, pods. */ - ginkgo.It("[ef-f-wcp-snapshot][tkg-snapshot][supervisor-snapshot][ef-vks-snapshot-f] Perform online "+ - "resize on restored volume", ginkgo.Label(p0, snapshot, tkg, stable, vc80), func() { + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Perform online resize on restored volume", ginkgo.Label(p0, + snapshot, tkg, stable, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -5764,7 +5763,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") @@ -5795,10 +5794,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) defer func() { - if snapshotCreated { - framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) - } if 
snapshotContentCreated { framework.Logf("Deleting volume snapshot content") deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, @@ -5808,6 +5803,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + } }() ginkgo.By("Create PVC from Snapshot and verify restore volume operations") @@ -5914,8 +5914,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 7. Run cleanup: Delete snapshots, restored-volumes, pods. */ - ginkgo.It("[ef-wcp-snapshot][tkg-snapshot][supervisor-snapshot][ef-vks-snapshot] Offline "+ - "relocation of FCD with snapshots", ginkgo.Label(p0, snapshot, tkg, stable, vc80), func() { + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Offline relocation of FCD "+ + "with snapshots", ginkgo.Label(p0, snapshot, tkg, stable, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -5976,16 +5976,16 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim, volHandle, diskSize, true) framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) defer func() { - if snapshotCreated { - framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) - } - if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + } }() ginkgo.By("Verify if 
VolumeID is created on the given datastores") @@ -6161,7 +6161,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - _, err = fpv.WaitForPVClaimBoundPhase(ctx, clientNewGc, + _, err = WaitForPVClaimBoundPhase(ctx, clientNewGc, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).To(gomega.HaveOccurred()) expectedErrMsg := "error getting handle for DataSource Type VolumeSnapshot by Name " + volumeSnapshot.Name @@ -6189,7 +6189,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 5. Restore PVC creation should fail and be stuck in Pending state with appropriate error message. 6. Perform Cleanup. */ - ginkgo.It("[ef-wcp-snapshot][tkg-snapshot][supervisor-snapshot][ef-vks-snapshot] Volume mode "+ + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Volume mode "+ "conversion", ginkgo.Label(p2, snapshot, tkg, newTest, stable, negative, vc90), func() { ctx, cancel := context.WithCancel(context.Background()) @@ -6233,16 +6233,16 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) defer func() { - if snapshotCreated { - framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) - } - if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + } }() ginkgo.By("Creating a PVC from a snapshot but with different access mode") @@ -6256,12 +6256,12 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaims = 
append(pvclaims, pvclaim2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, + _, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionShortTimeout) framework.Logf("Error from creating pvc with %s accessmode is : %s", accessMode, err.Error()) gomega.Expect(err).To(gomega.HaveOccurred()) - expectedErrMsg := "file services are disabled on supervisor cluster" + expectedErrMsg := "no datastores found to create file volume" framework.Logf("Expected failure message: %+q", expectedErrMsg) err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim2.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) @@ -6297,9 +6297,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { Should fail with an appropriate error */ - ginkgo.It("[tkg-snapshot][ef-vks-snapshot-f] Volume snapshot creation on a file-share "+ - "volume on a guest cluster", ginkgo.Label(p1, snapshot, tkg, newTest, negative, - stable, vc80), func() { + ginkgo.It("[tkg-snapshot] Volume snapshot creation on a file-share volume on a guest "+ + "cluster", ginkgo.Label(p1, snapshot, tkg, newTest, negative, stable, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -6320,7 +6319,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { err = fpv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout) gomega.Expect(err).To(gomega.HaveOccurred()) - expectedErrMsg := "file services are disabled on supervisor cluster" + expectedErrMsg := "no datastores found to create file volume, vsan file service may be disabled" err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) }) @@ -6343,8 +6342,10 @@ var _ 
= ginkgo.Describe("Volume Snapshot Basic Test", func() { And also write new data to the restored volumes and it should succeed Run cleanup: Delete snapshots, restored-volumes, pods. */ - ginkgo.It("[ef-f-wcp-snapshot][tkg-snapshot][supervisor-snapshot][ef-vks-snapshot-f] Create restore volume "+ - "snapshot in consistent order", ginkgo.Label(p0, snapshot, tkg, stable, vc90), func() { + + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Create restore volume snapshot "+ + "in consistent order", ginkgo.Label(p0, snapshot, tkg, stable, vc90), func() { + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -6395,7 +6396,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle1, vmUUID) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs1[0].Spec.CSI.VolumeHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") @@ -6516,8 +6517,10 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 7. Verify the older data. It should be intact and write new data. 8. Perform cleanup. 
*/ - ginkgo.It("[ef-f-wcp-snapshot][tkg-snapshot][supervisor-snapshot][ef-vks-snapshot-f] Detach volume with "+ + + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Detach volume with "+ "snapshot", ginkgo.Label(p1, snapshot, tkg, newTest, stable, vc90), func() { + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -6562,7 +6565,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") defer func() { @@ -6582,11 +6585,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) defer func() { - if snapshotCreated { - framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) - } - if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, @@ -6597,6 +6595,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + } }() ginkgo.By("Deleting pod and wait for it to be detached from node") @@ -6642,8 +6645,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 9. Delete all the above created PV, PVC and resource quota. 
*/ - ginkgo.It("[tkg-snapshot][ef-vks-snapshot-f] Provisioning of static volume on guest "+ - "cluster using FCD with snapshot creation", ginkgo.Label(p0, snapshot, tkg, stable, vc90), func() { + ginkgo.It("[tkg-snapshot] Provisioning of static volume on guest cluster using FCD with snapshot "+ + "creation", ginkgo.Label(p0, snapshot, tkg, stable, vc90), func() { var err error ctx, cancel := context.WithCancel(context.Background()) @@ -6760,11 +6763,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - if snapshotCreated { - framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) - } - if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, @@ -6774,6 +6772,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + } }() framework.Logf("Get volume snapshot handle from Supervisor Cluster") @@ -6849,7 +6852,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { framework.Logf("Waiting for PV to move to released state") // TODO: replace sleep with polling mechanism. 
time.Sleep(time.Duration(100) * time.Second) - gcPV, err = client.CoreV1().PersistentVolumes().Get(ctx, gcPVName, metav1.GetOptions{}) + gcPV, err = adminClient.CoreV1().PersistentVolumes().Get(ctx, gcPVName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gcPVStatus := gcPV.Status.Phase if gcPVStatus != "Released" { @@ -6882,8 +6885,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { b) snapshot create/delete workflow c) Restart services */ - ginkgo.It("[ef-f-wcp-snapshot][tkg-snapshot][supervisor-snapshot] Scale up snapshot creation by "+ - "increasing the volume counts and in between restart services", ginkgo.Label(p1, snapshot, tkg, vc90), func() { + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Scale up snapshot creation by increasing the volume counts and "+ + "in between restart services", ginkgo.Label(p1, snapshot, tkg, vc90), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -6916,7 +6919,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + persistentvolumes, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -6996,7 +6999,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } ginkgo.By("Wait for the PVC to be bound") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims2, framework.ClaimProvisionTimeout) + _, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims2, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for i := 0; i < volumeOpsScale; i++ { @@ -7041,73 +7044,75 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 9. Create new snapshots on restore volume and verify it succeeds 10. 
Run cleanup: Delete snapshots, restored-volumes, pods */ - ginkgo.It("[ef-f-wcp-snapshot][tkg-snapshot][supervisor-snapshot][ef-vks-snapshot-f] Max Snapshots per volume on "+ - "wcp and gc", ginkgo.Label(p1, snapshot, tkg, vc90), func() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - var volumeSnapshots []*snapV1.VolumeSnapshot - var snapshotIds []string - snapDeleted := false - noOfSnapshotToCreate := 33 + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Max Snapshots per volume on wcp and gc", + ginkgo.Label(p1, snapshot, tkg, vc90), func() { - ginkgo.By("Get storage class") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ginkgo.By("Create PVC") - pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, v1.ReadWriteOnce, - diskSize, storageclass, true) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle = persistentVolumes[0].Spec.CSI.VolumeHandle - if guestCluster { - volHandle = getVolumeIDFromSupervisorCluster(volHandle) - } - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { - err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() + var volumeSnapshots []*snapV1.VolumeSnapshot + var snapshotIds []string + snapDeleted := false + noOfSnapshotToCreate := 33 - ginkgo.By("Get volume snapshot class") - volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Get storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", 
false, scName) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } - for i := 0; i < noOfSnapshotToCreate; i++ { - ginkgo.By(fmt.Sprintf("Creating snapshot no: %d for pvc %s", i+1, pvclaim.Name)) - volumeSnapshot, _, _, _, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, - volumeSnapshotClass, pvclaim, volHandle, diskSize, true) + ginkgo.By("Create PVC") + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, v1.ReadWriteOnce, + diskSize, storageclass, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle = persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() - volumeSnapshots = append(volumeSnapshots, volumeSnapshot) - snapshotIds = append(snapshotIds, snapshotId) - } + ginkgo.By("Get volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } - defer func() { - if !snapDeleted { - for i := 0; i < noOfSnapshotToCreate; i++ { - ginkgo.By("Delete dynamic volume snapshot") - _, _, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshots[i], pandoraSyncWaitTime, volHandle, snapshotIds[i], true) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + for i := 0; i < noOfSnapshotToCreate; i++ { + ginkgo.By(fmt.Sprintf("Creating snapshot no: %d for pvc %s", i+1, pvclaim.Name)) + volumeSnapshot, _, _, _, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim, volHandle, diskSize, true) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + volumeSnapshots = append(volumeSnapshots, volumeSnapshot) + snapshotIds = append(snapshotIds, snapshotId) } - }() - for i := 0; i < noOfSnapshotToCreate; i++ { - ginkgo.By("Delete dynamic volume snapshot") - _, _, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshots[i], pandoraSyncWaitTime, volHandle, snapshotIds[i], true) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - snapDeleted = true - }) + defer func() { + if !snapDeleted { + for i := 0; i < noOfSnapshotToCreate; i++ { + ginkgo.By("Delete dynamic volume snapshot") + _, _, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshots[i], pandoraSyncWaitTime, volHandle, snapshotIds[i], true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + }() + + for i := 0; i < noOfSnapshotToCreate; i++ { + ginkgo.By("Delete dynamic volume snapshot") + _, _, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshots[i], pandoraSyncWaitTime, volHandle, snapshotIds[i], true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + snapDeleted = true + }) /* @@ -7318,8 +7323,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 18. Perform cleanup: Delete Snapshot, Pod, PVC. 
*/ - ginkgo.It("[ef-f-wcp-snapshot][supervisor-snapshot] Volume restore using a dynamic "+ - "snapshot", ginkgo.Label(p0, wcp, snapshot, block, stable, vc90), func() { + ginkgo.It("[supervisor-snapshot] Volume restore using a "+ + "dynamic snapshot", ginkgo.Label(p0, wcp, snapshot, block, stable, vc90), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -7398,6 +7403,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -7407,11 +7417,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() if !guestCluster { @@ -7468,7 +7473,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 11. Verify CRD deleted automatically. 
*/ - ginkgo.It("[ef-f-wcp-snapshot][supervisor-snapshot] Verify static provisioning workflow "+ + ginkgo.It("[supervisor-snapshot] Verify static provisioning workflow "+ "with snapshot", ginkgo.Label(p0, block, wcp, vc90), func() { ctx, cancel := context.WithCancel(context.Background()) @@ -7539,6 +7544,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volumeSnapshotClass, pvc, volHandle, diskSize, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -7548,10 +7558,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Deleting the pod") @@ -7599,7 +7605,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 18. Perform cleanup. */ - ginkgo.It("[ef-f-wcp-snapshot][supervisor-snapshot] Snapshot restoration with delete vsc policy and with "+ + ginkgo.It("[supervisor-snapshot] Snapshot restoration with delete vsc policy and with "+ "retain pv policy", ginkgo.Label(p0, block, tkg, vanilla, wcp, snapshot, stable, vc90), func() { ctx, cancel := context.WithCancel(context.Background()) @@ -7822,8 +7828,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 13. 
Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks */ - ginkgo.It("[ef-f-wcp-snapshot][supervisor-snapshot] Supervisor password rotation during snapshot "+ - "creation", ginkgo.Label(p1, block, wcp, snapshot, disruptive, vc90), func() { + ginkgo.It("[supervisor-snapshot] Supervisor password rotation during snapshot creation", ginkgo.Label(p1, block, + wcp, snapshot, disruptive, vc90), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -7888,7 +7894,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Perform password rotation on the supervisor") csiNamespace := GetAndExpectStringEnvVar(envCSINamespace) - passwordRotated, err := performPasswordRotationOnSupervisor(client, ctx, csiNamespace, vcAddress) + passwordRotated, err := performPasswordRotationOnSupervisor(adminClient, ctx, csiNamespace, vcAddress) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(passwordRotated).To(gomega.BeTrue()) @@ -7944,7 +7950,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle @@ -7975,7 +7981,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { // invokeSnapshotOperationsOnSharedDatastore is a wrapper method which invokes creation of volume snapshot // and restore of volume snapshot on shared datastore -func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx context.Context, namespace string, +func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, adminClient clientset.Interface, ctx 
context.Context, namespace string, scParameters map[string]string, snapc *snapclient.Clientset, sharedDatastoreType string, pandoraSyncWaitTime int) { var storageclass *storagev1.StorageClass @@ -8037,7 +8043,7 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c }() } else if supervisorCluster { ginkgo.By("Get storage class and create PVC") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -8048,7 +8054,7 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() } @@ -8087,10 +8093,6 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) defer func() { - if snapshotCreated { - framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) - } if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, @@ -8101,6 +8103,11 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c *volumeSnapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + 
deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + } }() pvclaim2, persistentVolumes2, pod2 := verifyVolumeRestoreOperation(ctx, client, diff --git a/tests/e2e/csi_static_provisioning_basic.go b/tests/e2e/csi_static_provisioning_basic.go index c9118badf4..06ec43990c 100644 --- a/tests/e2e/csi_static_provisioning_basic.go +++ b/tests/e2e/csi_static_provisioning_basic.go @@ -78,6 +78,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { nonSharedDatastoreURL string fullSyncWaitTime int isQuotaValidationSupported bool + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -87,6 +88,16 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { var cancel context.CancelFunc ctx, cancel = context.WithCancel(context.Background()) defer cancel() + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) @@ -167,7 +178,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { } if pv != nil { - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort)) + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeoutShort)) framework.ExpectNoError(e2eVSphere.waitForCNSVolumeToBeDeleted(pv.Spec.CSI.VolumeHandle)) } @@ -210,15 +221,17 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { framework.Logf("Profile ID :%s", profileID) scParameters := make(map[string]string) 
scParameters["storagePolicyID"] = profileID - err = client.StorageV1().StorageClasses().Delete(ctx, vsanDefaultStorageClassInSVC, metav1.DeleteOptions{}) - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if !supervisorCluster { + err = adminClient.StorageV1().StorageClasses().Delete(ctx, vsanDefaultStorageClassInSVC, metav1.DeleteOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } } storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, vsanDefaultStorageClassInSVC) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("storageclass name :%s", storageclass.GetName()) - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storageclass.GetName(), metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, storageclass.GetName(), metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("storageclass name :%s", storageclass.GetName()) @@ -229,6 +242,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { } testCleanUpUtil := func(ctx context.Context, restClientConfig *restclient.Config, + adminClient clientset.Interface, cnsRegistervolume *cnsregistervolumev1alpha1.CnsRegisterVolume, namespace string, pvcName string, pvName string) { if guestCluster { @@ -240,7 +254,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { pvc = nil ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pvName, poll, + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pvName, poll, supervisorClusterOperationsTimeout)) pv = nil @@ -356,7 +370,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { pvc = nil ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeout)) + 
framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeout)) pv = nil }) @@ -477,7 +491,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { pvc = nil ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeout)) + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeout)) pv = nil }) @@ -495,8 +509,8 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { // 7. Wait for the volume entry to be created in CNS. // 8. Delete PV2. // 9. Wait for PV2 to be deleted, and also entry is deleted from CNS. - ginkgo.It("[csi-block-vanilla][csi-block-vanilla-parallelized][pq-vanilla-block]Verify static provisioning "+ - "workflow using same PV name twice", ginkgo.Label(p2, block, vanilla, core, vc70), func() { + ginkgo.It("[csi-block-vanilla] [csi-block-vanilla-parallelized] Verify static provisioning workflow using "+ + "same PV name twice", ginkgo.Label(p2, block, vanilla, core, vc70), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -572,7 +586,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { // 12. Delete the PVC in GC. // 13. Verifying if PVC and PV also deleted in the SV cluster. // 14. Verify volume is deleted on CNS. 
- ginkgo.It("[[ef-vks] [ef-vks-n1][ef-vks-n2] [csi-guest] Static provisioning workflow in guest "+ + ginkgo.It("[[ef-vks] csi-guest] Static provisioning workflow in guest "+ "cluster", ginkgo.Label(p1, block, tkg, vc70), func() { var err error ctx, cancel := context.WithCancel(context.Background()) @@ -587,7 +601,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() defer func() { @@ -654,7 +668,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { pvc = nil ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort)) + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeoutShort)) pv = nil ginkgo.By("Verify volume is deleted in Supervisor Cluster") @@ -674,7 +688,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { // 6. Delete the PVC in GC. // 7. Verifying if PVC and PV also deleted in the SV cluster. // 8. Verify volume is deleted on CNS. 
- ginkgo.It("[ef-vks] [ef-vks-n1][ef-vks-n2][csi-guest] Static provisioning workflow II in guest "+ + ginkgo.It("[ef-vks] [csi-guest] Static provisioning workflow II in guest "+ "cluster", ginkgo.Label(p1, block, tkg, vc70), func() { var err error @@ -738,7 +752,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { pvc = nil ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort)) + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeoutShort)) pv = nil ginkgo.By("Verify volume is deleted in Supervisor Cluster") @@ -827,7 +841,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { vmUUID, pv.Spec.CSI.VolumeHandle)) defer func() { - testCleanUpUtil(ctx, restConfig, cnsRegisterVolume, namespace, pvc.Name, pv.Name) + testCleanUpUtil(ctx, restConfig, adminClient, cnsRegisterVolume, namespace, pvc.Name, pv.Name) }() }) @@ -846,7 +860,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { // 9. Verify PV is deleted automatically. // 10. Verify Volume id deleted automatically. // 11. Verify CRD deleted automatically. 
- ginkgo.It("[ef-wcp][ef-f-stretched-svc][csi-supervisor] [stretched-svc] Verify static provisioning workflow on "+ + ginkgo.It("[cf-wcp][ef-stretched-svc][csi-supervisor] [stretched-svc] Verify static provisioning workflow on "+ "SVC import FCD", ginkgo.Label(p0, block, wcp, vc70), func() { var err error var totalQuotaUsedBefore, storagePolicyQuotaBefore, storagePolicyUsageBefore *resource.Quantity @@ -929,7 +943,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { vmUUID, pv.Spec.CSI.VolumeHandle)) defer func() { - testCleanUpUtil(ctx, restConfig, cnsRegisterVolume, namespace, pvc.Name, pv.Name) + testCleanUpUtil(ctx, restConfig, adminClient, cnsRegisterVolume, namespace, pvc.Name, pv.Name) //Validates PVC quota in both StoragePolicyQuota and StoragePolicyUsage CR _, _, storagePolicyQuota_afterCleanUp, _, storagePolicyUsage_AfterCleanup, _ := @@ -1052,7 +1066,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { fmt.Sprintf("PodVM with vmUUID: %s still exists. So volume: %s is not detached from the PodVM", vmUUID, pv.Spec.CSI.VolumeHandle)) defer func() { - testCleanUpUtil(ctx, restConfig, cnsRegisterVolume, namespace, pvc.Name, pv.Name) + testCleanUpUtil(ctx, restConfig, adminClient, cnsRegisterVolume, namespace, pvc.Name, pv.Name) if isQuotaValidationSupported { //Validates PVC quota in both StoragePolicyQuota and StoragePolicyUsage CR _, _, storagePolicyQuota_afterCleanUp, _, storagePolicyUsage_AfterCleanup, _ := @@ -1078,7 +1092,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { // 5. Create CNS register volume with above created FCD, AccessMode as "ReadOnlyMany". // 6. verify the error message. // 7. Delete Resource quota. 
- ginkgo.It("[ef-f-wcp][csi-supervisor] Verify static provisioning when AccessMode is ReadWriteMany or "+ + ginkgo.It("[ef-wcp][csi-supervisor] Verify static provisioning when AccessMode is ReadWriteMany or "+ "ReadOnlyMany", ginkgo.Label(p1, block, wcp, vc70), func() { var err error ctx, cancel := context.WithCancel(context.Background()) @@ -1137,7 +1151,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { // 8. Verify PV is deleted automatically. // 9. Verify Volume id deleted automatically. // 10. Verify CRD deleted automatically. - ginkgo.It("[ef-f-wcp][csi-supervisor] Verify static provisioning workflow - when "+ + ginkgo.It("[ef-wcp][csi-supervisor] Verify static provisioning workflow - when "+ "DuplicateFCD is used", ginkgo.Label(p2, block, wcp, vc70), func() { var err error @@ -1217,10 +1231,10 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { "Failed to delete PVC", pvc2.Name) ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv2.Name, poll, supervisorClusterOperationsTimeout)) - testCleanUpUtil(ctx, restConfig, nil, namespace, pvc1.Name, pv1.Name) + testCleanUpUtil(ctx, restConfig, adminClient, nil, namespace, pvc1.Name, pv1.Name) }() }) @@ -1319,10 +1333,10 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { "Failed to delete PVC ", pvc2.Name) ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv2.Name, + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv2.Name, poll, supervisorClusterOperationsTimeout)) - testCleanUpUtil(ctx, restConfig, nil, namespace, pvc1.Name, pv1.Name) + testCleanUpUtil(ctx, restConfig, adminClient, nil, namespace, pvc1.Name, pv1.Name) }() }) @@ -1403,7 +1417,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { 
verifyBidirectionalReferenceOfPVandPVC(ctx, client, pvc, pv, fcdID) defer func() { - testCleanUpUtil(ctx, restConfig, nil, namespace, pvc.Name, pv.Name) + testCleanUpUtil(ctx, restConfig, adminClient, nil, namespace, pvc.Name, pv.Name) }() }) @@ -1483,7 +1497,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { verifyBidirectionalReferenceOfPVandPVC(ctx, client, pvc, pv, fcdID) defer func() { - testCleanUpUtil(ctx, restConfig, nil, namespace, pvc.Name, pv.Name) + testCleanUpUtil(ctx, restConfig, adminClient, nil, namespace, pvc.Name, pv.Name) }() }) @@ -1614,7 +1628,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { // 2. Create a storage policy. // 3. Create FCD with the above created storage policy. // 4. Import the volume created in step 3 to namespace created in step 1. - ginkgo.It("[ef-f-wcp][csi-supervisor] static provisioning workflow - when tried to import volume with a storage "+ + ginkgo.It("[ef-wcp][csi-supervisor] static provisioning workflow - when tried to import volume with a storage "+ "policy that doesn't belong to the namespace", ginkgo.Label(p2, block, wcp, negative, vc70), func() { var err error @@ -1677,12 +1691,12 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { pvName := "static-pv-" + fcdID framework.Logf("Deleting PersistentVolume %s", pvName) framework.ExpectNoError(fpv.DeletePersistentVolume(ctx, client, pvName)) - pv, err = client.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) + pv, err = adminClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if pv != nil { - framework.ExpectNoError(fpv.DeletePersistentVolume(ctx, client, pvName)) + framework.ExpectNoError(fpv.DeletePersistentVolume(ctx, adminClient, pvName)) } pv = nil }() @@ -1766,8 +1780,8 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { // 7. Wait for PV , PVC to get bound. 
// 8. Create POD, verify the status. // 9. Delete all the above created PV, PVC and resource quota. - ginkgo.It("[ef-vks-f] [ef-vks-n1-f][ef-vks-n2-f][csi-guest] static volume provisioning "+ - "on guest cluster", ginkgo.Label(p0, block, tkg, vc70), func() { + ginkgo.It("[ef-vks] [csi-guest] static volume provisioning on guest "+ + "cluster", ginkgo.Label(p0, block, tkg, vc70), func() { var err error ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1867,7 +1881,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { framework.Logf("Waiting for PV to move to released state") // TODO: replace sleep with polling mechanism. time.Sleep(time.Duration(100) * time.Second) - gcPV, err = client.CoreV1().PersistentVolumes().Get(ctx, gcPVName, metav1.GetOptions{}) + gcPV, err = adminClient.CoreV1().PersistentVolumes().Get(ctx, gcPVName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gcPVStatus := gcPV.Status.Phase if gcPVStatus != "Released" { @@ -1880,7 +1894,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { gomega.Expect(volumeExists).NotTo(gomega.BeFalse()) defer func() { - testCleanUpUtil(ctx, restConfig, nil, svNamespace, svcPVC.Name, svcPV.Name) + testCleanUpUtil(ctx, restConfig, adminClient, nil, svNamespace, svcPVC.Name, svcPV.Name) }() }) @@ -1977,11 +1991,11 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { client, namespaceToDelete, poll, supervisorClusterOperationsTimeout)) ginkgo.By("Verify PV got deleted") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv1.Name, poll, supervisorClusterOperationsTimeout)) framework.ExpectNoError(e2eVSphere.waitForCNSVolumeToBeDeleted(pv1.Spec.CSI.VolumeHandle)) - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv2.Name, poll, 
supervisorClusterOperationsTimeout)) framework.ExpectNoError(e2eVSphere.waitForCNSVolumeToBeDeleted(pv2.Spec.CSI.VolumeHandle)) @@ -1995,7 +2009,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { // 2. Create Resource quota. // 3. Create CNS register volume with above created VMDK. // 4. verify PV, PVC got created , check the bidirectional reference. - ginkgo.It("[pq-f-wcp][csi-supervisor] Verify static provisioning - import VMDK", ginkgo.Label(p1, + ginkgo.It("[pq-wcp][csi-supervisor] Verify static provisioning - import VMDK", ginkgo.Label(p1, block, wcp, vc70), func() { var err error ctx, cancel := context.WithCancel(context.Background()) @@ -2055,7 +2069,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { pvc = nil ginkgo.By("PV will be in released state , hence delete PV explicitly") - framework.ExpectNoError(fpv.DeletePersistentVolume(ctx, client, pv.GetName())) + framework.ExpectNoError(fpv.DeletePersistentVolume(ctx, adminClient, pv.GetName())) pv = nil ginkgo.By("Verify CRD should be deleted automatically") @@ -2076,7 +2090,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { // 3. Create CNS register volume with above created VMDK and FCDID. // 4. Verify the error message "VolumeID and DiskURLPath cannot be specified // together". - ginkgo.It("[pq-f-wcp]csi-supervisor] Specify VolumeID and DiskURL together and "+ + ginkgo.It("[pq-wcp]csi-supervisor] Specify VolumeID and DiskURL together and "+ "verify the error message", ginkgo.Label(p2, block, wcp, negative, vc70), func() { var err error @@ -2148,7 +2162,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { 7.Verify Volume is deleted. 8.Delete FCD. 
*/ - ginkgo.It("[ef-wcp][csi-block-vanilla][csi-supervisor][pq-vanilla-block] Full sync to deregister/delete "+ + ginkgo.It("[ef-wcp][csi-block-vanilla] [csi-supervisor] Full sync to deregister/delete "+ "volume", ginkgo.Label(p0, block, wcp, vanilla, core, vc70), func() { var err error ctx, cancel := context.WithCancel(context.Background()) @@ -2260,9 +2274,13 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { } framework.Logf("Deleting PersistentVolume %s", pv.Name) - err = fpv.DeletePersistentVolume(ctx, client, pv.Name) + if vanillaCluster { + err = fpv.DeletePersistentVolume(ctx, client, pv.Name) + } else { + err = fpv.DeletePersistentVolume(ctx, adminClient, pv.Name) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = client.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) + _, err = adminClient.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -2302,7 +2320,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { 5.Wait for volume to be deleted from K8s. 
6.Wait for Volume to be deleted on CNS */ - ginkgo.It("[ef-f-wcp][csi-block-vanilla][csi-supervisor][pq-vanilla-block] VMDK is deleted from datastore "+ + ginkgo.It("[ef-wcp][csi-block-vanilla] [csi-supervisor] VMDK is deleted from datastore "+ "but CNS volume is still present", ginkgo.Label(p1, block, wcp, vanilla, core, vc70), func() { var err error ctx, cancel := context.WithCancel(context.Background()) @@ -2441,9 +2459,9 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { } framework.Logf("Deleting PersistentVolume %s", pv.Name) - err = fpv.DeletePersistentVolume(ctx, client, pv.Name) + err = fpv.DeletePersistentVolume(ctx, adminClient, pv.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = client.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) + _, err = adminClient.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } diff --git a/tests/e2e/e2e_common.go b/tests/e2e/e2e_common.go index 55598f0055..beb497a0c5 100644 --- a/tests/e2e/e2e_common.go +++ b/tests/e2e/e2e_common.go @@ -517,6 +517,7 @@ var ( audienceForSvcAccountName = "https://kubernetes.default.svc.cluster.local" envIsDevopsUser = "IS_DEVOPS_USER" serviceAccountKeyword = "ServiceAccount" + envUserName = "USERNAME" ) // storage policy usages for storage quota validation diff --git a/tests/e2e/snapshot_stretched_supervisor.go b/tests/e2e/snapshot_stretched_supervisor.go index e5aabb9721..82dca92ee1 100644 --- a/tests/e2e/snapshot_stretched_supervisor.go +++ b/tests/e2e/snapshot_stretched_supervisor.go @@ -68,13 +68,14 @@ var _ = ginkgo.Describe("Stretched-Supervisor-Snapshot", func() { bootstrap() client = f.ClientSet + var err error + var nodeList *v1.NodeList namespace = getNamespaceToRunTests(f) - // parameters set for storage policy scParameters = make(map[string]string) // fetching node list and checking node status - nodeList, err := 
fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") @@ -155,7 +156,7 @@ var _ = ginkgo.Describe("Stretched-Supervisor-Snapshot", func() { defer cancel() ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace)) err := client.CoreV1().Services(namespace).Delete(ctx, servicename, *metav1.NewDeleteOptions(0)) if !apierrors.IsNotFound(err) { @@ -488,7 +489,7 @@ var _ = ginkgo.Describe("Stretched-Supervisor-Snapshot", func() { int32(stsReplicas), true, allowedTopologies, true, true, zonalPolicy, "", storageclass, zonalPolicy) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() framework.Logf("Verify PV node affinity and that the PODS are running on appropriate node") @@ -544,7 +545,7 @@ var _ = ginkgo.Describe("Stretched-Supervisor-Snapshot", func() { v1.ReadWriteOnce, volumeSnapshot1.Name, snapshotapigroup) restoreVol1, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - restorepv1, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + restorepv1, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{restoreVol1}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) restoreVolHandle1 := restorepv1[0].Spec.CSI.VolumeHandle diff --git a/tests/e2e/snapshot_vmservice_vm.go b/tests/e2e/snapshot_vmservice_vm.go index 2cfff77155..8bc00391b6 100644 --- a/tests/e2e/snapshot_vmservice_vm.go +++ b/tests/e2e/snapshot_vmservice_vm.go @@ -70,6 +70,8 @@ var _ bool = 
ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { pandoraSyncWaitTime int dsRef types.ManagedObjectReference labelsMap map[string]string + adminClient clientset.Interface + userName string ) ginkgo.BeforeEach(func() { @@ -79,13 +81,14 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { // client connection client = f.ClientSet bootstrap() - + var err error + adminClient, client = initializeClusterClientsByUserRoles(client) // fetch the testbed type for executing testcases topologyFeature := os.Getenv(topologyFeature) // fetching nodes and reading storage policy name if topologyFeature != topologyTkgHaName && topologyFeature != podVMOnStretchedSupervisor { - nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, adminClient) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") @@ -113,6 +116,8 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { dsRef.Value, &e2eVSphere) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + userName = GetAndExpectStringEnvVar(envUserName) + vmClass = os.Getenv(envVMClass) if vmClass == "" { vmClass = vmClassBestEffortSmall @@ -121,7 +126,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { framework.Logf("Create a WCP namespace for the test") // creating wcp test namespace and setting vmclass, contlib, storage class fields in test ns namespace = createTestWcpNs( - vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId, &e2eVSphere)) + vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId, &e2eVSphere), userName) framework.Logf("Verifying storage policies usage for each storage class") restConfig = getRestConfigClient() @@ -178,7 +183,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] 
Snapshot VM Service VM", func() { dumpSvcNsEventsOnTestFailure(client, namespace) delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }) /* @@ -198,14 +203,14 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { 11. Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks */ - ginkgo.It("[cf-f-wcp] Taking snapshot of a vm service vm attached to a dynamic "+ + ginkgo.It("[cf-wcp] Taking snapshot of a vm service vm attached to a dynamic "+ "volume", ginkgo.Label(p0, block, wcp, snapshot, vmServiceVm, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -267,10 +272,8 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info gomega.Expect(err).NotTo(gomega.HaveOccurred()) for i, vol := range vm.Status.Volumes { - if vol.Name == pvc.Name { - volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) - verifyDataIntegrityOnVmDisk(vmIp, volFolder) - } + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) } } @@ -284,6 +287,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated { + err = 
deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -293,10 +301,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Delete dynamic volume snapshot") @@ -323,7 +327,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { 12. Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks */ - ginkgo.It("[ef-f-vmsvc] Taking snapshot of a vm service vm attached to a static "+ + ginkgo.It("[ef-vmsvc] Taking snapshot of a vm service vm attached to a static "+ "volume", ginkgo.Label(p0, block, wcp, snapshot, vmServiceVm, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) @@ -417,6 +421,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc, volHandle, diskSize, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -426,10 +435,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Delete dynamic volume snapshot") @@ -468,7 +473,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -542,6 +547,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -551,11 +561,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() ginkgo.By("Create a volume from a snapshot") @@ -645,7 +650,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := 
adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -719,6 +724,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated1 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) @@ -728,11 +738,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated1 { - err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() ginkgo.By("Create a dynamic volume snapshot-2 for the volume") @@ -741,6 +746,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated2 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) @@ -750,10 +760,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if 
snapshotContentCreated2 { - err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Create a volume from a snapshot") @@ -852,7 +858,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -926,6 +932,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated1 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) @@ -935,10 +946,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated1 { - err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Create a volume from a snapshot") @@ -1032,6 +1039,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc2, volHandle2, newDiskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, 
pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated2 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) @@ -1041,10 +1053,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated2 { - err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Delete volume snapshot-1") @@ -1093,7 +1101,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC1") @@ -1233,6 +1241,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc1, volHandle1, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated1 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) @@ -1242,11 +1255,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated1 { - err = deleteVolumeSnapshotContent(ctx, 
snapshotContent1, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() ginkgo.By("Create a dynamic volume snapshot-2 for the volume") @@ -1255,6 +1263,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc2, volHandle2, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated2 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) @@ -1264,10 +1277,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated2 { - err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Create restorevol1 from snapshot1") @@ -1394,14 +1403,14 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { Confirm that the Pod reaches the running state and that read and write operations can be performed on the volume. 
Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks */ - ginkgo.It("[ef-f-vmsvc] Attaching same volume to a pod and vm service vm", ginkgo.Label(p1, block, wcp, snapshot, + ginkgo.It("[ef-vmsvc] Attaching same volume to a pod and vm service vm", ginkgo.Label(p1, block, wcp, snapshot, vmServiceVm, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -1461,6 +1470,10 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } if snapshotCreated { framework.Logf("Deleting volume snapshot") @@ -1471,11 +1484,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() ginkgo.By("Verify volume metadata for deployment pod, pvc and pv") @@ -1604,7 +1612,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := 
adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -1686,6 +1694,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -1695,10 +1708,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Power off vm1") @@ -1826,7 +1835,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { snapshotContents := make([]*snapV1.VolumeSnapshotContent, volumeOpsScale) ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -2071,7 +2080,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { var datastoreUrls []string ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, 
storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC-1") @@ -2172,6 +2181,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc, volHandle, diskSize, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated1 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) @@ -2181,10 +2195,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated1 { - err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Create volume from snapshot") @@ -2224,6 +2234,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, restorepvc, restoreVolHandle, diskSize, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated2 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) @@ -2233,10 +2248,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated2 { - err = 
deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Creating VMs") @@ -2315,7 +2326,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC1") @@ -2432,6 +2443,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc1, volHandle1, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated1 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) @@ -2441,11 +2457,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - if snapshotContentCreated1 { - err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Create a dynamic volume snapshot-2 for the volume") @@ -2454,6 +2465,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc2, volHandle2, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated2 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) @@ -2463,10 +2479,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated2 { - err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Create restorevol1 from snapshot1") @@ -2598,7 +2610,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -2672,6 +2684,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated1 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) @@ -2681,10 +2698,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated1 { - err = deleteVolumeSnapshotContent(ctx, 
snapshotContent1, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") @@ -2699,6 +2712,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated2 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) @@ -2708,10 +2726,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated2 { - err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") @@ -2726,6 +2740,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated3 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent3, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated3 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot3.Name, pandoraSyncWaitTime) @@ -2735,10 +2754,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot3.Status.BoundVolumeSnapshotContentName, 
pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated3 { - err = deleteVolumeSnapshotContent(ctx, snapshotContent3, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Restore volume from latest snapshot") @@ -2836,7 +2851,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks */ - ginkgo.It("[ef-f-stretched-svc][stretched-svc] Taking snapshot of a vm service vm in a stretched supervisor "+ + ginkgo.It("[ef-stretched-svc][stretched-svc] Taking snapshot of a vm service vm in a stretched supervisor "+ "cluster", ginkgo.Label(p0, block, stretchedSvc, vmServiceVm, snapshot, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) @@ -2859,7 +2874,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { framework.Logf("annotationVal :%s, pvcAnnotations: %v", annotationVal, pvcAnnotations) ginkgo.By("Creating Pvc with Immediate topology storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvcSpec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", storageclass, nil, "") pvcSpec.Annotations = pvcAnnotations @@ -2867,7 +2882,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for SV PVC to come to bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) topologykey := 
pvs[0].Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values[0] @@ -2944,6 +2959,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc, volumeID, diskSize, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) @@ -2953,10 +2973,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Restore volume from snapshot") @@ -3057,8 +3073,8 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { 25. 
Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks */ - ginkgo.It("[ef-f-stretched-svc][stretched-svc] Restoring snapshots and attaching it to a new vm service vms in "+ - "a stretched supervisor env", ginkgo.Label(p0, block, stretchedSvc, snapshot, vc80), func() { + ginkgo.It("[ef-stretched-svc][stretched-svc] Restoring snapshots and attaching it to a new vm service vms in a "+ + "stretched supervisor env", ginkgo.Label(p0, block, stretchedSvc, snapshot, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -3081,7 +3097,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { framework.Logf("annotationVal :%s, pvcAnnotations: %v", annotationVal, pvcAnnotations) ginkgo.By("Creating Pvc with Immediate topology storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvcSpec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", storageclass, nil, "") pvcSpec.Annotations = pvcAnnotations @@ -3089,7 +3105,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for SV PVC to come to bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) topologykey := pvs[0].Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values[0] @@ -3166,6 +3182,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, pvc, volumeId, diskSize, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) 
defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated1 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) @@ -3175,10 +3196,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated1 { - err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Restore volume from snapshot-1") @@ -3244,6 +3261,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, restorepvc1, restorevolHandle1, diskSize, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated2 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) @@ -3253,10 +3275,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated2 { - err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Restore volume from snapshot-2") @@ -3322,6 +3340,11 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { volumeSnapshotClass, restorepvc2, restorevolHandle2, 
diskSize, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + if snapshotContentCreated3 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent3, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated3 { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot3.Name, pandoraSyncWaitTime) @@ -3331,10 +3354,6 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { *volumeSnapshot3.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotContentCreated3 { - err = deleteVolumeSnapshotContent(ctx, snapshotContent3, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Delete volume snapshot-1") diff --git a/tests/e2e/statefulsets.go b/tests/e2e/statefulsets.go index f38b38c543..16a4643810 100644 --- a/tests/e2e/statefulsets.go +++ b/tests/e2e/statefulsets.go @@ -82,18 +82,25 @@ var _ = ginkgo.Describe("statefulset", func() { stsReplicas int32 allowedTopologies []v1.TopologySelectorLabelRequirement isQuotaValidationSupported bool + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - namespace = getNamespaceToRunTests(f) + var err error client = f.ClientSet + namespace = getNamespaceToRunTests(f) + adminClient, client = initializeClusterClientsByUserRoles(client) + bootstrap() - sc, err := client.StorageV1().StorageClasses().Get(ctx, defaultNginxStorageClassName, metav1.GetOptions{}) - if err == nil && sc != nil { - gomega.Expect(client.StorageV1().StorageClasses().Delete(ctx, sc.Name, - *metav1.NewDeleteOptions(0))).NotTo(gomega.HaveOccurred()) + + if vanillaCluster { + sc, err := client.StorageV1().StorageClasses().Get(ctx, defaultNginxStorageClassName, metav1.GetOptions{}) + if err == nil && sc != nil { + 
gomega.Expect(client.StorageV1().StorageClasses().Delete(ctx, sc.Name, + *metav1.NewDeleteOptions(0))).NotTo(gomega.HaveOccurred()) + } } scParameters = make(map[string]string) @@ -127,7 +134,7 @@ var _ = ginkgo.Describe("statefulset", func() { } if stretchedSVC { - nodeList, err = fnodes.GetReadySchedulableNodes(ctx, client) + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") } @@ -142,12 +149,13 @@ var _ = ginkgo.Describe("statefulset", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace)) err := client.CoreV1().Services(namespace).Delete(ctx, servicename, *metav1.NewDeleteOptions(0)) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + if supervisorCluster { dumpSvcNsEventsOnTestFailure(client, namespace) } @@ -195,8 +203,6 @@ var _ = ginkgo.Describe("statefulset", func() { } else { storageClassName = storagePolicyName ginkgo.By("Running for WCP setup") - profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) - scParameters[scParamStoragePolicyID] = profileID } restConfig := getRestConfigClient() @@ -220,7 +226,7 @@ var _ = ginkgo.Describe("statefulset", func() { if stretchedSVC { scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -232,6 +238,10 @@ var _ = ginkgo.Describe("statefulset", func() { CreateStatefulSet(namespace, statefulset, client) replicas := 
*(statefulset.Spec.Replicas) + + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + }() // Waiting for pods status to be Ready fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) @@ -438,8 +448,6 @@ var _ = ginkgo.Describe("statefulset", func() { } else { storageClassName = storagePolicyName ginkgo.By("Running for WCP setup") - profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) - scParameters[scParamStoragePolicyID] = profileID } ginkgo.By("Creating service") @@ -456,6 +464,9 @@ var _ = ginkgo.Describe("statefulset", func() { ginkgo.By("Creating statefulset") CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + }() // Waiting for pods status to be Ready fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) @@ -637,8 +648,6 @@ var _ = ginkgo.Describe("statefulset", func() { storageClassName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) framework.Logf("storageClassName %v", storageClassName) ginkgo.By("CNS_TEST: Running for WCP setup") - profileID := e2eVSphere.GetSpbmPolicyID(storageClassName) - scParameters[scParamStoragePolicyID] = profileID } if !vcptocsi { @@ -668,6 +677,10 @@ var _ = ginkgo.Describe("statefulset", func() { Spec.StorageClassName = &storageClassName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + }() + // Waiting for pods status to be Ready fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) @@ -827,7 +840,7 @@ var _ = 
ginkgo.Describe("statefulset", func() { 12. Inncrease the CSI driver replica to 3 */ - ginkgo.It("[ef-wcp][csi-block-vanilla][csi-supervisor][pq-vanilla-block] ListVolumeResponse "+ + ginkgo.It("[ef-wcp][csi-block-vanilla] [csi-supervisor] ListVolumeResponse "+ "Validation", ginkgo.Label(p1, listVolume, block, vanilla, wcp, vc70), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -843,21 +856,18 @@ var _ = ginkgo.Describe("statefulset", func() { storageClassName = "nginx-sc-default" } else { ginkgo.By("Running for WCP setup") - - profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) - scParameters[scParamStoragePolicyID] = profileID - // create resource quota + storageClassName = storagePolicyName } ginkgo.By("scale down CSI driver POD to 1 , so that it will" + "be easy to validate all Listvolume response on one driver POD") - collectPodLogs(ctx, client, csiSystemNamespace) - scaledownCSIDriver, err := scaleCSIDriver(ctx, client, namespace, 1) + collectPodLogs(ctx, adminClient, csiSystemNamespace) + scaledownCSIDriver, err := scaleCSIDriver(ctx, adminClient, namespace, 1) gomega.Expect(scaledownCSIDriver).To(gomega.BeTrue(), "csi driver scaledown is not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { ginkgo.By("Scale up the csi-driver replica to 3") - success, err := scaleCSIDriver(ctx, client, namespace, 3) + success, err := scaleCSIDriver(ctx, adminClient, namespace, 3) gomega.Expect(success).To(gomega.BeTrue(), "csi driver scale up to 3 replica not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -884,6 +894,10 @@ var _ = ginkgo.Describe("statefulset", func() { Spec.StorageClassName = &storageClassName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) + + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + }() // Waiting for pods status to be Ready fss.WaitForStatusReadyReplicas(ctx, client, statefulset, 
replicas) gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) @@ -937,7 +951,7 @@ var _ = ginkgo.Describe("statefulset", func() { HostKeyCallback: ssh.InsecureIgnoreHostKey(), } } - _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, + _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, adminClient, sshClientConfig, containerName, logMessage, volumesBeforeScaleUp) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -957,7 +971,7 @@ var _ = ginkgo.Describe("statefulset", func() { ginkgo.By("Validate pagination") logMessage = "token for next set: 3" - _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, containerName, logMessage, nil) + _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, adminClient, sshClientConfig, containerName, logMessage, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if vanillaCluster { @@ -970,7 +984,7 @@ var _ = ginkgo.Describe("statefulset", func() { //List volume responses will show up in the interval of every 1 minute. 
//To see the error, It is required to wait for 1 min after deleteting few Volumes time.Sleep(pollTimeoutShort) - _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, containerName, logMessage, nil) + _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, adminClient, sshClientConfig, containerName, logMessage, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -996,7 +1010,7 @@ var _ = ginkgo.Describe("statefulset", func() { ginkgo.By("Validate ListVolume Response when no volumes are present") logMessage = "ListVolumes served 0 results" - _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, containerName, logMessage, nil) + _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, adminClient, sshClientConfig, containerName, logMessage, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -1009,7 +1023,7 @@ var _ = ginkgo.Describe("statefulset", func() { 5. Scale up replica to 5. 6. Exit MM and clean up all pods and PVs. */ - ginkgo.It("[ef-f-wcp][csi-supervisor] Test MM workflow on statefulset", ginkgo.Label(p1, block, wcp, + ginkgo.It("[ef-wcp][csi-supervisor] Test MM workflow on statefulset", ginkgo.Label(p1, block, wcp, disruptive, vc70), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1021,7 +1035,7 @@ var _ = ginkgo.Describe("statefulset", func() { setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimit) ginkgo.By("Get the storageclass from Supervisor") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Creating service") @@ -1117,7 +1131,7 @@ var _ = ginkgo.Describe("statefulset", func() { 7. 
clean up the data */ - ginkgo.It("[ef-f-stretched-svc][stretched-svc] Statefulset-parallel-podManagementPolicy-wffc", + ginkgo.It("[ef-stretched-svc][stretched-svc] Statefulset-parallel-podManagementPolicy-wffc", ginkgo.Label(p0, block, stretchedSvc, vc70), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1130,7 +1144,7 @@ var _ = ginkgo.Describe("statefulset", func() { parallelStatefulSetCreation := false scParameters[svStorageClassName] = zonalWffcPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1147,7 +1161,7 @@ var _ = ginkgo.Describe("statefulset", func() { stsReplicas, nodeAffinityToSet, nil, podAntiAffinityToSet, true, "", "", storageclass, storageClassName) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() framework.Logf("Verify PV node affinity and that the PODS are running on appropriate node") @@ -1205,7 +1219,7 @@ var _ = ginkgo.Describe("statefulset", func() { parallelStatefulSetCreation := false scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1222,7 +1236,7 @@ var _ = ginkgo.Describe("statefulset", func() { stsReplicas, nodeAffinityToSet, allowedTopologies, podAntiAffinityToSet, true, "", "", storageclass, storageClassName) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() framework.Logf("Verify PV node affinity 
and that the PODS are running on appropriate node") @@ -1278,7 +1292,7 @@ var _ = ginkgo.Describe("statefulset", func() { parallelStatefulSetCreation := false scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1295,7 +1309,7 @@ var _ = ginkgo.Describe("statefulset", func() { stsReplicas, nodeAffinityToSet, allowedTopologies, podAntiAffinityToSet, true, "", "", storageclass, storageClassName) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By("Verify all volumes are attached to Nodes after Statefulsets is scaled up/down") diff --git a/tests/e2e/vm_service_vsan_stretch_cluster.go b/tests/e2e/vm_service_vsan_stretch_cluster.go index 82cd159cd8..44fb7066b5 100644 --- a/tests/e2e/vm_service_vsan_stretch_cluster.go +++ b/tests/e2e/vm_service_vsan_stretch_cluster.go @@ -63,6 +63,8 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests isVsanHealthServiceStopped bool isSPSserviceStopped bool nodeList *v1.NodeList + adminClient clientset.Interface + userName string ) ginkgo.BeforeEach(func() { @@ -71,6 +73,8 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests client = f.ClientSet var err error + adminClient, client = initializeClusterClientsByUserRoles(client) + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -84,19 +88,11 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests initialiseFdsVar(ctx) vcRestSessionId = createVcSession4RestApis(ctx) + userName = 
GetAndExpectStringEnvVar(envUserName) - if !latebinding { - ginkgo.By("Reading Immediate binding mode storage policy") - storageClassName = strings.ReplaceAll(storagePolicyName, " ", "-") - storageClassName = strings.ToLower(storageClassName) - framework.Logf("storageClassName: %s", storageClassName) - } else { - ginkgo.By("Reading late binding mode storage policy") - storageClassName = strings.ReplaceAll(storagePolicyName, " ", "-") - storageClassName = strings.ToLower(storageClassName) - storageClassName = storageClassName + lateBinding - framework.Logf("storageClassName: %s", storageClassName) - } + storageClassName = strings.ReplaceAll(storagePolicyName, " ", "-") // since this is a wcp setup + storageClassName = strings.ToLower(storageClassName) + framework.Logf("storageClassName: %s", storageClassName) datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) dsRef := getDsMoRefFromURL(ctx, datastoreURL) @@ -113,7 +109,7 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests vmClass = vmClassBestEffortSmall } namespace = createTestWcpNs( - vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId, &e2eVSphere)) + vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId, &e2eVSphere), userName) time.Sleep(5 * time.Minute) @@ -148,7 +144,7 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests } dumpSvcNsEventsOnTestFailure(client, namespace) delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }) /* @@ -168,28 +164,37 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests 11. Bring primary site up and wait for testbed to be back to normal. 12. Delete all objects created in this test. 
*/ - ginkgo.It("[pq-vmsvc-vsanstretch] VMService - primary site down", + ginkgo.It("VMService - primary site down", ginkgo.Label(p0, vmsvc, vsanStretch, block, wcp), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var pvcCount int = 5 var err error - var pvs []*v1.PersistentVolume ginkgo.By("Creating StorageClass") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create multiple PVCs") pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, nil) - if !latebinding { - ginkgo.By("Validating that the PVC transitions to Bound state when " + - "using an Immediate binding mode storage policy") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + for i, pvc := range pvclaimsList { + ginkgo.By("Delete PVCs") + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for CNS volumes to be deleted") + volHandle := pvs[i].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() ginkgo.By("Creating VM bootstrap data") secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) @@ -203,7 +208,6 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests vms := createVMServiceVmWithMultiplePvcs( ctx, vmopC, namespace, vmClass, pvclaimsList, vmi, storageClassName, secretName) 
defer func() { - ginkgo.By("Performing cleanup...") for _, vm := range vms { ginkgo.By("Deleting VM") err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ @@ -212,30 +216,13 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests }}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - for i, pvc := range pvclaimsList { - ginkgo.By("Delete PVCs") - err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Waiting for CNS volumes to be deleted") - volHandle := pvs[i].Spec.CSI.VolumeHandle - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Creates a loadbalancing service for ssh with each VM" + "and waits for VM IP to come up to come up and verify PVCs are accessible in the VM") createVMServiceandWaitForVMtoGetIP(ctx, vmopC, cnsopC, namespace, vms, pvclaimsList, true, true) - if latebinding { - ginkgo.By("Validating that the PVC transitions to Bound state after the " + - "volume is attached to the VM using a late-binding storage policy") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - - csipods, err := client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) + csipods, err := adminClient.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Bring down the primary site") @@ -258,15 +245,15 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - time.Sleep(8 * time.Minute) + time.Sleep(5 * time.Minute) // Check if csi pods are running fine after site failure ginkgo.By("Check if csi pods are running fine after site failure") - err = fpod.WaitForPodsRunningReady(ctx, client, csiNs, 
len(csipods.Items), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiNs, len(csipods.Items), time.Duration(pollTimeout*2)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, vm := range vms { @@ -306,28 +293,37 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests 11.Bring secondary site up and wait for testbed to be back to normal. 12.Delete all objects created in this test. */ - ginkgo.It("[pq-f-vmsvc-vsanstretch] VMService - secondary site down", + ginkgo.It("VMService - secondary site down", ginkgo.Label(p0, vmsvc, vsanStretch, block, wcp), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var pvcCount int = 10 var err error - var pvs []*v1.PersistentVolume ginkgo.By("Get StorageClass for volume creation") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create multiple PVCs") pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, nil) - if !latebinding { - ginkgo.By("Validating that the PVC transitions to Bound state when " + - "using an Immediate binding mode storage policy") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + for i, pvc := range pvclaimsList { + ginkgo.By("Delete PVCs") + err = 
fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for CNS volumes to be deleted") + volHandle := pvs[i].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() ginkgo.By("Creating VM bootstrap data") secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) @@ -341,7 +337,6 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests vms := createVMServiceVmWithMultiplePvcs( ctx, vmopC, namespace, vmClass, pvclaimsList, vmi, storageClassName, secretName) defer func() { - ginkgo.By("Performing cleanup...") for _, vm := range vms { ginkgo.By("Deleting VM") err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ @@ -350,31 +345,13 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests }}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - for i, pvc := range pvclaimsList { - ginkgo.By("Delete PVCs") - err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Waiting for CNS volumes to be deleted") - volHandle := pvs[i].Spec.CSI.VolumeHandle - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Creates a loadbalancing service for ssh with each VM" + "and waits for VM IP to come up to come up and verify PVCs are accessible in the VM") createVMServiceandWaitForVMtoGetIP(ctx, vmopC, cnsopC, namespace, vms, pvclaimsList, true, true) - if latebinding { - ginkgo.By("Validating that the PVC transitions to Bound state after the " + - "volume is attached to the VM using a late-binding storage policy") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, 
pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - - csipods, err := client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) + csipods, err := adminClient.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Bring down the secondary site") @@ -397,15 +374,15 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - time.Sleep(10 * time.Minute) + time.Sleep(5 * time.Minute) // Check if csi pods are running fine after site failure ginkgo.By("Check if csi pods are running fine after site failure") - err = fpod.WaitForPodsRunningReady(ctx, client, csiNs, len(csipods.Items), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiNs, len(csipods.Items), time.Duration(pollTimeout*2)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, vm := range vms { @@ -443,7 +420,7 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests 7. Bring primary site up and wait for testbed to be back to normal. 8. Delete all objects created in the test. 
*/ - ginkgo.It("[pq-f-vmsvc-vsanstretch] VMService VM creation while primary site goes down", + ginkgo.It("VMService VM creation while primary site goes down", ginkgo.Label(p0, vmsvc, vsanStretch, block, wcp), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -451,21 +428,30 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests var vmCount = 9 var err error var vms []*vmopv1.VirtualMachine - var pvs []*v1.PersistentVolume ginkgo.By("Creating StorageClass") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create multiple PVCs") pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, nil) - if !latebinding { - ginkgo.By("Validating that the PVC transitions to Bound state when " + - "using an Immediate binding mode storage policy") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + for i, pvc := range pvclaimsList { + ginkgo.By("Delete PVCs") + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for CNS volumes to be deleted") + volHandle := pvs[i].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() ginkgo.By("Creating VM bootstrap data") secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) @@ -475,7 +461,7 @@ var _ bool = 
ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - csipods, err := client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) + csipods, err := adminClient.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ch := make(chan *vmopv1.VirtualMachine) @@ -503,7 +489,6 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests }() defer func() { - ginkgo.By("Performing cleanup...") for _, vm := range vms { ginkgo.By("Deleting VM") err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ @@ -512,17 +497,6 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests }}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - for i, pvc := range pvclaimsList { - ginkgo.By("Delete PVCs") - err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Waiting for CNS volumes to be deleted") - volHandle := pvs[i].Spec.CSI.VolumeHandle - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Wait for k8s cluster to be healthy") @@ -536,25 +510,18 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests // Check if csi pods are running fine after site failure ginkgo.By("Check if csi pods are running fine after site failure") - err = fpod.WaitForPodsRunningReady(ctx, client, csiNs, len(csipods.Items), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiNs, len(csipods.Items), time.Duration(pollTimeout*2)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Creates a loadbalancing service for ssh with each VM" + "and waits for VM IP to come up to come up and verify PVCs are accessible in the VM") createVMServiceandWaitForVMtoGetIP(ctx, vmopC, cnsopC, namespace, vms, pvclaimsList, true, true) - if latebinding { - ginkgo.By("Validating that the PVC transitions to Bound state after the " + - "volume is attached to the VM using a late-binding storage policy") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - ginkgo.By("Verify volume lifecycle actions when there is a fault induced") performVolumeLifecycleActionForVmServiceVM(ctx, client, vmopC, cnsopC, vmClass, namespace, vmi, sc, secretName) @@ -589,28 +556,37 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests 10.Bring secondary site up and wait for testbed to be back to normal. 11.Delete all objects created in this test. 
*/ - ginkgo.It("[pq-f-vmsvc-vsanstretch] VMService VM deletion while secondary site goes down", + ginkgo.It("VMService VM deletion while secondary site goes down", ginkgo.Label(p0, vmsvc, vsanStretch, block, wcp), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var pvcCount int = 10 var err error - var pvs []*v1.PersistentVolume ginkgo.By("Creating StorageClass") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create multiple PVCs") pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, nil) - if !latebinding { - ginkgo.By("Validating that the PVC transitions to Bound state when " + - "using an Immediate binding mode storage policy") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + for i, pvc := range pvclaimsList { + ginkgo.By("Delete PVCs") + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for CNS volumes to be deleted") + volHandle := pvs[i].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() ginkgo.By("Creating VM bootstrap data") secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) @@ -620,14 +596,13 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - csipods, err := 
client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) + csipods, err := adminClient.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Creating VM") vms := createVMServiceVmWithMultiplePvcs( ctx, vmopC, namespace, vmClass, pvclaimsList, vmi, storageClassName, secretName) defer func() { - ginkgo.By("Performing cleanup...") for _, vm := range vms { ginkgo.By("Deleting VM") _, err := getVmsvcVM(ctx, vmopC, namespace, vm.Name) @@ -639,30 +614,12 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } - - for i, pvc := range pvclaimsList { - ginkgo.By("Delete PVCs") - err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Waiting for CNS volumes to be deleted") - volHandle := pvs[i].Spec.CSI.VolumeHandle - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Creates a loadbalancing service for ssh with each VM" + "and waits for VM IP to come up to come up and verify PVCs are accessible in the VM") createVMServiceandWaitForVMtoGetIP(ctx, vmopC, cnsopC, namespace, vms, pvclaimsList, true, true) - if latebinding { - ginkgo.By("Validating that the PVC transitions to Bound state after the " + - "volume is attached to the VM using a late-binding storage policy") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - var wg sync.WaitGroup ginkgo.By("Deleting VM in parallel to secondary site failure") wg.Add(2) @@ -690,12 +647,12 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests time.Sleep(5 * time.Minute) // Check if csi pods are running fine after site failure ginkgo.By("Check if csi pods are running fine after site 
failure") - err = fpod.WaitForPodsRunningReady(ctx, client, csiNs, len(csipods.Items), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiNs, len(csipods.Items), time.Duration(pollTimeout*2)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify all the VMservice vms created before " + @@ -735,27 +692,36 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests 10.Wait for psod timeout to be over and wait for testbed to be back to normal. 11.Delete all objects created in this test. */ - ginkgo.It("[pq-f-vmsvc-vsanstretch] VMService - psod hosts on secondary site", + ginkgo.It("VMService - psod hosts on secondary site", ginkgo.Label(p0, vmsvc, vsanStretch, block, wcp), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var vms []*vmopv1.VirtualMachine - var pvs []*v1.PersistentVolume ginkgo.By("Creating StorageClass") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create multiple PVCs") pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, sc, 10, nil) - if !latebinding { - ginkgo.By("Validating that the PVC transitions to Bound state when " + - "using an Immediate binding mode storage policy") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) 
+ + defer func() { + for i, pvc := range pvclaimsList { + ginkgo.By("Delete PVCs") + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for CNS volumes to be deleted") + volHandle := pvs[i].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() ginkgo.By("Creating VM bootstrap data") secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) @@ -765,7 +731,7 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - csipods, err := client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) + csipods, err := adminClient.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ch := make(chan *vmopv1.VirtualMachine) @@ -783,31 +749,6 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests go psodHostsInParallel(true, "600", &wg) wg.Wait() close(ch) - defer func() { - ginkgo.By("Performing cleanup...") - for _, vm := range vms { - ginkgo.By("Deleting VM") - _, err := getVmsvcVM(ctx, vmopC, namespace, vm.Name) - if !apierrors.IsNotFound(err) { - err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ - Name: vm.Name, - Namespace: namespace, - }}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - } - - for i, pvc := range pvclaimsList { - ginkgo.By("Delete PVCs") - err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Waiting for CNS volumes to be deleted") - volHandle := pvs[i].Spec.CSI.VolumeHandle - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() if 
vanillaCluster { wait4AllK8sNodesToBeUp(nodeList) @@ -819,21 +760,18 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests time.Sleep(5 * time.Minute) ginkgo.By("Check if csi pods are running fine after site recovery") - err = fpod.WaitForPodsRunningReady(ctx, client, csiNs, len(csipods.Items), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiNs, len(csipods.Items), time.Duration(pollTimeout*2)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err = WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Creates a loadbalancing service for ssh with each VM" + "and waits for VM IP to come up to come up and verify PVCs are accessible in the VM") createVMServiceandWaitForVMtoGetIP(ctx, vmopC, cnsopC, namespace, vms, pvclaimsList, true, true) - if latebinding { - ginkgo.By("Validating that the PVC transitions to Bound state after the " + - "volume is attached to the VM using a late-binding storage policy") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - ginkgo.By("Verify volume lifecycle actions when there is a fault induced") performVolumeLifecycleActionForVmServiceVM(ctx, client, vmopC, cnsopC, vmClass, namespace, vmi, sc, secretName) @@ -862,19 +800,18 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests 6. Bring primary site up and wait for testbed to be back to normal 7. 
Delete all objects created in step 2 and 5 */ - ginkgo.It("[pq-f-vmsvc-vsanstretch] VMService - witness failure", + ginkgo.It("VMService - witness failure", ginkgo.Label(p0, vmsvc, vsanStretch, block, wcp), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var pvcCount int = 10 var err error - var pvs []*v1.PersistentVolume ginkgo.By("Creating StorageClass") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - csipods, err := client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) + csipods, err := adminClient.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for k8s cluster to be healthy") @@ -897,19 +834,29 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests // Check if csi pods are running fine after site failure ginkgo.By("Check if csi pods are running fine after site failure") - err = fpod.WaitForPodsRunningReady(ctx, client, csiNs, len(csipods.Items), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiNs, len(csipods.Items), time.Duration(pollTimeout*2)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create multiple PVCs") pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, nil) - if !latebinding { - ginkgo.By("Validating that the PVC transitions to Bound state when " + - "using an Immediate binding mode storage policy") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + for i, pvc := range 
pvclaimsList { + ginkgo.By("Delete PVCs") + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for CNS volumes to be deleted") + volHandle := pvs[i].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() ginkgo.By("Creating VM bootstrap data") secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) @@ -924,7 +871,6 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests vms := createVMServiceVmWithMultiplePvcs( ctx, vmopC, namespace, vmClass, pvclaimsList, vmi, storageClassName, secretName) defer func() { - ginkgo.By("Performing cleanup...") for _, vm := range vms { ginkgo.By("Deleting VM") err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ @@ -933,30 +879,12 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests }}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - for i, pvc := range pvclaimsList { - ginkgo.By("Delete PVCs") - err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Waiting for CNS volumes to be deleted") - volHandle := pvs[i].Spec.CSI.VolumeHandle - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Creates a loadbalancing service for ssh with each VM" + "and waits for VM IP to come up to come up and verify PVCs are accessible in the VM") createVMServiceandWaitForVMtoGetIP(ctx, vmopC, cnsopC, namespace, vms, pvclaimsList, true, true) - if latebinding { - ginkgo.By("Validating that the PVC transitions to Bound state after the " + - "volume is attached to the VM using a late-binding storage policy") - pvs, err = 
fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - ginkgo.By("Check storage compliance") comp := checkVmStorageCompliance(storagePolicyName) if comp { @@ -995,28 +923,37 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests 11. Re-establish primary site network and wait for testbed to be back to normal 12. Delete all objects created in this test. */ - ginkgo.It("[pq-f-vmsvc-vsanstretch] VMService - Primary site network isolation", + ginkgo.It("VMService - Primary site network isolation", ginkgo.Label(p0, vmsvc, vsanStretch, block, wcp), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var pvcCount int = 10 var err error - var pvs []*v1.PersistentVolume ginkgo.By("Creating StorageClass") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create multiple PVCs") pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, nil) - if !latebinding { - ginkgo.By("Validating that the PVC transitions to Bound state when " + - "using an Immediate binding mode storage policy") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + for i, pvc := range pvclaimsList { + ginkgo.By("Delete PVCs") + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for CNS volumes to be deleted") + volHandle := pvs[i].Spec.CSI.VolumeHandle + 
gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() ginkgo.By("Creating VM bootstrap data") secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) @@ -1038,31 +975,13 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests }}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - for i, pvc := range pvclaimsList { - ginkgo.By("Delete PVCs") - err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Waiting for CNS volumes to be deleted") - volHandle := pvs[i].Spec.CSI.VolumeHandle - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Creates a loadbalancing service for ssh with each VM" + "and waits for VM IP to come up to come up and verify PVCs are accessible in the VM") createVMServiceandWaitForVMtoGetIP(ctx, vmopC, cnsopC, namespace, vms, pvclaimsList, true, true) - if latebinding { - ginkgo.By("Validating that the PVC transitions to Bound state after the " + - "volume is attached to the VM using a late-binding storage policy") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - - csipods, err := client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) + csipods, err := adminClient.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Cause a network failure on primary site @@ -1084,12 +1003,12 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests // Check if csi pods are running fine after site failure ginkgo.By("Check if csi pods are running fine after site failure") - err = fpod.WaitForPodsRunningReady(ctx, client, csiNs, 
len(csipods.Items), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiNs, len(csipods.Items), time.Duration(pollTimeout*2)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, vm := range vms { diff --git a/tests/e2e/vmservice_utils.go b/tests/e2e/vmservice_utils.go index 4a59ff406a..f29733d16c 100644 --- a/tests/e2e/vmservice_utils.go +++ b/tests/e2e/vmservice_utils.go @@ -72,7 +72,7 @@ const vmServiceVmLabelKey = "topology.kubernetes.io/zone" // createTestWcpNs create a wcp namespace with given storage policy, vm class and content lib via REST API func createTestWcpNs( vcRestSessionId string, storagePolicyId string, vmClass string, contentLibId string, - supervisorId string) string { + supervisorId string, userName string) string { vcIp := e2eVSphere.Config.Global.VCenterHostname r := rand.New(rand.NewSource(time.Now().Unix())) @@ -87,6 +87,12 @@ func createTestWcpNs( nsCreationUrl := "https://" + vcIp + ":" + e2eVSphere.Config.Global.VCenterPort + "/api/vcenter/namespaces/instances/v2" reqBody := fmt.Sprintf(`{ + "access_list": [ { + "domain": "vsphere.local", + "role": "OWNER", + "subject": "%s", + "subject_type": "USER" + } ], "namespace": "%s", "storage_specs": [ { "policy": "%s" @@ -100,7 +106,7 @@ func createTestWcpNs( ] }, "supervisor": "%s" - }`, namespace, storagePolicyId, vmClass, contentLibId, supervisorId) + }`, userName, namespace, storagePolicyId, vmClass, contentLibId, supervisorId) fmt.Println(reqBody) diff --git a/tests/e2e/vmservice_vm.go b/tests/e2e/vmservice_vm.go index d54e2895f2..95723cb5a8 100644 --- a/tests/e2e/vmservice_vm.go +++ b/tests/e2e/vmservice_vm.go @@ -66,6 +66,8 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { 
isQuotaValidationSupported bool defaultDatastore *object.Datastore volHandle string + adminClient clientset.Interface + userName string ) ginkgo.BeforeEach(func() { @@ -73,6 +75,8 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { defer cancel() client = f.ClientSet var err error + + adminClient, client = initializeClusterClientsByUserRoles(client) topologyFeature := os.Getenv(topologyFeature) if topologyFeature != topologyTkgHaName { nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) @@ -84,6 +88,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { } else { storageClassName = GetAndExpectStringEnvVar(envZonalStoragePolicyName) } + userName = GetAndExpectStringEnvVar(envUserName) bootstrap() isVsanHealthServiceStopped = false isSPSserviceStopped = false @@ -112,7 +117,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { framework.Logf("Create a WCP namespace for the test") namespace = createTestWcpNs( - vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId, &e2eVSphere)) + vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId, &e2eVSphere), userName) ginkgo.By("Verifying storage policies usage for each storage class") restConfig = getRestConfigClient() @@ -176,7 +181,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { } dumpSvcNsEventsOnTestFailure(client, namespace) delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }) /* @@ -203,14 +208,13 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { 8 delete pvcs 9 Remove spbm policy attached to test namespace */ - ginkgo.It("[cf-wcp-f] verify vmservice vm creation with a pvc in its 
spec", ginkgo.Label(p0, + ginkgo.It("[cf-wcp] verify vmservice vm creation with a pvc in its spec", ginkgo.Label(p0, vmServiceVm, block, wcp, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var pandoraSyncWaitTime int var err error - var vmIp string curtime := time.Now().Unix() curtimestring := strconv.FormatInt(curtime, 10) pvcName := "cns-pvc-" + curtimestring @@ -247,7 +251,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { verifyBidirectionalReferenceOfPVandPVC(ctx, client, staticPvc, staticPv, fcdID) ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create a PVC") @@ -255,7 +259,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) if !latebinding { ginkgo.By("Validating that the PVC transitions to Bound state when using an Immediate binding mode storage policy") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc, staticPvc}, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc, staticPvc}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := pvs[0] volHandle = pv.Spec.CSI.VolumeHandle @@ -304,26 +308,26 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - isPrivateNetwork := GetBoolEnvVarOrDefault("IS_PRIVATE_NETWORK", false) - if !isPrivateNetwork { - ginkgo.By("Wait for VM to come up and get an IP") - vmIp, err = waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := 
waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Wait and verify PVCs are attached to the VM") - gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, - []*v1.PersistentVolumeClaim{pvc, staticPvc})).NotTo(gomega.HaveOccurred()) + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc, staticPvc})).NotTo(gomega.HaveOccurred()) - if latebinding { - ginkgo.By("Validating that the PVC transitions to Bound state after the " + - "volume is attached to the VM using a late-binding storage policy") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc, staticPvc}, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - pv := pvs[0] - volHandle = pv.Spec.CSI.VolumeHandle - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - } + if latebinding { + ginkgo.By("Validating that the PVC transitions to Bound state after the " + + "volume is attached to the VM using a late-binding storage policy") + pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc, staticPvc}, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pv := pvs[0] + volHandle = pv.Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + } + isPrivateNetwork := GetBoolEnvVarOrDefault("IS_PRIVATE_NETWORK", false) + if !isPrivateNetwork { ginkgo.By("Verify PVCs are accessible to the VM") ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info @@ -350,20 +354,20 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { 8 delete pvcs from step2 9 Remove spbm policy attached to test namespace */ - ginkgo.It("[ef-f-vmsvc] hot detach and attach pvc to vmservice vms", ginkgo.Label(p0, + 
ginkgo.It("[ef-vmsvc] hot detach and attach pvc to vmservice vms", ginkgo.Label(p0, vmServiceVm, block, wcp, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create a PVC") pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) if !latebinding { ginkgo.By("Validating that the PVC transitions to Bound state when using an Immediate binding mode storage policy") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := pvs[0] volHandle = pv.Spec.CSI.VolumeHandle @@ -561,7 +565,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { var pvs []*v1.PersistentVolume ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create a PVC") pvc1, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") @@ -570,7 +574,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) if !latebinding { ginkgo.By("Validating that the PVC transitions to Bound state when using an Immediate binding mode storage policy") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1, pvc2}, 
pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1, pvc2}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } ginkgo.By("Creating VM bootstrap data") @@ -746,7 +750,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { 8 delete pvc1, pvc2 9 Remove spbm policy attached to test namespace in step1 */ - ginkgo.It("[ef-f-stretched-svc][stretched-svc] VM and PVC both belong to same zone", ginkgo.Label(p0, + ginkgo.It("[ef-stretched-svc][stretched-svc] VM and PVC both belong to same zone", ginkgo.Label(p0, vmServiceVm, block, wcp, stretchedSvc, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -771,7 +775,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { framework.Logf("annotationVal :%s, pvcAnnotations: %v", annotationVal, pvcAnnotations) ginkgo.By("Creating Pvc with Immediate topology storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvcSpec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", storageclass, nil, "") pvcSpec.Annotations = pvcAnnotations @@ -779,7 +783,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for SV PVC to come to bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volumeID := pvs[0].Spec.CSI.VolumeHandle @@ -861,7 +865,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { 9 delete pvc1 10 Remove spbm policy 
attached to test namespace in step1 */ - ginkgo.It("[ef-f-stretched-svc][stretched-svc] VM and PVC both belong to same zone", ginkgo.Label(p0, + ginkgo.It("[ef-stretched-svc][stretched-svc] VM and PVC both belong to same zone", ginkgo.Label(p0, vmServiceVm, block, wcp, stretchedSvc, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -886,7 +890,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { framework.Logf("annotationVal :%s, pvcAnnotations: %v", annotationVal, pvcAnnotations) ginkgo.By("Create a PVC say pvc1 under zone2") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvcSpec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", storageclass, nil, "") pvcSpec.Annotations = pvcAnnotations @@ -894,7 +898,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for PVC to come to bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volumeID := pvs[0].Spec.CSI.VolumeHandle @@ -1004,14 +1008,14 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create a PVC") pvc, err := createPVC(ctx, client, namespace, nil, "", 
storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) if !latebinding { ginkgo.By("Validating that the PVC transitions to Bound state when using an Immediate binding mode storage policy") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := pvs[0] volHandle = pv.Spec.CSI.VolumeHandle @@ -1156,7 +1160,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { pvs := []*v1.PersistentVolume{} ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create a PVC") for i := 0; i < 3; i++ { @@ -1167,7 +1171,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { if !latebinding { ginkgo.By("Validating that the PVC transitions to Bound state when using an Immediate binding mode storage policy") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, pvcs, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1372,14 +1376,14 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create a PVC") pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) if !latebinding { 
ginkgo.By("Validating that the PVC transitions to Bound state when using an Immediate binding mode storage policy") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := pvs[0] volHandle = pv.Spec.CSI.VolumeHandle diff --git a/tests/e2e/vsphere_volume_expansion.go b/tests/e2e/vsphere_volume_expansion.go index fd14d40c32..ca121e51ae 100644 --- a/tests/e2e/vsphere_volume_expansion.go +++ b/tests/e2e/vsphere_volume_expansion.go @@ -66,6 +66,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { isVsanHealthServiceStopped bool isSPSServiceStopped bool fsType string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -76,6 +77,16 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { defer cancel() var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } + isVsanHealthServiceStopped = false isSPSServiceStopped = false @@ -157,8 +168,8 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { // 10. Delete PVC, PV and Storage Class. 
ginkgo.It("[ef-wcp][csi-block-vanilla] [csi-supervisor] [csi-guest] [csi-block-vanilla-parallelized] [csi-vcp-mig]"+ - "[ef-vks][cf-vanilla-block][ef-vks-n1][ef-vks-n2] Verify volume expansion with no filesystem "+ - "before expansion", ginkgo.Label(p0, block, vanilla, wcp, tkg, core, vc70), func() { + "[ef-vks][cf-vanilla-block] Verify volume expansion with no filesystem before expansion", ginkgo.Label(p0, + block, vanilla, wcp, tkg, core, vc70), func() { invokeTestForVolumeExpansion(f, client, namespace, "", storagePolicyName, profileID) }) @@ -182,7 +193,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { // 13. Delete PVC, PV and Storage Class. ginkgo.It("[csi-block-vanilla] [csi-guest] [csi-block-vanilla-parallelized] [csi-vcp-mig] "+ - "[ef-vks][ef-vks-n1][ef-vks-n2][cf-vanilla-block] Verify volume expansion with initial filesystem before "+ + "[ef-vks][cf-vanilla-block] Verify volume expansion with initial filesystem before "+ "expansion", ginkgo.Label(p0, block, vanilla, tkg, core, vc70), func() { invokeTestForVolumeExpansionWithFilesystem(f, client, namespace, fsType, "", storagePolicyName, profileID) }) @@ -204,7 +215,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { // 12. Delete pod and Wait for Volume Disk to be detached from the Node. // 13. Delete PVC, PV and Storage Class. - ginkgo.It("[ef-vanilla-block][cf-vks][csi-block-vanilla] [csi-guest] [csi-block-vanilla-parallelized]"+ + ginkgo.It("[ef-vanilla-block][cf-vks][csi-block-vanilla] [csi-guest] [csi-block-vanilla-parallelized] [ef-vks] "+ "Verify offline volume expansion workflow with xfs filesystem", ginkgo.Label(p0, block, vanilla, tkg, core, vc70), func() { invokeTestForVolumeExpansionWithFilesystem(f, client, namespace, xfsFSType, xfsFSType, storagePolicyName, profileID) @@ -221,8 +232,8 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { // 5. Modify PVC's size to trigger offline volume expansion. // 6. Verify if the PVC expansion fails. 
- ginkgo.It("[cf-vks][csi-block-vanilla][csi-guest][csi-block-vanilla-parallelized][csi-vcp-mig][pq-vanilla-block] "+ - "Verify volume expansion not allowed", ginkgo.Label(p2, block, vanilla, tkg, core, vc70), func() { + ginkgo.It("[cf-vks][csi-block-vanilla] [csi-guest] [csi-block-vanilla-parallelized] [csi-vcp-mig] "+ + "[ef-vks] Verify volume expansion not allowed", ginkgo.Label(p2, block, vanilla, tkg, core, vc70), func() { invokeTestForInvalidVolumeExpansion(f, client, namespace, storagePolicyName, profileID) }) @@ -238,8 +249,8 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { // 6. Verify if the PVC expansion fails. ginkgo.It("[ef-vanilla-block][ef-wcp][csi-block-vanilla][csi-guest][csi-supervisor]"+ - "[csi-block-vanilla-parallelized][csi-vcp-mig][ef-vks][ef-vks-n1][ef-vks-n2] Verify volume shrinking "+ - "not allowed", ginkgo.Label(p1, block, vanilla, wcp, tkg, core, vc70), func() { + "[csi-block-vanilla-parallelized][csi-vcp-mig][ef-vks] Verify volume shrinking not allowed", ginkgo.Label(p1, + block, vanilla, wcp, tkg, core, vc70), func() { invokeTestForInvalidVolumeShrink(f, client, namespace, storagePolicyName, profileID) }) @@ -259,7 +270,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.It("[csi-block-vanilla][cf-vanilla-block][csi-block-vanilla-parallelized]Verify volume "+ "expansion is not supported for PVC using vSAN-Default-Storage-Policy", ginkgo.Label(p0, block, vanilla, core, vc70), func() { - invokeTestForInvalidVolumeExpansionStaticProvision(f, client, namespace, storagePolicyName, profileID) + invokeTestForInvalidVolumeExpansionStaticProvision(f, adminClient, client, namespace, storagePolicyName, profileID) }) // Test to verify volume expansion can happen multiple times @@ -278,9 +289,8 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { // 10. Delete PVC, PV and Storage Class. 
ginkgo.It("[ef-vanilla-block][ef-wcp][csi-block-vanilla][csi-guest][csi-supervisor]"+ - "[csi-block-vanilla-parallelized][csi-vcp-mig][ef-vks-f]"+ - "[ef-vks-n1-f][ef-vks-n2-f] Verify volume expansion "+ - "can happen multiple times", ginkgo.Label(p1, block, vanilla, wcp, core, vc70), func() { + "[csi-block-vanilla-parallelized][csi-vcp-mig][ef-vks] Verify volume expansion can happen multiple "+ + "times", ginkgo.Label(p1, block, vanilla, wcp, core, vc70), func() { invokeTestForExpandVolumeMultipleTimes(f, client, namespace, "", storagePolicyName, profileID) }) @@ -331,7 +341,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -454,7 +464,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - volHandle, pvclaim, pv, storageclass := createStaticPVC(ctx, f, client, + volHandle, pvclaim, pv, storageclass := createStaticPVC(ctx, f, adminClient, client, namespace, defaultDatastore, pandoraSyncWaitTime) defer func() { @@ -498,8 +508,8 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 7. Verify that the PVC size does not change because volume shrinking is not supported. 
*/ ginkgo.It("[ef-vanilla-block][ef-wcp][csi-block-vanilla][csi-supervisor][csi-guest]"+ - "[csi-block-vanilla-parallelized][csi-vcp-mig][ef-vks][ef-vks-n1][ef-vks-n2] Verify online volume expansion "+ - "shrinking volume not allowed", ginkgo.Label(p1, block, vanilla, wcp, tkg, core, vc70), func() { + "[csi-block-vanilla-parallelized][csi-vcp-mig][ef-vks] Verify online volume expansion shrinking volume not "+ + "allowed", ginkgo.Label(p1, block, vanilla, wcp, tkg, core, vc70), func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -510,7 +520,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -575,7 +585,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 9. 
Make sure file system has increased */ ginkgo.It("[ef-wcp][csi-block-vanilla][cf-vanilla-block][ef-vks][csi-supervisor] [csi-guest]"+ - "[csi-block-vanilla-parallelized][csi-vcp-mig][ef-vks-n1][ef-vks-n2] Verify volume expansion multiple times"+ + "[csi-block-vanilla-parallelized] [csi-vcp-mig] Verify volume expansion multiple times"+ " on the same PVC", ginkgo.Label(p0, block, vanilla, wcp, tkg, core, vc70), func() { ginkgo.By("Invoking Test for Volume Expansion") @@ -586,7 +596,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ctx, f, client, "", storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -626,7 +636,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Increase PVC size and verify Volume resize") - increaseOnlineVolumeMultipleTimes(ctx, f, client, namespace, volHandle, pvclaim, pod) + increaseOnlineVolumeMultipleTimes(ctx, f, adminClient, client, namespace, volHandle, pvclaim, pod) }) /* @@ -644,9 +654,9 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 11. 
Make sure file system has increased */ - ginkgo.It("[stable-pq-vks][pq-wcp][csi-block-vanilla][csi-supervisor][csi-guest][csi-block-vanilla-serialized]"+ - "[csi-vcp-mig][pq-vanilla-block][pq-vks][pq-vks-n1][pq-vks-n2] Verify online volume expansion when VSAN-health "+ - "is down", ginkgo.Label(p1, block, vanilla, wcp, tkg, core, vc70), func() { + ginkgo.It("[pq-wcp][csi-block-vanilla] [csi-supervisor] [csi-guest] [csi-block-vanilla-serialized] [csi-vcp-mig]"+ + "Verify online volume expansion when VSAN-health is down", ginkgo.Label(p1, block, vanilla, wcp, tkg, core, + vc70), func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -659,7 +669,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ctx, f, client, "", storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -771,9 +781,8 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 10. Make sure data is intact on the PV mounted on the pod 11. 
Make sure file system has increased */ - ginkgo.It("[pq-wcp][csi-block-vanilla][csi-supervisor][csi-guest][csi-block-vanilla-serialized][pq-vanilla-block]"+ - "[pq-vks][pq-vks-n1][pq-vks-n2] Verify online volume expansion when SPS-Service is down", ginkgo.Label(p1, block, - vanilla, wcp, tkg, core, vc70), func() { + ginkgo.It("[pq-wcp][csi-block-vanilla] [csi-supervisor] [csi-guest] [csi-block-vanilla-serialized] Verify online "+ + "volume expansion when SPS-Service is down", ginkgo.Label(p1, block, vanilla, wcp, tkg, core, vc70), func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -792,7 +801,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ctx, f, client, "", storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -891,7 +900,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 10. 
Make sure file system has increased */ - ginkgo.It("[ef-f-vanilla-block][ef-f-wcp][csi-block-vanilla][csi-supervisor][csi-block-vanilla-parallelized]"+ + ginkgo.It("[ef-vanilla-block][ef-wcp][csi-block-vanilla][csi-supervisor][csi-block-vanilla-parallelized]"+ "[csi-vcp-mig] Verify online volume expansion by updating PVC with different sizes "+ "concurrently", ginkgo.Label(p1, block, vanilla, wcp, core, vc70), func() { @@ -1049,7 +1058,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ctx, f, client, sharedVVOLdatastoreURL, storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -1108,8 +1117,8 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 10. 
Make sure file system has increased */ ginkgo.It("[ef-wcp][csi-block-vanilla][cf-vanilla-block][csi-supervisor][csi-guest][csi-block-vanilla-parallelized]"+ - "[csi-vcp-mig][ef-vks][ef-vks-n1][ef-vks-n2] Volume expansion on shared NFS datastore", ginkgo.Label(p0, block, - vanilla, wcp, tkg, core, vc70), func() { + "[csi-vcp-mig][ef-vks] Volume expansion on shared NFS datastore", ginkgo.Label(p0, block, vanilla, wcp, + tkg, core, vc70), func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1140,7 +1149,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ctx, f, client, sharedNFSdatastoreURL, storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -1205,8 +1214,8 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 10. 
Make sure file system has increased */ ginkgo.It("[ef-wcp][csi-block-vanilla][cf-vanilla-block][ef-vks][csi-supervisor] [csi-guest] "+ - "[csi-block-vanilla-parallelized][csi-vcp-mig][ef-vks-n1][ef-vks-n2] Volume expansion on shared "+ - "VMFS datastore", ginkgo.Label(p0, block, vanilla, wcp, tkg, core, vc70), func() { + "[csi-block-vanilla-parallelized][csi-vcp-mig] Volume expansion on shared VMFS datastore", ginkgo.Label(p0, + block, vanilla, wcp, tkg, core, vc70), func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1237,7 +1246,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ctx, f, client, sharedVMFSdatastoreURL, storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -1307,7 +1316,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 13. Verify File system has increased 14. Delete POD, PVC, PV, CNSregisterVolume and SC */ - ginkgo.It("[ef-wcp] [csi-supervisor] Offline and Online volume resize on statically "+ + ginkgo.It("[cf-wcp] [csi-supervisor] Offline and Online volume resize on statically "+ "created volume", ginkgo.Label(p0, block, wcp, vc70), func() { var err error var fsSize int64 @@ -1481,7 +1490,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 13. 
Make sure file system has increased */ - ginkgo.It("[ef-f-wcp][csi-supervisor] Verify offline and online volume expansion when there is no quota "+ + ginkgo.It("[ef-wcp][csi-supervisor] Verify offline and online volume expansion when there is no quota "+ "available", ginkgo.Label(p1, block, wcp, vc70), func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) @@ -1491,13 +1500,8 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { var pv *v1.PersistentVolume storagePolicyName2 := GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores2) - profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName2) - framework.Logf("Profile ID : %s", profileID) - scParameters := make(map[string]string) - scParameters["storagePolicyID"] = profileID - ginkgo.By("get StorageClass to Create PVC") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName2, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName2, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) restClientConfig := getRestConfigClient() @@ -1505,7 +1509,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { pvclaim, err = createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, + _, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv = getPvFromClaim(client, namespace, pvclaim.Name) @@ -1518,7 +1522,6 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Delete existing resource quota") setStoragePolicyQuota(ctx, restClientConfig, storagePolicyName2, namespace, "2Gi") defer func() { - ginkgo.By("In defer block, Setting quota back to 500Gi") setStoragePolicyQuota(ctx, restClientConfig, storagePolicyName2, 
namespace, rqLimit) }() @@ -1585,8 +1588,8 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 10. Make sure data is intact on the PV mounted on the pod 11. Make sure file system has increased */ - ginkgo.It("[pq-wcp][csi-supervisor][pq-vks][pq-vks-n1][pq-vks-n2] Verify in Offline volume expansion "+ - "FileSystemResize works when SPS-Service is down", ginkgo.Label(p1, block, wcp, vc70), func() { + ginkgo.It("[pq-wcp][csi-supervisor] Verify in Offline volume expansion FileSystemResize works "+ + "when SPS-Service is down", ginkgo.Label(p1, block, wcp, vc70), func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) @@ -1642,12 +1645,9 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { err = waitForPvResizeForGivenPvc(pvclaim, client, totalResizeWaitPeriod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - isPrivateNetwork := GetBoolEnvVarOrDefault("IS_PRIVATE_NETWORK", false) - if isPrivateNetwork { - ginkgo.By("Checking for conditions on pvc") - pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Checking for conditions on pvc") + pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Bringup SPS service") startVCServiceWait4VPs(ctx, vcAddress, spsServiceName, &isSPSServiceStopped) @@ -1687,7 +1687,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { vmUUID, exists = annotations[vmUUIDLabel] gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) } else { - vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + vmUUID = getNodeUUID(ctx, adminClient, pod.Spec.NodeName) } framework.Logf("VMUUID : %s", vmUUID) isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) @@ -1738,8 
+1738,8 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 11. Make sure file system has increased */ - ginkgo.It("[pq-wcp][csi-supervisor][pq-vks][pq-vks-n1][pq-vks-n2] Verify Offline volume expansion when "+ - "VSAN-health is down", ginkgo.Label(p1, block, wcp, vc70), func() { + ginkgo.It("[pq-wcp][csi-supervisor] Verify Offline volume expansion when VSAN-health is "+ + "down", ginkgo.Label(p1, block, wcp, vc70), func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) @@ -1800,12 +1800,9 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { err = waitForPvResizeForGivenPvc(pvclaim, client, totalResizeWaitPeriod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - isPrivateNetwork := GetBoolEnvVarOrDefault("IS_PRIVATE_NETWORK", false) - if isPrivateNetwork { - ginkgo.By("Checking for conditions on pvc") - pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Checking for conditions on pvc") + pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) @@ -1891,8 +1888,8 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { */ ginkgo.It("[ef-vanilla-block][pq-wcp][csi-supervisor][csi-block-vanilla][csi-guest][csi-block-vanilla-parallelized]"+ - "[csi-vcp-mig] [ef-vks][ef-vks-n1][ef-vks-n2] Verify online volume expansion when POD is deleted and "+ - "re-created", ginkgo.Label(p1, block, wcp, vc70), func() { + "[csi-vcp-mig] [ef-vks] Verify online volume expansion when POD is deleted and re-created", ginkgo.Label(p1, + block, wcp, vc70), func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := 
context.WithCancel(context.Background()) defer cancel() @@ -1910,7 +1907,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -2020,8 +2017,8 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 8. Verify there should not be any PVC entry in CNS */ ginkgo.It("[ef-vanilla-block][ef-wcp][csi-supervisor][csi-block-vanilla][csi-guest]"+ - "[csi-block-vanilla-parallelized][csi-vcp-mig][ef-vks] [ef-vks-n1][ef-vks-n2] Verify online volume expansion "+ - "when PVC is deleted", ginkgo.Label(p1, vanilla, block, wcp, tkg, core, vc70), func() { + "[csi-block-vanilla-parallelized][csi-vcp-mig][ef-vks] Verify online volume expansion when PVC is "+ + "deleted", ginkgo.Label(p1, vanilla, block, wcp, tkg, core, vc70), func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -2038,7 +2035,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if pvclaim != nil { @@ -2137,7 +2134,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 9. 
Make sure data is intact on the PV mounted on the pod 10.Make sure file system has increased */ - ginkgo.It("[pq-wcp][csi-supervisor][pq-vks][pq-vks-n1][pq-vks-n2] Verify online volume expansion when CSI Pod is "+ + ginkgo.It("[pq-wcp][csi-supervisor] Verify online volume expansion when CSI Pod is "+ "down", ginkgo.Label(p1, block, wcp, vc70), func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) @@ -2185,11 +2182,11 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Bringing SVC CSI controller down...") - svcCsiDeployment := updateDeploymentReplica(client, 0, vSphereCSIControllerPodNamePrefix, csiSystemNamespace) + svcCsiDeployment := updateDeploymentReplica(adminClient, 0, vSphereCSIControllerPodNamePrefix, csiSystemNamespace) defer func() { if *svcCsiDeployment.Spec.Replicas == 0 { ginkgo.By("Bringing SVC CSI controller up (cleanup)...") - updateDeploymentReplica(client, 3, vSphereCSIControllerPodNamePrefix, csiSystemNamespace) + updateDeploymentReplica(adminClient, 3, vSphereCSIControllerPodNamePrefix, csiSystemNamespace) } }() @@ -2220,7 +2217,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Bringing SVC CSI controller up...") - svcCsiDeployment = updateDeploymentReplica(client, 3, vSphereCSIControllerPodNamePrefix, csiSystemNamespace) + svcCsiDeployment = updateDeploymentReplica(adminClient, 3, vSphereCSIControllerPodNamePrefix, csiSystemNamespace) ginkgo.By("Waiting for controller volume resize to finish PVC1 (online volume expansion)") err = waitForPvResizeForGivenPvc(pvclaim, client, totalResizeWaitPeriod) @@ -2261,7 +2258,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 13. 
Make sure file system has increased */ // TODO: Need to add test case for Vanilla - ginkgo.It("[pq-wcp][csi-supervisor][pq-vks][pq-vks-n1][pq-vks-n2] Verify Offline volume expansion when CSI Pod is "+ + ginkgo.It("[pq-wcp][csi-supervisor] Verify Offline volume expansion when CSI Pod is "+ "down", ginkgo.Label(p1, block, wcp, vc70), func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) @@ -2285,11 +2282,11 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Bringing SVC CSI controller down...") - svcCsiDeployment := updateDeploymentReplica(client, 0, vSphereCSIControllerPodNamePrefix, csiSystemNamespace) + svcCsiDeployment := updateDeploymentReplica(adminClient, 0, vSphereCSIControllerPodNamePrefix, csiSystemNamespace) defer func() { if *svcCsiDeployment.Spec.Replicas == 0 { ginkgo.By("Bringing SVC CSI controller up (cleanup)...") - updateDeploymentReplica(client, 1, vSphereCSIControllerPodNamePrefix, csiSystemNamespace) + updateDeploymentReplica(adminClient, 1, vSphereCSIControllerPodNamePrefix, csiSystemNamespace) } }() @@ -2309,7 +2306,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { gomega.Expect(pvclaim).NotTo(gomega.BeNil()) ginkgo.By("Bringing SVC CSI controller up...") - svcCsiDeployment = updateDeploymentReplica(client, 1, vSphereCSIControllerPodNamePrefix, csiSystemNamespace) + svcCsiDeployment = updateDeploymentReplica(adminClient, 1, vSphereCSIControllerPodNamePrefix, csiSystemNamespace) ginkgo.By("Create Pod using the above PVC") pod, vmUUID = createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") @@ -2387,7 +2384,6 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy) scParameters[scParamStoragePolicyID] = profileID // create resource quota - createResourceQuota(client, namespace, rqLimit, thickProvPolicy) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, 
client, namespace, nil, scParameters, "", nil, "", true, "", thickProvPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2446,7 +2442,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { */ ginkgo.It("[csi-block-vanilla][cf-vanilla-block][ef-vks][csi-supervisor] [csi-guest] "+ - "[csi-block-vanilla-parallelized] [csi-vcp-mig][ef-vks-n1][ef-vks-n2] Verify online volume expansion"+ + "[csi-block-vanilla-parallelized] [csi-vcp-mig] Verify online volume expansion"+ " on deployment", ginkgo.Label(p0, vanilla, block, wcp, tkg, vc70), func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) @@ -2464,7 +2460,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if pvclaim != nil { @@ -2558,7 +2554,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { zonalPolicy := GetAndExpectStringEnvVar(envZonalStoragePolicyName) scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -2567,7 +2563,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Expect claim to provision volume successfully") pvclaims = append(pvclaims, pvclaim) - pv, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + pv, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := 
pv[0].Spec.CSI.VolumeHandle gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) @@ -2597,12 +2593,9 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { err = waitForPvResizeForGivenPvc(pvclaim, client, totalResizeWaitPeriod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - isPrivateNetwork := GetBoolEnvVarOrDefault("IS_PRIVATE_NETWORK", false) - if isPrivateNetwork { - ginkgo.By("Checking for conditions on pvc") - pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Checking for conditions on pvc") + pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) @@ -2678,7 +2671,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { // increaseOnlineVolumeMultipleTimes this method increases the same volume // multiple times and verifies PVC and Filesystem size. 
-func increaseOnlineVolumeMultipleTimes(ctx context.Context, f *framework.Framework, +func increaseOnlineVolumeMultipleTimes(ctx context.Context, f *framework.Framework, adminClient clientset.Interface, client clientset.Interface, namespace string, volHandle string, pvclaim *v1.PersistentVolumeClaim, pod *v1.Pod) { var originalSizeInMb, fsSize int64 @@ -2716,13 +2709,11 @@ func increaseOnlineVolumeMultipleTimes(ctx context.Context, f *framework.Framewo err = waitForPvResize(pv, client, pvcSize, totalResizeWaitPeriod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - isPrivateNetwork := GetBoolEnvVarOrDefault("IS_PRIVATE_NETWORK", false) - if isPrivateNetwork { - ginkgo.By("Checking for conditions on pvc") - pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition( - client, namespace, pvclaim.Name, totalResizeWaitPeriod) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Checking for conditions on pvc") + framework.Logf("PVC Name %s:", pvclaim.Name) + pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition( + client, namespace, pvclaim.Name, totalResizeWaitPeriod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = waitForFSResize(pvclaim, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2763,7 +2754,7 @@ func increaseOnlineVolumeMultipleTimes(ctx context.Context, f *framework.Framewo } // createStaticPVC this method creates static PVC -func createStaticPVC(ctx context.Context, f *framework.Framework, +func createStaticPVC(ctx context.Context, f *framework.Framework, adminClient clientset.Interface, client clientset.Interface, namespace string, defaultDatastore *object.Datastore, pandoraSyncWaitTime int) (string, *v1.PersistentVolumeClaim, *v1.PersistentVolume, *storagev1.StorageClass) { curtime := time.Now().Unix() @@ -2850,11 +2841,13 @@ func createSCwithVolumeExpansionTrueAndDynamicPVC(ctx context.Context, f *framew scParameters[scParamFsType] = fstype } + var err error + adminClient, _ := 
initializeClusterClientsByUserRoles(client) + // Create Storage class and PVC ginkgo.By("Creating Storage Class and PVC with allowVolumeExpansion = true") var storageclass *storagev1.StorageClass var pvclaim *v1.PersistentVolumeClaim - var err error var volHandle string if vanillaCluster { @@ -2875,10 +2868,8 @@ func createSCwithVolumeExpansionTrueAndDynamicPVC(ctx context.Context, f *framew } else if supervisorCluster { ginkgo.By("CNS_TEST: Running for WCP setup") framework.Logf("storagePolicyName: %s", storagePolicyName) - profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) - scParameters[scParamStoragePolicyID] = profileID - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) restConfig = getRestConfigClient() @@ -2899,7 +2890,7 @@ func createSCwithVolumeExpansionTrueAndDynamicPVC(ctx context.Context, f *framew var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := persistentvolumes[0] if vcptocsi { @@ -3016,6 +3007,7 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter namespace string, expectedContent string, storagePolicyName string, profileID string) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + adminClient, _ := initializeClusterClientsByUserRoles(client) ginkgo.By("Invoking Test for Volume Expansion") scParameters := make(map[string]string) if windowsEnv { @@ -3036,7 +3028,6 @@ func invokeTestForVolumeExpansion(f 
*framework.Framework, client clientset.Inter namespace, nil, scParameters, "", nil, "", true, "") } else if supervisorCluster { scParameters[scParamStoragePolicyID] = profileID - storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", true, "", storagePolicyName) } else if vanillaCluster { @@ -3054,7 +3045,7 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -3063,7 +3054,7 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter gomega.Expect(err).NotTo(gomega.HaveOccurred()) if supervisorCluster { ginkgo.By("Delete Resource quota") - deleteResourceQuota(client, namespace) + deleteResourceQuota(adminClient, namespace) } }() @@ -3072,7 +3063,7 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter var volHandle, svcPVCName string pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := persistentvolumes[0] if vcptocsi { @@ -3145,12 +3136,9 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter verifyPVSizeinSupervisor(svcPVCName, newSize) } - isPrivateNetwork := GetBoolEnvVarOrDefault("IS_PRIVATE_NETWORK", false) - if isPrivateNetwork { - ginkgo.By("Checking for conditions on pvc") - pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, 
pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Checking for conditions on pvc") + pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if guestCluster { ginkgo.By("Checking for 'FileSystemResizePending' status condition on SVC PVC") @@ -3190,7 +3178,7 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter var exists bool ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, pod.Spec.NodeName)) if vanillaCluster { - vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + vmUUID = getNodeUUID(ctx, adminClient, pod.Spec.NodeName) } else if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3264,6 +3252,7 @@ func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client c namespace string, fstype string, expectedContent string, storagePolicyName string, profileID string) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + adminClient, _ := initializeClusterClientsByUserRoles(client) ginkgo.By("Invoking Test for Volume Expansion 2") scParameters := make(map[string]string) scParameters[scParamFsType] = fstype @@ -3294,7 +3283,7 @@ func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client c } defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() defer func() { @@ -3307,7 +3296,7 @@ func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client c var volHandle, svcPVCName string pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err := 
fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := persistentvolumes[0] @@ -3357,7 +3346,7 @@ func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client c var vmUUID string ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, pod.Spec.NodeName)) - vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + vmUUID = getNodeUUID(ctx, adminClient, pod.Spec.NodeName) if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3415,12 +3404,9 @@ func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client c verifyPVSizeinSupervisor(svcPVCName, newSize) } - isPrivateNetwork := GetBoolEnvVarOrDefault("IS_PRIVATE_NETWORK", false) - if isPrivateNetwork { - ginkgo.By("Checking for conditions on pvc") - pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Checking for conditions on pvc") + pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if guestCluster { ginkgo.By("Checking for 'FileSystemResizePending' status condition on SVC PVC") @@ -3451,7 +3437,7 @@ func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client c ginkgo.By(fmt.Sprintf("Verify volume after expansion: %s is attached to the node: %s", volHandle, pod.Spec.NodeName)) - vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + vmUUID = getNodeUUID(ctx, adminClient, pod.Spec.NodeName) if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3504,6 +3490,7 @@ func 
invokeTestForInvalidVolumeExpansion(f *framework.Framework, client clientse namespace string, storagePolicyName string, profileID string) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + adminClient, _ := initializeClusterClientsByUserRoles(client) scParameters := make(map[string]string) if windowsEnv { scParameters[scParamFsType] = ntfsFSType @@ -3534,8 +3521,10 @@ func invokeTestForInvalidVolumeExpansion(f *framework.Framework, client clientse } defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -3546,7 +3535,7 @@ func invokeTestForInvalidVolumeExpansion(f *framework.Framework, client clientse var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if vcptocsi { ginkgo.By("Verify annotations on PV/PVCs created after migration") @@ -3581,6 +3570,7 @@ func invokeTestForInvalidVolumeShrink(f *framework.Framework, client clientset.I namespace string, storagePolicyName string, profileID string) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + adminClient, _ := initializeClusterClientsByUserRoles(client) scParameters := make(map[string]string) if windowsEnv { scParameters[scParamFsType] = ntfsFSType @@ -3603,7 +3593,7 @@ func invokeTestForInvalidVolumeShrink(f 
*framework.Framework, client clientset.I } else if supervisorCluster { scParameters[scParamStoragePolicyID] = profileID // create resource quota - createResourceQuota(client, namespace, rqLimit, storagePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", true, "", storagePolicyName) } else if vanillaCluster { @@ -3621,7 +3611,7 @@ func invokeTestForInvalidVolumeShrink(f *framework.Framework, client clientset.I gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -3638,7 +3628,7 @@ func invokeTestForInvalidVolumeShrink(f *framework.Framework, client clientset.I var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := persistentvolumes[0] if vcptocsi { @@ -3682,7 +3672,7 @@ func invokeTestForInvalidVolumeShrink(f *framework.Framework, client clientset.I gomega.Expect(err).To(gomega.HaveOccurred()) } -func invokeTestForInvalidVolumeExpansionStaticProvision(f *framework.Framework, +func invokeTestForInvalidVolumeExpansionStaticProvision(f *framework.Framework, adminClient clientset.Interface, client clientset.Interface, namespace string, storagePolicyName string, profileID string) { ginkgo.By("Invoking Test for Invalid Volume Expansion for Static Provisioning") @@ -3762,14 +3752,14 
@@ func invokeTestForInvalidVolumeExpansionStaticProvision(f *framework.Framework, ginkgo.By("Creating the PV") pv = getPersistentVolumeSpec(fcdID, v1.PersistentVolumeReclaimDelete, staticPVLabels, ext4FSType) - pv, err = client.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) + pv, err = adminClient.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) if err != nil { return } defer func() { ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeout)) + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeout)) }() err = e2eVSphere.waitForCNSVolumeToBeCreated(pv.Spec.CSI.VolumeHandle) @@ -3788,7 +3778,7 @@ func invokeTestForInvalidVolumeExpansionStaticProvision(f *framework.Framework, }() // Wait for PV and PVC to Bind - framework.ExpectNoError(fpv.WaitOnPVandPVC(ctx, client, f.Timeouts, namespace, pv, pvc)) + framework.ExpectNoError(fpv.WaitOnPVandPVC(ctx, adminClient, f.Timeouts, namespace, pv, pvc)) // Set deleteFCDRequired to false. // After PV, PVC is in the bind state, Deleting PVC should delete container volume. 
@@ -3816,6 +3806,7 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien namespace string, expectedContent string, storagePolicyName string, profileID string) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + adminClient, _ := initializeClusterClientsByUserRoles(client) ginkgo.By("Invoking Test to verify Multiple Volume Expansions on the same volume") scParameters := make(map[string]string) if windowsEnv { @@ -3837,7 +3828,7 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien } else if supervisorCluster { scParameters[scParamStoragePolicyID] = profileID // create resource quota - createResourceQuota(client, namespace, rqLimit, storagePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", true, "", storagePolicyName) } else if vanillaCluster { @@ -3854,7 +3845,7 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -3864,7 +3855,7 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien if supervisorCluster { ginkgo.By("Delete Resource quota") - deleteResourceQuota(client, namespace) + deleteResourceQuota(adminClient, namespace) } }() @@ -3873,7 +3864,7 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien var volHandle, svcPVCName string pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, 
pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := persistentvolumes[0] @@ -3951,12 +3942,9 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - isPrivateNetwork := GetBoolEnvVarOrDefault("IS_PRIVATE_NETWORK", false) - if isPrivateNetwork { - ginkgo.By("Checking for conditions on pvc") - pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Checking for conditions on pvc") + pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if guestCluster { ginkgo.By("Checking for 'FileSystemResizePending' status condition on SVC PVC") @@ -3996,7 +3984,7 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien var exists bool ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, pod.Spec.NodeName)) if vanillaCluster { - vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + vmUUID = getNodeUUID(ctx, adminClient, pod.Spec.NodeName) } else if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -4070,6 +4058,7 @@ func invokeTestForUnsupportedFileVolumeExpansion(f *framework.Framework, client clientset.Interface, namespace string, storagePolicyName string, profileID string) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + adminClient, _ := initializeClusterClientsByUserRoles(client) ginkgo.By("Invoking Test for Unsupported File Volume Expansion") scParameters := make(map[string]string) scParameters[scParamFsType] = nfs4FSType @@ -4084,7 +4073,7 @@ func 
invokeTestForUnsupportedFileVolumeExpansion(f *framework.Framework, namespace, nil, scParameters, "", nil, "", true, v1.ReadWriteMany) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() defer func() { @@ -4097,7 +4086,7 @@ func invokeTestForUnsupportedFileVolumeExpansion(f *framework.Framework, pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for all claims to be in bound state") // persistentvolumes - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + _, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Modify PVC spec to trigger volume expansion @@ -4156,9 +4145,12 @@ func expandPVCSize(origPVC *v1.PersistentVolumeClaim, size resource.Quantity, func waitForPvResizeForGivenPvc(pvc *v1.PersistentVolumeClaim, c clientset.Interface, duration time.Duration) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var adminClient clientset.Interface + var err error + adminClient, c = initializeClusterClientsByUserRoles(c) pvName := pvc.Spec.VolumeName pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] - pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) + pv, err := adminClient.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) return waitForPvResize(pv, c, pvcSize, duration) } @@ -4168,9 +4160,10 @@ func waitForPvResize(pv *v1.PersistentVolume, c clientset.Interface, size resource.Quantity, duration time.Duration) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + adminClient, c := 
initializeClusterClientsByUserRoles(c) return wait.PollUntilContextTimeout(ctx, resizePollInterval, duration, true, func(ctx context.Context) (bool, error) { - pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) + pv, err := adminClient.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching pv %q for resizing %v", pv.Name, err) @@ -4273,6 +4266,7 @@ func sizeInMb(size resource.Quantity) int64 { func testCleanUpUtil(ctx context.Context, restClientConfig *restclient.Config, c clientset.Interface, cnsRegistervolume *cnsregistervolumev1alpha1.CnsRegisterVolume, namespace string, pvcName string, pvName string) { + adminClient, _ := initializeClusterClientsByUserRoles(c) if guestCluster { c, _ = getSvcClientAndNamespace() } @@ -4280,7 +4274,7 @@ func testCleanUpUtil(ctx context.Context, restClientConfig *restclient.Config, c framework.ExpectNoError(fpv.DeletePersistentVolumeClaim(ctx, c, pvcName, namespace), "Failed to delete PVC", pvcName) ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, c, pvName, poll, supervisorClusterOperationsTimeout)) + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pvName, poll, supervisorClusterOperationsTimeout)) if cnsRegistervolume != nil { ginkgo.By("Verify CRD should be deleted automatically") @@ -4315,12 +4309,9 @@ func offlineVolumeExpansionOnSupervisorPVC(client clientset.Interface, f *framew err = waitForPvResizeForGivenPvc(pvclaim, client, totalResizeWaitPeriod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - isPrivateNetwork := GetBoolEnvVarOrDefault("IS_PRIVATE_NETWORK", false) - if isPrivateNetwork { - ginkgo.By("Checking for conditions on pvc") - pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Checking 
for conditions on pvc") + pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle)