diff --git a/tests/e2e/config_change_test.go b/tests/e2e/config_change_test.go index 54869d17cd..ec8dff49e8 100644 --- a/tests/e2e/config_change_test.go +++ b/tests/e2e/config_change_test.go @@ -35,6 +35,7 @@ var _ bool = ginkgo.Describe("[csi-supervisor] config-change-test", func() { namespace = getNamespaceToRunTests(f) ctx, cancel = context.WithCancel(context.Background()) defer cancel() + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -81,7 +82,7 @@ var _ bool = ginkgo.Describe("[csi-supervisor] config-change-test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) diff --git a/tests/e2e/csi_snapshot_basic.go b/tests/e2e/csi_snapshot_basic.go index f4b68d33b2..bb17b6bf37 100644 --- a/tests/e2e/csi_snapshot_basic.go +++ b/tests/e2e/csi_snapshot_basic.go @@ -75,6 +75,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { scName string volHandle string isQuotaValidationSupported bool + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -84,13 +85,17 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { bootstrap() client = f.ClientSet namespace = getNamespaceToRunTests(f) + + var err error + var nodeList *v1.NodeList + adminClient, client = initializeClusterClientsByUserRoles(client) scParameters = make(map[string]string) // reading shared datastoreurl and shared storage policy datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) // fetching node list and checking node status - nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") @@ -200,7 +205,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if guestCluster { + /*if guestCluster { ctx, cancel := context.WithCancel(context.Background()) defer cancel() framework.Logf("Collecting supervisor PVC events before performing PV/PVC cleanup") @@ -209,7 +214,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { for _, item := range eventList.Items { framework.Logf("%q", item.Message) } - } + }*/ // restarting pending and stopped services after vc reboot if any if isVcRebooted { @@ -221,6 +226,14 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { if isVsanHealthServiceStopped { startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) } + + if supervisorCluster { + dumpSvcNsEventsOnTestFailure(client, namespace) + } + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + } }) /* @@ -1985,7 +1998,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim2, err := fpv.CreatePVC(ctx, client, namespace2Name, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + 
persistentvolumes2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle @@ -2267,7 +2280,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { if storageclass.AllowVolumeExpansion == nil || *storageclass.AllowVolumeExpansion != allowExpansion { storageclass.AllowVolumeExpansion = &allowExpansion storageclass.Parameters = scParameters - storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } @@ -2328,7 +2341,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Expecting the volume bound to fail") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, + _, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionShortTimeout) gomega.Expect(err).To(gomega.HaveOccurred()) pvc2Deleted := false @@ -2420,7 +2433,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { replicas := *(statefulset.Spec.Replicas) defer func() { ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() // Waiting for pods status to be Ready @@ -2706,7 +2719,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).To(gomega.HaveOccurred()) ginkgo.By("Get PV and check the PV is still not deleted") - _, err = client.CoreV1().PersistentVolumes().Get(ctx, persistentvolumes[0].Name, metav1.GetOptions{}) + if vanillaCluster { + _, err = client.CoreV1().PersistentVolumes().Get(ctx, persistentvolumes[0].Name, metav1.GetOptions{}) + } else { + _, err = adminClient.CoreV1().PersistentVolumes().Get(ctx, persistentvolumes[0].Name, metav1.GetOptions{}) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Delete dynamic volume snapshot") @@ -3222,7 +3239,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolume2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, + persistentvolume2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolume2[0].Spec.CSI.VolumeHandle @@ -3302,7 +3319,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { if storageclass.AllowVolumeExpansion == nil || *storageclass.AllowVolumeExpansion != allowExpansion { storageclass.AllowVolumeExpansion = &allowExpansion storageclass.Parameters = scParameters - storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } @@ -3808,7 +3825,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim2, err := fpv.CreatePVC(ctx, 
client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle @@ -3904,7 +3921,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } ginkgo.By("Expect claim to provision volume successfully") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + _, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { for _, pvclaim := range pvclaims { @@ -4051,7 +4068,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { restoredpvclaims = append(restoredpvclaims, pvclaim2) - persistentvolume2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, + persistentvolume2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout*2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolume2[0].Spec.CSI.VolumeHandle @@ -4577,7 +4594,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } framework.Logf("Waiting for PVCs to come to bound state") - persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle @@ -4993,7 +5010,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { tkg, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - invokeSnapshotOperationsOnSharedDatastore(client, ctx, namespace, scParameters, snapc, "VVOL", + invokeSnapshotOperationsOnSharedDatastore(client, adminClient, ctx, namespace, scParameters, snapc, "VVOL", pandoraSyncWaitTime) }) @@ -5009,7 +5026,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { tkg, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - invokeSnapshotOperationsOnSharedDatastore(client, ctx, namespace, scParameters, snapc, "VMFS", pandoraSyncWaitTime) + invokeSnapshotOperationsOnSharedDatastore(client, adminClient, ctx, namespace, scParameters, snapc, "VMFS", pandoraSyncWaitTime) }) /* @@ -5023,7 +5040,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { tkg, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - invokeSnapshotOperationsOnSharedDatastore(client, ctx, namespace, scParameters, snapc, "NFS", pandoraSyncWaitTime) + invokeSnapshotOperationsOnSharedDatastore(client, adminClient, ctx, namespace, scParameters, snapc, "NFS", pandoraSyncWaitTime) }) /* @@ -5036,7 +5053,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { "VSAN2 Datastore", ginkgo.Label(p0, snapshot, tkg, newTest, vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - invokeSnapshotOperationsOnSharedDatastore(client, ctx, namespace, scParameters, snapc, "VSAN", pandoraSyncWaitTime) + invokeSnapshotOperationsOnSharedDatastore(client, adminClient, ctx, namespace, scParameters, snapc, "VSAN", pandoraSyncWaitTime) }) /* @@ -5112,7 +5129,7 
@@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + persistentvolumes, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -5149,7 +5166,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } ginkgo.By("Wait for the PVC to be bound") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims2, framework.ClaimProvisionTimeout) + _, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims2, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for i := 0; i < volumeOpsScale; i++ { @@ -5602,7 +5619,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, + _, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionShortTimeout) gomega.Expect(err).To(gomega.HaveOccurred()) @@ -5734,7 +5751,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle @@ -6253,7 +6270,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - _, err = fpv.WaitForPVClaimBoundPhase(ctx, clientNewGc, + _, err = WaitForPVClaimBoundPhase(ctx, clientNewGc, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).To(gomega.HaveOccurred()) expectedErrMsg := "error getting handle for DataSource Type VolumeSnapshot by Name " + volumeSnapshot.Name @@ -6348,7 +6365,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaims = append(pvclaims, pvclaim2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, + _, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionShortTimeout) framework.Logf("Error from creating pvc with %s accessmode is : %s", accessMode, err.Error()) gomega.Expect(err).To(gomega.HaveOccurred()) @@ -6944,7 +6961,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { framework.Logf("Waiting for PV to move to released state") // TODO: replace sleep with polling mechanism. 
time.Sleep(time.Duration(100) * time.Second) - gcPV, err = client.CoreV1().PersistentVolumes().Get(ctx, gcPVName, metav1.GetOptions{}) + gcPV, err = adminClient.CoreV1().PersistentVolumes().Get(ctx, gcPVName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gcPVStatus := gcPV.Status.Phase if gcPVStatus != "Released" { @@ -7011,7 +7028,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + persistentvolumes, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -7091,7 +7108,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } ginkgo.By("Wait for the PVC to be bound") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims2, framework.ClaimProvisionTimeout) + _, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims2, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for i := 0; i < volumeOpsScale; i++ { @@ -7986,7 +8003,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Perform password rotation on the supervisor") csiNamespace := GetAndExpectStringEnvVar(envCSINamespace) - passwordRotated, err := performPasswordRotationOnSupervisor(client, ctx, csiNamespace, vcAddress) + passwordRotated, err := performPasswordRotationOnSupervisor(adminClient, ctx, csiNamespace, vcAddress) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(passwordRotated).To(gomega.BeTrue()) @@ -8042,7 +8059,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle @@ -8073,7 +8090,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { // invokeSnapshotOperationsOnSharedDatastore is a wrapper method which invokes creation of volume snapshot // and restore of volume snapshot on shared datastore -func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx context.Context, namespace string, +func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, adminClient clientset.Interface, ctx context.Context, namespace string, scParameters map[string]string, snapc *snapclient.Clientset, sharedDatastoreType string, pandoraSyncWaitTime int) { var storageclass *storagev1.StorageClass @@ -8135,7 +8152,7 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c }() } else if supervisorCluster { ginkgo.By("Get storage class and create PVC") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -8146,7 +8163,7 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := 
client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() } diff --git a/tests/e2e/csi_snapshot_negative.go b/tests/e2e/csi_snapshot_negative.go index 3a6a02b4a0..91461cffe9 100644 --- a/tests/e2e/csi_snapshot_negative.go +++ b/tests/e2e/csi_snapshot_negative.go @@ -59,6 +59,7 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti serviceName string pandoraSyncWaitTime int storagePolicyName string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -66,6 +67,9 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti ctx, cancel := context.WithCancel(context.Background()) defer cancel() client = f.ClientSet + var err error + + adminClient, client = initializeClusterClientsByUserRoles(client) namespace = getNamespaceToRunTests(f) scParameters = make(map[string]string) isServiceStopped = false @@ -114,12 +118,13 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti ctx, vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) svcCsiReplicas = *csiDeployment.Spec.Replicas - } + } else { - csiDeployment, err := client.AppsV1().Deployments(csiNamespace).Get( - ctx, vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - csiReplicas = *csiDeployment.Spec.Replicas + csiDeployment, err := client.AppsV1().Deployments(csiNamespace).Get( + ctx, vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + csiReplicas = *csiDeployment.Spec.Replicas + } if os.Getenv(envPandoraSyncWaitTime) != "" { pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) @@ -143,15 +148,15 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti if serviceName == "CSI" { framework.Logf("Starting CSI driver") ignoreLabels := make(map[string]string) - err := updateDeploymentReplicawithWait(client, csiReplicas, vSphereCSIControllerPodNamePrefix, + err := updateDeploymentReplicawithWait(adminClient, csiReplicas, vSphereCSIControllerPodNamePrefix, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Wait for the CSI Pods to be up and Running - list_of_pods, err := fpod.GetPodsInNamespace(ctx, client, csiSystemNamespace, ignoreLabels) + list_of_pods, err := fpod.GetPodsInNamespace(ctx, adminClient, csiSystemNamespace, ignoreLabels) gomega.Expect(err).NotTo(gomega.HaveOccurred()) num_csi_pods := len(list_of_pods) - err = fpod.WaitForPodsRunningReady(ctx, client, csiSystemNamespace, int(num_csi_pods), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiSystemNamespace, int(num_csi_pods), time.Duration(pollTimeout)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } else if serviceName == hostdServiceName { @@ -170,7 +175,15 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti } ginkgo.By(fmt.Sprintf("Resetting provisioner time interval to %s sec", defaultProvisionerTimeInSec)) - updateCSIDeploymentProvisionerTimeout(client, csiSystemNamespace, defaultProvisionerTimeInSec) + updateCSIDeploymentProvisionerTimeout(adminClient, csiSystemNamespace, defaultProvisionerTimeInSec) + + if supervisorCluster { + dumpSvcNsEventsOnTestFailure(client, namespace) + } + if guestCluster { + svcClient, svNamespace 
:= getSvcClientAndNamespace() + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + } }) /* @@ -318,6 +331,13 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, storagePolicyName := GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) + /*var testClient clientset.Interface + if vanillaCluster { + testClient = client + } else { + testClient = adminClient + }*/ + adminClient, _ := initializeClusterClientsByUserRoles(client) if vanillaCluster { ginkgo.By("Create storage class") scParameters[scParamDatastoreURL] = datastoreURL @@ -329,7 +349,7 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, }() } else if supervisorCluster { ginkgo.By("Get storage class") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -340,7 +360,7 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() } @@ -404,18 +424,18 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, if serviceName == "CSI" { ginkgo.By("Stopping CSI driver") - isServiceStopped, err = stopCSIPods(ctx, client, csiNamespace) + isServiceStopped, err = stopCSIPods(ctx, adminClient, csiNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if isServiceStopped { framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiNamespace) + isServiceStopped, err = startCSIPods(ctx, adminClient, csiReplicas, csiNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiNamespace) + isServiceStopped, err = startCSIPods(ctx, adminClient, csiReplicas, csiNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime)) @@ -531,17 +551,17 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, if serviceName == "CSI" { ginkgo.By("Stopping CSI driver") - isServiceStopped, err = stopCSIPods(ctx, client, csiNamespace) + isServiceStopped, err = stopCSIPods(ctx, adminClient, csiNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if isServiceStopped { framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiNamespace) + isServiceStopped, err = startCSIPods(ctx, adminClient, csiReplicas, csiNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiNamespace) + isServiceStopped, err = startCSIPods(ctx, adminClient, csiReplicas, csiNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime)) @@ -633,7 +653,7 @@ func 
snapshotOperationWhileServiceDown(serviceName string, namespace string, //After service restart bootstrap() - persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle @@ -657,18 +677,18 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, if isSnapshotCreated { if serviceName == "CSI" { ginkgo.By("Stopping CSI driver") - isServiceStopped, err = stopCSIPods(ctx, client, csiNamespace) + isServiceStopped, err = stopCSIPods(ctx, adminClient, csiNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if isServiceStopped { framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiNamespace) + isServiceStopped, err = startCSIPods(ctx, adminClient, csiReplicas, csiNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiNamespace) + isServiceStopped, err = startCSIPods(ctx, adminClient, csiReplicas, csiNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime)) diff --git a/tests/e2e/csi_snapshot_utils.go b/tests/e2e/csi_snapshot_utils.go index 17c8cbab19..74382c910b 100644 --- a/tests/e2e/csi_snapshot_utils.go +++ b/tests/e2e/csi_snapshot_utils.go @@ -691,7 +691,7 @@ func verifyVolumeRestoreOperation(ctx context.Context, client clientset.Interfac pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle @@ -759,8 +759,8 @@ func verifyVolumeRestoreOperation(ctx context.Context, client clientset.Interfac // createPVCAndQueryVolumeInCNS creates PVc with a given storage class on a given namespace // and verifies cns metadata of that volume if verifyCNSVolume is set to true -func createPVCAndQueryVolumeInCNS(ctx context.Context, client clientset.Interface, namespace string, - pvclaimLabels map[string]string, accessMode v1.PersistentVolumeAccessMode, +func createPVCAndQueryVolumeInCNS(ctx context.Context, client clientset.Interface, + namespace string, pvclaimLabels map[string]string, accessMode v1.PersistentVolumeAccessMode, ds string, storageclass *storagev1.StorageClass, verifyCNSVolume bool) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume, error) { @@ -771,7 +771,7 @@ func createPVCAndQueryVolumeInCNS(ctx context.Context, client clientset.Interfac } // Wait for PVC to be bound to a PV - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout*2) if err != nil { return pvclaim, persistentvolumes, fmt.Errorf("failed to wait for PVC to bind to a PV: %w", err) diff --git a/tests/e2e/csi_static_provisioning_basic.go b/tests/e2e/csi_static_provisioning_basic.go index c57f5785f2..c59348bd8c 100644 --- a/tests/e2e/csi_static_provisioning_basic.go +++ 
b/tests/e2e/csi_static_provisioning_basic.go @@ -78,6 +78,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { nonSharedDatastoreURL string fullSyncWaitTime int isQuotaValidationSupported bool + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -87,6 +88,16 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { var cancel context.CancelFunc ctx, cancel = context.WithCancel(context.Background()) defer cancel() + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) @@ -167,7 +178,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { } if pv != nil { - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort)) + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeoutShort)) framework.ExpectNoError(e2eVSphere.waitForCNSVolumeToBeDeleted(pv.Spec.CSI.VolumeHandle)) } @@ -210,15 +221,17 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { framework.Logf("Profile ID :%s", profileID) scParameters := make(map[string]string) scParameters["storagePolicyID"] = profileID - err = client.StorageV1().StorageClasses().Delete(ctx, vsanDefaultStorageClassInSVC, metav1.DeleteOptions{}) - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if !supervisorCluster { + err = adminClient.StorageV1().StorageClasses().Delete(ctx, vsanDefaultStorageClassInSVC, metav1.DeleteOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } } storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, vsanDefaultStorageClassInSVC) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("storageclass name :%s", storageclass.GetName()) - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storageclass.GetName(), metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, storageclass.GetName(), metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("storageclass name :%s", storageclass.GetName()) @@ -229,6 +242,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { } testCleanUpUtil := func(ctx context.Context, restClientConfig *restclient.Config, + adminClient clientset.Interface, cnsRegistervolume *cnsregistervolumev1alpha1.CnsRegisterVolume, namespace string, pvcName string, pvName string) { if guestCluster { @@ -240,7 +254,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { pvc = nil ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pvName, poll, + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pvName, poll, supervisorClusterOperationsTimeout)) pv = nil @@ -356,7 +370,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { pvc = nil ginkgo.By("Verify PV should be deleted automatically") - 
framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeout)) + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeout)) pv = nil }) @@ -477,7 +491,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { pvc = nil ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeout)) + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeout)) pv = nil }) @@ -587,7 +601,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() defer func() { @@ -654,7 +668,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { pvc = nil ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort)) + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeoutShort)) pv = nil ginkgo.By("Verify volume is deleted in Supervisor Cluster") @@ -738,7 +752,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { pvc = nil ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort)) + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeoutShort)) pv = nil ginkgo.By("Verify volume is deleted in Supervisor Cluster") @@ -827,7 +841,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { vmUUID, pv.Spec.CSI.VolumeHandle)) defer func() { - testCleanUpUtil(ctx, restConfig, cnsRegisterVolume, namespace, pvc.Name, pv.Name) + testCleanUpUtil(ctx, restConfig, adminClient, cnsRegisterVolume, namespace, pvc.Name, pv.Name) }() }) @@ -929,7 +943,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { vmUUID, pv.Spec.CSI.VolumeHandle)) defer func() { - testCleanUpUtil(ctx, restConfig, cnsRegisterVolume, namespace, pvc.Name, pv.Name) + testCleanUpUtil(ctx, restConfig, adminClient, cnsRegisterVolume, namespace, pvc.Name, pv.Name) //Validates PVC quota in both StoragePolicyQuota and StoragePolicyUsage CR _, _, storagePolicyQuota_afterCleanUp, _, storagePolicyUsage_AfterCleanup, _ := @@ -1052,7 +1066,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { fmt.Sprintf("PodVM with vmUUID: %s still exists. 
So volume: %s is not detached from the PodVM", vmUUID, pv.Spec.CSI.VolumeHandle)) defer func() { - testCleanUpUtil(ctx, restConfig, cnsRegisterVolume, namespace, pvc.Name, pv.Name) + testCleanUpUtil(ctx, restConfig, adminClient, cnsRegisterVolume, namespace, pvc.Name, pv.Name) if isQuotaValidationSupported { //Validates PVC quota in both StoragePolicyQuota and StoragePolicyUsage CR _, _, storagePolicyQuota_afterCleanUp, _, storagePolicyUsage_AfterCleanup, _ := @@ -1217,10 +1231,10 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { "Failed to delete PVC", pvc2.Name) ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv2.Name, poll, supervisorClusterOperationsTimeout)) - testCleanUpUtil(ctx, restConfig, nil, namespace, pvc1.Name, pv1.Name) + testCleanUpUtil(ctx, restConfig, adminClient, nil, namespace, pvc1.Name, pv1.Name) }() }) @@ -1319,10 +1333,10 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { "Failed to delete PVC ", pvc2.Name) ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv2.Name, + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv2.Name, poll, supervisorClusterOperationsTimeout)) - testCleanUpUtil(ctx, restConfig, nil, namespace, pvc1.Name, pv1.Name) + testCleanUpUtil(ctx, restConfig, adminClient, nil, namespace, pvc1.Name, pv1.Name) }() }) @@ -1403,7 +1417,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { verifyBidirectionalReferenceOfPVandPVC(ctx, client, pvc, pv, fcdID) defer func() { - testCleanUpUtil(ctx, restConfig, nil, namespace, pvc.Name, pv.Name) + testCleanUpUtil(ctx, restConfig, adminClient, nil, namespace, pvc.Name, pv.Name) }() }) @@ -1483,7 +1497,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { verifyBidirectionalReferenceOfPVandPVC(ctx, client, pvc, pv, fcdID) defer func() { - testCleanUpUtil(ctx, restConfig, nil, namespace, pvc.Name, pv.Name) + testCleanUpUtil(ctx, restConfig, adminClient, nil, namespace, pvc.Name, pv.Name) }() }) @@ -1678,12 +1692,12 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { pvName := "static-pv-" + fcdID framework.Logf("Deleting PersistentVolume %s", pvName) framework.ExpectNoError(fpv.DeletePersistentVolume(ctx, client, pvName)) - pv, err = client.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) + pv, err = adminClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if pv != nil { - framework.ExpectNoError(fpv.DeletePersistentVolume(ctx, client, pvName)) + framework.ExpectNoError(fpv.DeletePersistentVolume(ctx, adminClient, pvName)) } pv = nil }() @@ -1868,7 +1882,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { framework.Logf("Waiting for PV to move to released state") // TODO: replace sleep with polling mechanism. 
time.Sleep(time.Duration(100) * time.Second) - gcPV, err = client.CoreV1().PersistentVolumes().Get(ctx, gcPVName, metav1.GetOptions{}) + gcPV, err = adminClient.CoreV1().PersistentVolumes().Get(ctx, gcPVName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gcPVStatus := gcPV.Status.Phase if gcPVStatus != "Released" { @@ -1881,7 +1895,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { gomega.Expect(volumeExists).NotTo(gomega.BeFalse()) defer func() { - testCleanUpUtil(ctx, restConfig, nil, svNamespace, svcPVC.Name, svcPV.Name) + testCleanUpUtil(ctx, restConfig, adminClient, nil, svNamespace, svcPVC.Name, svcPV.Name) }() }) @@ -1978,11 +1992,11 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { client, namespaceToDelete, poll, supervisorClusterOperationsTimeout)) ginkgo.By("Verify PV got deleted") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv1.Name, poll, supervisorClusterOperationsTimeout)) framework.ExpectNoError(e2eVSphere.waitForCNSVolumeToBeDeleted(pv1.Spec.CSI.VolumeHandle)) - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv2.Name, poll, supervisorClusterOperationsTimeout)) framework.ExpectNoError(e2eVSphere.waitForCNSVolumeToBeDeleted(pv2.Spec.CSI.VolumeHandle)) @@ -2056,7 +2070,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { pvc = nil ginkgo.By("PV will be in released state , hence delete PV explicitly") - framework.ExpectNoError(fpv.DeletePersistentVolume(ctx, client, pv.GetName())) + framework.ExpectNoError(fpv.DeletePersistentVolume(ctx, adminClient, pv.GetName())) pv = nil ginkgo.By("Verify CRD should be deleted automatically") @@ -2261,9 +2275,13 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { } framework.Logf("Deleting PersistentVolume %s", pv.Name) - err = fpv.DeletePersistentVolume(ctx, client, pv.Name) + if vanillaCluster { + err = fpv.DeletePersistentVolume(ctx, client, pv.Name) + } else { + err = fpv.DeletePersistentVolume(ctx, adminClient, pv.Name) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = client.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) + _, err = adminClient.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -2442,9 +2460,9 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { } framework.Logf("Deleting PersistentVolume %s", pv.Name) - err = fpv.DeletePersistentVolume(ctx, client, pv.Name) + err = fpv.DeletePersistentVolume(ctx, adminClient, pv.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = client.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) + _, err = adminClient.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } diff --git a/tests/e2e/csi_utils.go b/tests/e2e/csi_utils.go index 33bc814e98..df2974ade7 100644 --- a/tests/e2e/csi_utils.go +++ b/tests/e2e/csi_utils.go @@ -70,7 +70,8 @@ func waitForControllerDeletion(ctx context.Context, client clientset.Interface, func mapK8sMasterNodeWithIPs(client clientset.Interface, nodeNameIPMap map[string]string) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodes, 
err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: "node-role.kubernetes.io/master"}) + adminClient, client := initializeClusterClientsByUserRoles(client) + nodes, err := adminClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: "node-role.kubernetes.io/master"}) if err != nil { return err } diff --git a/tests/e2e/data_persistence.go b/tests/e2e/data_persistence.go index c66406808c..d035740ce3 100644 --- a/tests/e2e/data_persistence.go +++ b/tests/e2e/data_persistence.go @@ -68,12 +68,24 @@ var _ = ginkgo.Describe("Data Persistence", func() { storagePolicyName string svcPVCName string // PVC Name in the Supervisor Cluster. datastoreURL string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet namespace = getNamespaceToRunTests(f) ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var err error + + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -136,7 +148,7 @@ var _ = ginkgo.Describe("Data Persistence", func() { profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) scParameters[scParamStoragePolicyID] = profileID // Create resource quota. - createResourceQuota(client, namespace, rqLimit, storagePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName) sc, pvc, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", false, "", storagePolicyName) } else { @@ -148,13 +160,13 @@ var _ = ginkgo.Describe("Data Persistence", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -322,7 +334,7 @@ var _ = ginkgo.Describe("Data Persistence", func() { }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -473,7 +485,7 @@ var _ = ginkgo.Describe("Data Persistence", func() { time.Sleep(time.Duration(90) * time.Second) ginkgo.By("Create resource quota") - createResourceQuota(client, namespace, rqLimit, storagePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName) ginkgo.By("Import above created FCD ") cnsRegisterVolume := getCNSRegisterVolumeSpec(ctx, namespace, fcdID, "", pvcName, v1.ReadWriteOnce) @@ -565,7 +577,7 @@ var 
_ = ginkgo.Describe("Data Persistence", func() { pvc = nil ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort)) + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeoutShort)) pv = nil ginkgo.By("Verify CRD should be deleted automatically") @@ -573,6 +585,6 @@ var _ = ginkgo.Describe("Data Persistence", func() { restConfig, namespace, cnsRegisterVolume, poll, supervisorClusterOperationsTimeout)) ginkgo.By("Delete Resource quota") - deleteResourceQuota(client, namespace) + deleteResourceQuota(adminClient, namespace) }) }) diff --git a/tests/e2e/e2e_common.go b/tests/e2e/e2e_common.go index b4fe559faa..ce460ca5f6 100644 --- a/tests/e2e/e2e_common.go +++ b/tests/e2e/e2e_common.go @@ -33,6 +33,7 @@ const ( adminUser = "Administrator@vsphere.local" apiServerIPs = "API_SERVER_IPS" attacherContainerName = "csi-attacher" + audienceForSvcAccountName = "https://kubernetes.default.svc.cluster.local" nginxImage = "registry.k8s.io/nginx-slim:0.26" nginxImage4upg = "registry.k8s.io/nginx-slim:0.27" retainClaimPolicy = "Retain" @@ -71,6 +72,7 @@ const ( e2evSphereCSIDriverName = "csi.vsphere.vmware.com" ensureAccessibilityMModeType = "ensureObjectAccessibility" envClusterFlavor = "CLUSTER_FLAVOR" + envDevopsUserName = "DEVOPS_USERNAME" envDiskSizeLarge = "LARGE_DISK_SIZE" envCSINamespace = "CSI_NAMESPACE" envContentLibraryUrl = "CONTENT_LIB_URL" @@ -182,9 +184,12 @@ const ( rqLimit = "200Gi" rqLimitScaleTest = "900Gi" rootUser = "root" + rbacApiGroup = "rbac.authorization.k8s.io" + roleKeyword = "Role" defaultrqLimit = "20Gi" rqStorageType = ".storageclass.storage.k8s.io/requests.storage" resizerContainerName = "csi-resizer" + serviceAccountKeyword = "ServiceAccount" scParamDatastoreURL = "DatastoreURL" scParamFsType = "csi.storage.k8s.io/fstype" scParamStoragePolicyID = "storagePolicyID" diff --git a/tests/e2e/fullsync_test_for_block_volume.go b/tests/e2e/fullsync_test_for_block_volume.go index 26b8728d45..26ac0f8c07 100644 --- a/tests/e2e/fullsync_test_for_block_volume.go +++ b/tests/e2e/fullsync_test_for_block_volume.go @@ -71,6 +71,7 @@ var _ bool = ginkgo.Describe("full-sync-test", func() { storagePolicyName string scParameters map[string]string isVsanHealthServiceStopped bool + adminClient clientset.Interface ) const ( @@ -80,12 +81,15 @@ var _ bool = ginkgo.Describe("full-sync-test", func() { ginkgo.BeforeEach(func() { client = f.ClientSet namespace = getNamespaceToRunTests(f) + var err error + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var nodeList *v1.NodeList + adminClient, client = initializeClusterClientsByUserRoles(client) if vanillaCluster { csiControllerNamespace = GetAndExpectStringEnvVar(envCSINamespace) } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") @@ -243,7 +247,7 @@ var _ bool = ginkgo.Describe("full-sync-test", func() { }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, 
framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -275,7 +279,7 @@ var _ bool = ginkgo.Describe("full-sync-test", func() { ginkgo.By(fmt.Sprintf("Updating labels %+v for pv %s", labels, pv.Name)) pv.Labels = labels - _, err = client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{}) + _, err = adminClient.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintln("Starting vsan-health on the vCenter host")) @@ -328,7 +332,7 @@ var _ bool = ginkgo.Describe("full-sync-test", func() { }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -368,7 +372,7 @@ var _ bool = ginkgo.Describe("full-sync-test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Deleting the PV %s", pv.Name)) - err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintln("Starting vsan-health on the vCenter host")) @@ -503,7 +507,7 @@ var _ bool = ginkgo.Describe("full-sync-test", func() { for _, pv := range pvs { ginkgo.By(fmt.Sprintf("Deleting the PV %s", pv.Name)) - err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) if !apierrors.IsNotFound(err) { // Skip if failure is "not found" - object may already been deleted // by test. 
@@ -791,7 +795,7 @@ var _ bool = ginkgo.Describe("full-sync-test", func() { ctx, f, client, "", storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } diff --git a/tests/e2e/gc_block_resize_retain_policy.go b/tests/e2e/gc_block_resize_retain_policy.go index 266a71d375..dca370f78b 100644 --- a/tests/e2e/gc_block_resize_retain_policy.go +++ b/tests/e2e/gc_block_resize_retain_policy.go @@ -69,12 +69,14 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po restConfig *restclient.Config deleteFCDRequired bool labels_ns map[string]string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() client = f.ClientSet + namespace = f.Namespace.Name f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged labels_ns = map[string]string{} @@ -82,6 +84,15 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po labels_ns["e2e-framework"] = f.BaseName bootstrap() + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } ginkgo.By("Getting ready nodes on GC 1") nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") @@ -113,7 +124,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv = persistentvolumes[0] volHandle = getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle) @@ -147,7 +158,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if !pvDeleted { - err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if !pvcDeletedInSvc { @@ -169,7 +180,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po ginkgo.By("Verify volume is deleted in Supervisor Cluster") err = waitTillVolumeIsDeletedInSvc(svcPVCName, poll, pollTimeoutShort) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) @@ -253,7 +264,7 @@ var _ = 
ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po pvcDeleted = true ginkgo.By("Check GC PV exists and is released") - pv, err = waitForPvToBeReleased(ctx, client, pv.Name) + pv, err = waitForPvToBeReleased(ctx, adminClient, pv.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) oldPvUID := string(pv.UID) fmt.Println("PV uuid", oldPvUID) @@ -442,7 +453,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po err = client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvclaim.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvcDeleted = true - err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvDeleted = true @@ -680,7 +691,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po pvcDeleted = true ginkgo.By("Check GC PV exists and is released") - pv, err = waitForPvToBeReleased(ctx, client, pv.Name) + pv, err = waitForPvToBeReleased(ctx, adminClient, pv.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) oldPvUID := string(pv.UID) fmt.Println("PV uuid", oldPvUID) @@ -900,7 +911,8 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po ginkgo.By("verify created PV, PVC and check the bidirectional reference") svcPVC, err := svcClient.CoreV1().PersistentVolumeClaims(svNamespace).Get(ctx, svpvcName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - svcPV := getPvFromClaim(svcClient, svNamespace, svpvcName) + svcPV, err := svcClient.CoreV1().PersistentVolumes().Get(ctx, svcPVC.Spec.VolumeName, metav1.GetOptions{}) + gomega.Expect(svcPVC).NotTo(gomega.BeNil()) verifyBidirectionalReferenceOfPVandPVC(ctx, svcClient, svcPVC, svcPV, fcdID) //Create PVC,PV in GC2 @@ -1038,7 +1050,8 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po ginkgo.By("verify created PV, PVC and check the bidirectional reference") svcPVC, err := svcClient.CoreV1().PersistentVolumeClaims(svNamespace).Get(ctx, svpvcName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - svcPV := getPvFromClaim(svcClient, svNamespace, svpvcName) + svcPV, err := svcClient.CoreV1().PersistentVolumes().Get(ctx, svcPVC.Spec.VolumeName, metav1.GetOptions{}) + gomega.Expect(svcPVC).NotTo(gomega.BeNil()) verifyBidirectionalReferenceOfPVandPVC(ctx, svcClient, svcPVC, svcPV, fcdID) pvcNew, pvNew := createStaticPVandPVCinGuestCluster(client, ctx, namespace, svpvcName, diff --git a/tests/e2e/gc_block_volume_expansion.go b/tests/e2e/gc_block_volume_expansion.go index 865791ef8e..fa91172cfd 100644 --- a/tests/e2e/gc_block_volume_expansion.go +++ b/tests/e2e/gc_block_volume_expansion.go @@ -67,6 +67,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { restConfig *restclient.Config isVsanHealthServiceStopped bool isGCCSIDeploymentPODdown bool + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { ctx, cancel := context.WithCancel(context.Background()) @@ -74,6 +75,16 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { client = f.ClientSet namespace = f.Namespace.Name bootstrap() + + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + 
client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -107,7 +118,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv = persistentvolumes[0] volHandle = getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle) @@ -146,7 +157,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { volumeExists := verifyVolumeExistInSupervisorCluster(svcPVCName) gomega.Expect(volumeExists).To(gomega.BeFalse()) } - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) svcClient, svNamespace = getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) @@ -1247,7 +1258,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { var pvcs []*v1.PersistentVolumeClaim pvcs = append(pvcs, pvc) ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvol := persistentvolumes[0] volHandleSvc := getVolumeIDFromSupervisorCluster(pvol.Spec.CSI.VolumeHandle) @@ -1261,7 +1272,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify volume is deleted in Supervisor Cluster") err = waitTillVolumeIsDeletedInSvc(svcPvcName, poll, pollTimeoutShort) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() // Create a Pod to use this PVC, and verify volume has been attached. 
@@ -1455,7 +1466,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { var pvcs []*v1.PersistentVolumeClaim pvcs = append(pvcs, pvc) ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvol := persistentvolumes[0] svcPvcName := pvol.Spec.CSI.VolumeHandle @@ -1468,7 +1479,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify volume is deleted in Supervisor Cluster") err = waitTillVolumeIsDeletedInSvc(svcPvcName, poll, pollTimeoutShort) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() diff --git a/tests/e2e/gc_cns_nodevm_attachment.go b/tests/e2e/gc_cns_nodevm_attachment.go index f31c1f98df..e0e6254817 100644 --- a/tests/e2e/gc_cns_nodevm_attachment.go +++ b/tests/e2e/gc_cns_nodevm_attachment.go @@ -49,18 +49,31 @@ var _ = ginkgo.Describe("[csi-guest] CnsNodeVmAttachment persistence", func() { pvclaim *v1.PersistentVolumeClaim svcPVCName string // PVC Name in the Supervisor Cluster. labels_ns map[string]string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet namespace = getNamespaceToRunTests(f) + var err error ctx, cancel := context.WithCancel(context.Background()) defer cancel() + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") } bootstrap() + + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) svcClient, svNamespace := getSvcClientAndNamespace() @@ -106,12 +119,12 @@ var _ = ginkgo.Describe("[csi-guest] CnsNodeVmAttachment persistence", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -189,19 +202,19 @@ var _ = ginkgo.Describe("[csi-guest] CnsNodeVmAttachment persistence", func() { sc1, pvc1, err := createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := 
client.StorageV1().StorageClasses().Delete(ctx, sc1.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc1.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() sc2, pvc2, err := createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc2.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc2.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for first persistent claim %s to be in bound phase", pvc1.Name)) - pvs1, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs1, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs1).NotTo(gomega.BeEmpty()) @@ -212,7 +225,7 @@ var _ = ginkgo.Describe("[csi-guest] CnsNodeVmAttachment persistence", func() { gomega.Expect(volumeID1).NotTo(gomega.BeEmpty()) ginkgo.By(fmt.Sprintf("Waiting for second persistent claim %s to be in bound phase", pvc2.Name)) - pvs2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs2).NotTo(gomega.BeEmpty()) @@ -271,12 +284,12 @@ var _ = ginkgo.Describe("[csi-guest] CnsNodeVmAttachment persistence", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -399,7 +412,7 @@ var _ = ginkgo.Describe("[csi-guest] CnsNodeVmAttachment persistence", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -577,12 +590,12 @@ var _ = ginkgo.Describe("[csi-guest] CnsNodeVmAttachment persistence", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -680,7 +693,7 @@ var _ = ginkgo.Describe("[csi-guest] 
CnsNodeVmAttachment persistence", func() { var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := persistentvolumes[0] volHandle := getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle) @@ -690,7 +703,7 @@ var _ = ginkgo.Describe("[csi-guest] CnsNodeVmAttachment persistence", func() { defer func() { err = client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvclaim.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() diff --git a/tests/e2e/gc_file_share_negative.go b/tests/e2e/gc_file_share_negative.go index 968ed8811e..0df2d09081 100644 --- a/tests/e2e/gc_file_share_negative.go +++ b/tests/e2e/gc_file_share_negative.go @@ -39,6 +39,7 @@ var _ = ginkgo.Describe("[csi-guest] File Share on Non File Service enabled setu namespace string scParameters map[string]string storagePolicyName string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -46,12 +47,23 @@ var _ = ginkgo.Describe("[csi-guest] File Share on Non File Service enabled setu namespace = getNamespaceToRunTests(f) ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var err error + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") } bootstrap() + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) svcClient, svNamespace := getSvcClientAndNamespace() @@ -100,7 +112,7 @@ var _ = ginkgo.Describe("[csi-guest] File Share on Non File Service enabled setu gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() diff --git a/tests/e2e/gc_full_sync.go b/tests/e2e/gc_full_sync.go index 8b4fc4db6b..76fb8917a4 100644 --- a/tests/e2e/gc_full_sync.go +++ b/tests/e2e/gc_full_sync.go @@ -50,18 +50,29 @@ var _ = ginkgo.Describe("[csi-guest] Guest cluster fullsync tests", func() { labelKey string labelValue string fullSyncWaitTime int + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet namespace = getNamespaceToRunTests(f) ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var err error nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) 
framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") } bootstrap() + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) labelKey = "app" @@ -114,12 +125,12 @@ var _ = ginkgo.Describe("[csi-guest] Guest cluster fullsync tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -208,12 +219,12 @@ var _ = ginkgo.Describe("[csi-guest] Guest cluster fullsync tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) diff --git a/tests/e2e/gc_metadata_syncer.go b/tests/e2e/gc_metadata_syncer.go index cd214a540a..6e1dcd40f0 100644 --- a/tests/e2e/gc_metadata_syncer.go +++ b/tests/e2e/gc_metadata_syncer.go @@ -62,11 +62,13 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { pvc *v1.PersistentVolumeClaim isVsanHealthServiceStopped bool labels_ns map[string]string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet namespace = getNamespaceToRunTests(f) + var err error svcClient, svNamespace = getSvcClientAndNamespace() ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -80,6 +82,15 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { framework.Failf("Unable to find ready and schedulable Node") } bootstrap() + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } setResourceQuota(svcClient, svNamespace, rqLimit) scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) @@ -127,12 +138,12 @@ var _ = ginkgo.Describe("[csi-guest] 
pvCSI metadata syncer tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -219,12 +230,12 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -300,12 +311,12 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -399,7 +410,7 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { sc, err = client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -407,7 +418,7 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { statefulset := fss.CreateStatefulSet(ctx, client, manifestPath, namespace) defer func() { ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) if supervisorCluster { ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace)) err := client.CoreV1().Services(namespace).Delete(ctx, servicename, *metav1.NewDeleteOptions(0)) @@ -523,12 +534,12 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, 
*metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -639,14 +650,14 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { sc, pvc, err := createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -724,12 +735,12 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { sc, pvc, err := createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -859,14 +870,14 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = client.StorageV1().StorageClasses().Delete(ctx, sc2.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, sc2.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc1.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1, pvc2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -923,14 +934,14 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { sc, pvc, err := createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", 
nil, "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -1001,12 +1012,12 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -1155,7 +1166,7 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if pvclaim != nil { ginkgo.By("Delete the PVC") @@ -1168,7 +1179,7 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for all claims to be in bound state") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + _, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := getPvFromClaim(client, pvclaim.Namespace, pvclaim.Name) @@ -1220,7 +1231,7 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { crdCNSVolumeMetadatas, crdVersion, crdGroup, true, pv.Spec.CSI.VolumeHandle, false, nil, false) ginkgo.By(fmt.Sprintf("Delete the PV %s", pv.Name)) - err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) verifyEntityReferenceInCRDInSupervisor(ctx, f, gcClusterID+pvUID, @@ -1378,7 +1389,7 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if pvclaim != nil { 
ginkgo.By("Delete the PVC") @@ -1391,7 +1402,7 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for all claims to be in bound state") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + _, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := getPvFromClaim(client, pvclaim.Namespace, pvclaim.Name) @@ -1445,7 +1456,7 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { crdCNSVolumeMetadatas, crdVersion, crdGroup, true, pv.Spec.CSI.VolumeHandle, false, nil, false) ginkgo.By(fmt.Sprintf("Delete the PV %s", pv.Name)) - err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) verifyEntityReferenceInCRDInSupervisor(ctx, f, gcClusterID+pvUID, @@ -1493,7 +1504,7 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { crdCNSVolumeMetadatas, crdVersion, crdGroup, true, gcClusterID+pvNewUID, false, nil, false) ginkgo.By(fmt.Sprintf("Delete the PV %s", pvNew.Name)) - err = client.CoreV1().PersistentVolumes().Delete(ctx, pvNew.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pvNew.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) verifyEntityReferenceInCRDInSupervisor(ctx, f, gcClusterID+pvNewUID, crdCNSVolumeMetadatas, crdVersion, crdGroup, false, gcClusterID+pvNewUID, false, nil, false) diff --git a/tests/e2e/gc_rwx_basic.go b/tests/e2e/gc_rwx_basic.go index b01e855a66..ceeda8a90c 100644 --- a/tests/e2e/gc_rwx_basic.go +++ b/tests/e2e/gc_rwx_basic.go @@ -44,6 +44,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Basic File Volume Provision Test", ginkgo namespace string scParameters map[string]string storagePolicyName string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -52,8 +53,19 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Basic File Volume Provision Test", ginkgo scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) bootstrap() + var err error ctx, cancel := context.WithCancel(context.Background()) defer cancel() + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -97,12 +109,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Basic File Volume Provision Test", ginkgo gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := 
fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -204,12 +216,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Basic File Volume Provision Test", ginkgo gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -351,12 +363,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Basic File Volume Provision Test", ginkgo gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") diff --git a/tests/e2e/gc_rwx_deployments.go b/tests/e2e/gc_rwx_deployments.go index 0f848ad13f..5ddfe228e3 100644 --- a/tests/e2e/gc_rwx_deployments.go +++ b/tests/e2e/gc_rwx_deployments.go @@ -46,6 +46,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Deployments", scParameters map[string]string storagePolicyName string volHealthCheck bool + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -60,6 +61,16 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Deployments", bootstrap() ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -118,7 +129,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Deployments", gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -131,7 +142,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Deployments", gomega.Expect(err).NotTo(gomega.HaveOccurred()) 
ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim, pvc2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -226,7 +237,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Deployments", defer func() { framework.Logf("Delete deployment set") - err := client.AppsV1().Deployments(namespace).Delete(ctx, dep.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.AppsV1().Deployments(namespace).Delete(ctx, dep.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() diff --git a/tests/e2e/gc_rwx_destructive.go b/tests/e2e/gc_rwx_destructive.go index 7c7cded116..f5b3a51921 100644 --- a/tests/e2e/gc_rwx_destructive.go +++ b/tests/e2e/gc_rwx_destructive.go @@ -60,6 +60,8 @@ var _ = ginkgo.Describe("[rwm-csi-destructive-tkg] Statefulsets with File Volume svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, rqLimit) bootstrap() + var err error + ctx, cancel := context.WithCancel(context.Background()) defer cancel() nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) diff --git a/tests/e2e/gc_rwx_multi_gc.go b/tests/e2e/gc_rwx_multi_gc.go index 32db26ff48..9d6de13885 100644 --- a/tests/e2e/gc_rwx_multi_gc.go +++ b/tests/e2e/gc_rwx_multi_gc.go @@ -57,6 +57,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across TKG clusters", gi // TODO: Read value from command line volHealthCheck = false namespace = getNamespaceToRunTests(f) + scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) svcClient, svNamespace := getSvcClientAndNamespace() @@ -151,7 +152,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across TKG clusters", gi }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -418,7 +419,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across TKG clusters", gi }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -654,7 +655,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across TKG clusters", gi }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") diff --git a/tests/e2e/gc_rwx_multi_ns_gc.go b/tests/e2e/gc_rwx_multi_ns_gc.go index fbc8ca26a1..66778b7531 100644 --- a/tests/e2e/gc_rwx_multi_ns_gc.go +++ b/tests/e2e/gc_rwx_multi_ns_gc.go @@ -47,12 +47,15 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across Namespace", ginkg storagePolicyName string 
volHealthCheck bool labels_ns map[string]string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet // TODO: Read value from command line volHealthCheck = false namespace = getNamespaceToRunTests(f) + var err error + scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) svcClient, svNamespace := getSvcClientAndNamespace() @@ -60,6 +63,15 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across Namespace", ginkg bootstrap() ctx, cancel := context.WithCancel(context.Background()) defer cancel() + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -148,7 +160,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across Namespace", ginkg framework.Logf("PVC UUID in GC %q", pvcUID) ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -194,14 +206,14 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across Namespace", ginkg // Changing the reclaim policy of the pv to retain. ginkgo.By("Changing the volume reclaim policy") persistentvolumes[0].Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimRetain - pv, err = client.CoreV1().PersistentVolumes().Update(ctx, persistentvolumes[0], + pv, err = adminClient.CoreV1().PersistentVolumes().Update(ctx, persistentvolumes[0], metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if pv != nil { ginkgo.By("Deleting the PV1") - err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -226,13 +238,13 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across Namespace", ginkg ginkgo.By("Creating the PV in guest cluster") pv2 := getPersistentVolumeSpecForRWX(pvcNameInSV, v1.PersistentVolumeReclaimDelete, staticPVLabels, diskSize, "", v1.ReadWriteMany) - pv2, err = client.CoreV1().PersistentVolumes().Create(ctx, pv2, metav1.CreateOptions{}) + pv2, err = adminClient.CoreV1().PersistentVolumes().Create(ctx, pv2, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if pv2 != nil { ginkgo.By("Deleting the PV2") - err = client.CoreV1().PersistentVolumes().Delete(ctx, pv2.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv2.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -268,7 +280,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across Namespace", ginkg verifyCRDInSupervisorWithWait(ctx, f, pvcNameInSV, crdCNSVolumeMetadatas, crdVersion, crdGroup, false) ginkgo.By("Deleting the PV1") - err = 
client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv = nil }() diff --git a/tests/e2e/gc_rwx_non_vsan_datastore.go b/tests/e2e/gc_rwx_non_vsan_datastore.go index b557f9c689..d6b0b3797d 100644 --- a/tests/e2e/gc_rwx_non_vsan_datastore.go +++ b/tests/e2e/gc_rwx_non_vsan_datastore.go @@ -39,6 +39,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Non-VSAN datas namespace string scParameters map[string]string nonVsanStoragePolicyName string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -47,8 +48,18 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Non-VSAN datas scParameters = make(map[string]string) nonVsanStoragePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForNonSharedDatastores) bootstrap() + var err error ctx, cancel := context.WithCancel(context.Background()) defer cancel() + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -86,7 +97,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Non-VSAN datas gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -122,7 +133,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Non-VSAN datas gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() diff --git a/tests/e2e/gc_rwx_operation_storm.go b/tests/e2e/gc_rwx_operation_storm.go index b29b772397..745ee5ba73 100644 --- a/tests/e2e/gc_rwx_operation_storm.go +++ b/tests/e2e/gc_rwx_operation_storm.go @@ -50,14 +50,27 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Operation storm Test", ginkgo podArray []*v1.Pod scParameters map[string]string storagePolicyName string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet // TODO: Read value from command line volHealthCheck = false + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() volumeOpsScale = 5 namespace = getNamespaceToRunTests(f) + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } scParameters = make(map[string]string) storagePolicyName = 
GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) svcClient, svNamespace := getSvcClientAndNamespace() @@ -65,8 +78,6 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Operation storm Test", ginkgo pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale) podArray = make([]*v1.Pod, volumeOpsScale) bootstrap() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -122,12 +133,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Operation storm Test", ginkgo gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -293,7 +304,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Operation storm Test", ginkgo ginkgo.By("Delete all the pods concurrently") for _, podToDelete := range pods { - err = client.CoreV1().Pods(namespace).Delete(ctx, podToDelete.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().Pods(namespace).Delete(ctx, podToDelete.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -354,7 +365,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Operation storm Test", ginkgo gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -380,7 +391,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Operation storm Test", ginkgo for index, claim := range pvclaims { framework.Logf("Waiting for all claims %s to be in bound state - PVC number %d", claim.Name, index) - pv, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{claim}, + pv, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{claim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) persistentvolumes[index] = pv[0] diff --git a/tests/e2e/gc_rwx_parallel_claim.go b/tests/e2e/gc_rwx_parallel_claim.go index baddf8e11d..63d0808b22 100644 --- a/tests/e2e/gc_rwx_parallel_claim.go +++ b/tests/e2e/gc_rwx_parallel_claim.go @@ -40,15 +40,26 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] PVCs claiming the available resource in p storagePolicyName string svcClient clientset.Interface svcNamespace string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet namespace = getNamespaceToRunTests(f) + var err error + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := 
namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) bootstrap() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -92,9 +103,9 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] PVCs claiming the available resource in p setResourceQuota(svcClient, svcNamespace, rqLimit) }() - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if err == nil && storageclass != nil { - gomega.Expect(client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))). + gomega.Expect(adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))). NotTo(gomega.HaveOccurred()) } @@ -119,12 +130,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] PVCs claiming the available resource in p }() framework.Logf("Waiting for claims %s to be in bound state", pvclaims[0].Name) - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaims[0]}, + _, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaims[0]}, healthStatusWaitTime) gomega.Expect(err).To(gomega.HaveOccurred()) framework.Logf("Waiting for claims %s to be in bound state", pvclaims[1].Name) - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaims[1]}, + _, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaims[1]}, healthStatusWaitTime) gomega.Expect(err).To(gomega.HaveOccurred()) }) diff --git a/tests/e2e/gc_rwx_readonly.go b/tests/e2e/gc_rwx_readonly.go index fa526309c4..d76394150f 100644 --- a/tests/e2e/gc_rwx_readonly.go +++ b/tests/e2e/gc_rwx_readonly.go @@ -44,15 +44,26 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", ginkg namespace string scParameters map[string]string storagePolicyName string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet namespace = getNamespaceToRunTests(f) scParameters = make(map[string]string) - storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) - bootstrap() ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } + storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) + bootstrap() nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -99,12 +110,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", ginkg gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = 
adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -215,7 +226,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", ginkg gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -225,12 +236,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", ginkg gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc2.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc2.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim, pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -391,12 +402,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", ginkg gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -568,12 +579,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", ginkg gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -651,7 +662,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", ginkg gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.CoreV1().PersistentVolumes().Delete(ctx, 
pv2.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv2.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -678,12 +689,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", ginkg gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc3.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc3.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes3, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim3}, + persistentvolumes3, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim3}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") diff --git a/tests/e2e/gc_rwx_reclaim_policy.go b/tests/e2e/gc_rwx_reclaim_policy.go index 53ce0d9a47..576bab054b 100644 --- a/tests/e2e/gc_rwx_reclaim_policy.go +++ b/tests/e2e/gc_rwx_reclaim_policy.go @@ -47,19 +47,30 @@ var _ = ginkgo.Describe("File Volume Test for Reclaim Policy", ginkgo.Label(p1, scParameters map[string]string storagePolicyName string volHealthCheck bool + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet // TODO: Read value from command line volHealthCheck = false namespace = getNamespaceToRunTests(f) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } svcClient, svNamespace := getSvcClientAndNamespace() scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) setResourceQuota(svcClient, svNamespace, rqLimit) bootstrap() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -135,12 +146,12 @@ var _ = ginkgo.Describe("File Volume Test for Reclaim Policy", ginkgo.Label(p1, framework.Logf("PVC UUID in GC %q", pvcUID) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -267,13 +278,13 @@ var _ = ginkgo.Describe("File Volume Test for Reclaim Policy", ginkgo.Label(p1, defer func() { if pv != nil { ginkgo.By("Deleting the PV1") - err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) + err = 
adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if pv2 != nil { ginkgo.By("Deleting the PV2") - err = client.CoreV1().PersistentVolumes().Delete(ctx, pv2.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv2.Name, *metav1.NewDeleteOptions(0)) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -287,7 +298,7 @@ var _ = ginkgo.Describe("File Volume Test for Reclaim Policy", ginkgo.Label(p1, defer func() { ginkgo.By("Deleting the PV1") - err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv = nil @@ -404,12 +415,12 @@ var _ = ginkgo.Describe("File Volume Test for Reclaim Policy", ginkgo.Label(p1, gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -441,7 +452,7 @@ var _ = ginkgo.Describe("File Volume Test for Reclaim Policy", ginkgo.Label(p1, defer func() { ginkgo.By("Deleting the PV2") - err = client.CoreV1().PersistentVolumes().Delete(ctx, pv2.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv2.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -527,12 +538,12 @@ var _ = ginkgo.Describe("File Volume Test for Reclaim Policy", ginkgo.Label(p1, framework.Logf("PVC UUID in GC %q", pvcUID) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -592,7 +603,7 @@ var _ = ginkgo.Describe("File Volume Test for Reclaim Policy", ginkgo.Label(p1, defer func() { ginkgo.By("Deleting the PV2") - err = client.CoreV1().PersistentVolumes().Delete(ctx, pv2.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv2.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() diff --git a/tests/e2e/gc_rwx_security_context.go b/tests/e2e/gc_rwx_security_context.go index 8c2ae1f3ea..f8aeb17e98 100644 --- a/tests/e2e/gc_rwx_security_context.go +++ b/tests/e2e/gc_rwx_security_context.go @@ -44,6 +44,7 @@ var _ = ginkgo.Describe("File Volume Test with security context", ginkgo.Label(p scParameters map[string]string 
storagePolicyName string volHealthCheck bool + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -51,13 +52,23 @@ var _ = ginkgo.Describe("File Volume Test with security context", ginkgo.Label(p // TODO: Read value from command line volHealthCheck = false namespace = getNamespaceToRunTests(f) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } svcClient, svNamespace := getSvcClientAndNamespace() scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) setResourceQuota(svcClient, svNamespace, rqLimit) bootstrap() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -120,12 +131,12 @@ var _ = ginkgo.Describe("File Volume Test with security context", ginkgo.Label(p gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -320,12 +331,12 @@ var _ = ginkgo.Describe("File Volume Test with security context", ginkgo.Label(p gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -500,7 +511,7 @@ var _ = ginkgo.Describe("File Volume Test with security context", ginkgo.Label(p gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -563,7 +574,7 @@ var _ = ginkgo.Describe("File Volume Test with security context", ginkgo.Label(p gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := 
WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") diff --git a/tests/e2e/gc_rwx_service_down.go b/tests/e2e/gc_rwx_service_down.go index 912b6e12a6..9873fed120 100644 --- a/tests/e2e/gc_rwx_service_down.go +++ b/tests/e2e/gc_rwx_service_down.go @@ -46,6 +46,7 @@ var _ = ginkgo.Describe("File Volume Test on Service down", ginkgo.Label(p1, tkg volHealthCheck bool isVsanHealthServiceStopped bool fullSyncWaitTime int + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -54,13 +55,23 @@ var _ = ginkgo.Describe("File Volume Test on Service down", ginkgo.Label(p1, tkg volHealthCheck = false isVsanHealthServiceStopped = false namespace = getNamespaceToRunTests(f) + var err error + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } svcClient, svNamespace := getSvcClientAndNamespace() scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) setResourceQuota(svcClient, svNamespace, rqLimit) bootstrap() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -128,7 +139,7 @@ var _ = ginkgo.Describe("File Volume Test on Service down", ginkgo.Label(p1, tkg gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -167,7 +178,7 @@ var _ = ginkgo.Describe("File Volume Test on Service down", ginkgo.Label(p1, tkg ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", vsanhealthServiceName)) startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) - pvArray, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, + pvArray, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -289,12 +300,12 @@ var _ = ginkgo.Describe("File Volume Test on Service down", ginkgo.Label(p1, tkg framework.Logf("PVC UUID in GC %q", pvcUID) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -521,12 +532,12 @@ var _ = ginkgo.Describe("File Volume Test on Service down", 
ginkgo.Label(p1, tkg framework.Logf("PVC UUID in GC %q", pvcUID) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -680,12 +691,12 @@ var _ = ginkgo.Describe("File Volume Test on Service down", ginkgo.Label(p1, tkg gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -779,7 +790,7 @@ var _ = ginkgo.Describe("File Volume Test on Service down", ginkgo.Label(p1, tkg defer func() { if pv2 != nil { ginkgo.By("Deleting the PV2") - err = client.CoreV1().PersistentVolumes().Delete(ctx, pv2.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv2.Name, *metav1.NewDeleteOptions(0)) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -896,12 +907,12 @@ var _ = ginkgo.Describe("File Volume Test on Service down", ginkgo.Label(p1, tkg gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") diff --git a/tests/e2e/gc_rwx_statefulsets.go b/tests/e2e/gc_rwx_statefulsets.go index d00589cee5..a425c7b319 100644 --- a/tests/e2e/gc_rwx_statefulsets.go +++ b/tests/e2e/gc_rwx_statefulsets.go @@ -47,6 +47,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Statefulsets", isSTSDeleted bool isServiceDeleted bool missingPodAndVolume map[string]string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet @@ -55,13 +56,23 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Statefulsets", isSTSDeleted = false isServiceDeleted = false namespace = getNamespaceToRunTests(f) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == 
"yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, rqLimit) bootstrap() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -74,7 +85,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Statefulsets", defer cancel() if !isSTSDeleted { ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) } if !isServiceDeleted { @@ -123,11 +134,11 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Statefulsets", ginkgo.By("Creating StorageClass for Statefulset") scParameters[svStorageClassName] = storagePolicyName scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, scParameters, nil, "", "", false) - sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -362,7 +373,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Statefulsets", "Number of Pods in the statefulset should match with number of replicas") ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) isSTSDeleted = true ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace)) @@ -422,11 +433,11 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Statefulsets", ginkgo.By("Creating StorageClass for Statefulset") scParameters[svStorageClassName] = storagePolicyName scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, scParameters, nil, "", "", false) - sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -661,7 +672,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Statefulsets", "Number of Pods in the statefulset should match with number of replicas") ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) isSTSDeleted = true ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace)) diff 
--git a/tests/e2e/gc_rwx_static_provision.go b/tests/e2e/gc_rwx_static_provision.go index a02946c74d..cf65b958e3 100644 --- a/tests/e2e/gc_rwx_static_provision.go +++ b/tests/e2e/gc_rwx_static_provision.go @@ -44,15 +44,27 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume static Provision Test", ginkg namespace string scParameters map[string]string storagePolicyName string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet namespace = getNamespaceToRunTests(f) + scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) - bootstrap() ctx, cancel := context.WithCancel(context.Background()) defer cancel() + bootstrap() + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -106,12 +118,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume static Provision Test", ginkg gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -286,12 +298,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume static Provision Test", ginkg gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") diff --git a/tests/e2e/gc_rwx_syncer.go b/tests/e2e/gc_rwx_syncer.go index 363218d0dc..cbefa6e4ae 100644 --- a/tests/e2e/gc_rwx_syncer.go +++ b/tests/e2e/gc_rwx_syncer.go @@ -46,6 +46,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for label updates", gink storagePolicyName string volHealthCheck bool isVsanHealthServiceStopped bool + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -54,13 +55,23 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for label updates", gink volHealthCheck = false isVsanHealthServiceStopped = false namespace = getNamespaceToRunTests(f) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var err error + runningAsDevopsUser := 
GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } svcClient, svNamespace := getSvcClientAndNamespace() scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) setResourceQuota(svcClient, svNamespace, rqLimit) bootstrap() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -119,12 +130,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for label updates", gink gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -299,12 +310,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for label updates", gink framework.Logf("PVC UUID in GC %q", pvcUID) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -364,7 +375,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for label updates", gink defer func() { ginkgo.By("Deleting the PV2") - err = client.CoreV1().PersistentVolumes().Delete(ctx, pv2.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.CoreV1().PersistentVolumes().Delete(ctx, pv2.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -636,12 +647,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for label updates", gink framework.Logf("PVC UUID in GC %q", pvcUID) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -802,12 +813,12 @@ 
var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for label updates", gink gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") diff --git a/tests/e2e/gc_rwx_tkg_scale.go b/tests/e2e/gc_rwx_tkg_scale.go index 5ef6c8d0b3..25a6e4fa87 100644 --- a/tests/e2e/gc_rwx_tkg_scale.go +++ b/tests/e2e/gc_rwx_tkg_scale.go @@ -44,6 +44,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] TKG RWX for STS with GC worker nodes scal volHealthCheck bool isSTSDeleted bool isServiceDeleted bool + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -53,13 +54,23 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] TKG RWX for STS with GC worker nodes scal isSTSDeleted = false isServiceDeleted = false namespace = getNamespaceToRunTests(f) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, rqLimit) bootstrap() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -72,7 +83,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] TKG RWX for STS with GC worker nodes scal defer cancel() if !isSTSDeleted { ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) } if !isServiceDeleted { ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace)) @@ -119,11 +130,11 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] TKG RWX for STS with GC worker nodes scal ginkgo.By("Creating StorageClass for Statefulset") scParameters[svStorageClassName] = storagePolicyName scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, scParameters, nil, "", "", false) - sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -150,7 +161,7 @@ var _ = 
ginkgo.Describe("[rwm-csi-tkg] TKG RWX for STS with GC worker nodes scal defer func() { if !isSTSDeleted { ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) isSTSDeleted = true } }() @@ -307,7 +318,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] TKG RWX for STS with GC worker nodes scal } ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) isSTSDeleted = true ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace)) @@ -366,11 +377,11 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] TKG RWX for STS with GC worker nodes scal ginkgo.By("Creating StorageClass for Statefulset") scParameters[svStorageClassName] = storagePolicyName scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, scParameters, nil, "", "", false) - sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -396,7 +407,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] TKG RWX for STS with GC worker nodes scal defer func() { if !isSTSDeleted { ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) isSTSDeleted = true } }() @@ -552,7 +563,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] TKG RWX for STS with GC worker nodes scal } ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) isSTSDeleted = true ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace)) diff --git a/tests/e2e/gc_rwx_volume_health.go b/tests/e2e/gc_rwx_volume_health.go index bde1d0bb72..bda9e54861 100644 --- a/tests/e2e/gc_rwx_volume_health.go +++ b/tests/e2e/gc_rwx_volume_health.go @@ -50,6 +50,7 @@ var _ = ginkgo.Describe("File Volume Test volume health plumbing", ginkgo.Label( volHealthCheck = true isVsanHealthServiceStopped = false namespace = getNamespaceToRunTests(f) + svcClient, svNamespace := getSvcClientAndNamespace() scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) @@ -116,7 +117,7 @@ var _ = ginkgo.Describe("File Volume Test volume health plumbing", ginkgo.Label( }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") @@ -253,7 +254,7 @@ var _ = ginkgo.Describe("File Volume Test volume health plumbing", ginkgo.Label( }() ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + persistentvolumes, 
err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") diff --git a/tests/e2e/improved_csi_idempotency.go b/tests/e2e/improved_csi_idempotency.go index 5b9f9c7add..84947e0aa3 100644 --- a/tests/e2e/improved_csi_idempotency.go +++ b/tests/e2e/improved_csi_idempotency.go @@ -58,18 +58,30 @@ var _ = ginkgo.Describe("Improved CSI Idempotency Tests", func() { serviceName string csiReplicaCount int32 deployment *appsv1.Deployment + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { bootstrap() client = f.ClientSet namespace = getNamespaceToRunTests(f) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var err error + var nodeList *v1.NodeList + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } + scParameters = make(map[string]string) isServiceStopped = false storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -113,7 +125,8 @@ var _ = ginkgo.Describe("Improved CSI Idempotency Tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) c = remoteC } - deployment, err = c.AppsV1().Deployments(csiSystemNamespace).Get(ctx, + + deployment, err = adminClient.AppsV1().Deployments(csiSystemNamespace).Get(ctx, vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) csiReplicaCount = *deployment.Spec.Replicas @@ -153,10 +166,14 @@ var _ = ginkgo.Describe("Improved CSI Idempotency Tests", func() { } ginkgo.By(fmt.Sprintf("Resetting provisioner time interval to %s sec", defaultProvisionerTimeInSec)) - updateCSIDeploymentProvisionerTimeout(c, csiSystemNamespace, defaultProvisionerTimeInSec) + if vanillaCluster { + updateCSIDeploymentProvisionerTimeout(c, csiSystemNamespace, defaultProvisionerTimeInSec) + } else { + updateCSIDeploymentProvisionerTimeout(adminClient, csiSystemNamespace, defaultProvisionerTimeInSec) + } if supervisorCluster { - deleteResourceQuota(client, namespace) + deleteResourceQuota(adminClient, namespace) dumpSvcNsEventsOnTestFailure(client, namespace) } if guestCluster { @@ -180,7 +197,7 @@ var _ = ginkgo.Describe("Improved CSI Idempotency Tests", func() { ginkgo.It("[csi-block-vanilla] [csi-file-vanilla][csi-guest] [csi-supervisor] "+ "Reduce external provisioner timeout and create volumes - idempotency", ginkgo.Label(p0, disruptive, block, file, windows, wcp, tkg, vanilla, vc70), func() { - createVolumesByReducingProvisionerTime(namespace, client, storagePolicyName, scParameters, + createVolumesByReducingProvisionerTime(namespace, client, adminClient, storagePolicyName, scParameters, volumeOpsScale, shortProvisionerTimeout, c) }) @@ -296,7 +313,7 @@ var _ = ginkgo.Describe("Improved CSI Idempotency Tests", func() { "extend volume when csi restarts - idempotency", ginkgo.Label(p0, disruptive, block, file, windows, wcp, tkg, vanilla, 
vc70), func() { serviceName = "CSI" - extendVolumeWithServiceDown(serviceName, namespace, client, storagePolicyName, scParameters, + extendVolumeWithServiceDown(serviceName, namespace, client, adminClient, storagePolicyName, scParameters, volumeOpsScale, true, isServiceStopped, c) }) @@ -315,7 +332,7 @@ var _ = ginkgo.Describe("Improved CSI Idempotency Tests", func() { "extend volume when CNS goes down - idempotency", ginkgo.Label(p0, disruptive, block, file, windows, wcp, tkg, vanilla, vc70), func() { serviceName = vsanhealthServiceName - extendVolumeWithServiceDown(serviceName, namespace, client, storagePolicyName, scParameters, + extendVolumeWithServiceDown(serviceName, namespace, client, adminClient, storagePolicyName, scParameters, volumeOpsScale, true, isServiceStopped, c) }) @@ -339,7 +356,7 @@ var _ = ginkgo.Describe("Improved CSI Idempotency Tests", func() { }) // createVolumesByReducingProvisionerTime creates the volumes by reducing the provisioner timeout -func createVolumesByReducingProvisionerTime(namespace string, client clientset.Interface, storagePolicyName string, +func createVolumesByReducingProvisionerTime(namespace string, client clientset.Interface, adminClient clientset.Interface, storagePolicyName string, scParameters map[string]string, volumeOpsScale int, customProvisionerTimeout string, c clientset.Interface) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -350,7 +367,6 @@ func createVolumesByReducingProvisionerTime(namespace string, client clientset.I var pvclaims []*v1.PersistentVolumeClaim var err error pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale) - // Decide which test setup is available to run if vanillaCluster { ginkgo.By("CNS_TEST: Running for vanilla k8s setup") @@ -376,7 +392,6 @@ func createVolumesByReducingProvisionerTime(namespace string, client clientset.I profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy) scParameters[scParamStoragePolicyID] = profileID // create resource quota - createResourceQuota(client, namespace, rqLimit, thickProvPolicy) storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, thickProvPolicy) } else { ginkgo.By("CNS_TEST: Running for GC setup") @@ -384,9 +399,8 @@ func createVolumesByReducingProvisionerTime(namespace string, client clientset.I if thickProvPolicy == "" { ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") } - createResourceQuota(client, namespace, rqLimit, thickProvPolicy) scParameters[svStorageClassName] = thickProvPolicy - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -402,15 +416,14 @@ func createVolumesByReducingProvisionerTime(namespace string, client clientset.I }() // TODO: Stop printing csi logs on the console - collectPodLogs(ctx, c, csiSystemNamespace) + collectPodLogs(ctx, adminClient, csiSystemNamespace) // This assumes the tkg-controller-manager's auto sync is disabled ginkgo.By(fmt.Sprintf("Reducing Provisioner time interval to %s Sec for the test...", customProvisionerTimeout)) - updateCSIDeploymentProvisionerTimeout(c, csiSystemNamespace, customProvisionerTimeout) - + updateCSIDeploymentProvisionerTimeout(adminClient, csiSystemNamespace, customProvisionerTimeout) defer func() { ginkgo.By(fmt.Sprintf("Resetting provisioner time interval to %s sec", 
defaultProvisionerTimeInSec)) - updateCSIDeploymentProvisionerTimeout(c, csiSystemNamespace, defaultProvisionerTimeInSec) + updateCSIDeploymentProvisionerTimeout(adminClient, csiSystemNamespace, defaultProvisionerTimeInSec) }() ginkgo.By("Creating PVCs using the Storage Class") @@ -428,7 +441,7 @@ func createVolumesByReducingProvisionerTime(namespace string, client clientset.I } ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + persistentvolumes, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims, 2*framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -440,7 +453,7 @@ func createVolumesByReducingProvisionerTime(namespace string, client clientset.I } ginkgo.By("Verify PVs, volumes are deleted from CNS") for _, pv := range persistentvolumes { - err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, + err := fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, framework.Poll, framework.PodDeleteTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volumeID := pv.Spec.CSI.VolumeHandle @@ -467,6 +480,8 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl var fullSyncWaitTime int pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale) + adminClient, _ := initializeClusterClientsByUserRoles(client) + // Decide which test setup is available to run if vanillaCluster { ginkgo.By("CNS_TEST: Running for vanilla k8s setup") @@ -492,7 +507,6 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy) scParameters[scParamStoragePolicyID] = profileID // create resource quota - //createResourceQuota(client, namespace, rqLimit, thickProvPolicy) restConfig = getRestConfigClient() setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimit) storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, thickProvPolicy) @@ -502,9 +516,8 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl if thickProvPolicy == "" { ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") } - createResourceQuota(client, namespace, rqLimit, thickProvPolicy) scParameters[svStorageClassName] = thickProvPolicy - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -545,24 +558,30 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl if serviceName == "CSI" { // Get CSI Controller's replica count from the setup - deployment, err := c.AppsV1().Deployments(csiSystemNamespace).Get(ctx, - vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) + var deployment *appsv1.Deployment + if vanillaCluster { + deployment, err = c.AppsV1().Deployments(csiSystemNamespace).Get(ctx, + vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) + } else { + deployment, err = adminClient.AppsV1().Deployments(csiSystemNamespace).Get(ctx, + vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) csiReplicaCount := *deployment.Spec.Replicas ginkgo.By("Stopping CSI driver") - isServiceStopped, err = stopCSIPods(ctx, c, csiSystemNamespace) + isServiceStopped, err = stopCSIPods(ctx, 
adminClient, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if isServiceStopped { framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount, csiSystemNamespace) + isServiceStopped, err = startCSIPods(ctx, adminClient, csiReplicaCount, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount, csiSystemNamespace) + isServiceStopped, err = startCSIPods(ctx, adminClient, csiReplicaCount, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if os.Getenv(envFullSyncWaitTime) != "" { @@ -610,19 +629,19 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl isServiceStopped = false } else if serviceName == "storage-quota-webhook" { // Get CSI Controller's replica count from the setup - deployment, err := c.AppsV1().Deployments(kubeSystemNamespace).Get(ctx, + deployment, err := adminClient.AppsV1().Deployments(kubeSystemNamespace).Get(ctx, storageQuotaWebhookPrefix, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) csiReplicaCount := *deployment.Spec.Replicas ginkgo.By("Stopping webhook driver") - isServiceStopped, err = stopStorageQuotaWebhookPodInKubeSystem(ctx, c, kubeSystemNamespace) + isServiceStopped, err = stopStorageQuotaWebhookPodInKubeSystem(ctx, adminClient, kubeSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if isServiceStopped { framework.Logf("Starting storage-quota-webhook driver") - isServiceStopped, err = startStorageQuotaWebhookPodInKubeSystem(ctx, c, csiReplicaCount, kubeSystemNamespace) + isServiceStopped, err = startStorageQuotaWebhookPodInKubeSystem(ctx, adminClient, csiReplicaCount, kubeSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -680,7 +699,7 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl bootstrap() ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + persistentvolumes, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims, 2*framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -692,7 +711,7 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl } ginkgo.By("Verify PVs, volumes are deleted from CNS") for _, pv := range persistentvolumes { - err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, + err = fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, framework.Poll, framework.PodDeleteTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volumeID := pv.Spec.CSI.VolumeHandle @@ -718,7 +737,7 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl // extendVolumeWithServiceDown extends the volumes and immediately stops the service and wait for // the service to be up again and validates the volumes are bound -func extendVolumeWithServiceDown(serviceName string, namespace string, client clientset.Interface, +func extendVolumeWithServiceDown(serviceName string, namespace string, client clientset.Interface, adminClient clientset.Interface, storagePolicyName string, scParameters map[string]string, volumeOpsScale int, extendVolume bool, isServiceStopped bool, c clientset.Interface) { @@ -767,20 +786,19 @@ func extendVolumeWithServiceDown(serviceName string, namespace string, client cl if thickProvPolicy == "" { 
ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") } - createResourceQuota(client, namespace, rqLimit, thickProvPolicy) scParameters[svStorageClassName] = thickProvPolicy if windowsEnv { scParameters[scParamFsType] = ntfsFSType } else { scParameters[scParamFsType] = ext4FSType } - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } var allowExpansion = true storageclass.AllowVolumeExpansion = &allowExpansion - storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -812,7 +830,7 @@ func extendVolumeWithServiceDown(serviceName string, namespace string, client cl } ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + persistentvolumes, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims, 2*framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -824,8 +842,13 @@ func extendVolumeWithServiceDown(serviceName string, namespace string, client cl } ginkgo.By("Verify PVs, volumes are deleted from CNS") for _, pv := range persistentvolumes { - err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, - framework.PodDeleteTimeout) + if vanillaCluster { + err = fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, + framework.PodDeleteTimeout) + } else { + err = fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, framework.Poll, + framework.PodDeleteTimeout) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) volumeID := pv.Spec.CSI.VolumeHandle err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) @@ -874,13 +897,13 @@ func extendVolumeWithServiceDown(serviceName string, namespace string, client cl if serviceName == "CSI" { // Get CSI Controller's replica count from the setup - deployment, err := c.AppsV1().Deployments(csiSystemNamespace).Get(ctx, + deployment, err := adminClient.AppsV1().Deployments(csiSystemNamespace).Get(ctx, vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) csiReplicaCount := *deployment.Spec.Replicas ginkgo.By("Stopping CSI driver") - isServiceStopped, err = stopCSIPods(ctx, c, csiSystemNamespace) + isServiceStopped, err = stopCSIPods(ctx, adminClient, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -892,7 +915,11 @@ func extendVolumeWithServiceDown(serviceName string, namespace string, client cl }() framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount, csiSystemNamespace) + if vanillaCluster { + isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount, csiSystemNamespace) + } else { + isServiceStopped, err = startCSIPods(ctx, adminClient, csiReplicaCount, csiSystemNamespace) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) if os.Getenv(envFullSyncWaitTime) != "" { diff --git a/tests/e2e/labelupdates.go b/tests/e2e/labelupdates.go index e9e95d9e9f..141948721a 100644 --- a/tests/e2e/labelupdates.go +++ b/tests/e2e/labelupdates.go @@ -79,6 +79,7 @@ 
var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize storagePolicyName string scParameters map[string]string storageClassName string + adminClient clientset.Interface ) const ( fcdName = "BasicStaticFCD" ) ginkgo.BeforeEach(func() { @@ -87,8 +88,19 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize ctx, cancel := context.WithCancel(context.Background()) defer cancel() client = f.ClientSet namespace = getNamespaceToRunTests(f) + var err error + var nodeList *v1.NodeList + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } - nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") @@ -114,7 +126,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize ginkgo.AfterEach(func() { if supervisorCluster { - deleteResourceQuota(client, namespace) + deleteResourceQuota(adminClient, namespace) dumpSvcNsEventsOnTestFailure(client, namespace) } if guestCluster { @@ -155,7 +167,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -180,7 +192,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize ginkgo.By(fmt.Sprintf("Updating labels %+v for pv %s", labels, pv.Name)) pv.Labels = labels - _, err = client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{}) + _, err = adminClient.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Waiting for labels %+v to be updated for pvc %s in namespace %s", @@ -216,7 +228,6 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) scParameters[scParamStoragePolicyID] = profileID // create resource quota - createResourceQuota(client, namespace, rqLimit, storagePolicyName) sc, pvc, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", true, "", storagePolicyName) } @@ -231,7 +242,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -245,7 +256,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize ginkgo.By(fmt.Sprintf("Updating labels %+v for pv %s", labels, pv.Name)) pv.Labels = labels - _, err = client.CoreV1().PersistentVolumes().Update(ctx, pv,
metav1.UpdateOptions{}) + _, err = adminClient.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Waiting for labels %+v to be updated for pv %s", labels, pv.Name)) @@ -269,12 +280,12 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Fetching updated pv %s", pv.Name)) - pv, err = client.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) + pv, err = adminClient.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Deleting labels %+v for pv %s", labels, pv.Name)) pv.Labels = make(map[string]string) - _, err = client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{}) + _, err = adminClient.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Waiting for labels %+v to be deleted for pv %s", labels, pv.Name)) @@ -301,7 +312,6 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) scParameters[scParamStoragePolicyID] = profileID // create resource quota - createResourceQuota(client, namespace, rqLimit, storagePolicyName) sc, pvc, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", true, "", storagePolicyName) } @@ -316,7 +326,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -680,8 +690,6 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize } else { storageClassName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) ginkgo.By("CNS_TEST: Running for WCP setup") - profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) - scParameters[scParamStoragePolicyID] = profileID } ginkgo.By("Creating service") @@ -697,7 +705,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize CreateStatefulSet(namespace, statefulset, client) defer func() { ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() replicas := *(statefulset.Spec.Replicas) // Waiting for pods status to be Ready @@ -740,10 +748,10 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize } ginkgo.By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas+2)) - _, scaleupErr := fss.Scale(ctx, f.ClientSet, statefulset, replicas+2) + _, scaleupErr := fss.Scale(ctx, client, statefulset, replicas+2) gomega.Expect(scaleupErr).NotTo(gomega.HaveOccurred()) - fss.WaitForStatusReplicas(ctx, f.ClientSet, statefulset, replicas+2) - fss.WaitForStatusReadyReplicas(ctx, f.ClientSet, statefulset, replicas+2) + fss.WaitForStatusReplicas(ctx, client, statefulset, replicas+2) + fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas+2) pvlabels := make(map[string]string) pvlabels[pvlabelKey] = 
pvlabelValue @@ -758,7 +766,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize ginkgo.By(fmt.Sprintf("Updating labels %+v for pv %s", pvlabels, pv.Name)) pv.Labels = pvlabels - _, err = client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{}) + _, err = adminClient.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Waiting for labels %+v to be updated for pv %s", pvlabels, pv.Name)) @@ -771,10 +779,10 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize } ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", 0)) - _, scaledownErr := fss.Scale(ctx, f.ClientSet, statefulset, 0) + _, scaledownErr := fss.Scale(ctx, client, statefulset, 0) gomega.Expect(scaledownErr).NotTo(gomega.HaveOccurred()) - fss.WaitForStatusReadyReplicas(ctx, f.ClientSet, statefulset, 0) - ssPodsAfterScaleDown, err := fss.GetPodList(ctx, f.ClientSet, statefulset) + fss.WaitForStatusReadyReplicas(ctx, client, statefulset, 0) + ssPodsAfterScaleDown, err := fss.GetPodList(ctx, client, statefulset) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(0)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") diff --git a/tests/e2e/mgmt_wrkld_domain_isolation.go b/tests/e2e/mgmt_wrkld_domain_isolation.go index 8cc443c023..0e0a195a9a 100644 --- a/tests/e2e/mgmt_wrkld_domain_isolation.go +++ b/tests/e2e/mgmt_wrkld_domain_isolation.go @@ -47,7 +47,6 @@ import ( fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" - fss "k8s.io/kubernetes/test/e2e/framework/statefulset" admissionapi "k8s.io/pod-security-admission/api" snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" @@ -94,6 +93,8 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol vmClass string cnsopC ctlrclient.Client contentLibId string + adminClient clientset.Interface + devopsUser string ) ginkgo.BeforeEach(func() { @@ -104,6 +105,9 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol client = f.ClientSet bootstrap() + var err error + adminClient, client = initializeClusterClientsByUserRoles(client) + // reading vc session id if vcRestSessionId == "" { vcRestSessionId = createVcSession4RestApis(ctx) @@ -232,16 +236,16 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol topValEndIndex) namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{storageProfileId}, getSvcId(vcRestSessionId), - []string{zone2}, "", "") + []string{zone2}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read zonal-2 storage policy tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if 
!apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -256,7 +260,7 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol statefulset := createCustomisedStatefulSets(ctx, client, namespace, true, replicas, false, nil, false, true, "", "", storageclass, storageclass.Name) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By("Verify svc pv affinity, pvc annotation and pod node affinity") @@ -311,16 +315,16 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{storageProfileId}, getSvcId(vcRestSessionId), - []string{zone1}, "", "") + []string{zone1}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Fetch zone-1 storage policy tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -335,7 +339,7 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol statefulset := createCustomisedStatefulSets(ctx, client, namespace, true, replicas, false, nil, false, true, "", "", storageclass, storageclass.Name) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By("Verify svc pv affinity, pvc annotation and pod node affinity") @@ -388,16 +392,16 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol topValEndIndex) namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{storageProfileId}, getSvcId(vcRestSessionId), - []string{zone2}, "", "") + []string{zone2}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Fetch zone-2 storage policy tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyNameWffc, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyNameWffc, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -412,7 +416,7 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol statefulset := createCustomisedStatefulSets(ctx, client, namespace, true, replicas, false, nil, false, true, "", "", storageclass, storageclass.Name) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By("Verify 
svc pv affinity, pvc annotation and pod node affinity") @@ -480,16 +484,16 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol ginkgo.By("Create a WCP namespace and tag it to zone-2 and zone-3 wrkld " + "domains using storage policy compatible to all zones") namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{storageProfileId}, - getSvcId(vcRestSessionId), []string{zone2, zone3}, "", "") + getSvcId(vcRestSessionId), []string{zone2, zone3}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Fetch shared storage policy tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -540,7 +544,7 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol statefulset := createCustomisedStatefulSets(ctx, client, namespace, true, replicas, false, nil, false, true, "", "", storageclass, storageclass.Name) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By("Verify svc pv affinity, pvc annotation and pod node affinity") @@ -657,16 +661,16 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol // here fetching zone:zone-3 from topologyAffinityDetails namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{sharedStorageProfileId}, getSvcId(vcRestSessionId), - []string{zone3}, "", "") + []string{zone3}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read shared storage policy tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -681,7 +685,7 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol statefulset := createCustomisedStatefulSets(ctx, client, namespace, true, replicas, false, nil, false, true, "", "", storageclass, storageclass.Name) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By("Verify svc pv affinity, pvc annotation and pod node affinity") @@ -729,16 +733,16 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol ginkgo.By("Create a WCP namespace tagged to zone-1 & zone-2") namespace, statuscode, err = 
createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{sharedStorageProfileId}, getSvcId(vcRestSessionId), - []string{zone1, zone2}, "", "") + []string{zone1, zone2}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read shared storage policy tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -753,7 +757,7 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol statefulset := createCustomisedStatefulSets(ctx, client, namespace, true, replicas, false, nil, false, true, "", "", storageclass, storageclass.Name) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By("Verify svc pv affinity, pvc annotation and pod node affinity") @@ -846,16 +850,16 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol namespace, statuscode, err = createtWcpNsWithZonesAndPolicies( vcRestSessionId, []string{storageProfileIdZone1, storageProfileIdZone3}, - getSvcId(vcRestSessionId), []string{zone3}, "", "") + getSvcId(vcRestSessionId), []string{zone3}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read zonal storage policy of zone3") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalStoragePolicyZone3, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalStoragePolicyZone3, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -922,19 +926,19 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol ginkgo.By("Create a WCP namespace tagged to zone-1 & zone-2") namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{sharedStorageProfileId, storageProfileIdZ1, storageProfileIdZ2}, getSvcId(vcRestSessionId), - []string{zone1, zone2}, vmClass, contentLibId) + []string{zone1, zone2}, vmClass, contentLibId, devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Fetch storage class tagged to wcp namespace") storageClassNames := []string{sharedStoragePolicyName, storagePolicyNameZ1, storagePolicyNameZ2} 
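// NOTE (illustrative sketch, not part of the patch): the edits above consistently route
// cluster-scoped reads (StorageClasses, PVs, waits for namespace deletion) through adminClient,
// while namespaced test traffic stays on the restricted per-user client. The repo's
// initializeClusterClientsByUserRoles helper is not shown in this diff; the sketch below,
// with the assumed env var DEVOPS_USER_KUBECONFIG, is one way such a split could look.

package e2e

import (
	"os"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// splitClientsByRole returns (adminClient, userClient). When no devops kubeconfig is
// configured, both roles fall back to the default (admin) clientset.
func splitClientsByRole(defaultClient kubernetes.Interface) (kubernetes.Interface, kubernetes.Interface, error) {
	devopsKubeconfig := os.Getenv("DEVOPS_USER_KUBECONFIG") // assumed variable name, for illustration only
	if devopsKubeconfig == "" {
		return defaultClient, defaultClient, nil
	}
	cfg, err := clientcmd.BuildConfigFromFlags("", devopsKubeconfig)
	if err != nil {
		return nil, nil, err
	}
	userClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, nil, err
	}
	// defaultClient keeps cluster-admin credentials; userClient carries the restricted
	// devops-user credentials used for the namespaced operations in these tests.
	return defaultClient, userClient, nil
}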
storageClasses := make([]*storagev1.StorageClass, len(storageClassNames)) for i, name := range storageClassNames { - sc, err := client.StorageV1().StorageClasses().Get(ctx, name, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, name, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -973,7 +977,7 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol pod2, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim2}, false, execRWXCommandPod1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := pvs[0] vmUUID := getNodeUUID(ctx, client, pod2.Spec.NodeName) @@ -999,7 +1003,7 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol framework.Failf("Unable to find ready and schedulable Node") } ginkgo.By("Verify volume affinity annotation state") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim3}, pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim3}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv = pvs[0] allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, 2, @@ -1145,18 +1149,18 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol ginkgo.By("Create a WCP namespace tagged to zone-1, zone-2 & zone-3") namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{sharedStorageProfileId, storageProfileIdZ1, storageProfileIdZ2}, getSvcId(vcRestSessionId), - []string{zone1, zone2, zone3}, vmClass, contentLibId) + []string{zone1, zone2, zone3}, vmClass, contentLibId, devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() storageClassNames := []string{storagePolicyNameZ1, sharedStoragePolicyName} storageClasses := make([]*storagev1.StorageClass, len(storageClassNames)) for i, name := range storageClassNames { - sc, err := client.StorageV1().StorageClasses().Get(ctx, name, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, name, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1170,7 +1174,7 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for PVC to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify volume affinity annotation state") @@ -1212,7 +1216,7 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for PVC to 
be in bound state") - pvs2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, pollTimeout) + pvs2, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify volume affinity annotation state") @@ -1283,7 +1287,7 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol storageClassNames := []string{zonalPolicyNameWffc, sharedPolicyName} storageClasses := make([]*storagev1.StorageClass, len(storageClassNames)) for i, name := range storageClassNames { - sc, err := client.StorageV1().StorageClasses().Get(ctx, name, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, name, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1298,13 +1302,12 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol vcRestSessionId, []string{zonalStorageProfileId, sharedStorageProfileId}, getSvcId(vcRestSessionId), - []string{zone1}, "", "", - ) + []string{zone1}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() // Create PVCs @@ -1357,7 +1360,7 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol // Wait for PVCs to bound ginkgo.By("Wait for PVCs to be in bound state") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvcList, pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, pvcList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -1499,7 +1502,7 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol storageDatastoreUrlZone2 := GetAndExpectStringEnvVar(envZone2DatastoreUrl) ginkgo.By("Read zonal storage class of zone2") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1516,12 +1519,12 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol topValEndIndex) namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{storageProfileId}, getSvcId(vcRestSessionId), - []string{zone2}, "", "") + []string{zone2}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Create static volume") @@ -1637,20 +1640,20 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol namespace, statuscode, err = createtWcpNsWithZonesAndPolicies( vcRestSessionId, []string{zonalProfileId}, - getSvcId(vcRestSessionId), []string{zone1, zone2}, "", "") + 
getSvcId(vcRestSessionId), []string{zone1, zone2}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read zonal class") - storageclassImm, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclassImm, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - storageclassWffc, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicyWffc, metav1.GetOptions{}) + storageclassWffc, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicyWffc, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1686,7 +1689,7 @@ var _ bool = ginkgo.Describe("[domain-isolation] Management-Workload-Domain-Isol gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PVC is in Bound state.") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, + _, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/mgmt_wrkld_domain_isolation_disruptive.go b/tests/e2e/mgmt_wrkld_domain_isolation_disruptive.go index 33c4fef54a..c9167576f9 100644 --- a/tests/e2e/mgmt_wrkld_domain_isolation_disruptive.go +++ b/tests/e2e/mgmt_wrkld_domain_isolation_disruptive.go @@ -68,6 +68,8 @@ var _ bool = ginkgo.Describe("[domain-isolation-disruptive] Management-Workload- isVcRebooted bool isSPSServiceStopped bool isHostInMaintenanceMode bool + adminClient clientset.Interface + devopsUser string ) ginkgo.BeforeEach(func() { @@ -78,6 +80,16 @@ var _ bool = ginkgo.Describe("[domain-isolation-disruptive] Management-Workload- client = f.ClientSet bootstrap() + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } // reading vc session id if vcRestSessionId == "" { vcRestSessionId = createVcSession4RestApis(ctx) @@ -220,16 +232,16 @@ var _ bool = ginkgo.Describe("[domain-isolation-disruptive] Management-Workload- ginkgo.By("Create a WCP namespace and tag zone-1, zone-2 and zone-3 to it using shared storage policy") namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{sharedStorageProfileId}, getSvcId(vcRestSessionId), - []string{zone1, zone2, zone3}, "", "") + []string{zone1, zone2, zone3}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read shared storage policy tagged to wcp namespace") - storageclass, err 
:= client.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -240,7 +252,7 @@ var _ bool = ginkgo.Describe("[domain-isolation-disruptive] Management-Workload- ginkgo.By("Verify PVC claim to be in bound phase and create POD for each PVC") for i := 0; i < len(pvclaimsList); i++ { var pvclaims []*v1.PersistentVolumeClaim - pvc, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvc, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaimsList[i]}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvc).NotTo(gomega.BeEmpty()) @@ -413,16 +425,16 @@ var _ bool = ginkgo.Describe("[domain-isolation-disruptive] Management-Workload- ginkgo.By("Create a WCP namespace and tag zone-1 and zone-3 to it using shared storage policy") namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{sharedStorageProfileId}, getSvcId(vcRestSessionId), - []string{zone1, zone3}, "", "") + []string{zone1, zone3}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read shared storage policy tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -433,7 +445,7 @@ var _ bool = ginkgo.Describe("[domain-isolation-disruptive] Management-Workload- ginkgo.By("Verify PVC claim to be in bound phase and create POD for each PVC") for i := 0; i < len(pvclaimsList); i++ { var pvclaims []*v1.PersistentVolumeClaim - pvc, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvc, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaimsList[i]}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvc).NotTo(gomega.BeEmpty()) @@ -591,16 +603,16 @@ var _ bool = ginkgo.Describe("[domain-isolation-disruptive] Management-Workload- ginkgo.By("Create a WCP namespace and tag zone-1, zone-2 and zone-3 to it using shared storage policy") namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{sharedStorageProfileId}, getSvcId(vcRestSessionId), - []string{zone1, zone2, zone3}, "", "") + []string{zone1, zone2, zone3}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read shared storage policy tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, 
sharedStoragePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -633,7 +645,7 @@ var _ bool = ginkgo.Describe("[domain-isolation-disruptive] Management-Workload- ginkgo.By("Verify PVC claim to be in bound phase and create POD for each PVC") for i := 0; i < len(pvclaimsList); i++ { var pvclaims []*v1.PersistentVolumeClaim - pvc, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvc, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaimsList[i]}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvc).NotTo(gomega.BeEmpty()) diff --git a/tests/e2e/mgmt_wrkld_domain_isolation_negative.go b/tests/e2e/mgmt_wrkld_domain_isolation_negative.go index 779fe3d150..1bcb3efa01 100644 --- a/tests/e2e/mgmt_wrkld_domain_isolation_negative.go +++ b/tests/e2e/mgmt_wrkld_domain_isolation_negative.go @@ -61,6 +61,8 @@ var _ bool = ginkgo.Describe("[domain-isolation-negative] Management-Workload-Do replicas int32 isVsanHealthServiceStopped bool isWcpServicestopped bool + adminClient clientset.Interface + devopsUser string ) ginkgo.BeforeEach(func() { @@ -71,6 +73,7 @@ var _ bool = ginkgo.Describe("[domain-isolation-negative] Management-Workload-Do client = f.ClientSet bootstrap() + adminClient, client = initializeClusterClientsByUserRoles(client) // reading vc session id if vcRestSessionId == "" { vcRestSessionId = createVcSession4RestApis(ctx) @@ -166,20 +169,20 @@ var _ bool = ginkgo.Describe("[domain-isolation-negative] Management-Workload-Do namespace, statuscode, err = createtWcpNsWithZonesAndPolicies( vcRestSessionId, []string{storageProfileIdZ1, storageProfileIdZ2}, - getSvcId(vcRestSessionId), []string{zone1, zone2}, "", "") + getSvcId(vcRestSessionId), []string{zone1, zone2}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read zonal class") - storageclassZ1, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyNameZ1, metav1.GetOptions{}) + storageclassZ1, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyNameZ1, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - storageclassZ2, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyNameZ2, metav1.GetOptions{}) + storageclassZ2, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyNameZ2, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -190,7 +193,7 @@ var _ bool = ginkgo.Describe("[domain-isolation-negative] Management-Workload-Do gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for PVC to reach Bound state.") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, + _, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim1}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -310,16 +313,16 @@ var _ bool = ginkgo.Describe("[domain-isolation-negative] Management-Workload-Do namespace, 
statuscode, err = createtWcpNsWithZonesAndPolicies( vcRestSessionId, []string{storageProfileIdZ2}, - getSvcId(vcRestSessionId), []string{zone1, zone2}, "", "") + getSvcId(vcRestSessionId), []string{zone1, zone2}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read zonal class") - storageclassZ2, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyNameZ2, metav1.GetOptions{}) + storageclassZ2, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyNameZ2, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -342,7 +345,7 @@ var _ bool = ginkgo.Describe("[domain-isolation-negative] Management-Workload-Do gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for PVC to reach Bound state.") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, + _, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim1}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // startIndex=1 & endIndex=2 to set allowedTopologies to zone-2 @@ -408,24 +411,24 @@ var _ bool = ginkgo.Describe("[domain-isolation-negative] Management-Workload-Do namespace, statuscode, err = createtWcpNsWithZonesAndPolicies( vcRestSessionId, []string{storageProfileIdZ2, storageProfileIdZ1, sharedProfileId}, - getSvcId(vcRestSessionId), []string{zone2, zone1}, "", "") + getSvcId(vcRestSessionId), []string{zone2, zone1}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read storage class") - storageclassZ1, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyNameZ1, metav1.GetOptions{}) + storageclassZ1, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyNameZ1, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - storageclassZ2, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyNameZ2, metav1.GetOptions{}) + storageclassZ2, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyNameZ2, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - sharedStorageClass, err := client.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyName, metav1.GetOptions{}) + sharedStorageClass, err := adminClient.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } diff --git a/tests/e2e/mgmt_wrkld_domain_isolation_tkg.go b/tests/e2e/mgmt_wrkld_domain_isolation_tkg.go index 35a0b4eadb..0d9404cfc6 100644 --- a/tests/e2e/mgmt_wrkld_domain_isolation_tkg.go +++ b/tests/e2e/mgmt_wrkld_domain_isolation_tkg.go @@ -31,7 +31,6 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient 
"k8s.io/client-go/rest" "k8s.io/kubernetes/test/e2e/framework" - fpv "k8s.io/kubernetes/test/e2e/framework/pv" fss "k8s.io/kubernetes/test/e2e/framework/statefulset" admissionapi "k8s.io/pod-security-admission/api" @@ -64,6 +63,7 @@ var _ bool = ginkgo.Describe("[tkg-domain-isolation] TKG-Management-Workload-Dom svcNamespace string guestClusterRestConfig *restclient.Config topkeyStartIndex int + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -75,6 +75,8 @@ var _ bool = ginkgo.Describe("[tkg-domain-isolation] TKG-Management-Workload-Dom client = f.ClientSet bootstrap() + adminClient, client = initializeClusterClientsByUserRoles(client) + // reading vc session id if vcRestSessionId == "" { vcRestSessionId = createVcSession4RestApis(ctx) @@ -172,7 +174,7 @@ var _ bool = ginkgo.Describe("[tkg-domain-isolation] TKG-Management-Workload-Dom replicas = 3 ginkgo.By("Read shared storage policy tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -187,7 +189,7 @@ var _ bool = ginkgo.Describe("[tkg-domain-isolation] TKG-Management-Workload-Dom statefulset := createCustomisedStatefulSets(ctx, client, namespace, true, replicas, true, allowedTopologies, true, true, "", "", storageclass, storageclass.Name) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By("Verify svc pv affinity, pvc annotation and pod node affinity") @@ -381,7 +383,7 @@ var _ bool = ginkgo.Describe("[tkg-domain-isolation] TKG-Management-Workload-Dom replicas = 3 ginkgo.By("Read zonal storage policy tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonal2StroragePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonal2StroragePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -396,7 +398,7 @@ var _ bool = ginkgo.Describe("[tkg-domain-isolation] TKG-Management-Workload-Dom statefulset := createCustomisedStatefulSets(ctx, client, namespace, true, replicas, true, allowedTopologies, true, true, "", "", storageclass, storageclass.Name) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By("Verify svc pv affinity, pvc annotation and pod node affinity") @@ -426,11 +428,11 @@ var _ bool = ginkgo.Describe("[tkg-domain-isolation] TKG-Management-Workload-Dom ginkgo.By("Read shared-latebinding storage policy tagged to wcp namespace") spWffc := sharedStoragePolicyNameWffc + "-latebinding" - storageclassWffc, err := client.StorageV1().StorageClasses().Get(ctx, spWffc, metav1.GetOptions{}) + storageclassWffc, err := adminClient.StorageV1().StorageClasses().Get(ctx, spWffc, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyNameWffc, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyNameWffc, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -444,7 +446,7 @@ 
var _ bool = ginkgo.Describe("[tkg-domain-isolation] TKG-Management-Workload-Dom gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for PVC to reach Bound state.") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, + _, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -490,7 +492,7 @@ var _ bool = ginkgo.Describe("[tkg-domain-isolation] TKG-Management-Workload-Dom replicas = 3 ginkgo.By("Read shared storage policy tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, sharedStoragePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -511,7 +513,7 @@ var _ bool = ginkgo.Describe("[tkg-domain-isolation] TKG-Management-Workload-Dom statefulset := createCustomisedStatefulSets(ctx, client, namespace, true, replicas, true, allowedTopologiesZ2, false, true, "", "", storageclass, sharedStoragePolicyName) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() // PV will have all 3 zones, but pod will be on zone-2 @@ -543,7 +545,7 @@ var _ bool = ginkgo.Describe("[tkg-domain-isolation] TKG-Management-Workload-Dom ginkgo.By("Read shared storage policy tagged to wcp namespace") spWffc := zonal2StroragePolicyName + "-latebinding" - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, spWffc, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, spWffc, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -564,7 +566,7 @@ var _ bool = ginkgo.Describe("[tkg-domain-isolation] TKG-Management-Workload-Dom statefulset := createCustomisedStatefulSets(ctx, client, namespace, true, replicas, true, allowedTopologies, false, true, "", "", storageclass, storageclass.Name) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By("Verify svc pv affinity, pvc annotation and pod node affinity") diff --git a/tests/e2e/mgmt_wrkld_domain_isolation_utils.go b/tests/e2e/mgmt_wrkld_domain_isolation_utils.go index 7a8619bfbe..a6c6eb8138 100644 --- a/tests/e2e/mgmt_wrkld_domain_isolation_utils.go +++ b/tests/e2e/mgmt_wrkld_domain_isolation_utils.go @@ -66,7 +66,6 @@ func verifyPvcAnnotationPvAffinityPodAnnotationInSvc(ctx context.Context, client var podList *v1.PodList var err error - // Determine the pod list based on input (StatefulSet, StandalonePod, or Deployment) if statefulset != nil { // If statefulset is provided, get the pod list associated with it diff --git a/tests/e2e/mgmt_wrkld_domain_isolation_vmservice.go b/tests/e2e/mgmt_wrkld_domain_isolation_vmservice.go index 022639ec7f..627de9c1ab 100644 --- a/tests/e2e/mgmt_wrkld_domain_isolation_vmservice.go +++ b/tests/e2e/mgmt_wrkld_domain_isolation_vmservice.go @@ -72,6 +72,8 @@ var _ bool = ginkgo.Describe("[domain-isolation-vmsvc] Domain-Isolation-VmServic storageProfileIdZone2 string snapc *snapclient.Clientset restConfig *rest.Config + adminClient clientset.Interface + devopsUser string ) ginkgo.BeforeEach(func() { @@ -81,6 +83,8 @@ var _ bool = ginkgo.Describe("[domain-isolation-vmsvc] Domain-Isolation-VmServic // making vc connection client = 
f.ClientSet bootstrap() + var err error + adminClient, client = initializeClusterClientsByUserRoles(client) // reading vc session id if vcRestSessionId == "" { @@ -211,16 +215,16 @@ var _ bool = ginkgo.Describe("[domain-isolation-vmsvc] Domain-Isolation-VmServic // creating namespace with zonal2 storage policy namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{storageProfileIdZone2}, getSvcId(vcRestSessionId), - []string{zone2}, vmClass, contentLibId) + []string{zone2}, vmClass, contentLibId, devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read zonal-2 storage policy which is tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyNameZone2, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyNameZone2, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -235,7 +239,7 @@ var _ bool = ginkgo.Describe("[domain-isolation-vmsvc] Domain-Isolation-VmServic gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for PVC to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := pvs[0] volHandle := pv.Spec.CSI.VolumeHandle @@ -319,16 +323,16 @@ var _ bool = ginkgo.Describe("[domain-isolation-vmsvc] Domain-Isolation-VmServic allowedTopologiesMap := convertToTopologyMap(allowedTopologies) namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{storageProfileIdZone24}, getSvcId(vcRestSessionId), - []string{zone2, zone4}, vmClass, contentLibId) + []string{zone2, zone4}, vmClass, contentLibId, devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read shared storage policy which is tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyNameZone24, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyNameZone24, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -433,16 +437,16 @@ var _ bool = ginkgo.Describe("[domain-isolation-vmsvc] Domain-Isolation-VmServic "policy which is accessible to all zones") namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{storageProfileId}, getSvcId(vcRestSessionId), - []string{zone1, zone2}, vmClass, contentLibId) + []string{zone1, zone2}, vmClass, contentLibId, devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { 
delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read shared storage policy which is tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -459,7 +463,7 @@ var _ bool = ginkgo.Describe("[domain-isolation-vmsvc] Domain-Isolation-VmServic gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for PVCs to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1, pvc2}, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1, pvc2}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) pv1 := pvs[0] @@ -592,16 +596,16 @@ var _ bool = ginkgo.Describe("[domain-isolation-vmsvc] Domain-Isolation-VmServic "policy compatible with only these 3 zones") namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{storageProfileId}, getSvcId(vcRestSessionId), - []string{zone1, zone2, zone3}, vmClass, contentLibId) + []string{zone1, zone2, zone3}, vmClass, contentLibId, devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read shared storage policy which is tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -613,7 +617,7 @@ var _ bool = ginkgo.Describe("[domain-isolation-vmsvc] Domain-Isolation-VmServic gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for PVCs to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1, pvc2}, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1, pvc2}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) pv1 := pvs[0] @@ -777,16 +781,16 @@ var _ bool = ginkgo.Describe("[domain-isolation-vmsvc] Domain-Isolation-VmServic "policy compatible with only these 3 zones") namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{storageProfileId}, getSvcId(vcRestSessionId), - []string{zone1, zone2, zone3}, vmClass, contentLibId) + []string{zone1, zone2, zone3}, vmClass, contentLibId, devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + 
gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }() ginkgo.By("Read shared storage policy which is tagged to wcp namespace") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -802,7 +806,7 @@ var _ bool = ginkgo.Describe("[domain-isolation-vmsvc] Domain-Isolation-VmServic pvclaims = append(pvclaims, pvc1, pvc2, pvc3) ginkgo.By("Wait for PVCs to be in bound state") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1, pvc2, pvc3}, pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1, pvc2, pvc3}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) diff --git a/tests/e2e/multi_master_k8s.go b/tests/e2e/multi_master_k8s.go index 329573d13e..619603d0bb 100644 --- a/tests/e2e/multi_master_k8s.go +++ b/tests/e2e/multi_master_k8s.go @@ -54,6 +54,7 @@ var _ = ginkgo.Describe("[csi-multi-master-block-e2e]", func() { labelValue string scParameters map[string]string nodeNameIPMap map[string]string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet @@ -65,12 +66,14 @@ var _ = ginkgo.Describe("[csi-multi-master-block-e2e]", func() { controllerNamespace = csiSystemNamespace } bootstrap() + var err error + adminClient, client = initializeClusterClientsByUserRoles(client) scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) nodeNameIPMap = make(map[string]string) ginkgo.By("Retrieving testbed configuration data") - err := mapK8sMasterNodeWithIPs(client, nodeNameIPMap) + err = mapK8sMasterNodeWithIPs(client, nodeNameIPMap) framework.ExpectNoError(err) labelKey = "app" @@ -239,7 +242,13 @@ var _ = ginkgo.Describe("[csi-multi-master-block-e2e]", func() { "pod is running", ginkgo.Label(p0, block, vanilla, vc70), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeList, podList := getControllerRuntimeDetails(client, controllerNamespace) + var nodeList, podList []string + if vanillaCluster { + nodeList, podList = getControllerRuntimeDetails(client, controllerNamespace) + } else { + nodeList, podList = getControllerRuntimeDetails(adminClient, controllerNamespace) + + } ginkgo.By(fmt.Sprintf("vsphere-csi-controller pod(s) %+v is running on node(s) %+v", podList, nodeList)) gomega.Expect(len(podList) == 1).To(gomega.BeTrue(), "Number of vsphere-csi-controller pod running is not 1") @@ -252,7 +261,6 @@ var _ = ginkgo.Describe("[csi-multi-master-block-e2e]", func() { profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) scParameters[scParamStoragePolicyID] = profileID // create resource quota - createResourceQuota(client, namespace, rqLimit, storagePolicyName) sc, pvc, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", false, "", storagePolicyName) } diff --git a/tests/e2e/multi_vc_utils.go b/tests/e2e/multi_vc_utils.go index 84c178a80b..606e3b8b14 100644 --- a/tests/e2e/multi_vc_utils.go +++ b/tests/e2e/multi_vc_utils.go @@ -159,6 +159,9 @@ If we have multiple statefulsets, deployment Pods, PVCs/PVs created on a given n cleanup of these multiple sts creation, deleteAllStsAndPodsPVCsInNamespace 
is used */ func deleteAllStsAndPodsPVCsInNamespace(ctx context.Context, c clientset.Interface, ns string) { + + var err error + adminClient, _ := initializeClusterClientsByUserRoles(c) StatefulSetPoll := 10 * time.Second StatefulSetTimeout := 10 * time.Minute ssList, err := c.AppsV1().StatefulSets(ns).List(context.TODO(), @@ -203,7 +206,7 @@ func deleteAllStsAndPodsPVCsInNamespace(ctx context.Context, c clientset.Interfa pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true, func(ctx context.Context) (bool, error) { - pvList, err := c.CoreV1().PersistentVolumes().List(context.TODO(), + pvList, err := adminClient.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { framework.Logf("WARNING: Failed to list pvs, retrying %v", err) diff --git a/tests/e2e/operationstorm.go b/tests/e2e/operationstorm.go index c30bfbd38c..6f2cb716c4 100644 --- a/tests/e2e/operationstorm.go +++ b/tests/e2e/operationstorm.go @@ -72,12 +72,22 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] Vo err error volumeOpsScale int storagePolicyName string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet namespace = getNamespaceToRunTests(f) ctx, cancel := context.WithCancel(context.Background()) defer cancel() + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -270,7 +280,7 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] Vo gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() diff --git a/tests/e2e/policy_driven_vol_allocation.go b/tests/e2e/policy_driven_vol_allocation.go index 521e6bc7b9..979979e5d2 100644 --- a/tests/e2e/policy_driven_vol_allocation.go +++ b/tests/e2e/policy_driven_vol_allocation.go @@ -69,6 +69,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation sshWcpConfig *ssh.ClientConfig svcNamespace string pandoraSyncWaitTime int + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -76,10 +77,20 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation defer cancel() client = f.ClientSet namespace = getNamespaceToRunTests(f) - + var nodeList *v1.NodeList + var err error bootstrap() - nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and 
schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") @@ -272,8 +283,8 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation setStoragePolicyQuota(ctx, restClientConfig, policyName, namespace, resourceQuotaLimit) } } else if guestCluster { - _, svNamespace := getSvcClientAndNamespace() - assignPolicyToWcpNamespace(client, ctx, svNamespace, policyNames, resourceQuotaLimit) + svcClient, svNamespace := getSvcClientAndNamespace() + assignPolicyToWcpNamespace(svcClient, ctx, svNamespace, policyNames, resourceQuotaLimit) time.Sleep(5 * time.Minute) restClientConfig := getRestConfigClient() for _, policyName := range policyNames { @@ -294,13 +305,13 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation ginkgo.By("CNS_TEST: Running for WCP setup") if wcpVsanDirectCluster { - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = createVsanDPvcAndPod(sshWcpConfig, svcMasterIp, svcNamespace, pvcVsandNames[i], podVsandNames[i], policyName, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) } else { - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -310,7 +321,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation } else { ginkgo.By("CNS_TEST: Running for GC setup") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -350,7 +361,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation } ginkgo.By("Verify the PVCs created in step 3 are bound") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id") @@ -522,8 +533,8 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation setStoragePolicyQuota(ctx, restClientConfig, policyName, namespace, resourceQuotaLimit) } } else if guestCluster { - _, svNamespace := getSvcClientAndNamespace() - assignPolicyToWcpNamespace(client, ctx, svNamespace, policyNames, resourceQuotaLimit) + svcClient, svNamespace := getSvcClientAndNamespace() + assignPolicyToWcpNamespace(svcClient, ctx, svNamespace, policyNames, resourceQuotaLimit) time.Sleep(5 * time.Minute) restClientConfig := getRestConfigClient() for _, policyName := range policyNames { @@ -543,13 +554,13 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation } else if supervisorCluster { ginkgo.By("CNS_TEST: Running for WCP setup") if wcpVsanDirectCluster { - storageclass, err = 
client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = createVsanDPvcAndPod(sshWcpConfig, svcMasterIp, svcNamespace, pvcVsandNames[i], podVsandNames[i], policyName, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) } else { - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = createPVC(ctx, client, namespace, nil, largeSize, storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -558,7 +569,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation } } else { ginkgo.By("CNS_TEST: Running for GC setup") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = createPVC(ctx, client, namespace, nil, largeSize, storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -597,7 +608,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation } ginkgo.By("Verify the PVCs created in step 3 are bound") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -749,8 +760,8 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation setStoragePolicyQuota(ctx, restClientConfig, policyName, namespace, resourceQuotaLimit) } else if guestCluster { - _, svNamespace := getSvcClientAndNamespace() - assignPolicyToWcpNamespace(client, ctx, svNamespace, []string{policyName}, resourceQuotaLimit) + svcClient, svNamespace := getSvcClientAndNamespace() + assignPolicyToWcpNamespace(svcClient, ctx, svNamespace, []string{policyName}, resourceQuotaLimit) time.Sleep(5 * time.Minute) restClientConfig := getRestConfigClient() setStoragePolicyQuota(ctx, restClientConfig, policyName, svNamespace, resourceQuotaLimit) @@ -773,20 +784,20 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation } else if supervisorCluster { ginkgo.By("CNS_TEST: Running for WCP setup") if wcpVsanDirectCluster { - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = createVsanDPvcAndPod(sshWcpConfig, svcMasterIp, svcNamespace, eztVsandPvcName+randomStr, eztVsandPodName+randomStr, policyName, largeSize) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } else { - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = createPVC(ctx, client, namespace, nil, largeSize, storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } else { ginkgo.By("CNS_TEST: Running for GC setup") - storageclass, err = 
client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = createPVC(ctx, client, namespace, nil, largeSize, storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -835,7 +846,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation start := time.Now() ginkgo.By("Verify the PVCs created in step 3 are bound") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout*4) elapsed := time.Since(start) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -958,8 +969,8 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation setStoragePolicyQuota(ctx, restClientConfig, policyName, namespace, resourceQuotaLimit) } else if guestCluster { - _, svNamespace := getSvcClientAndNamespace() - assignPolicyToWcpNamespace(client, ctx, svNamespace, []string{policyName}, resourceQuotaLimit) + svcClient, svNamespace := getSvcClientAndNamespace() + assignPolicyToWcpNamespace(svcClient, ctx, svNamespace, []string{policyName}, resourceQuotaLimit) time.Sleep(5 * time.Minute) restClientConfig := getRestConfigClient() setStoragePolicyQuota(ctx, restClientConfig, policyName, svNamespace, resourceQuotaLimit) @@ -985,20 +996,20 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation ginkgo.By("CNS_TEST: Running for WCP setup") // create resource quota if wcpVsanDirectCluster { - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = createVsanDPvcAndPod(sshWcpConfig, svcMasterIp, svcNamespace, eztVsandPvcName+randomStr, eztVsandPodName+randomStr, policyName, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) } else { - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } else { ginkgo.By("CNS_TEST: Running for GC setup") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1029,7 +1040,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation } ginkgo.By("Verify the PVCs created in step 3 are bound") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1255,8 +1266,8 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation setStoragePolicyQuota(ctx, restClientConfig, policyName, namespace, resourceQuotaLimit) } } else if guestCluster { - _, svNamespace := 
getSvcClientAndNamespace() - assignPolicyToWcpNamespace(client, ctx, svNamespace, policyNames, resourceQuotaLimit) + svcClient, svNamespace := getSvcClientAndNamespace() + assignPolicyToWcpNamespace(svcClient, ctx, svNamespace, policyNames, resourceQuotaLimit) time.Sleep(5 * time.Minute) restClientConfig := getRestConfigClient() for _, policyName := range policyNames { @@ -1276,13 +1287,13 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation } else if supervisorCluster { ginkgo.By("CNS_TEST: Running for WCP setup") if wcpVsanDirectCluster { - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = createVsanDPvcAndPod(sshWcpConfig, svcMasterIp, svcNamespace, pvcVsandNames[i], podVsandNames[i], policyName, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) } else { - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1291,7 +1302,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation } } else { ginkgo.By("CNS_TEST: Running for GC setup") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1331,7 +1342,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation } ginkgo.By("Verify the PVCs created in step 3 are bound") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -1586,8 +1597,8 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation setStoragePolicyQuota(ctx, restClientConfig, policyName, namespace, resourceQuotaLimit) } } else if guestCluster { - _, svNamespace := getSvcClientAndNamespace() - assignPolicyToWcpNamespace(client, ctx, svNamespace, policyNames, resourceQuotaLimit) + svcClient, svNamespace := getSvcClientAndNamespace() + assignPolicyToWcpNamespace(svcClient, ctx, svNamespace, policyNames, resourceQuotaLimit) time.Sleep(5 * time.Minute) restClientConfig := getRestConfigClient() for _, policyName := range policyNames { @@ -1603,11 +1614,11 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation gomega.Expect(err).NotTo(gomega.HaveOccurred()) } else if supervisorCluster { ginkgo.By("CNS_TEST: Running for WCP setup") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } else { ginkgo.By("CNS_TEST: Running for GC setup") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, 
metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1652,7 +1663,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation } ginkgo.By("Verify the PVCs created in step 3 are bound") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volIds := []string{} @@ -1873,8 +1884,8 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation setStoragePolicyQuota(ctx, restClientConfig, policyName, namespace, resourceQuotaLimit) } else if guestCluster { - _, svNamespace := getSvcClientAndNamespace() - assignPolicyToWcpNamespace(client, ctx, svNamespace, []string{policyName}, resourceQuotaLimit) + svcClient, svNamespace := getSvcClientAndNamespace() + assignPolicyToWcpNamespace(svcClient, ctx, svNamespace, []string{policyName}, resourceQuotaLimit) time.Sleep(5 * time.Minute) restClientConfig := getRestConfigClient() @@ -1894,20 +1905,20 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation } else if supervisorCluster { ginkgo.By("CNS_TEST: Running for WCP setup") if wcpVsanDirectCluster { - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = createVsanDPvcAndPod(sshWcpConfig, svcMasterIp, svcNamespace, eztVsandPvcName+randomStr, eztVsandPodName+randomStr, policyName, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) } else { - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } else { ginkgo.By("CNS_TEST: Running for GC setup") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1938,7 +1949,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation } ginkgo.By("Verify the PVCs created in step 3 are bound") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2217,7 +2228,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation }() ginkgo.By("Verify the PVCs created in step 3 are bound") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volIds := []string{} @@ -2504,7 +2515,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation } 
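Throughout these vol-allocation hunks, calls to fpv.WaitForPVClaimBoundPhase are swapped for a suite-local WaitForPVClaimBoundPhase. The patch does not include that helper's body; the sketch below is only an assumption about the shape such a wrapper might take (the name waitForClaimsBound and its exact behaviour are hypothetical), shown to illustrate why a local wrapper is useful when PV objects may have to be read with a client other than the one that created the PVCs.

// Hedged sketch: a possible shape for a suite-local bound-phase wait helper.
// This is not the helper added by the patch; assumptions are noted inline.
package e2e

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForClaimsBound polls each PVC until it reports phase Bound, then fetches the
// bound PV so callers receive the same []*v1.PersistentVolume the framework helper
// returns. A real implementation could switch to an admin client for the PV read on
// supervisor/guest clusters where a devops user cannot read cluster-scoped objects.
func waitForClaimsBound(ctx context.Context, c clientset.Interface,
	pvcs []*v1.PersistentVolumeClaim, timeout time.Duration) ([]*v1.PersistentVolume, error) {
	var pvs []*v1.PersistentVolume
	for _, pvc := range pvcs {
		deadline := time.Now().Add(timeout)
		for {
			cur, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
			if err != nil {
				return nil, err
			}
			if cur.Status.Phase == v1.ClaimBound {
				pv, err := c.CoreV1().PersistentVolumes().Get(ctx, cur.Spec.VolumeName, metav1.GetOptions{})
				if err != nil {
					return nil, err
				}
				pvs = append(pvs, pv)
				break
			}
			if time.Now().After(deadline) {
				return nil, fmt.Errorf("claim %s/%s not bound within %v", pvc.Namespace, pvc.Name, timeout)
			}
			time.Sleep(5 * time.Second)
		}
	}
	return pvs, nil
}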
ginkgo.By("Verify the PVCs created in step 3 are bound") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volIds := []string{} @@ -2720,7 +2731,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation } ginkgo.By("Verify the PVCs created in step 3 are bound") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id") @@ -2926,7 +2937,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation }() ginkgo.By("Verify the PVCs created in step 3 are bound") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volIds := []string{} @@ -3154,7 +3165,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation }() ginkgo.By("Verify the PVCs created in step 3 are bound") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volIds := []string{} @@ -3398,7 +3409,7 @@ var _ = ginkgo.Describe("[vol-allocation] Policy driven volume space allocation }() ginkgo.By("Verify the PVCs created in step 3 are bound") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volIds := []string{} diff --git a/tests/e2e/raw_block_volume.go b/tests/e2e/raw_block_volume.go index a16947c0b7..48bb994baf 100644 --- a/tests/e2e/raw_block_volume.go +++ b/tests/e2e/raw_block_volume.go @@ -34,6 +34,7 @@ import ( "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/types" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -76,6 +77,7 @@ var _ = ginkgo.Describe("raw block volume support", func() { snapc *snapclient.Clientset restConfig *restclient.Config guestClusterRestConfig *restclient.Config + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -83,8 +85,21 @@ var _ = ginkgo.Describe("raw block volume support", func() { defer cancel() namespace = getNamespaceToRunTests(f) client = f.ClientSet + var err error bootstrap() - nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + + var nodeList *v1.NodeList + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) 
> 0) { framework.Failf("Unable to find ready and schedulable Node") @@ -101,9 +116,10 @@ var _ = ginkgo.Describe("raw block volume support", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - sc, err := client.StorageV1().StorageClasses().Get(ctx, defaultNginxStorageClassName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, defaultNginxStorageClassName, metav1.GetOptions{}) + framework.Logf("err: %v", err) if err == nil && sc != nil { - gomega.Expect(client.StorageV1().StorageClasses().Delete(ctx, sc.Name, + gomega.Expect(adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0))).NotTo(gomega.HaveOccurred()) } scParameters = make(map[string]string) @@ -198,10 +214,10 @@ var _ = ginkgo.Describe("raw block volume support", func() { ginkgo.By("Creating StorageClass for Statefulset") scSpec := getVSphereStorageClassSpec(storageClassName, scParameters, nil, "", "", false) - sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -232,7 +248,7 @@ var _ = ginkgo.Describe("raw block volume support", func() { replicas := *(statefulset.Spec.Replicas) defer func() { ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() // Waiting for pods status to be Ready @@ -414,7 +430,7 @@ var _ = ginkgo.Describe("raw block volume support", func() { sc, err := createStorageClass(client, scParameters, nil, "", "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -425,7 +441,7 @@ var _ = ginkgo.Describe("raw block volume support", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Failed to create pvc with err: %v", err)) ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*corev1.PersistentVolumeClaim{pvc}, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*corev1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -440,7 +456,11 @@ var _ = ginkgo.Describe("raw block volume support", func() { defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort) + if vanillaCluster { + err = fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort) + } else { + err = fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeoutShort) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ 
-590,12 +610,12 @@ var _ = ginkgo.Describe("raw block volume support", func() { ginkgo.By("Creating raw block PV") pv = getPersistentVolumeSpec(fcdID, corev1.PersistentVolumeReclaimDelete, staticPVLabels, "") pv.Spec.VolumeMode = &rawBlockVolumeMode - pv, err = client.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) + pv, err = adminClient.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = e2eVSphere.waitForCNSVolumeToBeCreated(pv.Spec.CSI.VolumeHandle) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := fpv.DeletePersistentVolume(ctx, client, pv.Name) + err := fpv.DeletePersistentVolume(ctx, adminClient, pv.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -610,7 +630,7 @@ var _ = ginkgo.Describe("raw block volume support", func() { defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort) + err = fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeoutShort) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = e2eVSphere.waitForCNSVolumeToBeDeleted(pv.Spec.CSI.VolumeHandle) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -712,7 +732,7 @@ var _ = ginkgo.Describe("raw block volume support", func() { sc, err := createStorageClass(client, scParameters, nil, "", "", true, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -723,7 +743,7 @@ var _ = ginkgo.Describe("raw block volume support", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Failed to create pvc with err: %v", err)) ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*corev1.PersistentVolumeClaim{pvc}, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*corev1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -738,7 +758,11 @@ var _ = ginkgo.Describe("raw block volume support", func() { defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort) + if vanillaCluster { + err = fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort) + } else { + err = fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeoutShort) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -884,7 +908,7 @@ var _ = ginkgo.Describe("raw block volume support", func() { sc, err := createStorageClass(client, scParameters, nil, "", "", true, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -898,7 +922,7 @@ 
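The deferred cleanups in raw_block_volume.go repeat the same branch: on vanilla clusters the PV-deleted wait uses the test client, otherwise it uses adminClient, because a devops-scoped user cannot read cluster-scoped PersistentVolumes on supervisor/guest setups. A small helper like the hedged sketch below could capture that choice in one place (the function name and the explicit vanilla parameter are assumptions; the tests themselves branch inline on the package-level vanillaCluster flag).

// Hedged sketch of the vanilla-vs-admin client choice for cluster-scoped PV waits.
package e2e

import (
	"context"
	"time"

	clientset "k8s.io/client-go/kubernetes"
	fpv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// waitForPVDeletedWithRightClient waits until the named PV is gone, using the regular
// client on vanilla clusters and the admin client elsewhere.
func waitForPVDeletedWithRightClient(ctx context.Context, client, adminClient clientset.Interface,
	vanilla bool, pvName string, poll, timeout time.Duration) error {
	c := adminClient
	if vanilla {
		c = client
	}
	return fpv.WaitForPersistentVolumeDeleted(ctx, c, pvName, poll, timeout)
}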
var _ = ginkgo.Describe("raw block volume support", func() { var pvclaims []*corev1.PersistentVolumeClaim pvclaims = append(pvclaims, pvc) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := pvs[0] volumeID := pv.Spec.CSI.VolumeHandle @@ -910,7 +934,11 @@ var _ = ginkgo.Describe("raw block volume support", func() { defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort) + if vanillaCluster { + err = fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort) + } else { + err = fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeoutShort) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1116,7 +1144,7 @@ var _ = ginkgo.Describe("raw block volume support", func() { sc, err := createStorageClass(client, scParameters, nil, "", "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -1127,7 +1155,7 @@ var _ = ginkgo.Describe("raw block volume support", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Failed to create pvc with err: %v", err)) ginkgo.By("Expect source volume claim to provision volume successfully") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*corev1.PersistentVolumeClaim{pvc1}, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*corev1.PersistentVolumeClaim{pvc1}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volumeID := pvs[0].Spec.CSI.VolumeHandle @@ -1238,7 +1266,7 @@ var _ = ginkgo.Describe("raw block volume support", func() { restoredPvc, err := fpv.CreatePVC(ctx, client, namespace, restorePvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - restoredPvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + restoredPvs, err := WaitForPVClaimBoundPhase(ctx, client, []*corev1.PersistentVolumeClaim{restoredPvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/scale.go b/tests/e2e/scale.go index 6ed269643d..817f5bd240 100644 --- a/tests/e2e/scale.go +++ b/tests/e2e/scale.go @@ -125,7 +125,6 @@ var _ = ginkgo.Describe("Scale Test", func() { profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) scParameters[scParamStoragePolicyID] = profileID // create resource quota - createResourceQuota(client, namespace, rqLimitScaleTest, storagePolicyName) } gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/snapshot_stretched_supervisor.go b/tests/e2e/snapshot_stretched_supervisor.go index e5aabb9721..81432a8bf7 100644 --- a/tests/e2e/snapshot_stretched_supervisor.go +++ b/tests/e2e/snapshot_stretched_supervisor.go @@ -68,13 +68,16 @@ var _ = ginkgo.Describe("Stretched-Supervisor-Snapshot", func() { bootstrap() client = f.ClientSet + var err error + var nodeList *v1.NodeList + namespace = 
getNamespaceToRunTests(f) // parameters set for storage policy scParameters = make(map[string]string) // fetching node list and checking node status - nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") @@ -155,7 +158,7 @@ var _ = ginkgo.Describe("Stretched-Supervisor-Snapshot", func() { defer cancel() ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace)) err := client.CoreV1().Services(namespace).Delete(ctx, servicename, *metav1.NewDeleteOptions(0)) if !apierrors.IsNotFound(err) { @@ -488,7 +491,7 @@ var _ = ginkgo.Describe("Stretched-Supervisor-Snapshot", func() { int32(stsReplicas), true, allowedTopologies, true, true, zonalPolicy, "", storageclass, zonalPolicy) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() framework.Logf("Verify PV node affinity and that the PODS are running on appropriate node") @@ -544,7 +547,7 @@ var _ = ginkgo.Describe("Stretched-Supervisor-Snapshot", func() { v1.ReadWriteOnce, volumeSnapshot1.Name, snapshotapigroup) restoreVol1, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - restorepv1, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + restorepv1, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{restoreVol1}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) restoreVolHandle1 := restorepv1[0].Spec.CSI.VolumeHandle diff --git a/tests/e2e/snapshot_vmservice_vm.go b/tests/e2e/snapshot_vmservice_vm.go index 9a3fd28095..9de0b68d47 100644 --- a/tests/e2e/snapshot_vmservice_vm.go +++ b/tests/e2e/snapshot_vmservice_vm.go @@ -71,6 +71,8 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { pandoraSyncWaitTime int dsRef types.ManagedObjectReference labelsMap map[string]string + adminClient clientset.Interface + userName string ) ginkgo.BeforeEach(func() { @@ -80,13 +82,14 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { // client connection client = f.ClientSet bootstrap() - + var err error + adminClient, client = initializeClusterClientsByUserRoles(client) // fetch the testbed type for executing testcases topologyFeature := os.Getenv(topologyFeature) // fetching nodes and reading storage policy name if topologyFeature != topologyTkgHaName && topologyFeature != podVMOnStretchedSupervisor { - nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, adminClient) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") @@ -125,7 +128,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { framework.Logf("Create a WCP namespace for the test") // creating wcp test namespace and setting vmclass, contlib, storage class fields in test ns namespace = createTestWcpNs( - vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId)) + 
vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId), userName) framework.Logf("Verifying storage policies usage for each storage class") restConfig = getRestConfigClient() @@ -182,7 +185,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { dumpSvcNsEventsOnTestFailure(client, namespace) delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }) /* @@ -209,7 +212,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -469,7 +472,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -646,7 +649,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -854,7 +857,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -1097,7 +1100,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC1") @@ -1406,7 +1409,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -1608,7 +1611,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + 
storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -1831,7 +1834,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { snapshotContents := make([]*snapV1.VolumeSnapshotContent, volumeOpsScale) ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -2076,7 +2079,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { var datastoreUrls []string ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC-1") @@ -2322,7 +2325,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC1") @@ -2606,7 +2609,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") @@ -2870,7 +2873,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { framework.Logf("annotationVal :%s, pvcAnnotations: %v", annotationVal, pvcAnnotations) ginkgo.By("Creating Pvc with Immediate topology storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvcSpec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", storageclass, nil, "") pvcSpec.Annotations = pvcAnnotations @@ -2878,7 +2881,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for SV PVC to come to bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) topologykey := pvs[0].Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values[0] @@ -3093,7 +3096,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { framework.Logf("annotationVal :%s, pvcAnnotations: %v", annotationVal, pvcAnnotations) ginkgo.By("Creating Pvc with Immediate topology storageclass") - storageclass, err := 
client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvcSpec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", storageclass, nil, "") pvcSpec.Annotations = pvcAnnotations @@ -3101,7 +3104,7 @@ var _ bool = ginkgo.Describe("[snapshot-vmsvc] Snapshot VM Service VM", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for SV PVC to come to bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) topologykey := pvs[0].Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values[0] diff --git a/tests/e2e/staging_env_basic.go b/tests/e2e/staging_env_basic.go index 1d28d60ae7..3ca4125692 100644 --- a/tests/e2e/staging_env_basic.go +++ b/tests/e2e/staging_env_basic.go @@ -52,21 +52,19 @@ var _ = ginkgo.Describe("[csi-supervisor-staging] Tests for WCP env with minimal client clientset.Interface namespace string storagePolicyName string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { var err error - if k8senvsv := GetAndExpectStringEnvVar("SUPERVISOR_CLUSTER_KUBE_CONFIG"); k8senvsv != "" { - client, err = createKubernetesClientFromConfig(k8senvsv) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + adminClient, client = initializeClusterClientsByUserRoles(client) namespace = os.Getenv("SVC_NAMESPACE") bootstrap() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeList, err := fnodes.GetReadySchedulableNodes(ctx, client) + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, adminClient) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") @@ -97,7 +95,7 @@ var _ = ginkgo.Describe("[csi-supervisor-staging] Tests for WCP env with minimal var pv *v1.PersistentVolume ginkgo.By("Creating a PVC") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = fpv.CreatePVC(ctx, client, namespace, @@ -822,7 +820,7 @@ var _ = ginkgo.Describe("[csi-supervisor-staging] Tests for WCP env with minimal var err error ginkgo.By("Create PVC ") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvc, err = fpv.CreatePVC(ctx, client, namespace, @@ -944,7 +942,7 @@ var _ = ginkgo.Describe("[csi-supervisor-staging] Tests for WCP env with minimal var runAsUser int64 ginkgo.By("Creating a PVC") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = fpv.CreatePVC(ctx, client, namespace, @@ -1034,7 +1032,7 @@ var _ = ginkgo.Describe("[csi-supervisor-staging] Tests for 
WCP env with minimal var pvclaim *v1.PersistentVolumeClaim ginkgo.By("Creating a PVC") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = fpv.CreatePVC(ctx, client, namespace, @@ -1118,7 +1116,7 @@ var _ = ginkgo.Describe("[csi-supervisor-staging] Tests for WCP env with minimal var err error ginkgo.By("Creating a PVC") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = fpv.CreatePVC(ctx, client, namespace, @@ -1219,7 +1217,7 @@ var _ = ginkgo.Describe("[csi-supervisor-staging] Tests for WCP env with minimal // decide which test setup is available to run ginkgo.By("Creating a PVC") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaim, err = fpv.CreatePVC(ctx, client, namespace, diff --git a/tests/e2e/statefulsets.go b/tests/e2e/statefulsets.go index 241897872a..a4a72c6df3 100644 --- a/tests/e2e/statefulsets.go +++ b/tests/e2e/statefulsets.go @@ -82,18 +82,25 @@ var _ = ginkgo.Describe("statefulset", func() { stsReplicas int32 allowedTopologies []v1.TopologySelectorLabelRequirement isQuotaValidationSupported bool + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - namespace = getNamespaceToRunTests(f) + var err error client = f.ClientSet + namespace = getNamespaceToRunTests(f) + adminClient, client = initializeClusterClientsByUserRoles(client) + bootstrap() - sc, err := client.StorageV1().StorageClasses().Get(ctx, defaultNginxStorageClassName, metav1.GetOptions{}) - if err == nil && sc != nil { - gomega.Expect(client.StorageV1().StorageClasses().Delete(ctx, sc.Name, - *metav1.NewDeleteOptions(0))).NotTo(gomega.HaveOccurred()) + + if vanillaCluster { + sc, err := client.StorageV1().StorageClasses().Get(ctx, defaultNginxStorageClassName, metav1.GetOptions{}) + if err == nil && sc != nil { + gomega.Expect(client.StorageV1().StorageClasses().Delete(ctx, sc.Name, + *metav1.NewDeleteOptions(0))).NotTo(gomega.HaveOccurred()) + } } scParameters = make(map[string]string) @@ -127,7 +134,7 @@ var _ = ginkgo.Describe("statefulset", func() { } if stretchedSVC { - nodeList, err = fnodes.GetReadySchedulableNodes(ctx, client) + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") } @@ -142,12 +149,13 @@ var _ = ginkgo.Describe("statefulset", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace)) err := client.CoreV1().Services(namespace).Delete(ctx, servicename, *metav1.NewDeleteOptions(0)) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + 
if supervisorCluster { dumpSvcNsEventsOnTestFailure(client, namespace) } @@ -194,8 +202,6 @@ var _ = ginkgo.Describe("statefulset", func() { } else { storageClassName = storagePolicyName ginkgo.By("Running for WCP setup") - profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) - scParameters[scParamStoragePolicyID] = profileID } restConfig := getRestConfigClient() @@ -219,7 +225,7 @@ var _ = ginkgo.Describe("statefulset", func() { if stretchedSVC { scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -231,6 +237,10 @@ var _ = ginkgo.Describe("statefulset", func() { CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) + + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + }() // Waiting for pods status to be Ready fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) @@ -437,8 +447,6 @@ var _ = ginkgo.Describe("statefulset", func() { } else { storageClassName = storagePolicyName ginkgo.By("Running for WCP setup") - profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) - scParameters[scParamStoragePolicyID] = profileID } ginkgo.By("Creating service") @@ -455,6 +463,9 @@ var _ = ginkgo.Describe("statefulset", func() { ginkgo.By("Creating statefulset") CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + }() // Waiting for pods status to be Ready fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) @@ -635,8 +646,6 @@ var _ = ginkgo.Describe("statefulset", func() { storageClassName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) framework.Logf("storageClassName %v", storageClassName) ginkgo.By("CNS_TEST: Running for WCP setup") - profileID := e2eVSphere.GetSpbmPolicyID(storageClassName) - scParameters[scParamStoragePolicyID] = profileID } if !vcptocsi { @@ -666,6 +675,10 @@ var _ = ginkgo.Describe("statefulset", func() { Spec.StorageClassName = &storageClassName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + }() + // Waiting for pods status to be Ready fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) @@ -841,21 +854,18 @@ var _ = ginkgo.Describe("statefulset", func() { storageClassName = "nginx-sc-default" } else { ginkgo.By("Running for WCP setup") - - profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) - scParameters[scParamStoragePolicyID] = profileID - // create resource quota + storageClassName = storagePolicyName } ginkgo.By("scale down CSI driver POD to 1 , so that it will" + "be easy to validate all Listvolume response on one driver POD") - collectPodLogs(ctx, client, csiSystemNamespace) - scaledownCSIDriver, err := scaleCSIDriver(ctx, client, namespace, 1) + collectPodLogs(ctx, adminClient, csiSystemNamespace) + scaledownCSIDriver, err := 
scaleCSIDriver(ctx, adminClient, namespace, 1) gomega.Expect(scaledownCSIDriver).To(gomega.BeTrue(), "csi driver scaledown is not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { ginkgo.By("Scale up the csi-driver replica to 3") - success, err := scaleCSIDriver(ctx, client, namespace, 3) + success, err := scaleCSIDriver(ctx, adminClient, namespace, 3) gomega.Expect(success).To(gomega.BeTrue(), "csi driver scale up to 3 replica not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -882,6 +892,10 @@ var _ = ginkgo.Describe("statefulset", func() { Spec.StorageClassName = &storageClassName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) + + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + }() // Waiting for pods status to be Ready fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) @@ -935,7 +949,7 @@ var _ = ginkgo.Describe("statefulset", func() { HostKeyCallback: ssh.InsecureIgnoreHostKey(), } } - _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, + _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, adminClient, sshClientConfig, containerName, logMessage, volumesBeforeScaleUp) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -955,7 +969,7 @@ var _ = ginkgo.Describe("statefulset", func() { ginkgo.By("Validate pagination") logMessage = "token for next set: 3" - _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, containerName, logMessage, nil) + _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, adminClient, sshClientConfig, containerName, logMessage, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if vanillaCluster { @@ -968,7 +982,7 @@ var _ = ginkgo.Describe("statefulset", func() { //List volume responses will show up in the interval of every 1 minute. 
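The ListVolume validation in this statefulset spec now passes adminClient to collectPodLogs and getCSIPodWhereListVolumeResponseIsPresent, since the CSI driver pods live in csiSystemNamespace, which a devops-scoped client normally cannot read. As a rough illustration only (the helper name and log handling below are assumptions, not the suite's implementation), checking a driver pod's container log for an expected message with the admin client could look like this:

// Hedged sketch: scan one pod's container log for an expected ListVolumes message.
package e2e

import (
	"context"
	"strings"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// podLogContains reads the container log via the admin client and reports whether the
// expected message (e.g. "ListVolumes served 0 results") appears in it.
func podLogContains(ctx context.Context, adminClient clientset.Interface,
	namespace, podName, container, message string) (bool, error) {
	raw, err := adminClient.CoreV1().Pods(namespace).
		GetLogs(podName, &v1.PodLogOptions{Container: container}).DoRaw(ctx)
	if err != nil {
		return false, err
	}
	return strings.Contains(string(raw), message), nil
}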
//To see the error, it is required to wait for 1 min after deleting a few volumes time.Sleep(pollTimeoutShort) - _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, containerName, logMessage, nil) + _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, adminClient, sshClientConfig, containerName, logMessage, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -994,7 +1008,7 @@ var _ = ginkgo.Describe("statefulset", func() { ginkgo.By("Validate ListVolume Response when no volumes are present") logMessage = "ListVolumes served 0 results" - _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, containerName, logMessage, nil) + _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, adminClient, sshClientConfig, containerName, logMessage, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -1019,7 +1033,7 @@ var _ = ginkgo.Describe("statefulset", func() { setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimit) ginkgo.By("Get the storageclass from Supervisor") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Creating service") @@ -1128,7 +1142,7 @@ var _ = ginkgo.Describe("statefulset", func() { parallelStatefulSetCreation := false scParameters[svStorageClassName] = zonalWffcPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1145,7 +1159,7 @@ var _ = ginkgo.Describe("statefulset", func() { stsReplicas, nodeAffinityToSet, nil, podAntiAffinityToSet, true, "", "", storageclass, storageClassName) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() framework.Logf("Verify PV node affinity and that the PODS are running on appropriate node") @@ -1202,7 +1216,7 @@ var _ = ginkgo.Describe("statefulset", func() { parallelStatefulSetCreation := false scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1219,7 +1233,7 @@ var _ = ginkgo.Describe("statefulset", func() { stsReplicas, nodeAffinityToSet, allowedTopologies, podAntiAffinityToSet, true, "", "", storageclass, storageClassName) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() framework.Logf("Verify PV node affinity and that the PODS are running on appropriate node") @@ -1274,7 +1288,7 @@ var _ = ginkgo.Describe("statefulset", func() { parallelStatefulSetCreation := false scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1291,7 +1305,7 @@ var _ =
ginkgo.Describe("statefulset", func() { stsReplicas, nodeAffinityToSet, allowedTopologies, podAntiAffinityToSet, true, "", "", storageclass, storageClassName) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By("Verify all volumes are attached to Nodes after Statefulsets is scaled up/down") diff --git a/tests/e2e/storagepolicy.go b/tests/e2e/storagepolicy.go index 4888fcc848..612d94390f 100644 --- a/tests/e2e/storagepolicy.go +++ b/tests/e2e/storagepolicy.go @@ -32,7 +32,6 @@ import ( fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" - fss "k8s.io/kubernetes/test/e2e/framework/statefulset" admissionapi "k8s.io/pod-security-admission/api" _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" @@ -59,8 +58,9 @@ var _ = ginkgo.Describe("Storage Policy Based Volume Provisioning", func() { f := framework.NewDefaultFramework("e2e-spbm-policy") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var ( - client clientset.Interface - namespace string + client clientset.Interface + namespace string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet @@ -68,7 +68,19 @@ var _ = ginkgo.Describe("Storage Policy Based Volume Provisioning", func() { bootstrap() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + var nodeList *v1.NodeList + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } + + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") @@ -80,7 +92,7 @@ var _ = ginkgo.Describe("Storage Policy Based Volume Provisioning", func() { }) ginkgo.AfterEach(func() { if supervisorCluster { - deleteResourceQuota(client, namespace) + deleteResourceQuota(adminClient, namespace) dumpSvcNsEventsOnTestFailure(client, namespace) } if guestCluster { @@ -103,10 +115,8 @@ var _ = ginkgo.Describe("Storage Policy Based Volume Provisioning", func() { scParameters[scParamStoragePolicyName] = storagePolicyNameForSharedDatastores } else if supervisorCluster { ginkgo.By("CNS_TEST: Running for WCP setup") - profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyNameForSharedDatastores) - scParameters[scParamStoragePolicyID] = profileID // create resource quota - createResourceQuota(client, namespace, rqLimit, storagePolicyNameForSharedDatastores) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyNameForSharedDatastores) } else { ginkgo.By("CNS_TEST: Running for GC setup") scParameters[svStorageClassName] = storagePolicyNameForSharedDatastores @@ -131,26 +141,12 @@ var _ = ginkgo.Describe("Storage Policy Based Volume Provisioning", func() { scParameters[scParamStoragePolicyName] = storagePolicyNameForNonSharedDatastores } else if supervisorCluster { ginkgo.By("CNS_TEST: Running for WCP setup") - profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyNameForNonSharedDatastores) - scParameters[scParamStoragePolicyID] = profileID 
- - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, + _, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyNameForNonSharedDatastores, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } else { - storageclass, err = createStorageClass(client, scParameters, nil, - "", "", true, storagePolicyNameForNonSharedDatastores) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - // create resource quota - restClientConfig := getRestConfigClient() - setStoragePolicyQuota(ctx, restClientConfig, storagePolicyNameForNonSharedDatastores, namespace, rqLimit) - - pvcspec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", storageclass, nil, accessMode) - _, _ = fpv.CreatePVC(ctx, client, namespace, pvcspec) - } else { scParameters[svStorageClassName] = storagePolicyNameForNonSharedDatastores createVolumeWaitTime = pollTimeout @@ -195,7 +191,7 @@ var _ = ginkgo.Describe("Storage Policy Based Volume Provisioning", func() { if zonalPolicy == "" { ginkgo.Fail(envZonalStoragePolicyName + " env variable not set") } - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -217,7 +213,7 @@ var _ = ginkgo.Describe("Storage Policy Based Volume Provisioning", func() { } framework.Logf("zonal policy: %s and zonal wffc policy: %s", zonalPolicy, zonalWffcPolicy) - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -241,7 +237,7 @@ var _ = ginkgo.Describe("Storage Policy Based Volume Provisioning", func() { } defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) framework.Logf("deleting statefulset on namespace: %s", namespace) }() @@ -258,6 +254,8 @@ func verifyStoragePolicyBasedVolumeProvisioning(f *framework.Framework, client c var err error ctx, cancel := context.WithCancel(context.Background()) defer cancel() + + adminClient, _ := initializeClusterClientsByUserRoles(client) // decide which test setup is available to run if vanillaCluster { ginkgo.By("CNS_TEST: Running for vanilla k8s setup") @@ -276,7 +274,7 @@ func verifyStoragePolicyBasedVolumeProvisioning(f *framework.Framework, client c defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -362,6 +360,8 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc var storageclass *storagev1.StorageClass var pvclaim *v1.PersistentVolumeClaim var err error + + adminClient, _ := initializeClusterClientsByUserRoles(client) // decide which test setup is available to run if vanillaCluster { ginkgo.By("CNS_TEST: Running for vanilla k8s setup") @@ -376,11 +376,13 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", 
nil, "", false, "", storagePolicyName) } - gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("failed to create a StorageClass. Error: %v", err)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("failed to create/get a StorageClass. Error: %v", err)) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if !supervisorCluster { + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() defer func() { diff --git a/tests/e2e/tkgs_ha.go b/tests/e2e/tkgs_ha.go index 7f5b720a94..7b6ee82a9c 100644 --- a/tests/e2e/tkgs_ha.go +++ b/tests/e2e/tkgs_ha.go @@ -70,11 +70,24 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { labels_ns map[string]string isVcRebooted bool isQuotaValidationSupported bool + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet namespace = getNamespaceToRunTests(f) bootstrap() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } scParameters = make(map[string]string) topologyHaMap := GetAndExpectStringEnvVar(topologyHaMap) _, categories = createTopologyMapLevel5(topologyHaMap) @@ -94,9 +107,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.Fail(envZonalWffcStoragePolicyName + " env variable not set") } framework.Logf("zonal policy: %s and zonal wffc policy: %s", zonalPolicy, zonalWffcPolicy) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - nodeList, err := fnodes.GetReadySchedulableNodes(ctx, client) + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") @@ -211,7 +222,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { setResourceQuota(svcClient, svNamespace, rqLimit) restConfig := getRestConfigClient() scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -227,7 +238,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for GC PVC to come to bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -406,7 +417,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { setResourceQuota(svcClient, svNamespace, rqLimit) restConfig := getRestConfigClient() scParameters[svStorageClassName] = zonalWffcPolicy - storageclass, err 
:= client.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -503,7 +514,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, rqLimit) scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -511,7 +522,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for GC PVC to come to bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -653,7 +664,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { setResourceQuota(svcClient, svNamespace, rqLimit) restConfig := getRestConfigClient() scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -662,7 +673,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for GC PVC to come to bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -786,7 +797,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { setResourceQuota(svcClient, svNamespace, rqLimit) restConfig := getRestConfigClient() scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -794,7 +805,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for GC PVC to come to bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -932,7 +943,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, rqLimit) scParameters[svStorageClassName] = zonalPolicy - storageclass, err := 
client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1156,7 +1167,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, rqLimit) scParameters[svStorageClassName] = zonalWffcPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1349,7 +1360,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { nodeList, _ := fnodes.GetReadySchedulableNodes(ctx, client) scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1461,7 +1472,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { createResourceQuota(client, namespace, rqLimit, zonalPolicy) scParameters[svStorageClassName] = zonalWffcPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1472,7 +1483,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { pvclaims = append(pvclaims, pvclaim) ginkgo.By("Expect the pvc to provision volume successfully") - pv, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + pv, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := getVolumeIDFromSupervisorCluster(pv[0].Spec.CSI.VolumeHandle) gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) @@ -1644,7 +1655,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create statefulset with parallel pod management policy with replica 3") createResourceQuota(client, namespace, rqLimit, zonalPolicy) scParameters[svStorageClassName] = zonalWffcPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1757,7 +1768,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create statefulset with parallel pod management policy with replica 3") createResourceQuota(client, namespace, rqLimit, zonalPolicy) scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1868,7 +1879,7 @@ var _ = 
ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create statefulset with parallel pod management policy with replica 3") createResourceQuota(client, namespace, rqLimit, zonalWffcPolicy) scParameters[svStorageClassName] = zonalWffcPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -2096,13 +2107,13 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create 10 PVCs with with zonal SC") createResourceQuota(client, namespace, rqLimit, zonalPolicy) scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, volumeOpsScale, nil) - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, + _, err = WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2297,13 +2308,13 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create 10 PVCs with with zonal SC") createResourceQuota(client, namespace, rqLimit, zonalPolicy) scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, volumeOpsScale, nil) - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, + _, err = WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2434,7 +2445,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Creating Pvc with Immediate topology storageclass") createResourceQuota(client, namespace, rqLimit, zonalPolicy) scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -2445,7 +2456,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for SV PVC to come to bound state") - svcPv, err := fpv.WaitForPVClaimBoundPhase(ctx, svClient, []*v1.PersistentVolumeClaim{svPvclaim}, + svcPv, err := WaitForPVClaimBoundPhase(ctx, svClient, []*v1.PersistentVolumeClaim{svPvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2530,7 +2541,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for GC PVC to come to bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, 
[]*v1.PersistentVolumeClaim{pvclaim}, + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2719,7 +2730,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { createResourceQuota(client, namespace, rqLimit, zonalPolicy) scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -2847,13 +2858,13 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create 10 PVCs with with zonal SC") createResourceQuota(client, namespace, rqLimit, zonalPolicy) scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, volumeOpsScale, nil) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3026,7 +3037,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create statefulset with parallel pod management policy with replica 3") createResourceQuota(client, namespace, rqLimit, zonalWffcPolicy) scParameters[svStorageClassName] = zonalWffcPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -3164,13 +3175,13 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create 3 statefulsets with parallel pod management policy with replica 3") createResourceQuota(client, namespace, rqLimit, zonalWffcPolicy) scParameters[svStorageClassName] = zonalWffcPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } createResourceQuota(client, namespace, rqLimit, zonalPolicy) scParameters[svStorageClassName] = zonalPolicy - storageclassImmediate, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclassImmediate, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -3217,7 +3228,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { } ginkgo.By("Expect all pvcs to provision volume successfully") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + _, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) 
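Throughout these hunks the call sites move from fpv.WaitForPVClaimBoundPhase to the local WaitForPVClaimBoundPhase wrapper that this diff adds to util.go. The motivation is the devops-user mode: the upstream helper reads the bound PersistentVolume with the same client it was handed, and a namespace-scoped devops client cannot read cluster-scoped PVs. Below is a minimal sketch of the split the wrapper performs, using only names that appear in this diff; the helper name waitAndFetchBoundPV is illustrative only.

```go
// Minimal sketch of the split-client lookup behind the new
// WaitForPVClaimBoundPhase wrapper in util.go: the (possibly
// namespace-scoped) test client waits on the claim, while the admin
// client reads the cluster-scoped PV. waitAndFetchBoundPV is an
// illustrative name, not part of the diff.
func waitAndFetchBoundPV(ctx context.Context, adminClient, client clientset.Interface,
	claim *v1.PersistentVolumeClaim, timeout time.Duration) (*v1.PersistentVolume, error) {
	if err := fpv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client,
		claim.Namespace, claim.Name, framework.Poll, timeout); err != nil {
		return nil, err
	}
	// Re-read the claim with the test client to learn the bound PV name.
	boundClaim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(
		ctx, claim.Name, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("PVC Get API error: %w", err)
	}
	// PersistentVolumes are cluster-scoped, so only the admin client can read them.
	return adminClient.CoreV1().PersistentVolumes().Get(
		ctx, boundClaim.Spec.VolumeName, metav1.GetOptions{})
}
```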
labelsMap := make(map[string]string) @@ -3268,7 +3279,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { }() framework.Logf("After the VC reboot, Wait for all the PVC's to reach bound state") - _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + _, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("After the VC reboot, Verify all the pre-created deployment pod's, its status and metadata") @@ -3317,7 +3328,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { } ginkgo.By("Wait for GC PVC to come to bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3411,7 +3422,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Creating Pvc with Immediate topology storageclass") createResourceQuota(client, namespace, rqLimit, zonalPolicy) scParameters[svStorageClassName] = zonalPolicy - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } diff --git a/tests/e2e/tkgs_ha_utils.go b/tests/e2e/tkgs_ha_utils.go index ba0ffae688..6d0ad4ffb0 100644 --- a/tests/e2e/tkgs_ha_utils.go +++ b/tests/e2e/tkgs_ha_utils.go @@ -330,6 +330,7 @@ func verifyOnlineVolumeExpansionOnGc(client clientset.Interface, namespace strin func verifyOfflineVolumeExpansionOnGc(ctx context.Context, client clientset.Interface, pvclaim *v1.PersistentVolumeClaim, svcPVCName string, namespace string, volHandle string, pod *v1.Pod, pv *v1.PersistentVolume, f *framework.Framework) { + var err error ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") originalFsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/util.go b/tests/e2e/util.go index eabf3f4250..e8709f8421 100644 --- a/tests/e2e/util.go +++ b/tests/e2e/util.go @@ -91,6 +91,11 @@ import ( cnsvolumemetadatav1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsvolumemetadata/v1alpha1" storagepolicyv1alpha2 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/storagepolicy/v1alpha2" k8s "sigs.k8s.io/vsphere-csi-driver/v3/pkg/kubernetes" + + authenticationv1 "k8s.io/api/authentication/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/client-go/kubernetes" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) var ( @@ -585,11 +590,13 @@ func getVSphereStorageClassSpec(scName string, scParameters map[string]string, // getPvFromClaim returns PersistentVolume for requested claim. 
func getPvFromClaim(client clientset.Interface, namespace string, claimName string) *v1.PersistentVolume { + + adminClient, client := initializeClusterClientsByUserRoles(client) ctx, cancel := context.WithCancel(context.Background()) defer cancel() pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, claimName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - pv, err := client.CoreV1().PersistentVolumes().Get(ctx, pvclaim.Spec.VolumeName, metav1.GetOptions{}) + pv, err := adminClient.CoreV1().PersistentVolumes().Get(ctx, pvclaim.Spec.VolumeName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) return pv } @@ -825,11 +832,12 @@ func createStorageClass(client clientset.Interface, scParameters map[string]stri allowVolumeExpansion bool, scName string) (*storagev1.StorageClass, error) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass + var err error + adminClient, client := initializeClusterClientsByUserRoles(client) + var storageclass *storagev1.StorageClass isStorageClassPresent := false p := map[string]string{} - if scParameters == nil && os.Getenv(envHciMountRemoteDs) == "true" { p[scParamStoragePolicyName] = os.Getenv(envStoragePolicyNameForHCIRemoteDatastores) scParameters = p @@ -839,7 +847,7 @@ func createStorageClass(client clientset.Interface, scParameters map[string]stri scName, scParameters, allowedTopologies, scReclaimPolicy, allowVolumeExpansion)) if supervisorCluster { - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -850,7 +858,7 @@ func createStorageClass(client clientset.Interface, scParameters map[string]stri } if !isStorageClassPresent { - storageclass, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(scName, + storageclass, err = adminClient.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(scName, scParameters, allowedTopologies, scReclaimPolicy, bindingMode, allowVolumeExpansion), metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) } @@ -2536,7 +2544,10 @@ func getVolumeIDFromSupervisorCluster(pvcName string) string { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } svNamespace := GetAndExpectStringEnvVar(envSupervisorClusterNamespace) - svcPV := getPvFromClaim(svcClient, svNamespace, pvcName) + svcPvclaim, err := svcClient.CoreV1().PersistentVolumeClaims(svNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + svcPV, err := svcClient.CoreV1().PersistentVolumes().Get(context.TODO(), svcPvclaim.Spec.VolumeName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volumeHandle := svcPV.Spec.CSI.VolumeHandle ginkgo.By(fmt.Sprintf("Found volume in Supervisor cluster with VolumeID: %s", volumeHandle)) @@ -2552,7 +2563,10 @@ func getPvFromSupervisorCluster(pvcName string) *v1.PersistentVolume { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } svNamespace := GetAndExpectStringEnvVar(envSupervisorClusterNamespace) - svcPV := getPvFromClaim(svcClient, svNamespace, pvcName) + svcPvclaim, err := svcClient.CoreV1().PersistentVolumeClaims(svNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{}) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + svcPV, err := svcClient.CoreV1().PersistentVolumes().Get(context.TODO(), svcPvclaim.Spec.VolumeName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) return svcPV } @@ -5020,8 +5034,9 @@ func verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx context.Cont parallelStatefulSetCreation bool) error { allowedTopologiesMap := createAllowedTopologiesMap(allowedTopologies) var ssPodsBeforeScaleDown *v1.PodList - var err error + var err error + adminClient, client := initializeClusterClientsByUserRoles(client) if parallelStatefulSetCreation { ssPodsBeforeScaleDown = GetListOfPodsInSts(client, statefulset) } else { @@ -5056,7 +5071,7 @@ func verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx context.Cont } // fetch node details - nodeList, err := fnodes.GetReadySchedulableNodes(ctx, client) + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, adminClient) if err != nil { return err } @@ -6035,20 +6050,21 @@ This method is used to delete the CSI Controller Pod func deleteCsiControllerPodWhereLeaderIsRunning(ctx context.Context, client clientset.Interface, csi_controller_pod string) error { ignoreLabels := make(map[string]string) - csiPods, err := fpod.GetPodsInNamespace(ctx, client, csiSystemNamespace, ignoreLabels) + adminClient, client := initializeClusterClientsByUserRoles(client) + csiPods, err := fpod.GetPodsInNamespace(ctx, adminClient, csiSystemNamespace, ignoreLabels) gomega.Expect(err).NotTo(gomega.HaveOccurred()) num_csi_pods := len(csiPods) // Collecting and dumping csi pod logs before deleting them - collectPodLogs(ctx, client, csiSystemNamespace) + collectPodLogs(ctx, adminClient, csiSystemNamespace) for _, csiPod := range csiPods { if strings.Contains(csiPod.Name, vSphereCSIControllerPodNamePrefix) && csiPod.Name == csi_controller_pod { framework.Logf("Deleting the pod: %s", csiPod.Name) - err = fpod.DeletePodWithWait(ctx, client, csiPod) + err = fpod.DeletePodWithWait(ctx, adminClient, csiPod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } // wait for csi Pods to be in running ready state - err = fpod.WaitForPodsRunningReady(ctx, client, csiSystemNamespace, int(num_csi_pods), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiSystemNamespace, int(num_csi_pods), time.Duration(pollTimeout)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) return nil @@ -6324,7 +6340,7 @@ func startVCServiceWait4VPs(ctx context.Context, vcAddress string, service strin // assignPolicyToWcpNamespace assigns a set of storage policies to a wcp namespace func assignPolicyToWcpNamespace(client clientset.Interface, ctx context.Context, namespace string, policyNames []string, resourceQuotaLimit string) { - var err error + adminClient, client := initializeClusterClientsByUserRoles(client) sessionId := createVcSession4RestApis(ctx) curlStr := "" policyNamesArrLength := len(policyNames) @@ -6360,11 +6376,11 @@ func assignPolicyToWcpNamespace(client clientset.Interface, ctx context.Context, gomega.Expect(err).NotTo(gomega.HaveOccurred(), "couldn't execute command: %v due to err %v", curlCmd, err) } - gomega.Expect(result.Stdout).To(gomega.Equal("204")) + gomega.Expect(result.Stdout).To(gomega.Equal(status_code_success)) // wait for sc to get created in SVC for _, policyName := range policyNames { - err = waitForScToGetCreated(client, ctx, policyName) + err = waitForScToGetCreated(adminClient, ctx, policyName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -7990,6 +8006,7 @@ func 
convertGiStrToMibInt64(size resource.Quantity) int64 { func staticProvisioningPreSetUpUtil(ctx context.Context, f *framework.Framework, c clientset.Interface, storagePolicyName string) (*rest.Config, *storagev1.StorageClass, string) { namespace := getNamespaceToRunTests(f) + adminClient, _ := initializeClusterClientsByUserRoles(c) // Get a config to talk to the apiserver k8senv := GetAndExpectStringEnvVar("KUBECONFIG") restConfig, err := clientcmd.BuildConfigFromFlags("", k8senv) @@ -8001,7 +8018,7 @@ func staticProvisioningPreSetUpUtil(ctx context.Context, f *framework.Framework, scParameters["storagePolicyID"] = profileID if !supervisorCluster { - err = c.StorageV1().StorageClasses().Delete(ctx, storagePolicyName, metav1.DeleteOptions{}) + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storagePolicyName, metav1.DeleteOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -8010,11 +8027,11 @@ func staticProvisioningPreSetUpUtil(ctx context.Context, f *framework.Framework, storageclass, err := createStorageClass(c, scParameters, nil, "", "", true, storagePolicyName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("storageclass Name: %s", storageclass.GetName())) - storageclass, err = c.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("create resource quota") - createResourceQuota(c, namespace, rqLimit, storagePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName) return restConfig, storageclass, profileID } @@ -8105,7 +8122,7 @@ It constructs an API request and sends it to the vSphere REST API. 
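The helper below now takes a userName so the WCP namespace is created with that user as OWNER in its access list. For reference, the string-templated body in createTestWcpNs later in this diff sends access_list as a JSON array with one entry; the sketch below builds the equivalent entry programmatically as a slice. Whether the v2 namespaces endpoint also accepts the bare object that the map assignment in the function below produces is not verified here.

```go
// Hedged sketch: constructing the access_list the same way the
// string-templated body in createTestWcpNs does, i.e. as a JSON array
// containing a single OWNER entry for the devops user.
if userName != "" {
	requestBody["access_list"] = []map[string]interface{}{
		{
			"domain":       "vsphere.local",
			"role":         "OWNER",
			"subject":      userName,
			"subject_type": "USER",
		},
	}
}
```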
func createtWcpNsWithZonesAndPolicies( vcRestSessionId string, storagePolicyId []string, supervisorId string, zoneNames []string, - vmClass string, contentLibId string) (string, int, error) { + vmClass string, contentLibId string, userName string) (string, int, error) { r := rand.New(rand.NewSource(time.Now().Unix())) namespace := fmt.Sprintf("csi-vmsvcns-%v", r.Intn(10000)) @@ -8138,6 +8155,15 @@ func createtWcpNsWithZonesAndPolicies( } } + if userName != "" { + requestBody["access_list"] = map[string]interface{}{ + "domain": "vsphere.local", + "role": "OWNER", + "subject": userName, + "subject_type": "USER", + } + } + reqBodyBytes, err := json.Marshal(requestBody) if err != nil { return "", 500, fmt.Errorf("error marshalling request body: %w", err) @@ -8164,3 +8190,183 @@ func genrateRandomString(length int) (string, error) { generatedString = fmt.Sprintf("%x", b)[2 : length+2] return generatedString, err } + +// WaitForPVClaimBoundPhase waits until all pvcs phase set to bound +func WaitForPVClaimBoundPhase(ctx context.Context, client clientset.Interface, + pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) ([]*v1.PersistentVolume, error) { + persistentvolumes := make([]*v1.PersistentVolume, len(pvclaims)) + + adminClient, client := initializeClusterClientsByUserRoles(client) + for index, claim := range pvclaims { + err := fpv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, timeout) + if err != nil { + return persistentvolumes, err + } + // Get new copy of the claim + claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{}) + if err != nil { + return persistentvolumes, fmt.Errorf("PVC Get API error: %w", err) + } + // Get the bounded PV + persistentvolumes[index], err = adminClient.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{}) + if err != nil { + return persistentvolumes, fmt.Errorf("PV Get API error: %w", err) + } + } + return persistentvolumes, nil +} + +// createScopedClient generates a kubernetes-client by constructing kubeconfig by +// creating a user with minimal permissions and enable port forwarding +func createScopedClient(ctx context.Context, client clientset.Interface, ns string, saName string) (clientset.Interface, error) { + + roleName := ns + "role" + roleBindingName := roleName + "-binding" + contextName := "e2e-context" + + _, err := client.CoreV1().ServiceAccounts(ns).Create(ctx, &v1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: saName, + }, + }, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to create SA: %v", err) + } + + // 2. 
Create Role + _, err = client.RbacV1().Roles(ns).Create(ctx, &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"pods", "persistentvolumeclaims", "services"}, + Verbs: []string{"get", "watch", "list", "delete", "create", "update"}, + }, + { + APIGroups: []string{"apps"}, + Resources: []string{"statefulsets", "deployments", "replicasets"}, + Verbs: []string{"get", "watch", "list", "delete", "create", "update"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"events"}, + Verbs: []string{"get", "list"}, + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to create Role: %v", err) + } + + time.Sleep(5 * time.Second) + + _, err = client.RbacV1().RoleBindings(ns).Create(ctx, &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleBindingName, + }, + Subjects: []rbacv1.Subject{ + { + Kind: serviceAccountKeyword, + Name: saName, + Namespace: ns, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: roleKeyword, + Name: roleName, + APIGroup: rbacApiGroup, + }, + }, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to create RoleBinding: %v", err) + } + + var token string + + tr := &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ + Audiences: []string{audienceForSvcAccountName}, + }, + } + + tokenRequest, err := client.CoreV1().ServiceAccounts(ns).CreateToken(ctx, saName, tr, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get token: %v", err) + } + token = tokenRequest.Status.Token + framework.Logf("token:%s", token) + + if token == "" { + return nil, fmt.Errorf("no token found for service account") + } + framework.Logf("token:%s", token) + + time.Sleep(60 * time.Second) + localPort := GetAndExpectStringEnvVar("RANDOM_PORT") + framework.Logf("Random port: %s", localPort) + kubeConfig := clientcmdapi.Config{ + Clusters: map[string]*clientcmdapi.Cluster{ + "e2e-cluster": { + Server: fmt.Sprintf("https://127.0.0.1:%s", localPort), + InsecureSkipTLSVerify: true, + }, + }, + Contexts: map[string]*clientcmdapi.Context{ + contextName: { + Cluster: "e2e-cluster", + AuthInfo: "e2e-user", + Namespace: ns, + }, + }, + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + "e2e-user": { + Token: token, + }, + }, + CurrentContext: contextName, + } + + b, _ := json.MarshalIndent(kubeConfig, "", " ") + fmt.Println(string(b)) + restCfg, err := clientcmd.NewNonInteractiveClientConfig(kubeConfig, contextName, &clientcmd.ConfigOverrides{}, nil).ClientConfig() + if err != nil { + return nil, fmt.Errorf("failed to build rest.Config: %v", err) + } + + framework.Logf("API Server:%v", restCfg.Host) + framework.Logf("Bearer Token:%v", restCfg.BearerToken) // don't print full token + framework.Logf("CA Data Present:%v", len(restCfg.TLSClientConfig.CAData)) + framework.Logf("Insecure Skip TLS Verify:%v", restCfg.TLSClientConfig.Insecure) + nsScopedClient, err := kubernetes.NewForConfig(restCfg) + if err != nil { + return nil, fmt.Errorf("failed to create Clientset: %v", err) + } + return nsScopedClient, nil +} + +func initializeClusterClientsByUserRoles(client clientset.Interface) (clientset.Interface, clientset.Interface) { + var adminClient clientset.Interface + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + if supervisorCluster || guestCluster { + if runningAsDevopsUser == "yes" { + if svAdminK8sEnv := GetAndExpectStringEnvVar("ADMIN_KUBECONFIG"); svAdminK8sEnv != 
"" { + adminClient, err = createKubernetesClientFromConfig(svAdminK8sEnv) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if supervisorCluster { + if devopsK8sEnv := GetAndExpectStringEnvVar("DEVOPS_KUBE_CONFIG"); devopsK8sEnv != "" { + client, err = createKubernetesClientFromConfig(devopsK8sEnv) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + } else { + adminClient = client + } + } else if vanillaCluster || adminClient == nil { + adminClient = client + } + return adminClient, client +} diff --git a/tests/e2e/vc_reboot_volume_lifecycle.go b/tests/e2e/vc_reboot_volume_lifecycle.go index 94b5f94667..81578a7f85 100644 --- a/tests/e2e/vc_reboot_volume_lifecycle.go +++ b/tests/e2e/vc_reboot_volume_lifecycle.go @@ -44,13 +44,25 @@ var _ bool = ginkgo.Describe("Verify volume life_cycle operations works fine aft storagePolicyName string scParameters map[string]string isVcRebooted bool + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet namespace = getNamespaceToRunTests(f) bootstrap() + ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -130,21 +142,21 @@ var _ bool = ginkgo.Describe("Verify volume life_cycle operations works fine aft } else { ginkgo.By("CNS_TEST: Running for GC setup") scParameters[svStorageClassName] = storagePolicyName - createResourceQuota(client, namespace, rqLimit, storagePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", false, "") } gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - if !supervisorCluster || !stretchedSVC { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + if vanillaCluster || guestCluster { + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() ginkgo.By("Waiting for claim to be in bound phase") - pvc, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvc, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvc).NotTo(gomega.BeEmpty()) diff --git a/tests/e2e/vcp_to_csi_attach_detach.go b/tests/e2e/vcp_to_csi_attach_detach.go index 2f944ac2cb..dbb2cd35ce 100644 --- a/tests/e2e/vcp_to_csi_attach_detach.go +++ b/tests/e2e/vcp_to_csi_attach_detach.go @@ -1085,6 +1085,7 @@ func verifyVolMountsInPods(ctx context.Context, client clientset.Interface, pods var exists bool var vmUUID string + for i, pod := range pods { // Waiting for pod to be running. 
err := fpod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, pod.Namespace) diff --git a/tests/e2e/vm_service_vsan_stretch_cluster.go b/tests/e2e/vm_service_vsan_stretch_cluster.go index ecb0b5f6c6..bdba7c1547 100644 --- a/tests/e2e/vm_service_vsan_stretch_cluster.go +++ b/tests/e2e/vm_service_vsan_stretch_cluster.go @@ -65,6 +65,8 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests isVsanHealthServiceStopped bool isSPSserviceStopped bool nodeList *v1.NodeList + adminClient clientset.Interface + userName string ) ginkgo.BeforeEach(func() { @@ -73,6 +75,8 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests client = f.ClientSet var err error + adminClient, client = initializeClusterClientsByUserRoles(client) + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -106,7 +110,7 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests vmClass = vmClassBestEffortSmall } namespace = createTestWcpNs( - vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId)) + vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId), userName) time.Sleep(5 * time.Minute) @@ -141,7 +145,7 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests } dumpSvcNsEventsOnTestFailure(client, namespace) delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }) /* @@ -170,14 +174,14 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests ginkgo.By("Creating StorageClass") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create multiple PVCs") pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, nil) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -219,7 +223,7 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests "and waits for VM IP to come up to come up and verify PVCs are accessible in the VM") createVMServiceandWaitForVMtoGetIP(ctx, vmopC, cnsopC, namespace, vms, pvclaimsList, true, true) - csipods, err := client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) + csipods, err := adminClient.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Bring down the primary site") @@ -245,12 +249,12 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests time.Sleep(5 * time.Minute) // Check if csi pods are running fine after site failure ginkgo.By("Check if csi pods are running fine after site failure") - err = fpod.WaitForPodsRunningReady(ctx, client, csiNs, len(csipods.Items), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiNs, len(csipods.Items), time.Duration(pollTimeout*2)) 
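In the vsan-stretch and vmservice specs below, listing the CSI controller pods and reading StorageClasses switches from client to adminClient: the csi system namespace and cluster-scoped StorageClasses both sit outside the devops user's namespace grant. A small usage sketch of the split; exampleClientSplit is an illustrative name, not part of the diff.

```go
// Illustrative sketch of the admin/devops client split used across
// these specs: namespaced test objects go through the scoped client,
// while cluster-scoped StorageClasses and csi-system pods require admin.
func exampleClientSplit(ctx context.Context, f *framework.Framework,
	namespace, pvcName, scName string) {
	adminClient, client := initializeClusterClientsByUserRoles(f.ClientSet)

	// Namespaced: the scoped client is sufficient.
	_, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	// Cluster-scoped and system-namespace reads: admin client required.
	_, err = adminClient.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{})
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	_, err = adminClient.CoreV1().Pods(csiSystemNamespace).List(ctx, metav1.ListOptions{})
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
```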
gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, vm := range vms { @@ -299,14 +303,14 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests ginkgo.By("Get StorageClass for volume creation") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create multiple PVCs") pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, nil) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -348,7 +352,7 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests "and waits for VM IP to come up to come up and verify PVCs are accessible in the VM") createVMServiceandWaitForVMtoGetIP(ctx, vmopC, cnsopC, namespace, vms, pvclaimsList, true, true) - csipods, err := client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) + csipods, err := adminClient.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Bring down the secondary site") @@ -374,12 +378,12 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests time.Sleep(5 * time.Minute) // Check if csi pods are running fine after site failure ginkgo.By("Check if csi pods are running fine after site failure") - err = fpod.WaitForPodsRunningReady(ctx, client, csiNs, len(csipods.Items), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiNs, len(csipods.Items), time.Duration(pollTimeout*2)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, vm := range vms { @@ -427,14 +431,14 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests var vms []*vmopv1.VirtualMachine ginkgo.By("Creating StorageClass") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create multiple PVCs") pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, nil) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -458,7 +462,7 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - csipods, err := client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) + csipods, err := 
adminClient.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ch := make(chan *vmopv1.VirtualMachine) @@ -507,12 +511,12 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests // Check if csi pods are running fine after site failure ginkgo.By("Check if csi pods are running fine after site failure") - err = fpod.WaitForPodsRunningReady(ctx, client, csiNs, len(csipods.Items), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiNs, len(csipods.Items), time.Duration(pollTimeout*2)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Creates a loadbalancing service for ssh with each VM" + @@ -562,14 +566,14 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests ginkgo.By("Creating StorageClass") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create multiple PVCs") pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, nil) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -593,7 +597,7 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - csipods, err := client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) + csipods, err := adminClient.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Creating VM") @@ -644,12 +648,12 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests time.Sleep(5 * time.Minute) // Check if csi pods are running fine after site failure ginkgo.By("Check if csi pods are running fine after site failure") - err = fpod.WaitForPodsRunningReady(ctx, client, csiNs, len(csipods.Items), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiNs, len(csipods.Items), time.Duration(pollTimeout*2)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify all the VMservice vms created before " + @@ -697,14 +701,14 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests var vms []*vmopv1.VirtualMachine ginkgo.By("Creating StorageClass") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create multiple PVCs") pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, sc, 10, nil) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err := 
fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -728,7 +732,7 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - csipods, err := client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) + csipods, err := adminClient.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ch := make(chan *vmopv1.VirtualMachine) @@ -757,12 +761,12 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests time.Sleep(5 * time.Minute) ginkgo.By("Check if csi pods are running fine after site recovery") - err = fpod.WaitForPodsRunningReady(ctx, client, csiNs, len(csipods.Items), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiNs, len(csipods.Items), time.Duration(pollTimeout*2)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Creates a loadbalancing service for ssh with each VM" + @@ -805,10 +809,10 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests var err error ginkgo.By("Creating StorageClass") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - csipods, err := client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) + csipods, err := adminClient.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for k8s cluster to be healthy") @@ -831,7 +835,7 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests // Check if csi pods are running fine after site failure ginkgo.By("Check if csi pods are running fine after site failure") - err = fpod.WaitForPodsRunningReady(ctx, client, csiNs, len(csipods.Items), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiNs, len(csipods.Items), time.Duration(pollTimeout*2)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -839,7 +843,7 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, nil) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -929,14 +933,14 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests ginkgo.By("Creating StorageClass") - sc, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create multiple PVCs") pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, nil) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err := 
fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -978,7 +982,7 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests "and waits for VM IP to come up to come up and verify PVCs are accessible in the VM") createVMServiceandWaitForVMtoGetIP(ctx, vmopC, cnsopC, namespace, vms, pvclaimsList, true, true) - csipods, err := client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) + csipods, err := adminClient.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Cause a network failure on primary site @@ -1000,12 +1004,12 @@ var _ bool = ginkgo.Describe("[vsan-stretch-vmsvc] vm service with csi vol tests // Check if csi pods are running fine after site failure ginkgo.By("Check if csi pods are running fine after site failure") - err = fpod.WaitForPodsRunningReady(ctx, client, csiNs, len(csipods.Items), + err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiNs, len(csipods.Items), time.Duration(pollTimeout*2)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) + pvs, err = WaitForPVClaimBoundPhase(ctx, client, pvclaimsList, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, vm := range vms { diff --git a/tests/e2e/vmservice_late_binding.go b/tests/e2e/vmservice_late_binding.go index 66dbea32ae..7298826bc6 100644 --- a/tests/e2e/vmservice_late_binding.go +++ b/tests/e2e/vmservice_late_binding.go @@ -101,7 +101,7 @@ var _ bool = ginkgo.Describe("[vmsvc] VM-Service-VM-LateBinding", func() { // Create SVC namespace and assign storage policy and vmContent Library namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{storageProfileId}, getSvcId(vcRestSessionId), - nil, vmClass, contentLibId) + nil, vmClass, contentLibId, devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) diff --git a/tests/e2e/vmservice_utils.go b/tests/e2e/vmservice_utils.go index 0c62b585a8..bf3cf7775f 100644 --- a/tests/e2e/vmservice_utils.go +++ b/tests/e2e/vmservice_utils.go @@ -68,7 +68,7 @@ const vmServiceVmLabelKey = "topology.kubernetes.io/zone" // createTestWcpNs create a wcp namespace with given storage policy, vm class and content lib via REST API func createTestWcpNs( vcRestSessionId string, storagePolicyId string, vmClass string, contentLibId string, - supervisorId string) string { + supervisorId string, userName string) string { vcIp := e2eVSphere.Config.Global.VCenterHostname r := rand.New(rand.NewSource(time.Now().Unix())) @@ -76,6 +76,12 @@ func createTestWcpNs( namespace := fmt.Sprintf("csi-vmsvcns-%v", r.Intn(10000)) nsCreationUrl := "https://" + vcIp + "/api/vcenter/namespaces/instances/v2" reqBody := fmt.Sprintf(`{ + "access_list": [ { + "domain": "vsphere.local", + "role": "OWNER", + "subject": "%s", + "subject_type": "USER" + } ], "namespace": "%s", "storage_specs": [ { "policy": "%s" @@ -89,7 +95,7 @@ func createTestWcpNs( ] }, "supervisor": "%s" - }`, namespace, storagePolicyId, vmClass, contentLibId, supervisorId) + }`, userName, namespace, storagePolicyId, vmClass, contentLibId, supervisorId) fmt.Println(reqBody) diff --git a/tests/e2e/vmservice_vm.go b/tests/e2e/vmservice_vm.go index 24c56602d6..07a899dd39 
100644 --- a/tests/e2e/vmservice_vm.go +++ b/tests/e2e/vmservice_vm.go @@ -66,6 +66,8 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { isSPSserviceStopped bool isQuotaValidationSupported bool defaultDatastore *object.Datastore + adminClient clientset.Interface + userName string ) ginkgo.BeforeEach(func() { @@ -73,6 +75,8 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { defer cancel() client = f.ClientSet var err error + + adminClient, client = initializeClusterClientsByUserRoles(client) topologyFeature := os.Getenv(topologyFeature) if topologyFeature != topologyTkgHaName { nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) @@ -109,7 +113,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { framework.Logf("Create a WCP namespace for the test") namespace = createTestWcpNs( - vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId)) + vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId), userName) ginkgo.By("Verifying storage policies usage for each storage class") restConfig = getRestConfigClient() @@ -173,7 +177,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { } dumpSvcNsEventsOnTestFailure(client, namespace) delTestWcpNs(vcRestSessionId, namespace) - gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, adminClient, namespace, poll, pollTimeout)).To(gomega.Succeed()) }) /* @@ -243,13 +247,13 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { verifyBidirectionalReferenceOfPVandPVC(ctx, client, staticPvc, staticPv, fcdID) ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create a PVC") pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc, staticPvc}, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc, staticPvc}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := pvs[0] volHandle := pv.Spec.CSI.VolumeHandle @@ -334,13 +338,13 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create a PVC") pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := pvs[0] volHandle := pv.Spec.CSI.VolumeHandle @@ -526,7 +530,7 
@@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create a PVC") pvc1, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") @@ -535,7 +539,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvcs := []*v1.PersistentVolumeClaim{pvc1, pvc2} ginkgo.By("Waiting for all claims to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvcs, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -730,7 +734,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { framework.Logf("annotationVal :%s, pvcAnnotations: %v", annotationVal, pvcAnnotations) ginkgo.By("Creating Pvc with Immediate topology storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvcSpec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", storageclass, nil, "") pvcSpec.Annotations = pvcAnnotations @@ -738,7 +742,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for SV PVC to come to bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volumeID := pvs[0].Spec.CSI.VolumeHandle @@ -845,7 +849,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { framework.Logf("annotationVal :%s, pvcAnnotations: %v", annotationVal, pvcAnnotations) ginkgo.By("Create a PVC say pvc1 under zone2") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvcSpec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", storageclass, nil, "") pvcSpec.Annotations = pvcAnnotations @@ -853,7 +857,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Wait for PVC to come to bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volumeID := pvs[0].Spec.CSI.VolumeHandle @@ -963,13 +967,13 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := 
adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create a PVC") pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := pvs[0] volHandle := pv.Spec.CSI.VolumeHandle @@ -1102,7 +1106,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { vmlbsvcs := []*vmopv1.VirtualMachineService{} ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create a PVC") for i := 0; i < 3; i++ { @@ -1112,7 +1116,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { } ginkgo.By("Waiting for all claims to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, pvcs, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -1297,13 +1301,13 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { defer cancel() ginkgo.By("Create a storageclass") - storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create a PVC") pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := pvs[0] volHandle := pv.Spec.CSI.VolumeHandle diff --git a/tests/e2e/volume_health_test.go b/tests/e2e/volume_health_test.go index 82275ced50..d1803ee454 100644 --- a/tests/e2e/volume_health_test.go +++ b/tests/e2e/volume_health_test.go @@ -57,6 +57,7 @@ var _ = ginkgo.Describe("Volume health check", func() { isVsanHealthServiceStopped bool isSPSServiceStopped bool csiNamespace string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -65,6 +66,17 @@ var _ = ginkgo.Describe("Volume health check", func() { defer cancel() client = f.ClientSet namespace = getNamespaceToRunTests(f) + var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } + scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) csiNamespace = GetAndExpectStringEnvVar(envCSINamespace) @@ -161,7 +173,7 @@ var 
_ = ginkgo.Describe("Volume health check", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -173,7 +185,7 @@ var _ = ginkgo.Describe("Volume health check", func() { pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle svPVCName := volHandle @@ -254,7 +266,7 @@ var _ = ginkgo.Describe("Volume health check", func() { profileID := e2eVSphere.GetSpbmPolicyID(nonShareadstoragePolicyName) scParameters[scParamStoragePolicyID] = profileID - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, nonShareadstoragePolicyName, metav1.GetOptions{}) + storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, nonShareadstoragePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } else { @@ -279,7 +291,7 @@ var _ = ginkgo.Describe("Volume health check", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -334,7 +346,7 @@ var _ = ginkgo.Describe("Volume health check", func() { profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) scParameters[scParamStoragePolicyID] = profileID // Create resource quota. - createResourceQuota(client, namespace, rqLimit, storagePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", true, "", storagePolicyName) } else if guestCluster { @@ -347,7 +359,7 @@ var _ = ginkgo.Describe("Volume health check", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -359,7 +371,7 @@ var _ = ginkgo.Describe("Volume health check", func() { pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle svPVCName := volHandle @@ -476,7 +488,7 @@ var _ = ginkgo.Describe("Volume health check", func() { profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) scParameters[scParamStoragePolicyID] = profileID // Create resource quota. 
- createResourceQuota(client, namespace, rqLimit, storagePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", true, "", storagePolicyName) } else if guestCluster { @@ -489,7 +501,7 @@ var _ = ginkgo.Describe("Volume health check", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -501,7 +513,7 @@ var _ = ginkgo.Describe("Volume health check", func() { pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle svPVCName := volHandle @@ -618,7 +630,7 @@ var _ = ginkgo.Describe("Volume health check", func() { profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) scParameters[scParamStoragePolicyID] = profileID // Create resource quota. - createResourceQuota(client, namespace, rqLimit, storagePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", true, "", storagePolicyName) } else if guestCluster { @@ -631,7 +643,7 @@ var _ = ginkgo.Describe("Volume health check", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -643,7 +655,7 @@ var _ = ginkgo.Describe("Volume health check", func() { pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle svPVCName := volHandle @@ -751,7 +763,7 @@ var _ = ginkgo.Describe("Volume health check", func() { profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) scParameters[scParamStoragePolicyID] = profileID // Create resource quota. 
- createResourceQuota(client, namespace, rqLimit, storagePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", true, "", storagePolicyName) } else if guestCluster { @@ -764,7 +776,7 @@ var _ = ginkgo.Describe("Volume health check", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -776,7 +788,7 @@ var _ = ginkgo.Describe("Volume health check", func() { pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle if guestCluster { @@ -857,13 +869,8 @@ var _ = ginkgo.Describe("Volume health check", func() { defer cancel() ginkgo.By("Creating StorageClass for Statefulset") // Decide which test setup is available to run. - if supervisorCluster { - ginkgo.By("CNS_TEST: Running for WCP setup") - profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) - scParameters[scParamStoragePolicyID] = profileID - } - sc, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -886,7 +893,7 @@ var _ = ginkgo.Describe("Volume health check", func() { defer func() { ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() replicas := *(statefulset.Spec.Replicas) @@ -956,7 +963,7 @@ var _ = ginkgo.Describe("Volume health check", func() { profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) scParameters[scParamStoragePolicyID] = profileID // Create resource quota. 
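Note: fss.DeleteAllStatefulSets is swapped for a package-local deleteAllStsAndPodsPVCsInNamespace in the StatefulSet cleanups above and below. Its definition is not part of this patch; a minimal sketch, assuming it only widens the cleanup to leftover Pods and PVCs so namespace teardown cannot hang on orphaned claims, could look like this (assumed imports: clientset, fss, metav1, framework).

// Hypothetical sketch only; the real helper lives elsewhere in the suite and may differ.
func deleteAllStsAndPodsPVCsInNamespaceSketch(ctx context.Context, client clientset.Interface, namespace string) {
	// Scale down and delete all StatefulSets first (upstream framework helper).
	fss.DeleteAllStatefulSets(ctx, client, namespace)
	// Then remove any leftover Pods and PersistentVolumeClaims in the namespace.
	err := client.CoreV1().Pods(namespace).DeleteCollection(ctx,
		*metav1.NewDeleteOptions(0), metav1.ListOptions{})
	framework.ExpectNoError(err)
	err = client.CoreV1().PersistentVolumeClaims(namespace).DeleteCollection(ctx,
		*metav1.NewDeleteOptions(0), metav1.ListOptions{})
	framework.ExpectNoError(err)
}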
- createResourceQuota(client, namespace, rqLimit, storagePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", true, "", storagePolicyName) } @@ -976,7 +983,7 @@ var _ = ginkgo.Describe("Volume health check", func() { pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle @@ -984,7 +991,7 @@ var _ = ginkgo.Describe("Volume health check", func() { ginkgo.By(fmt.Sprintf("Sleeping for %v minutes to allow volume health check to be triggered", healthStatusWaitTime)) time.Sleep(healthStatusWaitTime) - pv, err := client.CoreV1().PersistentVolumes().Get(ctx, persistentvolumes[0].Name, metav1.GetOptions{}) + pv, err := adminClient.CoreV1().PersistentVolumes().Get(ctx, persistentvolumes[0].Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for describe := range pv.Annotations { @@ -1038,7 +1045,7 @@ var _ = ginkgo.Describe("Volume health check", func() { profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) scParameters[scParamStoragePolicyID] = profileID // Create resource quota. - createResourceQuota(client, namespace, rqLimit, storagePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", true, "", storagePolicyName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1057,7 +1064,7 @@ var _ = ginkgo.Describe("Volume health check", func() { pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) @@ -1129,7 +1136,7 @@ var _ = ginkgo.Describe("Volume health check", func() { sc, err := createStorageClass(client, scParameters, nil, "", "", false, "nginx-sc") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -1199,7 +1206,7 @@ var _ = ginkgo.Describe("Volume health check", func() { } defer func() { ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() }) @@ -1237,12 +1244,12 @@ var _ = ginkgo.Describe("Volume health check", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := 
fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -1367,7 +1374,7 @@ var _ = ginkgo.Describe("Volume health check", func() { profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) scParameters[scParamStoragePolicyID] = profileID // Create resource quota. - createResourceQuota(client, namespace, rqLimit, storagePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName) sc, pvc, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", true, "", storagePolicyName) } else if guestCluster { @@ -1379,13 +1386,13 @@ var _ = ginkgo.Describe("Volume health check", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -1494,7 +1501,7 @@ var _ = ginkgo.Describe("Volume health check", func() { profileID := e2eVSphere.GetSpbmPolicyID(raid0StoragePolicyName) scParameters[scParamStoragePolicyID] = profileID // Create resource quota. - createResourceQuota(client, namespace, rqLimit, raid0StoragePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, raid0StoragePolicyName) sc, pvc, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", false, "", raid0StoragePolicyName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1507,7 +1514,7 @@ var _ = ginkgo.Describe("Volume health check", func() { }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -1641,7 +1648,7 @@ var _ = ginkgo.Describe("Volume health check", func() { profileID := e2eVSphere.GetSpbmPolicyID(raid0StoragePolicyName) scParameters[scParamStoragePolicyID] = profileID // Create resource quota. 
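Note: every fpv.WaitForPVClaimBoundPhase call in these files is redirected to a package-local WaitForPVClaimBoundPhase whose body is not shown in this patch. The simplest shape consistent with the call sites is a thin delegating wrapper, sketched below; the real helper may instead resolve the cluster-scoped PV through the admin client, since a namespace-scoped devops user cannot read PersistentVolumes (assumed imports: clientset, fpv, v1, time).

// Hypothetical delegating wrapper; signature inferred from the call sites above.
func WaitForPVClaimBoundPhaseSketch(ctx context.Context, client clientset.Interface,
	pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) ([]*v1.PersistentVolume, error) {
	return fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, timeout)
}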
- createResourceQuota(client, namespace, rqLimit, raid0StoragePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, raid0StoragePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", false, "", raid0StoragePolicyName) } else if guestCluster { @@ -1654,14 +1661,14 @@ var _ = ginkgo.Describe("Volume health check", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() ginkgo.By("Expect claim to provision volume successfully") pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle svPVCName := volHandle @@ -1810,7 +1817,7 @@ var _ = ginkgo.Describe("Volume health check", func() { profileID := e2eVSphere.GetSpbmPolicyID(raid0StoragePolicyName) scParameters[scParamStoragePolicyID] = profileID // Create resource quota. - createResourceQuota(client, namespace, rqLimit, raid0StoragePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, raid0StoragePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", false, "", raid0StoragePolicyName) @@ -1825,7 +1832,7 @@ var _ = ginkgo.Describe("Volume health check", func() { ginkgo.By("Expect claim to provision volume successfully") pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) @@ -1924,7 +1931,7 @@ var _ = ginkgo.Describe("Volume health check", func() { profileID := e2eVSphere.GetSpbmPolicyID(raid0StoragePolicyName) scParameters[scParamStoragePolicyID] = profileID // Create resource quota. 
- createResourceQuota(client, namespace, rqLimit, raid0StoragePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, raid0StoragePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", false, "", raid0StoragePolicyName) } else if guestCluster { @@ -1937,14 +1944,14 @@ var _ = ginkgo.Describe("Volume health check", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() ginkgo.By("Expect claim to provision volume successfully") pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle svPVCName := volHandle @@ -2098,12 +2105,12 @@ var _ = ginkgo.Describe("Volume health check", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) - pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + pvs, err := WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvs).NotTo(gomega.BeEmpty()) @@ -2242,14 +2249,12 @@ var _ = ginkgo.Describe("Volume health check", func() { profileID := e2eVSphere.GetSpbmPolicyID(raid0StoragePolicyName) scParameters[scParamStoragePolicyID] = profileID // Create resource quota. 
- createResourceQuota(client, namespace, rqLimit, defaultNginxStorageClassName) - scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, scParameters, nil, "", "", false) - sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, raid0StoragePolicyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -2262,7 +2267,7 @@ var _ = ginkgo.Describe("Volume health check", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -2367,7 +2372,7 @@ var _ = ginkgo.Describe("Volume health check", func() { err := waitForHostToBeUp(hostIP) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By(fmt.Sprintf("Sleeping for %v to allow volume health check to be triggered", svOperationTimeout)) @@ -2479,7 +2484,7 @@ var _ = ginkgo.Describe("Volume health check", func() { profileID := e2eVSphere.GetSpbmPolicyID(raid0StoragePolicyName) scParameters[scParamStoragePolicyID] = profileID // Create resource quota. - createResourceQuota(client, namespace, rqLimit, raid0StoragePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, raid0StoragePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", false, "", raid0StoragePolicyName) } else if guestCluster { @@ -2492,14 +2497,14 @@ var _ = ginkgo.Describe("Volume health check", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() ginkgo.By("Expect claim to provision volume successfully") pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle svPVCName := volHandle @@ -2650,7 +2655,7 @@ var _ = ginkgo.Describe("Volume health check", func() { pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle @@ -2717,7 +2722,7 @@ var _ = ginkgo.Describe("Volume health check", func() { var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for all 
claims to be in bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle @@ -2782,8 +2787,6 @@ var _ = ginkgo.Describe("Volume health check", func() { ginkgo.By("CNS_TEST: Running for WCP setup") profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) scParameters[scParamStoragePolicyID] = profileID - // Create resource quota. - createResourceQuota(client, namespace, rqLimit, storagePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", true, "", storagePolicyName) } else if guestCluster { @@ -2796,7 +2799,7 @@ var _ = ginkgo.Describe("Volume health check", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -2808,7 +2811,7 @@ var _ = ginkgo.Describe("Volume health check", func() { pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle svcPVCName = volHandle @@ -2886,13 +2889,13 @@ var _ = ginkgo.Describe("Volume health check", func() { profileID := e2eVSphere.GetSpbmPolicyID(raid0StoragePolicyName) scParameters[scParamStoragePolicyID] = profileID // Create resource quota. 
- createResourceQuota(client, namespace, rqLimit, defaultNginxStorageClassName) + createResourceQuota(adminClient, namespace, rqLimit, defaultNginxStorageClassName) scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, scParameters, nil, "", "", false) sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() } @@ -2903,7 +2906,7 @@ var _ = ginkgo.Describe("Volume health check", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() } @@ -3006,7 +3009,7 @@ var _ = ginkgo.Describe("Volume health check", func() { err := waitForHostToBeUp(hostIP) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By(fmt.Sprintf("Sleeping for %v to allow volume health check to be triggered", svOperationTimeout)) diff --git a/tests/e2e/vsan_max_podvm_wldi.go b/tests/e2e/vsan_max_podvm_wldi.go index 9c4b6c3635..0fed41aeb9 100644 --- a/tests/e2e/vsan_max_podvm_wldi.go +++ b/tests/e2e/vsan_max_podvm_wldi.go @@ -59,6 +59,7 @@ var _ bool = ginkgo.Describe("[podvm-domain-isolation-vsan-max] PodVM-WLDI-Vsan- uncordon bool filteredNodes *v1.NodeList dh drain.Helper + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -69,6 +70,9 @@ var _ bool = ginkgo.Describe("[podvm-domain-isolation-vsan-max] PodVM-WLDI-Vsan- client = f.ClientSet bootstrap() + var err error + adminClient, client = initializeClusterClientsByUserRoles(client) + // reading vc session id if vcRestSessionId == "" { vcRestSessionId = createVcSession4RestApis(ctx) @@ -100,7 +104,7 @@ var _ bool = ginkgo.Describe("[podvm-domain-isolation-vsan-max] PodVM-WLDI-Vsan- dh = drain.Helper{ Ctx: ctx, - Client: client, + Client: adminClient, Force: true, IgnoreAllDaemonSets: true, Out: ginkgo.GinkgoWriter, @@ -172,7 +176,7 @@ var _ bool = ginkgo.Describe("[podvm-domain-isolation-vsan-max] PodVM-WLDI-Vsan- topValEndIndex) namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId, []string{storageProfileId}, getSvcId(vcRestSessionId), - []string{zone1, zone2}, "", "") + []string{zone1, zone2}, "", "", devopsUser) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(statuscode).To(gomega.Equal(status_code_success)) defer func() { diff --git a/tests/e2e/vsan_max_tkg_wldi.go b/tests/e2e/vsan_max_tkg_wldi.go index e21a305d92..e56a2a7e1c 100644 --- a/tests/e2e/vsan_max_tkg_wldi.go +++ b/tests/e2e/vsan_max_tkg_wldi.go @@ -191,7 +191,7 @@ var _ bool = ginkgo.Describe("[tkg-domain-isolation-vsan-max] TKG-WLDI-Vsan-Max" statefulsetRwo := createCustomisedStatefulSets(ctx, client, namespace, true, replicas, true, allowedTopologies, true, true, "", "", storageclass, storageclass.Name) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By("Verify 
svc pv affinity, pvc annotation and pod node affinity") @@ -203,7 +203,7 @@ var _ bool = ginkgo.Describe("[tkg-domain-isolation-vsan-max] TKG-WLDI-Vsan-Max" statefulsetRwm := createCustomisedStatefulSets(ctx, client, namespace, true, replicas, true, allowedTopologies, true, true, "", v1.ReadWriteMany, storageclass, storageclass.Name) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By("Verify svc pv affinity, pvc annotation and pod node affinity") @@ -317,7 +317,7 @@ var _ bool = ginkgo.Describe("[tkg-domain-isolation-vsan-max] TKG-WLDI-Vsan-Max" statefulsetRwo := createCustomisedStatefulSets(ctx, client, namespace, true, replicas, true, allowedTopologies, true, true, "", "", storageclass, storageclass.Name) defer func() { - fss.DeleteAllStatefulSets(ctx, client, namespace) + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) }() ginkgo.By("Verify svc pv affinity, pvc annotation and pod node affinity") diff --git a/tests/e2e/vsan_stretched_cluster.go b/tests/e2e/vsan_stretched_cluster.go index 58d61096bb..a90e43f8c0 100644 --- a/tests/e2e/vsan_stretched_cluster.go +++ b/tests/e2e/vsan_stretched_cluster.go @@ -82,6 +82,7 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f sc *storagev1.StorageClass accessMode v1.PersistentVolumeAccessMode err error + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -91,6 +92,16 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f var err error ctx, cancel := context.WithCancel(context.Background()) defer cancel() + + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) readVcEsxIpsViaTestbedInfoJson(GetAndExpectStringEnvVar(envTestbedInfoJsonPath)) nimbusGeneratedK8sVmPwd = GetAndExpectStringEnvVar(nimbusK8sVmPwd) @@ -99,7 +110,11 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f isVsanHealthServiceStopped = false initialiseFdsVar(ctx) - err = waitForAllNodes2BeReady(ctx, client) + if !supervisorCluster { + err = waitForAllNodes2BeReady(ctx, client) + } else { + err = waitForAllNodes2BeReady(ctx, adminClient) + } framework.ExpectNoError(err, "cluster not completely healthy") // TODO: verify csi pods are up @@ -115,7 +130,11 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f } } - nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + if !supervisorCluster { + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, client) + } else { + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, adminClient) + } framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") @@ -184,21 +203,20 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f } else { accessMode = v1.ReadWriteOnce } - - if rwxAccessMode { - accessMode = v1.ReadWriteMany - } else { - accessMode = v1.ReadWriteOnce - } }) ginkgo.AfterEach(func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var err error if !guestCluster { 
wait4AllK8sNodesToBeUp(nodeList) } - err := waitForAllNodes2BeReady(ctx, client) + if !supervisorCluster { + err = waitForAllNodes2BeReady(ctx, client) + } else { + err = waitForAllNodes2BeReady(ctx, adminClient) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) scaleDownNDeleteStsDeploymentsInNamespace(ctx, client, namespace) @@ -207,15 +225,16 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - scs, err := client.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + if isVsanHealthServiceStopped { ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", vsanhealthServiceName)) startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) } if vanillaCluster { + scs, err := client.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } for _, sc := range scs.Items { err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) if !apierrors.IsNotFound(err) { @@ -346,10 +365,14 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f if vanillaCluster { wait4AllK8sNodesToBeUp(nodeList) } - if guestCluster || vanillaCluster { + if vanillaCluster { err = waitForAllNodes2BeReady(ctx, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + if guestCluster { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } time.Sleep(pollTimeout * 2) if guestCluster { @@ -404,7 +427,11 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f ginkgo.By("Wait for k8s cluster to be healthy") // wait for the VMs to move back - err = waitForAllNodes2BeReady(ctx, client) + if vanillaCluster { + err = waitForAllNodes2BeReady(ctx, client) + } else { + err = waitForAllNodes2BeReady(ctx, adminClient) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) if supervisorCluster { @@ -555,10 +582,15 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f if vanillaCluster { wait4AllK8sNodesToBeUp(nodeList) } - if vanillaCluster && guestCluster { + + if vanillaCluster { err = waitForAllNodes2BeReady(ctx, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + if guestCluster { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } time.Sleep(pollTimeout) if guestCluster { @@ -907,10 +939,15 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f if vanillaCluster { wait4AllK8sNodesToBeUp(nodeList) } - if vanillaCluster && guestCluster { + + if vanillaCluster { err = waitForAllNodes2BeReady(ctx, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + if guestCluster { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } time.Sleep(pollTimeout * 2) if guestCluster { @@ -964,8 +1001,13 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f fds.hostsDown = nil } - err = waitForAllNodes2BeReady(ctx, client) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = waitForAllNodes2BeReady(ctx, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }) @@ -1080,10 +1122,14 @@ var _ =
ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f if vanillaCluster { wait4AllK8sNodesToBeUp(nodeList) } - if guestCluster || vanillaCluster { + if vanillaCluster { err = waitForAllNodes2BeReady(ctx, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + if guestCluster { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } time.Sleep(pollTimeout * 2) ginkgo.By("Check if csi pods are running fine after site failure") @@ -1113,8 +1159,13 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f ginkgo.By("Bring up the primary site") siteNetworkFailure(true, true) - err = waitForAllNodes2BeReady(ctx, client) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = waitForAllNodes2BeReady(ctx, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } if supervisorCluster { @@ -1224,10 +1275,14 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f if vanillaCluster { wait4AllK8sNodesToBeUp(nodeList) } - if vanillaCluster || guestCluster { + if vanillaCluster { err = waitForAllNodes2BeReady(ctx, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + if guestCluster { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } time.Sleep(pollTimeout * 2) if guestCluster { @@ -1267,8 +1322,13 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f ginkgo.By("Wait for k8s cluster to be healthy") // wait for the VMs to move back - err = waitForAllNodes2BeReady(ctx, client) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = waitForAllNodes2BeReady(ctx, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }) @@ -1516,10 +1576,14 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f if vanillaCluster { wait4AllK8sNodesToBeUp(nodeList) } - if vanillaCluster || guestCluster { + if vanillaCluster { err = waitForAllNodes2BeReady(ctx, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + if guestCluster { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } time.Sleep(pollTimeout) ginkgo.By("Check if csi pods are running fine after site failure") @@ -2187,11 +2251,13 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f if vanillaCluster { wait4AllK8sNodesToBeUp(nodeList) - } - if vanillaCluster || guestCluster { err = waitForAllNodes2BeReady(ctx, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + if guestCluster { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } time.Sleep(pollTimeout * 2) if guestCluster { @@ -2237,8 +2303,13 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f fds.hostsDown = nil } - err = waitForAllNodes2BeReady(ctx, client) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = waitForAllNodes2BeReady(ctx, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } if supervisorCluster { ginkgo.By("Performing scaledown operation on statefulset when 
site is down") @@ -2377,11 +2448,13 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f ginkgo.By("Wait for k8s cluster to be healthy") if vanillaCluster { wait4AllK8sNodesToBeUp(nodeList) - } - if vanillaCluster || guestCluster { err = waitForAllNodes2BeReady(ctx, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + if guestCluster { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } time.Sleep(2 * pollTimeout) if guestCluster { ginkgo.By("Check if csi pods are running fine after site failurein supervisor") @@ -2452,8 +2525,13 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f ginkgo.By("Wait for k8s cluster to be healthy") // wait for the VMs to move back - err = waitForAllNodes2BeReady(ctx, client) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = waitForAllNodes2BeReady(ctx, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } ginkgo.By("Verifying volume lifecycle actions works fine") volumeLifecycleActions(ctx, client, namespace, sc, accessMode) @@ -2571,11 +2649,14 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f if vanillaCluster { wait4AllK8sNodesToBeUp(nodeList) - } - if vanillaCluster || guestCluster { + err = waitForAllNodes2BeReady(ctx, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + if guestCluster { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } ginkgo.By("Check if csi pods are running fine after witness failure") err = fpod.WaitForPodsRunningReady(ctx, client, csiNs, len(csipods.Items), @@ -2633,8 +2714,13 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f toggleWitnessPowerState(ctx, false) } - err = waitForAllNodes2BeReady(ctx, client) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = waitForAllNodes2BeReady(ctx, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } ginkgo.By("Verifying volume lifecycle actions works fine") volumeLifecycleActions(ctx, client, namespace, sc, "") @@ -3291,9 +3377,12 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f ginkgo.By("Wait for k8s cluster to be healthy") if vanillaCluster { wait4AllK8sNodesToBeUp(nodeList) + + err = waitForAllNodes2BeReady(ctx, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if vanillaCluster || guestCluster { - err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) + if guestCluster { + err = waitForAllNodes2BeReady(ctx, adminClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -3346,8 +3435,13 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f ginkgo.By("Wait for k8s cluster to be healthy") // wait for the VMs to move back - err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = waitForAllNodes2BeReady(ctx, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }) /* @@ -4396,9 +4490,12 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f 
ginkgo.By("Wait for k8s cluster to be healthy") if vanillaCluster { wait4AllK8sNodesToBeUp(nodeList) + + err = waitForAllNodes2BeReady(ctx, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if vanillaCluster || guestCluster { - err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) + if guestCluster { + err = waitForAllNodes2BeReady(ctx, adminClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } time.Sleep(pollTimeout * 2) @@ -4464,8 +4561,13 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f ginkgo.By("Wait for k8s cluster to be healthy") // wait for the VMs to move back - err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = waitForAllNodes2BeReady(ctx, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }) @@ -4934,11 +5036,14 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f if vanillaCluster { wait4AllK8sNodesToBeUp(nodeList) - } - if vanillaCluster || guestCluster { + err = waitForAllNodes2BeReady(ctx, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + if guestCluster { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } time.Sleep(pollTimeout * 2) if guestCluster { @@ -4997,8 +5102,13 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f fds.hostsDown = nil } - err = waitForAllNodes2BeReady(ctx, client) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = waitForAllNodes2BeReady(ctx, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + err = waitForAllNodes2BeReady(ctx, adminClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } if supervisorCluster { diff --git a/tests/e2e/vsan_stretched_cluster_utils.go b/tests/e2e/vsan_stretched_cluster_utils.go index 40adc6746c..0933f63d38 100644 --- a/tests/e2e/vsan_stretched_cluster_utils.go +++ b/tests/e2e/vsan_stretched_cluster_utils.go @@ -672,6 +672,7 @@ func createStsDeployment(ctx context.Context, client clientset.Interface, namesp sc *storagev1.StorageClass, isDeploymentRequired bool, modifyStsSpec bool, stsReplica int32, stsName string, depReplicaCount int32, accessMode v1.PersistentVolumeAccessMode) (*appsv1.StatefulSet, *appsv1.Deployment, []string) { + var err error var pvclaims []*v1.PersistentVolumeClaim if accessMode == "" { // If accessMode is not specified, set the default accessMode. 
@@ -745,7 +746,7 @@ func createStsDeployment(ctx context.Context, client clientset.Interface, namesp pvclaim, err := createPVC(ctx, client, namespace, nil, diskSize, sc, accessMode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) @@ -892,8 +893,9 @@ func volumeLifecycleActions(ctx context.Context, client clientset.Interface, nam // scaleDownStsAndVerifyPodMetadata scales down replica of a statefulset if required // and verifies count of sts replica and if its vSphere volumes match those in CNS cache func scaleDownStsAndVerifyPodMetadata(ctx context.Context, client clientset.Interface, - namespace string, statefulset *appsv1.StatefulSet, ssPodsBeforeScaleDown *v1.PodList, - replicas int32, isScaleDownRequired bool, verifyCnsVolumes bool) { + namespace string, statefulset *appsv1.StatefulSet, + ssPodsBeforeScaleDown *v1.PodList, replicas int32, isScaleDownRequired bool, + verifyCnsVolumes bool) { if isScaleDownRequired { framework.Logf("Scaling down statefulset: %v to number of Replica: %v", statefulset.Name, replicas) diff --git a/tests/e2e/vsphere_shared_datastore.go b/tests/e2e/vsphere_shared_datastore.go index c3fb4af65f..9eacd62981 100644 --- a/tests/e2e/vsphere_shared_datastore.go +++ b/tests/e2e/vsphere_shared_datastore.go @@ -61,17 +61,29 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] "+ sharedDatastoreURL string nonSharedDatastoreURL string storagePolicyName string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet namespace = f.Namespace.Name bootstrap() - scParameters = make(map[string]string) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) - storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) + var err error + var nodeList *v1.NodeList + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } + scParameters = make(map[string]string) + nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") } @@ -186,15 +198,17 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] "+ framework.Logf("storageclass name :%s", storageclass.GetName()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } 
}() defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if supervisorCluster { ginkgo.By("Delete Resource quota") - deleteResourceQuota(client, namespace) + deleteResourceQuota(adminClient, namespace) } }() @@ -202,7 +216,7 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] "+ var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := persistentvolumes[0] volHandle := pv.Spec.CSI.VolumeHandle @@ -256,7 +270,11 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] "+ // Delete SC with Immediate Binding Mode ginkgo.By("Delete SC created with Immediate Binding Mode") - err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + if vanillaCluster { + err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + } else { + err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Create SC with same name but with WaitForFirstConusmer Binding Mode @@ -270,7 +288,7 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] "+ /* Cannot Update SC binding mode from WaitForFirstConsumer to Immediate because it is an immutable field */ // If Supervisor Cluster, delete SC and recreate again with Immediate Binding Mode - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) _, err = createStorageClass(client, scParameters, nil, "", storagev1.VolumeBindingImmediate, false, storageclass.Name) diff --git a/tests/e2e/vsphere_volume_disksize.go b/tests/e2e/vsphere_volume_disksize.go index ec40541136..0ea2d6334c 100644 --- a/tests/e2e/vsphere_volume_disksize.go +++ b/tests/e2e/vsphere_volume_disksize.go @@ -52,11 +52,13 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] [csi-supervisor] datastoreURL string pvclaims []*v1.PersistentVolumeClaim storagePolicyName string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { bootstrap() client = f.ClientSet namespace = getNamespaceToRunTests(f) + adminClient, client = initializeClusterClientsByUserRoles(client) scParameters = make(map[string]string) datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) @@ -75,7 +77,7 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] [csi-supervisor] ginkgo.AfterEach(func() { if supervisorCluster { - deleteResourceQuota(client, namespace) + deleteResourceQuota(adminClient, namespace) dumpSvcNsEventsOnTestFailure(client, namespace) } if guestCluster { @@ -106,7 +108,7 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] [csi-supervisor] profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) scParameters[scParamStoragePolicyID] = profileID // create resource quota - 
createResourceQuota(client, namespace, rqLimit, storagePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", true, "", storagePolicyName) } else { @@ -119,7 +121,7 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] [csi-supervisor] defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -131,7 +133,7 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] [csi-supervisor] pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle if guestCluster { diff --git a/tests/e2e/vsphere_volume_expansion.go b/tests/e2e/vsphere_volume_expansion.go index b403b74c40..f6c753bce4 100644 --- a/tests/e2e/vsphere_volume_expansion.go +++ b/tests/e2e/vsphere_volume_expansion.go @@ -66,6 +66,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { isVsanHealthServiceStopped bool isSPSServiceStopped bool fsType string + adminClient clientset.Interface ) ginkgo.BeforeEach(func() { @@ -76,6 +77,16 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { defer cancel() var err error + runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER") + adminClient, client = initializeClusterClientsByUserRoles(client) + if guestCluster && runningAsDevopsUser == "yes" { + + saName := namespace + "sa" + client, err = createScopedClient(ctx, client, namespace, saName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } + isVsanHealthServiceStopped = false isSPSServiceStopped = false @@ -255,7 +266,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.It("[csi-block-vanilla] [csi-block-vanilla-parallelized] Verify volume expansion is not supported for PVC "+ "using vSAN-Default-Storage-Policy", ginkgo.Label(p0, block, vanilla, core, vc70), func() { - invokeTestForInvalidVolumeExpansionStaticProvision(f, client, namespace, storagePolicyName, profileID) + invokeTestForInvalidVolumeExpansionStaticProvision(f, adminClient, client, namespace, storagePolicyName, profileID) }) // Test to verify volume expansion can happen multiple times @@ -324,7 +335,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -447,7 +458,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - volHandle, pvclaim, pv, storageclass := createStaticPVC(ctx, f, client, + volHandle, pvclaim, pv, storageclass := createStaticPVC(ctx, f, adminClient, client, namespace, defaultDatastore, pandoraSyncWaitTime) defer func() { @@ -503,7 +514,7 @@ var 
_ = ginkgo.Describe("Volume Expansion Test", func() { defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -578,7 +589,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ctx, f, client, "", storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -618,7 +629,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Increase PVC size and verify Volume resize") - increaseOnlineVolumeMultipleTimes(ctx, f, client, namespace, volHandle, pvclaim, pod) + increaseOnlineVolumeMultipleTimes(ctx, f, adminClient, client, namespace, volHandle, pvclaim, pod) }) /* @@ -650,7 +661,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ctx, f, client, "", storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -783,7 +794,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ctx, f, client, "", storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -1040,7 +1051,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ctx, f, client, sharedVVOLdatastoreURL, storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -1130,7 +1141,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ctx, f, client, sharedNFSdatastoreURL, storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -1226,7 +1237,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ctx, f, client, sharedVMFSdatastoreURL, storagePolicyName, namespace, fsType) 
 		defer func() {
 			if !supervisorCluster {
-				err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+				err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
 				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			}
 			err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
@@ -1480,13 +1491,8 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() {
 		var pv *v1.PersistentVolume
 		storagePolicyName2 := GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores2)
-		profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName2)
-		framework.Logf("Profile ID : %s", profileID)
-		scParameters := make(map[string]string)
-		scParameters["storagePolicyID"] = profileID
-		ginkgo.By("get StorageClass to Create PVC")
-		storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName2, metav1.GetOptions{})
+		storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName2, metav1.GetOptions{})
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		restClientConfig := getRestConfigClient()
@@ -1494,7 +1500,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() {
 		pvclaim, err = createPVC(ctx, client, namespace, nil, "", storageclass, "")
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		_, err = fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout)
+		_, err = WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		pv = getPvFromClaim(client, namespace, pvclaim.Name)
@@ -1672,7 +1678,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() {
 			vmUUID, exists = annotations[vmUUIDLabel]
 			gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel))
 		} else {
-			vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName)
+			vmUUID = getNodeUUID(ctx, adminClient, pod.Spec.NodeName)
 		}
 		framework.Logf("VMUUID : %s", vmUUID)
 		isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID)
@@ -1892,7 +1898,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() {
 		defer func() {
 			if !supervisorCluster {
-				err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+				err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
 				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			}
 			err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
@@ -2020,7 +2026,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() {
 		defer func() {
 			if !supervisorCluster {
-				err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+				err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
 				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			}
 			if pvclaim != nil {
@@ -2167,11 +2173,11 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() {
 		}()
 		ginkgo.By("Bringing SVC CSI controller down...")
-		svcCsiDeployment := updateDeploymentReplica(client, 0, vSphereCSIControllerPodNamePrefix, csiSystemNamespace)
+		svcCsiDeployment := updateDeploymentReplica(adminClient, 0, vSphereCSIControllerPodNamePrefix, csiSystemNamespace)
 		defer func() {
 			if *svcCsiDeployment.Spec.Replicas == 0 {
 				ginkgo.By("Bringing SVC CSI controller up (cleanup)...")
-				updateDeploymentReplica(client, 3, vSphereCSIControllerPodNamePrefix, csiSystemNamespace)
+				updateDeploymentReplica(adminClient, 3, vSphereCSIControllerPodNamePrefix, csiSystemNamespace)
 			}
 		}()
@@ -2202,7 +2208,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() {
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		ginkgo.By("Bringing SVC CSI controller up...")
-		svcCsiDeployment = updateDeploymentReplica(client, 3, vSphereCSIControllerPodNamePrefix, csiSystemNamespace)
+		svcCsiDeployment = updateDeploymentReplica(adminClient, 3, vSphereCSIControllerPodNamePrefix, csiSystemNamespace)
 		ginkgo.By("Waiting for controller volume resize to finish PVC1 (online volume expansion)")
 		err = waitForPvResizeForGivenPvc(pvclaim, client, totalResizeWaitPeriod)
@@ -2267,11 +2273,11 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() {
 		}()
 		ginkgo.By("Bringing SVC CSI controller down...")
-		svcCsiDeployment := updateDeploymentReplica(client, 0, vSphereCSIControllerPodNamePrefix, csiSystemNamespace)
+		svcCsiDeployment := updateDeploymentReplica(adminClient, 0, vSphereCSIControllerPodNamePrefix, csiSystemNamespace)
 		defer func() {
 			if *svcCsiDeployment.Spec.Replicas == 0 {
 				ginkgo.By("Bringing SVC CSI controller up (cleanup)...")
-				updateDeploymentReplica(client, 1, vSphereCSIControllerPodNamePrefix, csiSystemNamespace)
+				updateDeploymentReplica(adminClient, 1, vSphereCSIControllerPodNamePrefix, csiSystemNamespace)
 			}
 		}()
@@ -2291,7 +2297,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() {
 		gomega.Expect(pvclaim).NotTo(gomega.BeNil())
 		ginkgo.By("Bringing SVC CSI controller up...")
-		svcCsiDeployment = updateDeploymentReplica(client, 1, vSphereCSIControllerPodNamePrefix, csiSystemNamespace)
+		svcCsiDeployment = updateDeploymentReplica(adminClient, 1, vSphereCSIControllerPodNamePrefix, csiSystemNamespace)
 		ginkgo.By("Create Pod using the above PVC")
 		pod, vmUUID = createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "")
@@ -2369,7 +2375,6 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() {
 		profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy)
 		scParameters[scParamStoragePolicyID] = profileID
 		// create resource quota
-		createResourceQuota(client, namespace, rqLimit, thickProvPolicy)
 		storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", true, "", thickProvPolicy)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -2445,7 +2450,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() {
 		defer func() {
 			if !supervisorCluster {
-				err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+				err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
 				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			}
 			if pvclaim != nil {
@@ -2539,7 +2544,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() {
 		zonalPolicy := GetAndExpectStringEnvVar(envZonalStoragePolicyName)
 		scParameters[svStorageClassName] = zonalPolicy
-		storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{})
+		storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{})
 		if !apierrors.IsNotFound(err) {
 			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		}
@@ -2548,7 +2553,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() {
 		ginkgo.By("Expect claim to provision volume successfully")
 		pvclaims = append(pvclaims, pvclaim)
-		pv, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
+		pv, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		volHandle := pv[0].Spec.CSI.VolumeHandle
 		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
@@ -2656,7 +2661,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() {
 // increaseOnlineVolumeMultipleTimes this method increases the same volume
 // multiple times and verifies PVC and Filesystem size.
-func increaseOnlineVolumeMultipleTimes(ctx context.Context, f *framework.Framework,
+func increaseOnlineVolumeMultipleTimes(ctx context.Context, f *framework.Framework, adminClient clientset.Interface,
 	client clientset.Interface, namespace string, volHandle string, pvclaim *v1.PersistentVolumeClaim, pod *v1.Pod) {
 	var originalSizeInMb, fsSize int64
@@ -2739,7 +2744,7 @@ func increaseOnlineVolumeMultipleTimes(ctx context.Context, f *framework.Framewo
 }
 // createStaticPVC this method creates static PVC
-func createStaticPVC(ctx context.Context, f *framework.Framework,
+func createStaticPVC(ctx context.Context, f *framework.Framework, adminClient clientset.Interface,
 	client clientset.Interface, namespace string, defaultDatastore *object.Datastore,
 	pandoraSyncWaitTime int) (string, *v1.PersistentVolumeClaim, *v1.PersistentVolume, *storagev1.StorageClass) {
 	curtime := time.Now().Unix()
@@ -2826,11 +2831,13 @@ func createSCwithVolumeExpansionTrueAndDynamicPVC(ctx context.Context, f *framew
 		scParameters[scParamFsType] = fstype
 	}
+	var err error
+	adminClient, _ := initializeClusterClientsByUserRoles(client)
+
 	// Create Storage class and PVC
 	ginkgo.By("Creating Storage Class and PVC with allowVolumeExpansion = true")
 	var storageclass *storagev1.StorageClass
 	var pvclaim *v1.PersistentVolumeClaim
-	var err error
 	var volHandle string
 	if vanillaCluster {
@@ -2851,10 +2858,8 @@ func createSCwithVolumeExpansionTrueAndDynamicPVC(ctx context.Context, f *framew
 	} else if supervisorCluster {
 		ginkgo.By("CNS_TEST: Running for WCP setup")
 		framework.Logf("storagePolicyName: %s", storagePolicyName)
-		profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName)
-		scParameters[scParamStoragePolicyID] = profileID
-		storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{})
+		storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{})
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		restConfig = getRestConfigClient()
@@ -2875,7 +2880,7 @@ func createSCwithVolumeExpansionTrueAndDynamicPVC(ctx context.Context, f *framew
 	var pvclaims []*v1.PersistentVolumeClaim
 	pvclaims = append(pvclaims, pvclaim)
 	ginkgo.By("Waiting for all claims to be in bound state")
-	persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
+	persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	pv := persistentvolumes[0]
 	if vcptocsi {
@@ -2992,6 +2997,7 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter
 	namespace string, expectedContent string, storagePolicyName string, profileID string) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
+	adminClient, _ := initializeClusterClientsByUserRoles(client)
 	ginkgo.By("Invoking Test for Volume Expansion")
 	scParameters := make(map[string]string)
 	if windowsEnv {
@@ -3012,7 +3018,7 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter
 			namespace, nil, scParameters, "", nil, "", true, "")
 	} else if supervisorCluster {
 		scParameters[scParamStoragePolicyID] = profileID
-		storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", true, "", storagePolicyName)
 	} else if vanillaCluster {
@@ -3030,7 +3035,7 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter
 	defer func() {
 		if !supervisorCluster {
-			err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+			err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
 			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		}
 	}()
@@ -3039,7 +3044,7 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		if supervisorCluster {
 			ginkgo.By("Delete Resource quota")
-			deleteResourceQuota(client, namespace)
+			deleteResourceQuota(adminClient, namespace)
 		}
 	}()
@@ -3048,7 +3053,7 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter
 	var volHandle, svcPVCName string
 	pvclaims = append(pvclaims, pvclaim)
 	ginkgo.By("Waiting for all claims to be in bound state")
-	persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
+	persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	pv := persistentvolumes[0]
 	if vcptocsi {
@@ -3163,7 +3168,7 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter
 	var exists bool
 	ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, pod.Spec.NodeName))
 	if vanillaCluster {
-		vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName)
+		vmUUID = getNodeUUID(ctx, adminClient, pod.Spec.NodeName)
 	} else if guestCluster {
 		vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -3237,6 +3242,7 @@ func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client c
 	namespace string, fstype string, expectedContent string, storagePolicyName string, profileID string) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
+	adminClient, _ := initializeClusterClientsByUserRoles(client)
 	ginkgo.By("Invoking Test for Volume Expansion 2")
 	scParameters := make(map[string]string)
 	scParameters[scParamFsType] = fstype
@@ -3267,7 +3273,7 @@ func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client c
 	}
 	defer func() {
-		err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+		err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	}()
 	defer func() {
@@ -3280,7 +3286,7 @@ func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client c
 	var volHandle, svcPVCName string
 	pvclaims = append(pvclaims, pvclaim)
 	ginkgo.By("Waiting for all claims to be in bound state")
-	persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
+	persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	pv := persistentvolumes[0]
@@ -3330,7 +3336,7 @@ func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client c
 	var vmUUID string
 	ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, pod.Spec.NodeName))
node: %s", volHandle, pod.Spec.NodeName)) - vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + vmUUID = getNodeUUID(ctx, adminClient, pod.Spec.NodeName) if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3421,7 +3427,7 @@ func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client c ginkgo.By(fmt.Sprintf("Verify volume after expansion: %s is attached to the node: %s", volHandle, pod.Spec.NodeName)) - vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + vmUUID = getNodeUUID(ctx, adminClient, pod.Spec.NodeName) if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3474,6 +3480,7 @@ func invokeTestForInvalidVolumeExpansion(f *framework.Framework, client clientse namespace string, storagePolicyName string, profileID string) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + adminClient, _ := initializeClusterClientsByUserRoles(client) scParameters := make(map[string]string) if windowsEnv { scParameters[scParamFsType] = ntfsFSType @@ -3504,8 +3511,10 @@ func invokeTestForInvalidVolumeExpansion(f *framework.Framework, client clientse } defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -3516,7 +3525,7 @@ func invokeTestForInvalidVolumeExpansion(f *framework.Framework, client clientse var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if vcptocsi { ginkgo.By("Verify annotations on PV/PVCs created after migration") @@ -3551,6 +3560,7 @@ func invokeTestForInvalidVolumeShrink(f *framework.Framework, client clientset.I namespace string, storagePolicyName string, profileID string) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + adminClient, _ := initializeClusterClientsByUserRoles(client) scParameters := make(map[string]string) if windowsEnv { scParameters[scParamFsType] = ntfsFSType @@ -3573,7 +3583,7 @@ func invokeTestForInvalidVolumeShrink(f *framework.Framework, client clientset.I } else if supervisorCluster { scParameters[scParamStoragePolicyID] = profileID // create resource quota - createResourceQuota(client, namespace, rqLimit, storagePolicyName) + createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName) storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", true, "", storagePolicyName) } else if vanillaCluster { @@ -3591,7 +3601,7 @@ func invokeTestForInvalidVolumeShrink(f *framework.Framework, client clientset.I gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if !supervisorCluster { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := adminClient.StorageV1().StorageClasses().Delete(ctx, 
 			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		}
 	}()
@@ -3608,7 +3618,7 @@ func invokeTestForInvalidVolumeShrink(f *framework.Framework, client clientset.I
 	var pvclaims []*v1.PersistentVolumeClaim
 	pvclaims = append(pvclaims, pvclaim)
 	ginkgo.By("Waiting for all claims to be in bound state")
-	persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
+	persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	pv := persistentvolumes[0]
 	if vcptocsi {
@@ -3652,7 +3662,7 @@ func invokeTestForInvalidVolumeShrink(f *framework.Framework, client clientset.I
 	gomega.Expect(err).To(gomega.HaveOccurred())
 }
-func invokeTestForInvalidVolumeExpansionStaticProvision(f *framework.Framework,
+func invokeTestForInvalidVolumeExpansionStaticProvision(f *framework.Framework, adminClient clientset.Interface,
 	client clientset.Interface, namespace string, storagePolicyName string, profileID string) {
 	ginkgo.By("Invoking Test for Invalid Volume Expansion for Static Provisioning")
@@ -3732,14 +3742,14 @@ func invokeTestForInvalidVolumeExpansionStaticProvision(f *framework.Framework,
 	ginkgo.By("Creating the PV")
 	pv = getPersistentVolumeSpec(fcdID, v1.PersistentVolumeReclaimDelete, staticPVLabels, ext4FSType)
-	pv, err = client.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
+	pv, err = adminClient.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
 	if err != nil {
 		return
 	}
 	defer func() {
 		ginkgo.By("Verify PV should be deleted automatically")
-		framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeout))
+		framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pv.Name, poll, pollTimeout))
 	}()
 	err = e2eVSphere.waitForCNSVolumeToBeCreated(pv.Spec.CSI.VolumeHandle)
@@ -3758,7 +3768,7 @@ func invokeTestForInvalidVolumeExpansionStaticProvision(f *framework.Framework,
 	}()
 	// Wait for PV and PVC to Bind
-	framework.ExpectNoError(fpv.WaitOnPVandPVC(ctx, client, f.Timeouts, namespace, pv, pvc))
+	framework.ExpectNoError(fpv.WaitOnPVandPVC(ctx, adminClient, f.Timeouts, namespace, pv, pvc))
 	// Set deleteFCDRequired to false.
 	// After PV, PVC is in the bind state, Deleting PVC should delete container volume.
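
Editor's note: the hunks in this file route cluster-scoped operations (StorageClasses, PersistentVolumes, resource quotas, the CSI controller Deployment) through an adminClient returned by initializeClusterClientsByUserRoles, keep namespaced test objects on the possibly devops-scoped client, and swap fpv.WaitForPVClaimBoundPhase for a package-local WaitForPVClaimBoundPhase. Neither helper is defined in this patch, so the Go sketch below is only an illustration of the intended split; the ADMIN_KUBECONFIG variable, the pass-through wrapper, and all comments are assumptions rather than code from this change.

// Assumed imports for this sketch:
//
//   import (
//       "context"
//       "os"
//       "time"
//
//       v1 "k8s.io/api/core/v1"
//       clientset "k8s.io/client-go/kubernetes"
//       "k8s.io/client-go/tools/clientcmd"
//       fpv "k8s.io/kubernetes/test/e2e/framework/pv"
//   )

// initializeClusterClientsByUserRoles (hypothetical sketch): returns an admin
// client for cluster-scoped objects plus the client to keep using for
// namespaced test objects. ADMIN_KUBECONFIG is an assumed environment variable,
// not something introduced by this patch.
func initializeClusterClientsByUserRoles(client clientset.Interface) (clientset.Interface, clientset.Interface) {
	if adminKubeconfig := os.Getenv("ADMIN_KUBECONFIG"); adminKubeconfig != "" {
		if config, err := clientcmd.BuildConfigFromFlags("", adminKubeconfig); err == nil {
			if adminClient, err := clientset.NewForConfig(config); err == nil {
				return adminClient, client
			}
		}
	}
	// Without a separate admin kubeconfig the same client plays both roles.
	return client, client
}

// WaitForPVClaimBoundPhase (hypothetical sketch): a package-local wrapper that
// keeps call sites unchanged while delegating to the upstream framework helper.
func WaitForPVClaimBoundPhase(ctx context.Context, client clientset.Interface,
	pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) ([]*v1.PersistentVolume, error) {
	return fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, timeout)
}

Keeping the call sites on a local wrapper, as this patch does, lets the suite later add devops-specific behaviour (for example, falling back to the admin client for PV reads) in one place instead of touching every test.
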
@@ -3786,6 +3796,7 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien
 	namespace string, expectedContent string, storagePolicyName string, profileID string) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
+	adminClient, _ := initializeClusterClientsByUserRoles(client)
 	ginkgo.By("Invoking Test to verify Multiple Volume Expansions on the same volume")
 	scParameters := make(map[string]string)
 	if windowsEnv {
@@ -3807,7 +3818,7 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien
 	} else if supervisorCluster {
 		scParameters[scParamStoragePolicyID] = profileID
 		// create resource quota
-		createResourceQuota(client, namespace, rqLimit, storagePolicyName)
+		createResourceQuota(adminClient, namespace, rqLimit, storagePolicyName)
 		storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", true, "", storagePolicyName)
 	} else if vanillaCluster {
@@ -3824,7 +3835,7 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	defer func() {
 		if !supervisorCluster {
-			err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+			err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
 			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		}
 	}()
@@ -3834,7 +3845,7 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien
 		if supervisorCluster {
 			ginkgo.By("Delete Resource quota")
-			deleteResourceQuota(client, namespace)
+			deleteResourceQuota(adminClient, namespace)
 		}
 	}()
@@ -3843,7 +3854,7 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien
 	var volHandle, svcPVCName string
 	pvclaims = append(pvclaims, pvclaim)
 	ginkgo.By("Waiting for all claims to be in bound state")
-	persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
+	persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	pv := persistentvolumes[0]
@@ -3963,7 +3974,7 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien
 	var exists bool
 	ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, pod.Spec.NodeName))
 	if vanillaCluster {
-		vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName)
+		vmUUID = getNodeUUID(ctx, adminClient, pod.Spec.NodeName)
 	} else if guestCluster {
 		vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -4037,6 +4048,7 @@ func invokeTestForUnsupportedFileVolumeExpansion(f *framework.Framework,
 	client clientset.Interface, namespace string, storagePolicyName string, profileID string) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
+	adminClient, _ := initializeClusterClientsByUserRoles(client)
 	ginkgo.By("Invoking Test for Unsupported File Volume Expansion")
 	scParameters := make(map[string]string)
 	scParameters[scParamFsType] = nfs4FSType
@@ -4051,7 +4063,7 @@ func invokeTestForUnsupportedFileVolumeExpansion(f *framework.Framework,
 		namespace, nil, scParameters, "", nil, "", true, v1.ReadWriteMany)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	defer func() {
-		err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+		err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	}()
 	defer func() {
@@ -4064,7 +4076,7 @@ func invokeTestForUnsupportedFileVolumeExpansion(f *framework.Framework,
 	pvclaims = append(pvclaims, pvclaim)
 	ginkgo.By("Waiting for all claims to be in bound state")
 	// persistentvolumes
-	_, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
+	_, err = WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	// Modify PVC spec to trigger volume expansion
@@ -4123,9 +4135,12 @@ func expandPVCSize(origPVC *v1.PersistentVolumeClaim, size resource.Quantity,
 func waitForPvResizeForGivenPvc(pvc *v1.PersistentVolumeClaim, c clientset.Interface, duration time.Duration) error {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
+	var adminClient clientset.Interface
+	var err error
+	adminClient, c = initializeClusterClientsByUserRoles(c)
 	pvName := pvc.Spec.VolumeName
 	pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
-	pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
+	pv, err := adminClient.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	return waitForPvResize(pv, c, pvcSize, duration)
 }
@@ -4135,9 +4150,10 @@ func waitForPvResize(pv *v1.PersistentVolume, c clientset.Interface,
 	size resource.Quantity, duration time.Duration) error {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
+	adminClient, c := initializeClusterClientsByUserRoles(c)
 	return wait.PollUntilContextTimeout(ctx, resizePollInterval, duration, true,
 		func(ctx context.Context) (bool, error) {
-			pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
+			pv, err := adminClient.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
 			if err != nil {
 				return false, fmt.Errorf("error fetching pv %q for resizing %v", pv.Name, err)
@@ -4240,6 +4256,7 @@ func sizeInMb(size resource.Quantity) int64 {
 func testCleanUpUtil(ctx context.Context, restClientConfig *restclient.Config, c clientset.Interface,
 	cnsRegistervolume *cnsregistervolumev1alpha1.CnsRegisterVolume, namespace string, pvcName string, pvName string) {
+	adminClient, _ := initializeClusterClientsByUserRoles(c)
 	if guestCluster {
 		c, _ = getSvcClientAndNamespace()
 	}
@@ -4247,7 +4264,7 @@ func testCleanUpUtil(ctx context.Context, restClientConfig *restclient.Config, c
 	framework.ExpectNoError(fpv.DeletePersistentVolumeClaim(ctx, c, pvcName, namespace), "Failed to delete PVC", pvcName)
 	ginkgo.By("Verify PV should be deleted automatically")
-	framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, c, pvName, poll, supervisorClusterOperationsTimeout))
+	framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, adminClient, pvName, poll, supervisorClusterOperationsTimeout))
 	if cnsRegistervolume != nil {
 		ginkgo.By("Verify CRD should be deleted automatically")
diff --git a/tests/e2e/vsphere_volume_fsgroup.go b/tests/e2e/vsphere_volume_fsgroup.go
index 5483528a1a..e40fa029ce 100644
--- a/tests/e2e/vsphere_volume_fsgroup.go
+++ b/tests/e2e/vsphere_volume_fsgroup.go
@@ -62,16 +62,27 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] [csi-guest] [csi
 		scParameters      map[string]string
 		storagePolicyName string
 		datastoreURL      string
+		adminClient       clientset.Interface
 	)
 	ginkgo.BeforeEach(func() {
 		client = f.ClientSet
 		namespace = getNamespaceToRunTests(f)
+		var err error
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+		runningAsDevopsUser := GetorIgnoreStringEnvVar("IS_DEVOPS_USER")
+		adminClient, client = initializeClusterClientsByUserRoles(client)
+		if guestCluster && runningAsDevopsUser == "yes" {
+
+			saName := namespace + "sa"
+			client, err = createScopedClient(ctx, client, namespace, saName)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		}
 		scParameters = make(map[string]string)
 		storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores)
 		datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL)
 		bootstrap()
-		ctx, cancel := context.WithCancel(context.Background())
-		defer cancel()
 		nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet)
 		framework.ExpectNoError(err, "Unable to find ready and schedulable Node")
 		if !(len(nodeList.Items) > 0) {
@@ -126,13 +137,13 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] [csi-guest] [csi
 		defer func() {
 			if !supervisorCluster {
-				err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0))
+				err = adminClient.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0))
 				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			}
 		}()
 		ginkgo.By("Expect claim to provision volume successfully")
-		persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client,
+		persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client,
 			[]*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume")
 		volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle