From 4b60c9abcd6bf0740151fe1ca1c3f111136f2de3 Mon Sep 17 00:00:00 2001 From: rajguptavm Date: Tue, 5 Aug 2025 17:51:27 +0530 Subject: [PATCH] multi-Svc test refactor --- hack/run-e2e-test.sh | 2 +- tests/e2e/constants/kubernetes.go | 1 + tests/e2e/e2e_common.go | 7 - tests/e2e/k8testutil/util.go | 185 ++++++++++++-- tests/e2e/{ => multiSvc}/multi_svc_test.go | 266 +++++++++++---------- tests/e2e/{ => multiSvc}/multi_svc_util.go | 99 ++++---- tests/e2e/multiSvc/suite_test.go | 51 ++++ tests/e2e/util.go | 45 ++-- tests/e2e/vcutil/vc_util.go | 31 +++ 9 files changed, 460 insertions(+), 227 deletions(-) rename tests/e2e/{ => multiSvc}/multi_svc_test.go (67%) rename tests/e2e/{ => multiSvc}/multi_svc_util.go (65%) create mode 100644 tests/e2e/multiSvc/suite_test.go diff --git a/hack/run-e2e-test.sh b/hack/run-e2e-test.sh index e33aa7cb34..fb88d173cb 100755 --- a/hack/run-e2e-test.sh +++ b/hack/run-e2e-test.sh @@ -73,7 +73,7 @@ then OPTS+=(-p) ginkgo -mod=mod "${OPTS[@]}" --focus="csi-block-vanilla-parallelized" tests/e2e else - ginkgo -mod=mod "${OPTS[@]}" --focus="$FOCUS" tests/e2e + ginkgo -mod=mod "${OPTS[@]}" --focus="$FOCUS" -r tests/e2e fi # Checking for test status diff --git a/tests/e2e/constants/kubernetes.go b/tests/e2e/constants/kubernetes.go index fc02424c26..899b14d72f 100644 --- a/tests/e2e/constants/kubernetes.go +++ b/tests/e2e/constants/kubernetes.go @@ -78,6 +78,7 @@ const ( Snapshotapigroup = "snapshot.storage.k8s.io" DefaultNginxStorageClassName = "nginx-sc" MountPath = "/usr/share/nginx/html" + ServiceName = "nginx" SpsServiceName = "sps" SnapshotterContainerName = "csi-snapshotter" SshdPort = "22" diff --git a/tests/e2e/e2e_common.go b/tests/e2e/e2e_common.go index 5c1248e57d..382c27a6d2 100644 --- a/tests/e2e/e2e_common.go +++ b/tests/e2e/e2e_common.go @@ -464,16 +464,9 @@ var ( // multiSvc env variables var ( - vcSessionWaitTime = 5 * time.Minute envStoragePolicyNameForSharedDsSvc1 = "STORAGE_POLICY_FOR_SHARED_DATASTORES_SVC1" envStoragePolicyNameForSharedDsSvc2 = "STORAGE_POLICY_FOR_SHARED_DATASTORES_SVC2" - envSupervisorClusterNamespace1 = "SVC_NAMESPACE1" - envNfsDatastoreName = "NFS_DATASTORE_NAME" - envNfsDatastoreIP = "NFS_DATASTORE_IP" pwdRotationTimeout = 10 * time.Minute - roleCnsDatastore = "CNS-SUPERVISOR-DATASTORE" - roleCnsSearchAndSpbm = "CNS-SUPERVISOR-SEARCH-AND-SPBM" - roleCnsHostConfigStorageAndCnsVm = "CNS-SUPERVISOR-HOST-CONFIG-STORAGE-AND-CNS-VM" ) // For rwx diff --git a/tests/e2e/k8testutil/util.go b/tests/e2e/k8testutil/util.go index e7b78d1c1a..40f83603d4 100644 --- a/tests/e2e/k8testutil/util.go +++ b/tests/e2e/k8testutil/util.go @@ -2556,12 +2556,12 @@ func DeleteService(ns string, c clientset.Interface, service *v1.Service) { // GetStatefulSetFromManifest creates a StatefulSet from the statefulset.yaml // file present in the manifest path. 
-func GetStatefulSetFromManifest(e2eTestConfig *config.E2eTestConfig, ns string) *appsv1.StatefulSet { +func GetStatefulSetFromManifest(e2eTestConfig *config.TestInputData, ns string) *appsv1.StatefulSet { ssManifestFilePath := filepath.Join(constants.ManifestPath, "statefulset.yaml") framework.Logf("Parsing statefulset from %v", ssManifestFilePath) ss, err := manifest.StatefulSetFromManifest(ssManifestFilePath, ns) framework.ExpectNoError(err) - if e2eTestConfig.TestInput.TestBedInfo.WindowsEnv { + if e2eTestConfig.TestBedInfo.WindowsEnv { ss.Spec.Template.Spec.Containers[0].Image = constants.WindowsImageOnMcr ss.Spec.Template.Spec.Containers[0].Command = []string{"Powershell.exe"} ss.Spec.Template.Spec.Containers[0].Args = []string{"-Command", constants.WindowsExecCmd} @@ -3053,6 +3053,7 @@ func TrimQuotes(str string) string { // Returns a de-serialized structured config data func ReadConfigFromSecretString(cfg string) (config.E2eTestConfig, error) { var config1 config.E2eTestConfig + var testInput config.TestInputData var netPerm config.NetPermissionConfig key, value := "", "" var permissions vsanfstypes.VsanFileShareAccessType @@ -3082,7 +3083,7 @@ func ReadConfigFromSecretString(cfg string) (config.E2eTestConfig, error) { value = words[1] // Remove trailing '"]' characters from value. value = strings.TrimSuffix(value, "]") - config1.TestInput.Global.VCenterHostname = TrimQuotes(value) + testInput.Global.VCenterHostname = TrimQuotes(value) fmt.Printf("Key: VirtualCenter, Value: %s\n", value) } continue @@ -3093,47 +3094,47 @@ func ReadConfigFromSecretString(cfg string) (config.E2eTestConfig, error) { switch key { case "insecure-flag": if strings.Contains(value, "true") { - config1.TestInput.Global.InsecureFlag = true + testInput.Global.InsecureFlag = true } else { - config1.TestInput.Global.InsecureFlag = false + testInput.Global.InsecureFlag = false } case "cluster-id": - config1.TestInput.Global.ClusterID = value + testInput.Global.ClusterID = value case "cluster-distribution": - config1.TestInput.Global.ClusterDistribution = value + testInput.Global.ClusterDistribution = value case "user": - config1.TestInput.Global.User = value + testInput.Global.User = value case "password": - config1.TestInput.Global.Password = value + testInput.Global.Password = value case "datacenters": - config1.TestInput.Global.Datacenters = value + testInput.Global.Datacenters = value case "port": - config1.TestInput.Global.VCenterPort = value + testInput.Global.VCenterPort = value case "cnsregistervolumes-cleanup-intervalinmin": - config1.TestInput.Global.CnsRegisterVolumesCleanupIntervalInMin, strconvErr = strconv.Atoi(value) + testInput.Global.CnsRegisterVolumesCleanupIntervalInMin, strconvErr = strconv.Atoi(value) gomega.Expect(strconvErr).NotTo(gomega.HaveOccurred()) case "topology-categories": - config1.TestInput.Labels.TopologyCategories = value + testInput.Labels.TopologyCategories = value case "global-max-snapshots-per-block-volume": - config1.TestInput.Snapshot.GlobalMaxSnapshotsPerBlockVolume, strconvErr = strconv.Atoi(value) + testInput.Snapshot.GlobalMaxSnapshotsPerBlockVolume, strconvErr = strconv.Atoi(value) gomega.Expect(strconvErr).NotTo(gomega.HaveOccurred()) case "csi-fetch-preferred-datastores-intervalinmin": - config1.TestInput.Global.CSIFetchPreferredDatastoresIntervalInMin, strconvErr = strconv.Atoi(value) + testInput.Global.CSIFetchPreferredDatastoresIntervalInMin, strconvErr = strconv.Atoi(value) gomega.Expect(strconvErr).NotTo(gomega.HaveOccurred()) case "query-limit": - 
config1.TestInput.Global.QueryLimit, strconvErr = strconv.Atoi(value) + testInput.Global.QueryLimit, strconvErr = strconv.Atoi(value) gomega.Expect(strconvErr).NotTo(gomega.HaveOccurred()) case "list-volume-threshold": - config1.TestInput.Global.ListVolumeThreshold, strconvErr = strconv.Atoi(value) + testInput.Global.ListVolumeThreshold, strconvErr = strconv.Atoi(value) gomega.Expect(strconvErr).NotTo(gomega.HaveOccurred()) case "ca-file": - config1.TestInput.Global.CaFile = value + testInput.Global.CaFile = value case "supervisor-id": - config1.TestInput.Global.SupervisorID = value + testInput.Global.SupervisorID = value case "targetvSANFileShareClusters": - config1.TestInput.Global.TargetVsanFileShareClusters = value + testInput.Global.TargetVsanFileShareClusters = value case "fileVolumeActivated": - config1.TestInput.Global.FileVolumeActivated, strconvErr = strconv.ParseBool(value) + testInput.Global.FileVolumeActivated, strconvErr = strconv.ParseBool(value) gomega.Expect(strconvErr).NotTo(gomega.HaveOccurred()) case "ips": netPerm.Ips = value @@ -3145,6 +3146,7 @@ func ReadConfigFromSecretString(cfg string) (config.E2eTestConfig, error) { return config1, fmt.Errorf("unknown key %s in the input string", key) } } + config1.TestInput = &testInput return config1, nil } @@ -5783,7 +5785,7 @@ func CreateParallelStatefulSets(client clientset.Interface, namespace string, gomega.Expect(err).NotTo(gomega.HaveOccurred()) } -func CreateParallelStatefulSetSpec(e2eTestConfig *config.E2eTestConfig, +func CreateParallelStatefulSetSpec(e2eTestConfig *config.TestInputData, namespace string, no_of_sts int, replicas int32) []*appsv1.StatefulSet { stss := []*appsv1.StatefulSet{} var statefulset *appsv1.StatefulSet @@ -7315,3 +7317,142 @@ func ListStoragePolicyUsages(ctx context.Context, c clientset.Interface, restCli fmt.Println("All required storage policy usages are available.") } + +// ExitHostMM exits a host from maintenance mode with a particular timeout +func ExitHostMM(ctx context.Context, host *object.HostSystem, timeout int32) { + task, err := host.ExitMaintenanceMode(ctx, timeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + _, err = task.WaitForResultEx(ctx, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("Host: %v exited from maintenance mode", host) +} + +/* +CreateStatefulSetAndVerifyPVAndPodNodeAffinty creates user specified statefulset and +further checks the node and volumes affinities +*/ +func CreateStatefulSetAndVerifyPVAndPodNodeAffinty(ctx context.Context, client clientset.Interface, + vs *config.E2eTestConfig, namespace string, parallelPodPolicy bool, replicas int32, nodeAffinityToSet bool, + allowedTopologies []v1.TopologySelectorLabelRequirement, + podAntiAffinityToSet bool, parallelStatefulSetCreation bool, modifyStsSpec bool, + accessMode v1.PersistentVolumeAccessMode, + sc *storagev1.StorageClass, verifyTopologyAffinity bool, storagePolicy string) (*v1.Service, + *appsv1.StatefulSet, error) { + + ginkgo.By("Create service") + service := CreateService(namespace, client) + + framework.Logf("Create StatefulSet") + statefulset := CreateCustomisedStatefulSets(ctx, client, vs.TestInput, namespace, parallelPodPolicy, + replicas, nodeAffinityToSet, allowedTopologies, podAntiAffinityToSet, modifyStsSpec, + "", accessMode, sc, storagePolicy) + + if verifyTopologyAffinity { + framework.Logf("Verify PV node affinity and that the PODS are running on appropriate node") + err := VerifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, vs, client, 
statefulset, + namespace, allowedTopologies, parallelStatefulSetCreation) + if err != nil { + return nil, nil, fmt.Errorf("error verifying PV node affinity and POD node details: %v", err) + } + } + + return service, statefulset, nil +} + +/* +createCustomisedStatefulSets util methods creates statefulset as per the user's +specific requirement and returns the customised statefulset +*/ +func CreateCustomisedStatefulSets(ctx context.Context, client clientset.Interface, vs *config.TestInputData, + namespace string, isParallelPodMgmtPolicy bool, replicas int32, nodeAffinityToSet bool, + allowedTopologies []v1.TopologySelectorLabelRequirement, + podAntiAffinityToSet bool, modifyStsSpec bool, stsName string, + accessMode v1.PersistentVolumeAccessMode, sc *storagev1.StorageClass, storagePolicy string) *appsv1.StatefulSet { + framework.Logf("Preparing StatefulSet Spec") + statefulset := GetStatefulSetFromManifest(vs, namespace) + + if accessMode == "" { + // If accessMode is not specified, set the default accessMode. + defaultAccessMode := v1.ReadWriteOnce + statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. + Spec.AccessModes[0] = defaultAccessMode + } else { + statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1].Spec.AccessModes[0] = + accessMode + } + + if modifyStsSpec { + if vs.TestBedInfo.MultipleSvc { + statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. + Spec.StorageClassName = &storagePolicy + } else { + statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. + Spec.StorageClassName = &sc.Name + } + + if stsName != "" { + statefulset.Name = stsName + statefulset.Spec.Template.Labels["app"] = statefulset.Name + statefulset.Spec.Selector.MatchLabels["app"] = statefulset.Name + } + + } + if nodeAffinityToSet { + nodeSelectorTerms := GetNodeSelectorTerms(allowedTopologies) + statefulset.Spec.Template.Spec.Affinity = new(v1.Affinity) + statefulset.Spec.Template.Spec.Affinity.NodeAffinity = new(v1.NodeAffinity) + statefulset.Spec.Template.Spec.Affinity.NodeAffinity. + RequiredDuringSchedulingIgnoredDuringExecution = new(v1.NodeSelector) + statefulset.Spec.Template.Spec.Affinity.NodeAffinity. 
+ RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = nodeSelectorTerms + } + if podAntiAffinityToSet { + statefulset.Spec.Template.Spec.Affinity = &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "key": "app", + }, + }, + TopologyKey: "topology.kubernetes.io/zone", + }, + }, + }, + } + + } + if isParallelPodMgmtPolicy { + statefulset.Spec.PodManagementPolicy = appsv1.ParallelPodManagement + } + statefulset.Spec.Replicas = &replicas + + framework.Logf("Creating statefulset") + CreateStatefulSet(namespace, statefulset, client) + + framework.Logf("Wait for StatefulSet pods to be in up and running state") + fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) + gomega.Expect(fss.CheckMount(ctx, client, statefulset, constants.MountPath)).NotTo(gomega.HaveOccurred()) + ssPodsBeforeScaleDown, err := fss.GetPodList(ctx, client, statefulset) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), + fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) + gomega.Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), + "Number of Pods in the statefulset should match with number of replicas") + + return statefulset +} + +// CreateStatefulSet creates a StatefulSet from the manifest at manifestPath in the given namespace. +func CreateStatefulSet(ns string, ss *appsv1.StatefulSet, c clientset.Interface) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + framework.Logf("Creating statefulset %v/%v with %d replicas and selector %+v", + ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector) + _, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) + framework.ExpectNoError(err) + fss.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) +} diff --git a/tests/e2e/multi_svc_test.go b/tests/e2e/multiSvc/multi_svc_test.go similarity index 67% rename from tests/e2e/multi_svc_test.go rename to tests/e2e/multiSvc/multi_svc_test.go index 4bd9e4c5ed..bc73d4a062 100644 --- a/tests/e2e/multi_svc_test.go +++ b/tests/e2e/multiSvc/multi_svc_test.go @@ -14,7 +14,7 @@ limitations under the License. 
*/ -package e2e +package multiSvc import ( "context" @@ -35,8 +35,16 @@ import ( fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" fss "k8s.io/kubernetes/test/e2e/framework/statefulset" + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/bootstrap" + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/config" + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/constants" + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/env" + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/k8testutil" + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/vcutil" ) +var e2eTestConfig *config.E2eTestConfig + var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { var ( namespaces []string @@ -71,31 +79,31 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() //getting list of clientset and namespace for both svc - clients, namespaces, errors = getMultiSvcClientAndNamespace() + clients, namespaces, errors = k8testutil.GetMultiSvcClientAndNamespace() if len(errors) > 0 { framework.Failf("Unable get client and namespace for supervisor clusters") } - bootstrap() + e2eTestConfig = bootstrap.Bootstrap() //getting total number of supervisor clusters and list of their compute cluster path var err error - numberOfSvc, computeClusterPaths, err = getSvcCountAndComputeClusterPath() + numberOfSvc, computeClusterPaths, err = GetSvcCountAndComputeClusterPath(e2eTestConfig) framework.ExpectNoError(err, "Unable to find any compute cluster") if !(numberOfSvc > 0) { framework.Failf("Unable to find any supervisor cluster") } // list of storage policy for both the supervisors - envStoragePolicyNameForSharedDatastoresList := []string{envStoragePolicyNameForSharedDsSvc1, - envStoragePolicyNameForSharedDsSvc2} + envStoragePolicyNameForSharedDatastoresList := []string{constants.EnvStoragePolicyNameForSharedDsSvc1, + constants.EnvStoragePolicyNameForSharedDsSvc2} for i := 0; i < numberOfSvc; i++ { - storagePolicyNames[i] = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastoresList[i]) - profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyNames[i]) + storagePolicyNames[i] = env.GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastoresList[i]) + profileID := vcutil.GetSpbmPolicyID(storagePolicyNames[i], e2eTestConfig) scParameters := make(map[string]string) scParametersList[i] = scParameters // adding profileID to storageClass param - StoragePolicyID - scParametersList[i][scParamStoragePolicyID] = profileID - scParametersList[i][scParamFsType] = ext4FSType + scParametersList[i][constants.ScParamStoragePolicyID] = profileID + scParametersList[i][constants.ScParamFsType] = constants.Ext4FSType } // Checking for any ready and schedulable node @@ -108,29 +116,29 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { } // Getting all env variables here - csiNamespace = GetAndExpectStringEnvVar(envCSINamespace) - dataCenter = GetAndExpectStringEnvVar(datacenter) - computeCluster = GetAndExpectStringEnvVar(envComputeClusterName) - datastoreName = GetAndExpectStringEnvVar(envNfsDatastoreName) - datastoreIP = GetAndExpectStringEnvVar(envNfsDatastoreIP) - kubeconfig = GetAndExpectStringEnvVar("KUBECONFIG") - kubeconfig1 = GetAndExpectStringEnvVar("KUBECONFIG1") + csiNamespace = env.GetAndExpectStringEnvVar(constants.EnvCSINamespace) + dataCenter = env.GetAndExpectStringEnvVar(constants.Datacenter) + computeCluster = env.GetAndExpectStringEnvVar(constants.EnvComputeClusterName) + datastoreName = 
env.GetAndExpectStringEnvVar(constants.EnvNfsDatastoreName) + datastoreIP = env.GetAndExpectStringEnvVar(constants.EnvNfsDatastoreIP) + kubeconfig = env.GetAndExpectStringEnvVar("KUBECONFIG") + kubeconfig1 = env.GetAndExpectStringEnvVar("KUBECONFIG1") ginkgo.By("Getting User and Supervisor-Id for both the supervisors") // Iterating through number of svc to read it's config secret to get supervisor id and service account user wcpServiceAccUsers = []string{} for i := 0; i < numberOfSvc; i++ { - vsphereCfg, err := getSvcConfigSecretData(clients[i], ctx, csiNamespace) + vsphereCfg, err := k8testutil.GetSvcConfigSecretData(clients[i], ctx, csiNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - supervisorIds = append(supervisorIds, vsphereCfg.Global.SupervisorID) + supervisorIds = append(supervisorIds, vsphereCfg.TestInput.Global.SupervisorID) // Getting service account user without domain name after spilittin it by @ - wcpServiceAccUsers = append(wcpServiceAccUsers, strings.Split(string(vsphereCfg.Global.User), "@")[0]) + wcpServiceAccUsers = append(wcpServiceAccUsers, strings.Split(string(vsphereCfg.TestInput.Global.User), "@")[0]) } sshClientConfig = &ssh.ClientConfig{ User: "root", Auth: []ssh.AuthMethod{ - ssh.Password(nimbusVcPwd), + ssh.Password(constants.NimbusVcPwd), }, HostKeyCallback: ssh.InsecureIgnoreHostKey(), } @@ -144,30 +152,30 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { // move back host to cluster if isHostRemoved { ginkgo.By("Moving host back to the cluster 1") - err := moveHostToCluster(computeClusterPaths[0], hostToBeRemoved) + err := MoveHostToCluster(e2eTestConfig, computeClusterPaths[0], hostToBeRemoved) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // exit host from MM if isHostInMM { ginkgo.By("Exit host from MM") - exitHostMM(ctx, hostsInCluster[0], timeout) + k8testutil.ExitHostMM(ctx, hostsInCluster[0], timeout) } // unmount ds from cluster1 if isDsMountedOnSvc1 { ginkgo.By("Remove mounted datastore from supervisor cluster 1") - err := UnMountNfsDatastoreFromClusterOrHost(datastoreName, computeClusterPaths[0]) + err := UnMountNfsDatastoreFromClusterOrHost(e2eTestConfig, datastoreName, computeClusterPaths[0]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // unmount ds from cluster2 if isDsMountedOnSvc2 { ginkgo.By("Remove mounted datastore from supervisor cluster 2") - err := UnMountNfsDatastoreFromClusterOrHost(datastoreName, computeClusterPaths[1]) + err := UnMountNfsDatastoreFromClusterOrHost(e2eTestConfig, datastoreName, computeClusterPaths[1]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // mount datastore back to host if isDsUnmountedFromHost { ginkgo.By("Mount back datastore to host in the supervisor cluster 1") - err := mountNfsDatastoreOnClusterOrHost(datastoreName, datastoreIP, hostPath) + err := MountNfsDatastoreOnClusterOrHost(e2eTestConfig, datastoreName, datastoreIP, hostPath) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -176,9 +184,9 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { // Changing kubeconfig for second supervisor var err error if i == 1 { - os.Setenv(kubeconfigEnvVar, kubeconfig1) + os.Setenv(constants.KubeconfigEnvVar, kubeconfig1) framework.TestContext.KubeConfig = kubeconfig1 - clients[i], err = createKubernetesClientFromConfig(kubeconfig1) + clients[i], err = k8testutil.CreateKubernetesClientFromConfig(kubeconfig1) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Error creating k8s client with %v: %v", kubeconfig1, err)) } @@ -186,13 +194,13 @@ var _ = 
ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespaces[i])) fss.DeleteAllStatefulSets(ctx, clients[i], namespaces[i]) ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespaces[i])) - err = clients[i].CoreV1().Services(namespaces[i]).Delete(ctx, servicename, *metav1.NewDeleteOptions(0)) + err = clients[i].CoreV1().Services(namespaces[i]).Delete(ctx, constants.ServiceName, *metav1.NewDeleteOptions(0)) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } ginkgo.By(fmt.Sprintf("Deleting all PVCs in namespace: %v", namespaces[i])) - pvcList := getAllPVCFromNamespace(clients[i], namespaces[i]) + pvcList := k8testutil.GetAllPVCFromNamespace(clients[i], namespaces[i]) for _, pvc := range pvcList.Items { framework.ExpectNoError(fpv.DeletePersistentVolumeClaim(ctx, clients[i], pvc.Name, namespaces[i]), "Failed to delete PVC", pvc.Name) @@ -208,15 +216,15 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { } } - setResourceQuota(clients[i], namespaces[i], defaultrqLimit) + k8testutil.SetResourceQuota(clients[i], namespaces[i], constants.DefaultrqLimit) /* resetting and performing cleanup of kubeconfig export variable so that for next testcase it should pickup the default svc kubeconfig set in the env. export variable */ if i == numberOfSvc-1 { - os.Setenv(kubeconfigEnvVar, kubeconfig) + os.Setenv(constants.KubeconfigEnvVar, kubeconfig) framework.TestContext.KubeConfig = kubeconfig // setting it to first/default kubeconfig - clients[0], err = createKubernetesClientFromConfig(kubeconfig) + clients[0], err = k8testutil.CreateKubernetesClientFromConfig(kubeconfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } @@ -234,7 +242,7 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { */ ginkgo.It("[csi-multi-svc] Workload creation on each of the clusters", - ginkgo.Label(p0, wcp, multiSvc, vc80), func() { + ginkgo.Label(constants.P0, constants.Wcp, constants.MultiSvc, constants.Vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -245,33 +253,33 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { namespace := namespaces[n] // Changing kubeconfig for second supervisor if n == 1 { - os.Setenv(kubeconfigEnvVar, kubeconfig1) + os.Setenv(constants.KubeconfigEnvVar, kubeconfig1) framework.TestContext.KubeConfig = kubeconfig1 - client, err = createKubernetesClientFromConfig(kubeconfig1) + client, err = k8testutil.CreateKubernetesClientFromConfig(kubeconfig1) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Error creating k8s client with %v: %v", kubeconfig1, err)) } ginkgo.By("Create StatefulSet with 3 replicas with parallel pod management") - service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, - true, 3, false, nil, - false, false, true, "", nil, false, storagePolicyNames[n]) + service, statefulset, err := k8testutil.CreateStatefulSetAndVerifyPVAndPodNodeAffinty(ctx, client, + e2eTestConfig, namespace, true, 3, false, nil, false, false, true, "", nil, false, + storagePolicyNames[n]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { fss.DeleteAllStatefulSets(ctx, client, namespace) - deleteService(namespace, client, service) + k8testutil.DeleteService(namespace, client, service) }() framework.Logf("Scale up sts replica count to 5") scaleUpReplicaCount = 5 - err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, scaleUpReplicaCount, - 
true) + err = k8testutil.ScaleUpStatefulSetPod(ctx, client, e2eTestConfig, statefulset, namespace, + scaleUpReplicaCount, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Scale down sts replica count to 1") scaleDownReplicaCount = 1 - err = scaleDownStatefulSetPod(ctx, client, statefulset, namespace, scaleDownReplicaCount, - true) + err = k8testutil.ScaleDownStatefulSetPod(ctx, e2eTestConfig, client, statefulset, namespace, + scaleDownReplicaCount, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -292,7 +300,7 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { */ ginkgo.It("[csi-multi-svc] Verify volume lifecycle ops post password rotation", - ginkgo.Label(p0, wcp, multiSvc, vc80), func() { + ginkgo.Label(constants.P0, constants.Wcp, constants.MultiSvc, constants.Vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -303,43 +311,45 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { client := clients[i] namespace := namespaces[i] if i == 1 { - os.Setenv(kubeconfigEnvVar, kubeconfig1) + os.Setenv(constants.KubeconfigEnvVar, kubeconfig1) framework.TestContext.KubeConfig = kubeconfig1 - client, err = createKubernetesClientFromConfig(kubeconfig1) + client, err = k8testutil.CreateKubernetesClientFromConfig(kubeconfig1) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Error creating k8s client with %v: %v", kubeconfig1, err)) } ginkgo.By("Create StatefulSet with 3 replicas with parallel pod management") - service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, - true, 3, false, nil, false, false, true, "", nil, false, storagePolicyNames[i]) + service, statefulset, err := k8testutil.CreateStatefulSetAndVerifyPVAndPodNodeAffinty(ctx, client, + e2eTestConfig, namespace, true, 3, false, nil, false, false, true, "", nil, false, + storagePolicyNames[i]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { fss.DeleteAllStatefulSets(ctx, client, namespace) - deleteService(namespace, client, service) + k8testutil.DeleteService(namespace, client, service) }() ginkgo.By("Perform password rotation on the supervisor") - passwordRotated, err := performPasswordRotationOnSupervisor(client, ctx, csiNamespace, vcAddress) + passwordRotated, err := k8testutil.PerformPasswordRotationOnSupervisor(client, ctx, csiNamespace, + e2eTestConfig.TestInput.TestBedInfo.VcAddress, e2eTestConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(passwordRotated).To(gomega.BeTrue()) // scaling up/down sts created before password rotation framework.Logf("Scale up sts replica count to 5") scaleUpReplicaCount = 5 - err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, scaleUpReplicaCount, - true) + err = k8testutil.ScaleUpStatefulSetPod(ctx, client, e2eTestConfig, statefulset, namespace, + scaleUpReplicaCount, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Scale down sts replica count to 1") scaleDownReplicaCount = 1 - err = scaleDownStatefulSetPod(ctx, client, statefulset, namespace, scaleDownReplicaCount, - true) + err = k8testutil.ScaleDownStatefulSetPod(ctx, e2eTestConfig, client, statefulset, namespace, + scaleDownReplicaCount, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Create a Pvc and attach a pod to it - _, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParametersList[i], "", nil, "", false, "", - storagePolicyNames[i]) + _, pvclaim, err = k8testutil.CreatePVCAndStorageClass(ctx, 
e2eTestConfig, client, namespace, nil, + scParametersList[i], "", nil, "", false, "", storagePolicyNames[i]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -351,13 +361,15 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { var volHandle string pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for pvc to be in bound state") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv := persistentvolumes[0] volHandle = pv.Spec.CSI.VolumeHandle // Create a Pod to use this PVC, and verify volume has been attached ginkgo.By("Creating pod to attach PV to the node") - pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand) + pod, err := k8testutil.CreatePod(ctx, e2eTestConfig, client, namespace, nil, + []*v1.PersistentVolumeClaim{pvclaim}, false, constants.ExecCommand) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { ginkgo.By("Deleting the pod") @@ -369,10 +381,10 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { var exists bool ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, pod.Spec.NodeName)) annotations := pod.Annotations - vmUUID, exists = annotations[vmUUIDLabel] - gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + vmUUID, exists = annotations[constants.VmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", constants.VmUUIDLabel)) framework.Logf("VMUUID : %s", vmUUID) - isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) + isDiskAttached, err := vcutil.IsVolumeAttachedToVM(client, e2eTestConfig, volHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") @@ -391,14 +403,15 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { */ ginkgo.It("[csi-multi-svc] Verify permissions of the service account", - ginkgo.Label(p0, wcp, multiSvc, vc80), func() { + ginkgo.Label(constants.P0, constants.Wcp, constants.MultiSvc, constants.Vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ginkgo.By("Verify permission on root folder for each of the wcp service account users") for _, user := range wcpServiceAccUsers { framework.Logf("Verifying permission on root folder for user : %s", user) - userPermission, err := verifyPermissionForWcpStorageUser(ctx, "RootFolder", "", user, roleCnsSearchAndSpbm) + userPermission, err := VerifyPermissionForWcpStorageUser(ctx, e2eTestConfig, "RootFolder", "", user, + constants.RoleCnsSearchAndSpbm) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(userPermission).To(gomega.BeTrue(), "user permission is not valid for root folder") } @@ -406,16 +419,16 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { ginkgo.By("Verify permission on clusters for each of the wcp service account users") // creating array of roles for both service account users as per the desired cluster permission roles := [][]string{ - {roleCnsHostConfigStorageAndCnsVm, roleCnsSearchAndSpbm}, - {roleCnsSearchAndSpbm, roleCnsHostConfigStorageAndCnsVm}, + 
{constants.RoleCnsHostConfigStorageAndCnsVm, ""}, + {"", constants.RoleCnsHostConfigStorageAndCnsVm}, } // iterating through compute cluster paths for i, path := range computeClusterPaths { role := roles[i%2] // Alternates between the two roles // iterating through service account users for j, user := range wcpServiceAccUsers { - framework.Logf("Verifying permission on root folder for user: %s", user) - userPermission, err := verifyPermissionForWcpStorageUser(ctx, "Cluster", path, user, role[j]) + framework.Logf("Verifying permission on root folder for user: %s", wcpServiceAccUsers[i]) + userPermission, err := VerifyPermissionForWcpStorageUser(ctx, e2eTestConfig, "Cluster", path, user, role[j]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(userPermission).To(gomega.BeTrue(), "user permission is not valid for compute-cluster path") } @@ -423,29 +436,29 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { ginkgo.By("Verify service account permission on each of the datastore") // Getting list of all datastores - dataCenters, err := e2eVSphere.getAllDatacenters(ctx) + dataCenters, err := vcutil.GetAllDatacenters(ctx, e2eTestConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - datastores, err := getDatastoreNamesFromDCs(sshClientConfig, dataCenters) + datastores, err := GetDatastoreNamesFromDCs(sshClientConfig, e2eTestConfig, dataCenters) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Iterating thorugh datastores to verify permission for svc account users for _, datastorePath := range datastores { // roleForUser array to store roles for both svc account user based on datastore var roleForUser []string - switch { - case strings.Contains(datastorePath, "local"): - roleForUser = []string{roleCnsSearchAndSpbm, roleCnsSearchAndSpbm} - case strings.Contains(datastorePath, "nfs"): - roleForUser = []string{roleCnsDatastore, roleCnsDatastore} + case strings.Contains(datastorePath, "vsanDatastore (2)"): + roleForUser = []string{constants.RoleCnsDatastore, ""} + case strings.Contains(datastorePath, "nfs") || strings.Contains(datastorePath, "sharedVmfs"): + roleForUser = []string{constants.RoleCnsDatastore, constants.RoleCnsDatastore} case strings.Contains(datastorePath, "vsanDatastore (1)"): - roleForUser = []string{roleCnsSearchAndSpbm, roleCnsDatastore} - default: // for "vsanDatastore" - roleForUser = []string{roleCnsDatastore, roleCnsSearchAndSpbm} + roleForUser = []string{"", constants.RoleCnsDatastore} + default: // for "local-0" + roleForUser = []string{"", ""} } // iterating through service account users for j, user := range wcpServiceAccUsers { - userPermission, err := verifyPermissionForWcpStorageUser(ctx, "Datastore", datastorePath, user, roleForUser[j]) + userPermission, err := VerifyPermissionForWcpStorageUser(ctx, e2eTestConfig, "Datastore", + datastorePath, user, roleForUser[j]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(userPermission).To(gomega.BeTrue(), "user permission is not valid for datastore") } @@ -470,7 +483,7 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { */ ginkgo.It("[csi-multi-svc] Verify that an alarm is raised when a shared datastore "+ - "becomes non-shared", ginkgo.Label(p0, wcp, multiSvc, vc80), func() { + "becomes non-shared", ginkgo.Label(constants.P0, constants.Wcp, constants.MultiSvc, constants.Vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -478,94 +491,94 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { var alarmPresent bool 
ginkgo.By("Adding a shared datastore to supervisor cluster 1") - err := mountNfsDatastoreOnClusterOrHost(datastoreName, datastoreIP, computeClusterPaths[0]) + err := MountNfsDatastoreOnClusterOrHost(e2eTestConfig, datastoreName, datastoreIP, computeClusterPaths[0]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) isDsMountedOnSvc1 = true defer func() { if isDsMountedOnSvc1 { ginkgo.By("Remove mounted datastore from supervisor cluster 1") - err = UnMountNfsDatastoreFromClusterOrHost(datastoreName, computeClusterPaths[0]) + err = UnMountNfsDatastoreFromClusterOrHost(e2eTestConfig, datastoreName, computeClusterPaths[0]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) isDsMountedOnSvc1 = false } }() - datastorePath := "/" + dataCenter + "/datastore/" + datastoreName + datastorePath := dataCenter + "/datastore/" + datastoreName ginkgo.By("Verify datastore has permission for storage service account from supervisor cluster 1") - userPermission, err := verifyPermissionForWcpStorageUser(ctx, "Cluster", datastorePath, - wcpServiceAccUsers[0], roleCnsDatastore) + userPermission, err := VerifyPermissionForWcpStorageUser(ctx, e2eTestConfig, "Cluster", datastorePath, + wcpServiceAccUsers[0], constants.RoleCnsDatastore) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(userPermission).To(gomega.BeTrue(), "user permission is not changed for datastore "+ "in supervisor cluster 1") ginkgo.By("Unmount datastore from one of the host from supervisor cluster 1") - clusterComputeResource, _, err := getClusterName(ctx, &e2eVSphere) + clusterComputeResource, _, err := vcutil.GetClusterName(ctx, e2eTestConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - hostsInCluster = getHostsByClusterName(ctx, clusterComputeResource, computeCluster) + hostsInCluster = vcutil.GetHostsByClusterName(ctx, clusterComputeResource, computeCluster) hostIP1, err := hostsInCluster[0].ManagementIPs(ctx) gomega.Expect(err).NotTo(gomega.HaveOccurred()) hostToBeRemoved = hostIP1[0].String() hostPath = computeClusterPaths[0] + "/" + hostToBeRemoved framework.Logf("Unmount datastore from host : %v", hostToBeRemoved) - err = UnMountNfsDatastoreFromClusterOrHost(datastoreName, hostToBeRemoved) + err = UnMountNfsDatastoreFromClusterOrHost(e2eTestConfig, datastoreName, hostToBeRemoved) gomega.Expect(err).NotTo(gomega.HaveOccurred()) isDsUnmountedFromHost = true defer func() { if isDsUnmountedFromHost { ginkgo.By("Remove mounted datastore from host in the supervisor cluster 1") - err = mountNfsDatastoreOnClusterOrHost(datastoreName, datastoreIP, hostPath) + err = MountNfsDatastoreOnClusterOrHost(e2eTestConfig, datastoreName, datastoreIP, hostPath) gomega.Expect(err).NotTo(gomega.HaveOccurred()) isDsUnmountedFromHost = false } }() ginkgo.By("Verify an alarm is raised for unmounted datastore and host in the supervisor cluster 1") - alarm := "Datastore not accessible to all hosts under the cluster" - alarmPresent, err = isAlarmPresentOnDatacenter(ctx, dataCenter, alarm, true) + alarm := "Datastore no longer accessible to all hosts in the cluster compute resource" + alarmPresent, err = IsAlarmPresentOnDatacenter(ctx, e2eTestConfig, dataCenter, alarm, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(alarmPresent).To(gomega.BeTrue()) ginkgo.By("Remove host from the cluster from STEP 3 and Verify alarm has disappeared") - enterHostIntoMM(ctx, hostsInCluster[0], ensureAccessibilityMModeType, timeout, false) + vcutil.EnterHostIntoMM(ctx, hostsInCluster[0], constants.EnsureAccessibilityMModeType, timeout, false) isHostInMM 
= true defer func() { if isHostInMM { - exitHostMM(ctx, hostsInCluster[0], timeout) + vcutil.ExitHostMM(ctx, hostsInCluster[0], timeout) isHostInMM = false } }() - isHostRemoved, err := removeEsxiHostFromCluster(dataCenter, computeCluster, hostToBeRemoved) + isHostRemoved, err := RemoveEsxiHostFromCluster(e2eTestConfig, dataCenter, computeCluster, hostToBeRemoved) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isHostRemoved).To(gomega.BeTrue(), "Host was not removed from cluster") defer func() { if isHostRemoved { ginkgo.By("Adding host back to the cluster 1") - err = moveHostToCluster(computeClusterPaths[0], hostToBeRemoved) + err = MoveHostToCluster(e2eTestConfig, computeClusterPaths[0], hostToBeRemoved) gomega.Expect(err).NotTo(gomega.HaveOccurred()) isHostRemoved = false } }() - alarmPresent, err = isAlarmPresentOnDatacenter(ctx, dataCenter, alarm, false) + alarmPresent, err = IsAlarmPresentOnDatacenter(ctx, e2eTestConfig, dataCenter, alarm, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(alarmPresent).To(gomega.BeTrue()) ginkgo.By("Add host back to cluster and Verify alarm has appeared again") - err = moveHostToCluster(computeClusterPaths[0], hostToBeRemoved) + err = MoveHostToCluster(e2eTestConfig, computeClusterPaths[0], hostToBeRemoved) gomega.Expect(err).NotTo(gomega.HaveOccurred()) isHostRemoved = false - exitHostMM(ctx, hostsInCluster[0], timeout) + vcutil.ExitHostMM(ctx, hostsInCluster[0], timeout) isHostInMM = false - alarmPresent, err = isAlarmPresentOnDatacenter(ctx, dataCenter, alarm, true) + alarmPresent, err = IsAlarmPresentOnDatacenter(ctx, e2eTestConfig, dataCenter, alarm, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(alarmPresent).To(gomega.BeTrue()) ginkgo.By("Mount datastore back to host in the cluster and Verify alarm has disappeared") - err = mountNfsDatastoreOnClusterOrHost(datastoreName, datastoreIP, hostPath) + err = MountNfsDatastoreOnClusterOrHost(e2eTestConfig, datastoreName, datastoreIP, hostPath) gomega.Expect(err).NotTo(gomega.HaveOccurred()) isDsUnmountedFromHost = false - alarmPresent, err = isAlarmPresentOnDatacenter(ctx, dataCenter, alarm, false) + alarmPresent, err = IsAlarmPresentOnDatacenter(ctx, e2eTestConfig, dataCenter, alarm, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(alarmPresent).To(gomega.BeTrue()) }) @@ -586,20 +599,20 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { */ ginkgo.It("[csi-multi-svc] Move a shared datastore from one SVC to another and check permission", - ginkgo.Label(p0, wcp, multiSvc, vc80), func() { + ginkgo.Label(constants.P0, constants.Wcp, constants.MultiSvc, constants.Vc80), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var roleForSvcUser []string ginkgo.By("Adding a shared datastore to supervisor cluster 1") - err := mountNfsDatastoreOnClusterOrHost(datastoreName, datastoreIP, computeClusterPaths[0]) + err := MountNfsDatastoreOnClusterOrHost(e2eTestConfig, datastoreName, datastoreIP, computeClusterPaths[0]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) isDsMountedOnSvc1 = true defer func() { if isDsMountedOnSvc1 { ginkgo.By("Remove mounted datastore from supervisor cluster 1") - err = UnMountNfsDatastoreFromClusterOrHost(datastoreName, computeClusterPaths[0]) + err = UnMountNfsDatastoreFromClusterOrHost(e2eTestConfig, datastoreName, computeClusterPaths[0]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) isDsMountedOnSvc1 = false } @@ -607,45 +620,48 @@ var _ = 
ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { datastorePath := "/" + dataCenter + "/datastore/" + datastoreName ginkgo.By("Verify datastore has permission for storage service account from supervisor cluster 1") - roleForSvcUser = []string{roleCnsDatastore, roleCnsSearchAndSpbm} + roleForSvcUser = []string{constants.RoleCnsDatastore, ""} // iterating through service account users for j, user := range wcpServiceAccUsers { - userPermission, err := verifyPermissionForWcpStorageUser(ctx, "Datastore", datastorePath, user, roleForSvcUser[j]) + userPermission, err := VerifyPermissionForWcpStorageUser(ctx, e2eTestConfig, "Datastore", + datastorePath, user, roleForSvcUser[j]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(userPermission).To(gomega.BeTrue()) } ginkgo.By("Adding same shared datastore to supervisor cluster 2") - err = mountNfsDatastoreOnClusterOrHost(datastoreName, datastoreIP, computeClusterPaths[1]) + err = MountNfsDatastoreOnClusterOrHost(e2eTestConfig, datastoreName, datastoreIP, computeClusterPaths[1]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) isDsMountedOnSvc2 = true defer func() { if isDsMountedOnSvc2 { ginkgo.By("Remove mounted datastore from supervisor cluster 2") - err = UnMountNfsDatastoreFromClusterOrHost(datastoreName, computeClusterPaths[1]) + err = UnMountNfsDatastoreFromClusterOrHost(e2eTestConfig, datastoreName, computeClusterPaths[1]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) isDsMountedOnSvc2 = false } }() ginkgo.By("Verify datastore has permission for storage service account from both the supervisor clusters") - roleForSvcUser = []string{roleCnsDatastore, roleCnsDatastore} + roleForSvcUser = []string{constants.RoleCnsDatastore, constants.RoleCnsDatastore} // iterating through service account users for j, user := range wcpServiceAccUsers { - userPermission, err := verifyPermissionForWcpStorageUser(ctx, "Datastore", datastorePath, user, roleForSvcUser[j]) + userPermission, err := VerifyPermissionForWcpStorageUser(ctx, e2eTestConfig, "Datastore", + datastorePath, user, roleForSvcUser[j]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(userPermission).To(gomega.BeTrue()) } ginkgo.By("Removing mounted shared datastore from supervisor cluster 1") - err = UnMountNfsDatastoreFromClusterOrHost(datastoreName, computeClusterPaths[1]) + err = UnMountNfsDatastoreFromClusterOrHost(e2eTestConfig, datastoreName, computeClusterPaths[1]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) isDsMountedOnSvc2 = false ginkgo.By("Verify datastore has permission for storage service account from the svc1 but not from svc2") - roleForSvcUser = []string{roleCnsDatastore, roleCnsSearchAndSpbm} + roleForSvcUser = []string{constants.RoleCnsDatastore, ""} // iterating through service account users for j, user := range wcpServiceAccUsers { - userPermission, err := verifyPermissionForWcpStorageUser(ctx, "Datastore", datastorePath, user, roleForSvcUser[j]) + userPermission, err := VerifyPermissionForWcpStorageUser(ctx, e2eTestConfig, "Datastore", + datastorePath, user, roleForSvcUser[j]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(userPermission).To(gomega.BeTrue()) } @@ -666,7 +682,7 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { */ ginkgo.It("[csi-multi-svc] Kill VC session from a service account and attempt CSI ops from "+ - "the corresponding SVC", ginkgo.Label(p0, wcp, multiSvc, vc80), func() { + "the corresponding SVC", ginkgo.Label(constants.P0, constants.Wcp, constants.MultiSvc, constants.Vc80), func() { ctx, 
cancel := context.WithCancel(context.Background()) defer cancel() @@ -675,19 +691,19 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { ginkgo.By("Getting VC session Id for both the supervisors") for i := 0; i < numberOfSvc; i++ { // getting session ids for each svc - sessionIDs, err := getVcSessionIDsforSupervisor(supervisorIds[i]) + sessionIDs, err := GetVcSessionIDsforSupervisor(e2eTestConfig, supervisorIds[i]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Storing it in oldSessionIds to validate later oldSessionIds = append(oldSessionIds, sessionIDs) } ginkgo.By("Kill VC session from svc1") - err = killVcSessionIDs(oldSessionIds[0]) + err = KillVcSessionIDs(e2eTestConfig, oldSessionIds[0]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify VC session Ids are changed for svc1 but not for svc2") for i := 0; i < numberOfSvc; i++ { - isSessionIdSame, err := waitAndCompareSessionIDList(ctx, supervisorIds[i], oldSessionIds[i]) + isSessionIdSame, err := WaitAndCompareSessionIDList(ctx, e2eTestConfig, supervisorIds[i], oldSessionIds[i]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // for first supervisor session id will change after killing vc session if i == 0 { @@ -704,32 +720,32 @@ var _ = ginkgo.Describe("[csi-multi-svc] Multi-SVC", func() { namespace := namespaces[n] // Changing kubeconfig for second supervisor if n == 1 { - os.Setenv(kubeconfigEnvVar, kubeconfig1) + os.Setenv(constants.KubeconfigEnvVar, kubeconfig1) framework.TestContext.KubeConfig = kubeconfig1 - client, err = createKubernetesClientFromConfig(kubeconfig1) + client, err = k8testutil.CreateKubernetesClientFromConfig(kubeconfig1) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Error creating k8s client with %v: %v", kubeconfig1, err)) } ginkgo.By("Create StatefulSet with 3 replica with parallel pod management") - service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, - true, 3, false, nil, - false, false, true, "", nil, false, storagePolicyNames[n]) + service, statefulset, err := k8testutil.CreateStatefulSetAndVerifyPVAndPodNodeAffinty(ctx, client, + e2eTestConfig, namespace, true, 3, false, nil, false, false, true, "", nil, false, + storagePolicyNames[n]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { fss.DeleteAllStatefulSets(ctx, client, namespace) - deleteService(namespace, client, service) + k8testutil.DeleteService(namespace, client, service) }() framework.Logf("Scale up sts replica count to 5") scaleUpReplicaCount = 5 - err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, scaleUpReplicaCount, + err = k8testutil.ScaleUpStatefulSetPod(ctx, client, e2eTestConfig, statefulset, namespace, scaleUpReplicaCount, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Scale down sts replica count to 1") scaleDownReplicaCount = 1 - err = scaleDownStatefulSetPod(ctx, client, statefulset, namespace, scaleDownReplicaCount, + err = k8testutil.ScaleDownStatefulSetPod(ctx, e2eTestConfig, client, statefulset, namespace, scaleDownReplicaCount, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } diff --git a/tests/e2e/multi_svc_util.go b/tests/e2e/multiSvc/multi_svc_util.go similarity index 65% rename from tests/e2e/multi_svc_util.go rename to tests/e2e/multiSvc/multi_svc_util.go index e876903f80..c9d0599515 100644 --- a/tests/e2e/multi_svc_util.go +++ b/tests/e2e/multiSvc/multi_svc_util.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the 
License. */ -package e2e +package multiSvc import ( "context" @@ -27,6 +27,10 @@ import ( "github.com/vmware/govmomi/object" "golang.org/x/crypto/ssh" + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/config" + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/constants" + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/k8testutil" + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/vcutil" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -34,9 +38,9 @@ import ( "k8s.io/kubernetes/test/e2e/framework" ) -// getSvcCountAndComputeClusterPath method is used to get number of clusters and it's computeCluster path -func getSvcCountAndComputeClusterPath() (int, []string, error) { - computeClusterPath := govcLoginCmd() + "govc namespace.cluster.ls" +// GetSvcCountAndComputeClusterPath method is used to get number of clusters and it's computeCluster path +func GetSvcCountAndComputeClusterPath(e2eTestConfig *config.E2eTestConfig) (int, []string, error) { + computeClusterPath := vcutil.GovcLoginCmd(e2eTestConfig) + "govc namespace.cluster.ls" framework.Logf("To get number of compute cluster and it's path - command : %s", computeClusterPath) result, err := exec.Command("/bin/sh", "-c", computeClusterPath).Output() if err != nil { @@ -49,9 +53,10 @@ func getSvcCountAndComputeClusterPath() (int, []string, error) { return len(listPath), listPath, nil } -// mountNfsDatastoreOnClusterOrHost method is used to add a new datastore to cluster -func mountNfsDatastoreOnClusterOrHost(datastoreName string, datastoreIP string, clusterPath string) error { - mountDsOnCluster := govcLoginCmd() + "govc datastore.create -type nfs -name " + datastoreName + +// MountNfsDatastoreOnClusterOrHost method is used to add a new datastore to cluster +func MountNfsDatastoreOnClusterOrHost(e2eTestConfig *config.E2eTestConfig, datastoreName string, datastoreIP string, + clusterPath string) error { + mountDsOnCluster := vcutil.GovcLoginCmd(e2eTestConfig) + "govc datastore.create -type nfs -name " + datastoreName + " -remote-host " + datastoreIP + " -remote-path /shared-nfs " + clusterPath framework.Logf("Mount datastore on cluster/host - command : %s", mountDsOnCluster) _, err := exec.Command("/bin/sh", "-c", mountDsOnCluster).Output() @@ -64,8 +69,10 @@ func mountNfsDatastoreOnClusterOrHost(datastoreName string, datastoreIP string, } // UnMountNfsDatastoreFromClusterOrHost method is used to remove a datastore from cluster -func UnMountNfsDatastoreFromClusterOrHost(datastoreName string, clusterOrHostPath string) error { - UnMountDsOnCluster := govcLoginCmd() + "govc datastore.remove -ds " + datastoreName + " " + clusterOrHostPath +func UnMountNfsDatastoreFromClusterOrHost(e2eTestConfig *config.E2eTestConfig, datastoreName string, + clusterOrHostPath string) error { + UnMountDsOnCluster := vcutil.GovcLoginCmd(e2eTestConfig) + "govc datastore.remove -ds " + datastoreName + " " + + clusterOrHostPath framework.Logf("Un-mount datastore on cluster/Host - command : %s", UnMountDsOnCluster) _, err := exec.Command("/bin/sh", "-c", UnMountDsOnCluster).Output() if err != nil { @@ -76,10 +83,10 @@ func UnMountNfsDatastoreFromClusterOrHost(datastoreName string, clusterOrHostPat return nil } -// verifyPermissionForWcpStorageUser method is used to check permission of service account user -func verifyPermissionForWcpStorageUser(ctx context.Context, entity string, path string, - serviceAccountUser string, role string) (bool, error) { - var permissionCheckSvcUser string = govcLoginCmd() +// VerifyPermissionForWcpStorageUser method 
is used to check permission of service account user +func VerifyPermissionForWcpStorageUser(ctx context.Context, e2eTestConfig *config.E2eTestConfig, entity string, + path string, serviceAccountUser string, role string) (bool, error) { + var permissionCheckSvcUser string = vcutil.GovcLoginCmd(e2eTestConfig) var grepServiceAccUser string = " | grep " + serviceAccountUser + " | awk '{print $1}' " switch entity { @@ -96,7 +103,7 @@ func verifyPermissionForWcpStorageUser(ctx context.Context, entity string, path framework.Logf("Check permission of service account user on %s - command : %s", entity, permissionCheckSvcUser) var permission string - waitErr := wait.PollUntilContextTimeout(ctx, healthStatusPollInterval, pollTimeoutSixMin, true, + waitErr := wait.PollUntilContextTimeout(ctx, constants.HealthStatusPollInterval, constants.PollTimeoutSixMin, true, func(ctx context.Context) (bool, error) { result, err := exec.Command("/bin/sh", "-c", permissionCheckSvcUser).Output() if err != nil { @@ -112,12 +119,12 @@ func verifyPermissionForWcpStorageUser(ctx context.Context, entity string, path return true, waitErr } -// isAlarmPresentOnDatacenter method is used to check if alarm is generated on a dataCenter -func isAlarmPresentOnDatacenter(ctx context.Context, datacenter string, alarmToVerify string, - alarmShouldExists bool) (bool, error) { - alarmCmd := govcLoginCmd() + "govc events /" + datacenter + " | grep 'warning'" +// IsAlarmPresentOnDatacenter method is used to check if alarm is generated on a dataCenter +func IsAlarmPresentOnDatacenter(ctx context.Context, e2eTestConfig *config.E2eTestConfig, datacenter string, + alarmToVerify string, alarmShouldExists bool) (bool, error) { + alarmCmd := vcutil.GovcLoginCmd(e2eTestConfig) + "govc alarms " + datacenter framework.Logf("Get alarms from datacenter - command : %s", alarmCmd) - waitErr := wait.PollUntilContextTimeout(ctx, healthStatusPollInterval, pollTimeoutSixMin, true, + waitErr := wait.PollUntilContextTimeout(ctx, constants.HealthStatusPollInterval, constants.PollTimeoutSixMin, true, func(ctx context.Context) (bool, error) { result, err := exec.Command("/bin/sh", "-c", alarmCmd).Output() if err != nil { @@ -159,9 +166,10 @@ func isAlarmPresentOnDatacenter(ctx context.Context, datacenter string, alarmToV return true, waitErr } -// removeEsxiHostFromCluster method is used to remove esxi hosts from cluster -func removeEsxiHostFromCluster(datacenter string, cluster string, hostIP string) (bool, error) { - removeHostFromCluster := govcLoginCmd() + "govc object.mv /" + datacenter + "/host/" + cluster + +// RemoveEsxiHostFromCluster method is used to remove esxi hosts from cluster +func RemoveEsxiHostFromCluster(e2eTestConfig *config.E2eTestConfig, datacenter string, cluster string, + hostIP string) (bool, error) { + removeHostFromCluster := vcutil.GovcLoginCmd(e2eTestConfig) + "govc object.mv /" + datacenter + "/host/" + cluster + "/" + hostIP + " /" + datacenter + "/host/" framework.Logf("Remove an ESXi host from cluster command : %s", removeHostFromCluster) _, err := exec.Command("/bin/sh", "-c", removeHostFromCluster).Output() @@ -173,9 +181,9 @@ func removeEsxiHostFromCluster(datacenter string, cluster string, hostIP string) return true, nil } -// moveHostToCluster method is used to move a host to cluster -func moveHostToCluster(clusterPath string, hostIP string) error { - moveHostToCluster := govcLoginCmd() + "govc cluster.mv -cluster " + clusterPath + " " + hostIP +// MoveHostToCluster method is used to move a host to cluster +func 
MoveHostToCluster(e2eTestConfig *config.E2eTestConfig, clusterPath string, hostIP string) error { + moveHostToCluster := vcutil.GovcLoginCmd(e2eTestConfig) + "govc cluster.mv -cluster " + clusterPath + " " + hostIP framework.Logf("Move a host to cluster command : %s", moveHostToCluster) _, err := exec.Command("/bin/sh", "-c", moveHostToCluster).Output() if err != nil { @@ -186,9 +194,9 @@ func moveHostToCluster(clusterPath string, hostIP string) error { return nil } -// getVcSessionIDsforSupervisor method returns list of vc session id for a supervisor id and returns error if any -func getVcSessionIDsforSupervisor(supervisorId string) ([]string, error) { - getSessionId := govcLoginCmd() + "govc session.ls | grep 'csi-useragent' | grep '" + +// GetVcSessionIDsforSupervisor method returns list of vc session id for a supervisor id and returns error if any +func GetVcSessionIDsforSupervisor(e2eTestConfig *config.E2eTestConfig, supervisorId string) ([]string, error) { + getSessionId := vcutil.GovcLoginCmd(e2eTestConfig) + "govc session.ls | grep 'csi-useragent' | grep '" + supervisorId + "' | awk '{print $1}'" framework.Logf("Get Vc session ID for cluster command : %s", getSessionId) result, err := exec.Command("/bin/sh", "-c", getSessionId).Output() @@ -200,9 +208,9 @@ func getVcSessionIDsforSupervisor(supervisorId string) ([]string, error) { return sessionIds, nil } -// killVcSessionIDs remove vc session id for a supervisor cluster -func killVcSessionIDs(sessionIds []string) error { - var govcLogin string = govcLoginCmd() +// KillVcSessionIDs remove vc session id for a supervisor cluster +func KillVcSessionIDs(e2eTestConfig *config.E2eTestConfig, sessionIds []string) error { + var govcLogin string = vcutil.GovcLoginCmd(e2eTestConfig) for _, sessionId := range sessionIds { removeSessionIdCmd := govcLogin + "govc session.rm " + sessionId framework.Logf("Remove vc session id from cluster - command : %s", removeSessionIdCmd) @@ -216,17 +224,17 @@ func killVcSessionIDs(sessionIds []string) error { return nil } -// getSvcConfigSecretData returns data obtained fom csi config secret +// GetSvcConfigSecretData returns data obtained fom csi config secret // in namespace where CSI is deployed -func getSvcConfigSecretData(client clientset.Interface, ctx context.Context, - csiNamespace string) (e2eTestConfig, error) { - var vsphereCfg e2eTestConfig - currentSecret, err := client.CoreV1().Secrets(csiNamespace).Get(ctx, configSecret, metav1.GetOptions{}) +func GetSvcConfigSecretData(client clientset.Interface, ctx context.Context, e2eTestConfig *config.E2eTestConfig, + csiNamespace string) (config.E2eTestConfig, error) { + var vsphereCfg config.E2eTestConfig + currentSecret, err := client.CoreV1().Secrets(csiNamespace).Get(ctx, constants.ConfigSecret, metav1.GetOptions{}) if err != nil { return vsphereCfg, err } - originalConf := string(currentSecret.Data[vsphereCloudProviderConfiguration]) - vsphereCfg, err = readConfigFromSecretString(originalConf) + originalConf := string(currentSecret.Data[constants.VsphereCloudProviderConfiguration]) + vsphereCfg, err = k8testutil.ReadConfigFromSecretString(originalConf) if err != nil { return vsphereCfg, err } @@ -234,13 +242,13 @@ func getSvcConfigSecretData(client clientset.Interface, ctx context.Context, return vsphereCfg, nil } -// getDatastoreNamesFromDCs method is used to fetch datastore details from a multi-supervisor testbed -func getDatastoreNamesFromDCs(sshClientConfig *ssh.ClientConfig, +// GetDatastoreNamesFromDCs method is used to fetch datastore details 
from a multi-supervisor testbed +func GetDatastoreNamesFromDCs(sshClientConfig *ssh.ClientConfig, e2eTestConfig *config.E2eTestConfig, dataCenters []*object.Datacenter) ([]string, error) { var dsList, datastores []string framework.Logf("Fetching datastore details") for i := 0; i < len(dataCenters); i++ { - ds := govcLoginCmd() + "govc ls " + dataCenters[i].InventoryPath + "/datastore" + ds := vcutil.GovcLoginCmd(e2eTestConfig) + "govc ls " + dataCenters[i].InventoryPath + "/datastore" dsResult, err := exec.Command("/bin/sh", "-c", ds).Output() if err != nil { framework.Logf("dsResult %s", string(dsResult)) @@ -259,16 +267,17 @@ func getDatastoreNamesFromDCs(sshClientConfig *ssh.ClientConfig, return datastores, nil } -// waitAndCompareSessionIDList method is used to match new session ids with old session ids -func waitAndCompareSessionIDList(ctx context.Context, supervisorId string, oldSessionIds []string) (bool, error) { +// WaitAndCompareSessionIDList method is used to match new session ids with old session ids +func WaitAndCompareSessionIDList(ctx context.Context, e2eTestConfig *config.E2eTestConfig, supervisorId string, + oldSessionIds []string) (bool, error) { var newSessionIds []string var err error var retryCount int framework.Logf("Old Session Ids : %s", oldSessionIds) // polling for current vc session ids for svc - waitErr := wait.PollUntilContextTimeout(ctx, poll*10, vcSessionWaitTime, true, + waitErr := wait.PollUntilContextTimeout(ctx, constants.Poll*10, constants.VcSessionWaitTime, true, func(ctx context.Context) (bool, error) { - newSessionIds, err = getVcSessionIDsforSupervisor(supervisorId) + newSessionIds, err = GetVcSessionIDsforSupervisor(e2eTestConfig, supervisorId) if err != nil { // If there was an error, return the error return false, err diff --git a/tests/e2e/multiSvc/suite_test.go b/tests/e2e/multiSvc/suite_test.go new file mode 100644 index 0000000000..64a200fc84 --- /dev/null +++ b/tests/e2e/multiSvc/suite_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package multiSvc + +import ( + "flag" + "os" + "strings" + "testing" + + ginkgo "github.com/onsi/ginkgo/v2" + gomega "github.com/onsi/gomega" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/config" + _ "k8s.io/kubernetes/test/e2e/framework/debug/init" + "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/constants" +) + +func init() { + framework.AfterReadingAllFlags(&framework.TestContext) +} + +func TestE2E(t *testing.T) { + handleFlags() + gomega.RegisterFailHandler(ginkgo.Fail) + _, reporterConfig := ginkgo.GinkgoConfiguration() + reporterConfig.JUnitReport = "junit.xml" + ginkgo.RunSpecs(t, "CNS-CSI-Driver-End-to-End-MultiSvc-Tests", reporterConfig) +} + +func handleFlags() { + config.CopyFlags(config.Flags, flag.CommandLine) + framework.RegisterCommonFlags(flag.CommandLine) + framework.TestContext.KubeConfig = os.Getenv(constants.KubeconfigEnvVar) + mydir, err := os.Getwd() + framework.ExpectNoError(err) + framework.TestContext.RepoRoot = strings.ReplaceAll(mydir, "/tests/e2e/multiSvc", "") + flag.Parse() +} diff --git a/tests/e2e/util.go b/tests/e2e/util.go index 6a6d0b8579..a49b56afe9 100644 --- a/tests/e2e/util.go +++ b/tests/e2e/util.go @@ -100,9 +100,7 @@ import ( var ( defaultCluster *object.ClusterComputeResource svcClient clientset.Interface - svcClient1 clientset.Interface svcNamespace string - svcNamespace1 string vsanHealthClient *VsanClient clusterComputeResource []*object.ClusterComputeResource defaultDatastore *object.Datastore @@ -1113,31 +1111,6 @@ func getSvcClientAndNamespace() (clientset.Interface, string) { return svcClient, svcNamespace } -// create kubernetes client for multi-supervisors clusters and returns it along with namespaces -func getMultiSvcClientAndNamespace() ([]clientset.Interface, []string, []error) { - var err error - if svcClient == nil { - if k8senv := GetAndExpectStringEnvVar("KUBECONFIG"); k8senv != "" { - svcClient, err = createKubernetesClientFromConfig(k8senv) - if err != nil { - return []clientset.Interface{}, []string{}, []error{err, nil} - } - } - svcNamespace = GetAndExpectStringEnvVar(envSupervisorClusterNamespace) - } - if svcClient1 == nil { - if k8senv := GetAndExpectStringEnvVar("KUBECONFIG1"); k8senv != "" { - svcClient1, err = createKubernetesClientFromConfig(k8senv) - if err != nil { - return []clientset.Interface{}, []string{}, []error{nil, err} - } - } - svcNamespace1 = GetAndExpectStringEnvVar(envSupervisorClusterNamespace1) - } - // returns list of clientset, namespace and error if any for both svc - return []clientset.Interface{svcClient, svcClient1}, []string{svcNamespace, svcNamespace1}, []error{} -} - // updateCSIDeploymentTemplateFullSyncInterval helps to update the // FULL_SYNC_INTERVAL_MINUTES in deployment template. For this to take effect, // we need to terminate the running csi controller pod. @@ -8371,3 +8344,21 @@ func initializeClusterClientsByUserRoles(client clientset.Interface) (clientset. 
 	}
 	return adminClient, client
 }
+
+// getSvcConfigSecretData returns data obtained from csi config secret
+// in namespace where CSI is deployed
+func getSvcConfigSecretData(client clientset.Interface, ctx context.Context,
+	csiNamespace string) (e2eTestConfig, error) {
+	var vsphereCfg e2eTestConfig
+	currentSecret, err := client.CoreV1().Secrets(csiNamespace).Get(ctx, configSecret, metav1.GetOptions{})
+	if err != nil {
+		return vsphereCfg, err
+	}
+	originalConf := string(currentSecret.Data[vsphereCloudProviderConfiguration])
+	vsphereCfg, err = readConfigFromSecretString(originalConf)
+	if err != nil {
+		return vsphereCfg, err
+	}
+
+	return vsphereCfg, nil
+}
diff --git a/tests/e2e/vcutil/vc_util.go b/tests/e2e/vcutil/vc_util.go
index df1093ba5c..2687221f1a 100644
--- a/tests/e2e/vcutil/vc_util.go
+++ b/tests/e2e/vcutil/vc_util.go
@@ -2500,3 +2500,34 @@ func CheckVcenterServicesRunning(
 	gomega.Expect(err).NotTo(gomega.HaveOccurred(),
 		"Got timed-out while waiting for all required VC services to be up and running")
 }
+
+// EnterHostIntoMM puts a host into maintenance mode with a particular timeout and
+// maintenance mode type
+func EnterHostIntoMM(ctx context.Context, host *object.HostSystem, mmModeType string,
+	timeout int32, evacuateVms bool) {
+	mmSpec := vim25types.VsanHostDecommissionMode{
+		ObjectAction: mmModeType,
+	}
+	hostMMSpec := vim25types.HostMaintenanceSpec{
+		VsanMode: &mmSpec,
+		Purpose: "",
+	}
+	task, err := host.EnterMaintenanceMode(ctx, timeout, false, &hostMMSpec)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	_, err = task.WaitForResultEx(ctx, nil)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	framework.Logf("Host: %v is in maintenance mode", host)
+}
+
+// ExitHostMM exits a host from maintenance mode with a particular timeout
+func ExitHostMM(ctx context.Context, host *object.HostSystem, timeout int32) {
+	task, err := host.ExitMaintenanceMode(ctx, timeout)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	_, err = task.WaitForResultEx(ctx, nil)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	framework.Logf("Host: %v exited from maintenance mode", host)
+}
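
Usage note (illustrative only, not part of the patch): a minimal sketch of how a multiSvc spec might drive the new vcutil maintenance-mode helpers added above. The decommission mode string "ensureObjectAccessibility", the 300-second timeout, and the cycleHostMaintenance/checks names are assumptions made for this example, not values taken from this change.

package multiSvc

import (
	"context"

	"github.com/vmware/govmomi/object"

	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/vcutil"
)

// cycleHostMaintenance is a hypothetical helper: it moves the given host into
// maintenance mode, runs the caller-supplied checks while the host is out of
// rotation, and always brings the host back out afterwards.
func cycleHostMaintenance(ctx context.Context, host *object.HostSystem, checks func()) {
	// "ensureObjectAccessibility" is one of the vSAN decommission modes; pick
	// whichever mode the scenario needs. 300 is an assumed timeout in seconds.
	vcutil.EnterHostIntoMM(ctx, host, "ensureObjectAccessibility", 300, false)
	defer vcutil.ExitHostMM(ctx, host, 300)

	// Exercise volume provisioning / accessibility while the host is in MM.
	checks()
}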