diff --git a/tests/e2e/blueprint_pvc.go b/tests/e2e/blueprint_pvc.go
new file mode 100644
index 0000000000..fefb3288fc
--- /dev/null
+++ b/tests/e2e/blueprint_pvc.go
@@ -0,0 +1,315 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strconv"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/kubernetes/test/e2e/framework"
+	fnodes "k8s.io/kubernetes/test/e2e/framework/node"
+	fpv "k8s.io/kubernetes/test/e2e/framework/pv"
+	admissionapi "k8s.io/pod-security-admission/api"
+)
+
+var _ bool = ginkgo.Describe("[domain-isolation-vmsvc] Domain-Isolation-VmServiceVm", func() {
+
+	f := framework.NewDefaultFramework("vmsvc")
+	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
+	f.SkipNamespaceCreation = true // tests will create their own namespaces
+	var (
+		client                  clientset.Interface
+		namespace               string
+		vcRestSessionId         string
+		allowedTopologies       []v1.TopologySelectorLabelRequirement
+		topkeyStartIndex        int
+		topologyCategories      []string
+		labelsMap               map[string]string
+		labels_ns               map[string]string
+		zone2                   string
+		vmClass                 string
+		contentLibId            string
+		datastoreURL            string
+		//vmopC                 ctlrclient.Client
+		//cnsopC                ctlrclient.Client
+		nodeList                *v1.NodeList
+		topologyAffinityDetails map[string][]string
+		storagePolicyNameZone2  string
+		storageProfileIdZone2   string
+		restConfig              *rest.Config
+		err                     error
+		pandoraSyncWaitTime     int
+	)
+
+	ginkgo.BeforeEach(func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		// making vc connection
+		client = f.ClientSet
+		bootstrap()
+
+		// reading vc session id
+		if vcRestSessionId == "" {
+			vcRestSessionId = createVcSession4RestApis(ctx)
+		}
+
+		// fetching nodes list
+		nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet)
+		framework.ExpectNoError(err, "Unable to find ready and schedulable Node")
+		if !(len(nodeList.Items) > 0) {
+			framework.Failf("Unable to find ready and schedulable Node")
+		}
+
+		// reading topology map for management and workload domain
+		topologyMap := GetAndExpectStringEnvVar(envTopologyMap)
+		allowedTopologies = createAllowedTopolgies(topologyMap)
+
+		// Set namespace labels to allow privileged pod creation
+		labels_ns = map[string]string{}
+		labels_ns[admissionapi.EnforceLevelLabel] = string(admissionapi.LevelPrivileged)
+		labels_ns["e2e-framework"] = f.BaseName
+
+		// setting labels map on pvc
+		labelsMap = make(map[string]string)
+		labelsMap["app"] = "test"
+
+		// fetching zones
+		topologyAffinityDetails, topologyCategories = createTopologyMapLevel5(topologyMap)
+		zone2 = topologyAffinityDetails[topologyCategories[0]][1]
+
+		// get or set vm class required for VM creation
+		vmClass = os.Getenv(envVMClass)
+		if vmClass == "" {
+			vmClass = vmClassBestEffortSmall
+		}
+
+		// fetch shared vsphere datastore url
+		datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL)
+		dsRef := getDsMoRefFromURL(ctx, datastoreURL)
+		framework.Logf("dsmoId: %v", dsRef.Value)
+
+		// reading zonal storage policy of zone-2 workload domain
+		storagePolicyNameZone2 = GetAndExpectStringEnvVar(envZonal2StoragePolicyName)
+		storageProfileIdZone2 = e2eVSphere.GetSpbmPolicyID(storagePolicyNameZone2)
+
+		// read or create content library if it is empty
+		if contentLibId == "" {
+			contentLibId, err = createAndOrGetContentlibId4Url(vcRestSessionId, GetAndExpectStringEnvVar(envContentLibraryUrl),
+				dsRef.Value, &e2eVSphere)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}
+
+		/* Sets up a Kubernetes client with a custom scheme, adds the vmopv1 API types to the scheme,
+		and ensures that the client is properly initialized without errors */
+		// vmopScheme := runtime.NewScheme()
+		// gomega.Expect(vmopv1.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		// vmopC, err = ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: vmopScheme})
+		// gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// cnsOpScheme := runtime.NewScheme()
+		// gomega.Expect(cnsop.AddToScheme(cnsOpScheme)).Should(gomega.Succeed())
+		// cnsopC, err = ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: cnsOpScheme})
+		// gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// Get snapshot client using the rest config
+		restConfig = getRestConfigClient()
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		if os.Getenv(envPandoraSyncWaitTime) != "" {
+			pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime))
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		} else {
+			pandoraSyncWaitTime = defaultPandoraSyncWaitTime
+		}
+
+	})
+
+	ginkgo.AfterEach(func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+		framework.Logf("Collecting supervisor PVC events before performing PV/PVC cleanup")
+		dumpSvcNsEventsOnTestFailure(client, namespace)
+		eventList, err := client.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		for _, item := range eventList.Items {
+			framework.Logf("%q", item.Message)
+		}
+	})
+
+	/*
+		Basic test
+		Steps:
+		zonal NS → zonal DS → zonal FCD
+		az1 cluster → az1 policy → FCD created with Az1 datastore url
+
+		Without setting any annotation on BluePrint PVC and using az1 storage policy
+
+		1. Create namespace "test-ns" and assign az1 policy to NS with sufficient storage quota.
+		   Note that only the Az1 zone and the Az1-compatible storage profile are added to the test-ns NS.
+		2. Create a dynamic PVC using the above storage profile in the above ns using "immediate" binding mode
+		   and "DELETE" policy.
+		3. Verify the PVC annotation and PV affinity. It should list Az1 details only.
+		4. Wait for the PVC to reach the Bound state.
+		5. Create a VM Service VM and attach it to the above created dynamic PVC.
+		6. Verify the VM got created only on an Az1 node.
+		7. Wait for the VM to get an IP and to be in a powered-on state.
+		8. Create a blueprint PVC using "DataSource Ref", specify CRD annotation "apiGroup:
+		   vmoperator.vmware.com" and it should point to the above created VM name.
+		9. The PVC created from "DataSource Ref" will be stuck in a Pending state.
+		10. Create an FCD using an API call with the zonal datastore url and zonal storage policy
+		    pointing to Az1 only.
+		11. Create a CNSRegisterVolume by passing the above created "FCD id" and the "PVC name" created in step #8.
+		12. Verify the static volume is created successfully.
+		13. Verify newly created static PVC/PV state.
+		14. Verify static PV affinity. It should display only Az1 affinity.
+		15. Verify the static PVC. No annotation should come on the pvc as it was not set initially.
+		16. Perform cleanup.
+	*/
+
+	ginkgo.It("TC1", func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+		curtime := time.Now().Unix()
+		curtimestring := strconv.FormatInt(curtime, 10)
+
+		/*
+			EX - zone -> az1, az2, az3, az4
+			so topValStartIndex=1 and topValEndIndex=2 will fetch the value at index 1 (az2) from the topology map string
+		*/
+		topValStartIndex := 1
+		topValEndIndex := 2
+
+		ginkgo.By("Create wcp namespace with az2 zone and az2 zonal storage policy")
+		allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex,
+			topValEndIndex)
+		allowedTopologiesMap := convertToTopologyMap(allowedTopologies)
+		namespace, statuscode, err := createtWcpNsWithZonesAndPolicies(vcRestSessionId,
+			[]string{storageProfileIdZone2}, getSvcId(vcRestSessionId, &e2eVSphere),
+			[]string{zone2}, vmClass, contentLibId)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		gomega.Expect(statuscode).To(gomega.Equal(status_code_success))
+		defer func() {
+			delTestWcpNs(vcRestSessionId, namespace)
+			gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed())
+		}()
+
+		ginkgo.By("Read az2 storage policy which is tagged to wcp namespace")
+		storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyNameZone2, metav1.GetOptions{})
+		if !apierrors.IsNotFound(err) {
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}
+
+		ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow namespace to get created and "+
+			"storage policy and zonal tag to get added to it",
+			oneMinuteWaitTimeInSeconds))
+		time.Sleep(time.Duration(oneMinuteWaitTimeInSeconds) * time.Second)
+
+		ginkgo.By("Create a PVC using az2 storage policy")
+		pvc, err := createPVC(ctx, client, namespace, labelsMap, "", storageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Wait for PVC to be in bound state")
+		pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		pv := pvs[0]
+		volHandle := pv.Spec.CSI.VolumeHandle
+		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
+
+		ginkgo.By("Refresh PVC state")
+		pvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Verify volume affinity annotation state")
+		err = verifyVolumeAnnotationAffinity(pvc, pv, allowedTopologiesMap, topologyCategories)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Wait for VM images to get listed under namespace and create VM")
+		vmImageName := GetAndExpectStringEnvVar(envVmsvcVmImageName)
+		err = pollWaitForVMImageToSync(ctx, namespace, vmImageName, poll, supervisorClusterOperationsTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// ginkgo.By("Create VM and attach the above created dynamic PVC to it")
+		// _, vm1, _, err := createVmServiceVm(ctx, client, vmopC, cnsopC, namespace,
+		// 	[]*v1.PersistentVolumeClaim{pvc}, vmClass, storageclass.Name, true)
+		// gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// ginkgo.By("Verify attached volumes are accessible and validate data integrity")
+		// err = verifyVolumeAccessibilityAndDataIntegrityOnVM(ctx, vm1, vmopC, namespace)
+		// gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// ginkgo.By("Verify vm affinity annotation state")
+		// err = verifyVmServiceVmAnnotationAffinity(vm1, allowedTopologiesMap, nodeList)
+		// gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Create blueprint pvc using VM DataSourceRef and using az2 storage policy")
+		blueprintPvc, err := createBluePrintPVC(ctx, client, namespace, storageclass, v1.ReadWriteOnce,
+			"testvm", diskSize)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Create FCD Disk")
+		zone2datastoreURL := GetAndExpectStringEnvVar(envDatastoreUrlZone2)
+		defaultDatastore := readDatastoreUrlForFcdCreation(ctx, zone2datastoreURL)
+		fcdID, err := e2eVSphere.createFCDwithValidProfileID(ctx,
+			"staticfcd"+curtimestring, storageProfileIdZone2, diskSizeInMb, defaultDatastore.Reference())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora",
+			pandoraSyncWaitTime, fcdID))
+		time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second)
+
+		ginkgo.By("Create CNS register volume with above created FCD ")
+		cnsRegisterVolume := getCNSRegisterVolumeSpecForBluePrintPvc(namespace, fcdID,
+			blueprintPvc.Name, v1.ReadWriteOnce)
+
+		err = createCNSRegisterVolume(ctx, restConfig, cnsRegisterVolume)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		framework.ExpectNoError(waitForCNSRegisterVolumeToGetCreated(ctx, restConfig,
+			namespace, cnsRegisterVolume, poll, supervisorClusterOperationsTimeout))
+		cnsRegisterVolumeName := cnsRegisterVolume.GetName()
+		framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName)
+
+		ginkgo.By("Verify blueprint pvc and statically generated pv status")
+		blueprintPvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, blueprintPvc.Name, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		staticPv := getPvFromClaim(client, namespace, blueprintPvc.Name)
+		verifyBidirectionalReferenceOfPVandPVC(ctx, client, blueprintPvc, staticPv, fcdID)
+
+		// ginkgo.By("Create VM and attach static blueprint pvc to it")
+		// _, vm2, _, err := createVmServiceVm(ctx, client, vmopC, cnsopC, namespace,
+		// 	[]*v1.PersistentVolumeClaim{pvc}, vmClass, storageclass.Name, true)
+		// gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// ginkgo.By("Verify attached volumes are accessible and validate data integrity")
+		// err = verifyVolumeAccessibilityAndDataIntegrityOnVM(ctx, vm2, vmopC, namespace)
+		// gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// ginkgo.By("Verify vm affinity annotation state")
+		// err = verifyVmServiceVmAnnotationAffinity(vm2, allowedTopologiesMap, nodeList)
+		// gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	})
+})
diff --git a/tests/e2e/blueprint_pvc_utils.go b/tests/e2e/blueprint_pvc_utils.go
new file mode 100644
index 0000000000..f7a25bb5e2
--- /dev/null
+++ b/tests/e2e/blueprint_pvc_utils.go
@@ -0,0 +1,165 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	"github.com/vmware/govmomi/find"
+	"github.com/vmware/govmomi/object"
+	v1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
+	fpv "k8s.io/kubernetes/test/e2e/framework/pv"
+	"k8s.io/utils/pointer"
+	cnsregistervolumev1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsregistervolume/v1alpha1"
+)
+
+func createBluePrintPvcSpec(namespace string, scName string,
+	vmName string, diskSize string, accessMode v1.PersistentVolumeAccessMode) *v1.PersistentVolumeClaim {
+	if accessMode == "" {
+		accessMode = v1.ReadWriteOnce
+	}
+	if diskSize == "" {
+		diskSize = "1Gi"
+	}
+
+	claim := &v1.PersistentVolumeClaim{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "PersistentVolumeClaim",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: "blueprint-pvc-",
+			Namespace:    namespace,
+		},
+		Spec: v1.PersistentVolumeClaimSpec{
+			StorageClassName: &scName,
+			DataSourceRef: &v1.TypedObjectReference{
+				APIGroup: pointer.String("vmoperator.vmware.com"),
+				Kind:     "VirtualMachine",
+				Name:     vmName,
+			},
+			AccessModes: []v1.PersistentVolumeAccessMode{
+				accessMode,
+			},
+			Resources: v1.VolumeResourceRequirements{
+				Requests: v1.ResourceList{
+					v1.ResourceStorage: resource.MustParse(diskSize),
+				},
+			},
+		},
+	}
+
+	return claim
+}
+
+func createBluePrintPVC(ctx context.Context, client clientset.Interface, namespace string,
+	storageclass *storagev1.StorageClass, accessMode v1.PersistentVolumeAccessMode, vmName string,
+	diskSize string) (*v1.PersistentVolumeClaim, error) {
+
+	storageClassName := storageclass.Name
+
+	// Generate the PVC spec
+	pvcSpec := createBluePrintPvcSpec(namespace, storageClassName, vmName, diskSize, accessMode)
+
+	storageQty := pvcSpec.Spec.Resources.Requests[v1.ResourceStorage]
+	storageSize := (&storageQty).String()
+
+	// Log all values going into the PVC spec
+	ginkgo.By(fmt.Sprintf(
+		"Creating Blueprint PVC with details:\n"+
+			"  Namespace        : %s\n"+
+			"  StorageClassName : %s\n"+
+			"  DataSourceRef    : (apiGroup=%s, kind=%s, name=%s)\n"+
+			"  DiskSize         : %s\n"+
+			"  AccessMode       : %s\n",
+		namespace,
+		storageClassName,
+		*pvcSpec.Spec.DataSourceRef.APIGroup,
+		pvcSpec.Spec.DataSourceRef.Kind,
+		pvcSpec.Spec.DataSourceRef.Name,
+		storageSize,
+		pvcSpec.Spec.AccessModes[0],
+	))
+
+	// Create the PVC
+	pvc, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred(),
+		fmt.Sprintf("Failed to create Blueprint PVC in namespace %s: %v", namespace, err))
+
+	framework.Logf("Blueprint PVC created successfully: %q in namespace: %q", pvc.Name, namespace)
+	return pvc, nil
+}
+
+func getCNSRegisterVolumeSpecForBluePrintPvc(namespace string, volumeID string, pvcName string,
+	accessMode v1.PersistentVolumeAccessMode,
+) *cnsregistervolumev1alpha1.CnsRegisterVolume {
+
+	framework.Logf("Creating CNSRegisterVolume spec for blueprint pvc: %s, VolumeID: %s", pvcName, volumeID)
+	if accessMode == "" {
+		accessMode = v1.ReadWriteOnce
+	}
+
+	cnsRegisterVolume := &cnsregistervolumev1alpha1.CnsRegisterVolume{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "cns.vmware.com/v1alpha1",
+			Kind:       "CnsRegisterVolume",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			// Use GenerateName to let Kubernetes append a random suffix
+			GenerateName: "cnsregvol-",
+			Namespace:    namespace,
+		},
+		Spec: cnsregistervolumev1alpha1.CnsRegisterVolumeSpec{
+			VolumeID:   volumeID,
+			AccessMode: accessMode,
+			PvcName:    pvcName,
+		},
+	}
+
+	return cnsRegisterVolume
+}
+
+func readDatastoreUrlForFcdCreation(ctx context.Context, datastoreURL string) *object.Datastore {
+	var datacenters []string
+	finder := find.NewFinder(e2eVSphere.Client.Client, false)
+	cfg, err := getConfig()
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	dcList := strings.Split(cfg.Global.Datacenters, ",")
+	for _, dc := range dcList {
+		dcName := strings.TrimSpace(dc)
+		if dcName != "" {
+			datacenters = append(datacenters, dcName)
+		}
+	}
+	for _, dc := range datacenters {
+		defaultDatacenter, err := finder.Datacenter(ctx, dc)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		finder.SetDatacenter(defaultDatacenter)
+		defaultDatastore, err = getDatastoreByURL(ctx, datastoreURL, defaultDatacenter)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	}
+	return defaultDatastore
+}
diff --git a/tests/e2e/e2e_common.go b/tests/e2e/e2e_common.go
index 28ec43d1b2..3ee45ea1cf 100644
--- a/tests/e2e/e2e_common.go
+++ b/tests/e2e/e2e_common.go
@@ -518,6 +518,11 @@ var (
 	serviceAccountKeyword = "ServiceAccount"
 )
 
+// blueprint pvc
+var (
+	envDatastoreUrlZone2 = "DATASTORE_URL_ZONE2"
+)
+
 // storage policy usages for storage quota validation
 var usageSuffixes = []string{
 	"-pvc-usage",
diff --git a/tests/e2e/vmservice_utils.go b/tests/e2e/vmservice_utils.go
index 1cb2d8fdaa..144a3fb679 100644
--- a/tests/e2e/vmservice_utils.go
+++ b/tests/e2e/vmservice_utils.go
@@ -1486,28 +1486,32 @@ func getVmImages(ctx context.Context, namespace string) string {
 }
 
 // Waits for vm images to get listed in namespace
-func pollWaitForVMImageToSync(ctx context.Context, namespace string, expectedImage string, Poll,
-	timeout time.Duration) error {
-
-	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
+func pollWaitForVMImageToSync(ctx context.Context, namespace string, expectedImage string, poll, timeout time.Duration) error {
+	start := time.Now()
+	for time.Since(start) < timeout {
 		listOfVmImages := getVmImages(ctx, namespace)
-		// Split output into lines and search for the expected image
 		lines := strings.Split(listOfVmImages, "\n")
 		found := false
+
 		for _, line := range lines {
+			// Skip header or empty lines
+			if strings.HasPrefix(line, "NAME") || strings.TrimSpace(line) == "" {
+				continue
+			}
+
 			if strings.Contains(line, expectedImage) {
 				found = true
-				framework.Logf("Found : %t, Image: %s\n", found, expectedImage)
+				framework.Logf("Found VM Image in namespace [%s]:\n%s", namespace, line)
 				break
 			}
 		}
-		if !found {
-			continue
-		} else {
+
+		if found {
 			return nil
 		}
+		time.Sleep(poll)
 	}
-	return fmt.Errorf("failed to load vm-image timed out after %v", timeout)
+	return fmt.Errorf("failed to find VM image %q in namespace %q after timeout %v", expectedImage, namespace, timeout)
 }
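
Illustrative sketch (not part of the change set above): rendered as Kubernetes manifests, the two objects the new helpers build look roughly like the following. The namespace, storage class, VM name, and FCD id are placeholders, and the CnsRegisterVolume field names are assumed to match the CRD's JSON tags for the VolumeID/PvcName/AccessMode spec fields used in getCNSRegisterVolumeSpecForBluePrintPvc.

    # Blueprint PVC: points at an existing VM Service VM via spec.dataSourceRef
    # and stays Pending until a volume is registered for it.
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      generateName: blueprint-pvc-
      namespace: <wcp-namespace>                  # placeholder
    spec:
      storageClassName: <az2-zonal-storage-policy>  # placeholder
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
      dataSourceRef:
        apiGroup: vmoperator.vmware.com
        kind: VirtualMachine
        name: testvm
    ---
    # CnsRegisterVolume: binds the pre-created FCD to the pending blueprint PVC.
    apiVersion: cns.vmware.com/v1alpha1
    kind: CnsRegisterVolume
    metadata:
      generateName: cnsregvol-
      namespace: <wcp-namespace>                  # placeholder
    spec:
      volumeID: <fcd-id>                          # FCD created against the zonal datastore
      pvcName: <blueprint-pvc-name>               # the pending DataSourceRef PVC created above
      accessMode: ReadWriteOnce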