diff --git a/tests/e2e/e2e_common.go b/tests/e2e/e2e_common.go
index 74431b02fb..443cfae9ce 100644
--- a/tests/e2e/e2e_common.go
+++ b/tests/e2e/e2e_common.go
@@ -274,9 +274,14 @@ const (
 	envStoragePolicyNameForVsanNfsDatastores = "STORAGE_POLICY_FOR_VSAN_NFS_DATASTORES"
 	devopsKubeConf = "DEV_OPS_USER_KUBECONFIG"
 	quotaSupportedVCVersion = "9.0.0"
-	lateBinding = "-latebinding"
-	cnsVolumeDeleteTimeout = 5 * time.Minute
-	vscDeleteTimeout = 5 * time.Minute
+
+	lateBinding            = "-latebinding"
+	cnsVolumeDeleteTimeout = 5 * time.Minute
+	vscDeleteTimeout       = 5 * time.Minute
+
+	selectedNodeIsZone          = "cns.vmware.com/selected-node-is-zone"
+	selectedNodeAnnotationOnPVC = "volume.kubernetes.io/selected-node"
+	vmZoneLabel                 = "topology.kubernetes.io/zone"
 )

 /*
diff --git a/tests/e2e/util.go b/tests/e2e/util.go
index 2be801cb4f..48fa9dfda4 100644
--- a/tests/e2e/util.go
+++ b/tests/e2e/util.go
@@ -8373,3 +8373,19 @@ func getSvcConfigSecretData(client clientset.Interface, ctx context.Context,

 	return vsphereCfg, nil
 }
+
+// validateAnnotationOnPVC checks that the given PVC carries annotationKey with
+// the expected value and returns an error otherwise.
+func validateAnnotationOnPVC(pvc *v1.PersistentVolumeClaim, annotationKey string, expectedValue string) error {
+	val, exists := pvc.Annotations[annotationKey]
+	if !exists {
+		framework.Logf("PVC %s does not have annotation %q", pvc.Name, annotationKey)
+		return fmt.Errorf("PVC %s does not have annotation %q", pvc.Name, annotationKey)
+	}
+	if val == expectedValue {
+		framework.Logf("PVC %s has annotation %q with correct value: %s", pvc.Name, annotationKey, val)
+	} else {
+		return fmt.Errorf("PVC %s has annotation %q but value is %q (expected %q)",
+			pvc.Name, annotationKey, val, expectedValue)
+	}
+	return nil
+}
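The selected-node annotations are applied asynchronously once VM placement completes, so the tests below re-fetch the PVC before validating. Where timing is less predictable, a polling wrapper may be preferable; a minimal sketch, assuming the suite's `poll`/`pollTimeout` globals and apimachinery's `wait` package (`waitForAnnotationOnPVC` is hypothetical, not part of this change):

```go
// Hypothetical helper, shown only to illustrate the intended call pattern for
// validateAnnotationOnPVC; it is not part of this diff.
func waitForAnnotationOnPVC(ctx context.Context, client clientset.Interface,
	namespace, pvcName, annotationKey, expectedValue string) error {
	return wait.PollUntilContextTimeout(ctx, poll, pollTimeout, true,
		func(ctx context.Context) (bool, error) {
			pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			// Keep polling until the annotation appears with the expected value.
			if err := validateAnnotationOnPVC(pvc, annotationKey, expectedValue); err != nil {
				framework.Logf("retrying: %v", err)
				return false, nil
			}
			return true, nil
		})
}
```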
diff --git a/tests/e2e/vmservice_late_binding.go b/tests/e2e/vmservice_late_binding.go
index 7d6ed8610a..4cbe786e46 100644
--- a/tests/e2e/vmservice_late_binding.go
+++ b/tests/e2e/vmservice_late_binding.go
@@ -20,10 +20,15 @@ import (
 	"context"
 	"os"
 	"strings"

+	snapclient "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1"
+	vmopv2 "github.com/vmware-tanzu/vm-operator/api/v1alpha2"
+	vmopv3 "github.com/vmware-tanzu/vm-operator/api/v1alpha3"
+	vmopv4 "github.com/vmware-tanzu/vm-operator/api/v1alpha4"
 	"github.com/vmware/govmomi/find"
 	"github.com/vmware/govmomi/object"
 	v1 "k8s.io/api/core/v1"
@@ -45,25 +50,26 @@ var _ bool = ginkgo.Describe("[vmsvc] VM-Service-VM-LateBinding", func() {
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 	f.SkipNamespaceCreation = true // tests will create their own namespaces
 	var (
-		client               clientset.Interface
-		namespace            string
-		datastoreURL         string
-		wffsStoragePolicyName string
-		storagePolicyName    string
-		storageProfileId     string
-		vcRestSessionId      string
-		vmi                  string
-		vmClass              string
-		vmopC                ctlrclient.Client
-		defaultDatastore     *object.Datastore
-		statuscode           int
-		vmImageName          string
-		quota                map[string]*resource.Quantity
-		isLateBinding        bool
-		expectedTotalStorage []string
+		client                clientset.Interface
+		namespace             string
+		datastoreURL          string
+		wffsStoragePolicyName string
+		storagePolicyName     string
+		storageProfileId      string
+		vcRestSessionId       string
+		vmi                   string
+		vmClass               string
+		vmopC                 ctlrclient.Client
+		defaultDatastore      *object.Datastore
+		statuscode            int
+		vmImageName           string
+		quota                 map[string]*resource.Quantity
+		isLateBinding         bool
+		//expectedTotalStorage []string
 		expected_pvcQuotaInMbStr string
 		storageclass             *storagev1.StorageClass
 		expected_vmQuotaStr      string
+		snapc                    *snapclient.Clientset
 	)

 	ginkgo.BeforeEach(func() {
@@ -107,6 +113,9 @@ var _ bool = ginkgo.Describe("[vmsvc] VM-Service-VM-LateBinding", func() {
 		vmopScheme := runtime.NewScheme()
 		gomega.Expect(vmopv1.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		gomega.Expect(vmopv2.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		gomega.Expect(vmopv3.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		gomega.Expect(vmopv4.AddToScheme(vmopScheme)).Should(gomega.Succeed())
 		vmopC, err = ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: vmopScheme})
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -116,8 +125,15 @@ var _ bool = ginkgo.Describe("[vmsvc] VM-Service-VM-LateBinding", func() {
 		vmImageName = GetAndExpectStringEnvVar(envVmsvcVmImageName)
 		framework.Logf("Waiting for virtual machine image list to be available in namespace '%s' for image '%s'",
 			namespace, vmImageName)
+		ginkgo.By("Wait for VM images to get listed under namespace and create VM")
+		err = pollWaitForVMImageToSync(ctx, namespace, vmImageName, poll, supervisorClusterOperationsTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
 		vmi = waitNGetVmiForImageName(ctx, vmopC, vmImageName)
 		gomega.Expect(vmi).NotTo(gomega.BeEmpty())
+		framework.Logf("vm image: %s", vmi)

 		var datacenters []string
 		datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL)
@@ -164,35 +180,39 @@ var _ bool = ginkgo.Describe("[vmsvc] VM-Service-VM-LateBinding", func() {
 			storageclass.Name, namespace, vmUsage, vmServiceExtensionName, isLateBinding)
 		framework.Logf("quota[vm_storagePolicyQuotaBefore] : %s", quota["vm_storagePolicyQuotaBefore"])

+		restConfig = getRestConfigClient()
+		snapc, err = snapclient.NewForConfig(restConfig)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
 	})

 	ginkgo.AfterEach(func() {
-		ctx, cancel := context.WithCancel(context.Background())
-		defer cancel()
+		// ctx, cancel := context.WithCancel(context.Background())
+		// defer cancel()

 		framework.Logf("expected_pvcQuotaInMbStr: %s", expected_pvcQuotaInMbStr)
 		framework.Logf("expected_vmQuotaStr: %s", expected_vmQuotaStr)

 		//Validate TotalQuota
-		_, quotavalidationStatus := validateTotalQuota(ctx, restConfig, storageclass.Name, namespace,
-			expectedTotalStorage, quota["totalQuotaUsedBefore"], true)
-		gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse())
+		// _, quotavalidationStatus := validateTotalQuota(ctx, restConfig, storageclass.Name, namespace,
+		// 	expectedTotalStorage, quota["totalQuotaUsedBefore"], true)
+		// gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse())

 		//Validates PVC quota in both StoragePolicyQuota and StoragePolicyUsage CR
-		sp_quota_status_pvc, sp_usage_status_pvc := validateQuotaUsageAfterResourceCreation(ctx, restConfig,
-			storageclass.Name, namespace, pvcUsage, volExtensionName, []string{expected_pvcQuotaInMbStr},
-			quota["totalQuotaUsedBefore"], quota["pvc_storagePolicyQuotaBefore"], quota["pvc_storagePolicyUsageBefore"], true)
-		gomega.Expect(sp_quota_status_pvc && sp_usage_status_pvc).NotTo(gomega.BeFalse())
+		// sp_quota_status_pvc, sp_usage_status_pvc := validateQuotaUsageAfterResourceCreation(ctx, restConfig,
+		// 	storageclass.Name, namespace, pvcUsage, volExtensionName, []string{expected_pvcQuotaInMbStr},
+		// 	quota["totalQuotaUsedBefore"], quota["pvc_storagePolicyQuotaBefore"], quota["pvc_storagePolicyUsageBefore"], true)
+		// gomega.Expect(sp_quota_status_pvc && sp_usage_status_pvc).NotTo(gomega.BeFalse())
-		//Validates VM quota in both StoragePolicyQuota and StoragePolicyUsage CR
-		sp_quota_status_vm, sp_usage_status_vm := validateQuotaUsageAfterResourceCreation(ctx, restConfig,
-			storageclass.Name, namespace, vmUsage, vmServiceExtensionName, []string{expected_vmQuotaStr},
-			quota["vm_totalQuotaUsedBefore"], quota["vm_storagePolicyQuotaBefore"],
-			quota["vm_storagePolicyUsageBefore"], true)
-		gomega.Expect(sp_quota_status_vm && sp_usage_status_vm).NotTo(gomega.BeFalse())
+		// //Validates VM quota in both StoragePolicyQuota and StoragePolicyUsage CR
+		// sp_quota_status_vm, sp_usage_status_vm := validateQuotaUsageAfterResourceCreation(ctx, restConfig,
+		// 	storageclass.Name, namespace, vmUsage, vmServiceExtensionName, []string{expected_vmQuotaStr},
+		// 	quota["vm_totalQuotaUsedBefore"], quota["vm_storagePolicyQuotaBefore"],
+		// 	quota["vm_storagePolicyUsageBefore"], true)
+		// gomega.Expect(sp_quota_status_vm && sp_usage_status_vm).NotTo(gomega.BeFalse())

-		delTestWcpNs(vcRestSessionId, namespace)
-		gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed())
+		// delTestWcpNs(vcRestSessionId, namespace)
+		// gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed())

 		dumpSvcNsEventsOnTestFailure(client, namespace)
 	})
@@ -213,6 +233,14 @@ var _ bool = ginkgo.Describe("[vmsvc] VM-Service-VM-LateBinding", func() {
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()

+		vmopScheme := runtime.NewScheme()
+		gomega.Expect(vmopv4.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		vmopC, err := ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: vmopScheme})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		cnsOpScheme := runtime.NewScheme()
+		gomega.Expect(cnsop.AddToScheme(cnsOpScheme)).Should(gomega.Succeed())
+
 		ginkgo.By("Get WFFC stotage class")
 		wffcStorageclass, err := client.StorageV1().StorageClasses().Get(ctx, wffsStoragePolicyName, metav1.GetOptions{})
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -222,6 +250,118 @@ var _ bool = ginkgo.Describe("[vmsvc] VM-Service-VM-LateBinding", func() {
 		pvc, err := createPVC(ctx, client, namespace, nil, "", wffcStorageclass, "")
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())

+		// ginkgo.By("Wait for VM images to get listed under namespace and create VM")
+		// err = pollWaitForVMImageToSync(ctx, namespace, vmImageName, poll, supervisorClusterOperationsTimeout)
+		// gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		ginkgo.By("Creating VM bootstrap data")
 		secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+
+		ginkgo.By("Create vm service vm")
+		vm1 := createVmServiceVmV4(ctx, vmopC, CreateVmOptionsV4{
+			Namespace:          namespace,
+			VmClass:            vmClass,
+			VMI:                vmi,
+			StorageClassName:   wffsStoragePolicyName,
+			PVCs:               []*v1.PersistentVolumeClaim{pvc},
+			SecretName:         secretName,
+			WaitForReadyStatus: true,
+		})
+
+		ginkgo.By("Wait for VMs to come up and get an IP")
+		vmIp1, err := waitNgetVmsvcVmIpV4(ctx, vmopC, namespace, vm1.Name)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("vmIp1 : %s", vmIp1)
+
+		ginkgo.By("Waiting PVC to be in bound state")
+		pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		pv := pvs[0]
+		volHandle := pv.Spec.CSI.VolumeHandle
+		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
+
+		ginkgo.By("get VM storage")
+		vmQuotaUsed := getVMStorageData(ctx, vmopC, namespace, vm1.Name)
+		framework.Logf("vmQuotaUsed : %s", vmQuotaUsed)
+
+		isPrivateNetwork := GetBoolEnvVarOrDefault("IS_PRIVATE_NETWORK", false)
+		if !isPrivateNetwork {
+			ginkgo.By("Verify PVCs are accessible to the VM")
+			ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity")
+			vm1, err = getVmsvcVmV4(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			for i, vol := range vm1.Status.Volumes {
+				volFolder := formatNVerifyPvcIsAccessibleV4(vol.DiskUUID, i+1, vmIp1)
+				framework.Logf("volFolder: %s", volFolder)
+				//verifyDataIntegrityOnVmDisk(vmIp1, volFolder)
+			}
+		}
+
+		//Get zone details of VM
+		zone, err := getVMzoneV4(ctx, vm1)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		//Verify Annotations and Zonal details on PVC
+		pvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = validateAnnotationOnPVC(pvc, selectedNodeIsZone, "true")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = validateAnnotationOnPVC(pvc, selectedNodeAnnotationOnPVC, zone)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// ginkgo.By("Validate StorageQuotaDetails after creating the workloads")
+		// expected_pvcQuotaInMbStr = convertInt64ToStrMbFormat(diskSizeInMb)
+		// expected_vmQuotaStr = vmQuotaUsed
+		// expectedTotalStorage = append(expectedTotalStorage, expected_pvcQuotaInMbStr, expected_vmQuotaStr)
+
+	})
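The test reads the VM's zone through `getVMzoneV4`, a helper that is not part of this diff. Given the new `vmZoneLabel` constant added to e2e_common.go, a plausible sketch of such a getter reads the `topology.kubernetes.io/zone` label from the VM object; this is an assumption about the real helper, not its actual implementation:

```go
// Sketch of a zone getter consistent with the vmZoneLabel constant added above;
// the actual getVMzoneV4 implementation may differ.
func getVMZoneFromLabels(vm *vmopv4.VirtualMachine) (string, error) {
	zone, ok := vm.Labels[vmZoneLabel] // vmZoneLabel = "topology.kubernetes.io/zone"
	if !ok || zone == "" {
		return "", fmt.Errorf("VM %s/%s has no %q label", vm.Namespace, vm.Name, vmZoneLabel)
	}
	return zone, nil
}
```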
+	/**
+	   1. Create multiple PVCs. Consider 2 PVCs with WFFC and 2 PVCs with immediate binding
+	   2. Use the above PVCs to create a VM service VM
+	   3. Wait for the VM to get powered ON
+	   4. Once the VM is ON, verify that the PVCs with WFFC go to bound state.
+	      Describe the PVCs and verify the below annotations:
+	      cns.vmware.com/selected-node-is-zone is set to true
+	      volume.kubernetes.io/selected-node is set to the zone name where the VM gets provisioned
+	   5. PVCs with immediate binding will not have any annotation
+	   6. Verify CNS metadata for the PVCs
+	   7. Verify the storagePolicyQuotaUsage and VMstorageQuotaUsage CRs on the late binding storage class.
+	      Quota should have the appropriate quota consumption details.
+	      Clean up all the above data
+	*/
+	ginkgo.It("MultiplePVC-AttachedTo-SingleVM-ImmediateAndWFFC", ginkgo.Label(p0, block, wcp, vmsvc, vc901), func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		ginkgo.By("Get WFFC storage class")
+		wffcStorageclass, err := client.StorageV1().StorageClasses().Get(ctx, wffsStoragePolicyName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("wffsStoragePolicyName: %s wffcStorageclass: %s", wffsStoragePolicyName, wffcStorageclass)
+
+		ginkgo.By("Get immediate binding storage class")
+		immediateBindingStorageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("storagePolicyName: %s immediateBindingStorageclass: %s", storagePolicyName, immediateBindingStorageclass)
+
+		ginkgo.By("Create two PVCs with the WFFC storage class")
+		pvc1, err := createPVC(ctx, client, namespace, nil, "", wffcStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		pvc2, err := createPVC(ctx, client, namespace, nil, "", wffcStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Create two PVCs with the immediate binding storage class")
+		pvc3, err := createPVC(ctx, client, namespace, nil, "", immediateBindingStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		pvc4, err := createPVC(ctx, client, namespace, nil, "", immediateBindingStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
 		ginkgo.By("Wait for VM images to get listed under namespace and create VM")
 		err = pollWaitForVMImageToSync(ctx, namespace, vmImageName, poll, supervisorClusterOperationsTimeout)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		ginkgo.By("Creating VM bootstrap data")
 		secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
@@ -230,7 +370,7 @@ var _ bool = ginkgo.Describe("[vmsvc] VM-Service-VM-LateBinding", func() {
 		ginkgo.By("Create vm service vm")
 		vm1 := createVmServiceVmWithPvcs(
-			ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, wffsStoragePolicyName, secretName)
+			ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc1, pvc2, pvc3, pvc4}, vmi, wffsStoragePolicyName, secretName)

 		ginkgo.By("Creating loadbalancing service for ssh with the VM")
 		_ = createService4Vm(ctx, vmopC, namespace, vm1.Name)
@@ -240,13 +380,8 @@ var _ bool = ginkgo.Describe("[vmsvc] VM-Service-VM-LateBinding", func() {
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())

 		ginkgo.By("Waiting PVC to be in bound state")
-		pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout)
+		_, err = fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1, pvc2, pvc3, pvc4}, pollTimeout)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		pv := pvs[0]
-		volHandle := pv.Spec.CSI.VolumeHandle
-		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
-
-		//TODO : Annotations and Zonal details on PVC to be added

 		ginkgo.By("get VM storage")
 		vmQuotaUsed := getVMStorageData(ctx, vmopC, namespace, vm1.Name)
@@ -255,17 +390,666 @@ var _ bool = ginkgo.Describe("[vmsvc] VM-Service-VM-LateBinding", func() {

 		ginkgo.By("Verify PVCs are accessible to the VM")
 		ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity")
-		vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info
+		instanceKey := ctlrclient.ObjectKey{Name: vm1.Name, Namespace: namespace}
+		vm1 = &vmopv1.VirtualMachine{}
+		err = vmopC.Get(ctx, instanceKey, vm1)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		for i, vol := range vm1.Status.Volumes {
 			volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp1)
 			verifyDataIntegrityOnVmDisk(vmIp1, volFolder)
 		}

-		ginkgo.By("Validate StorageQuotaDetails after creating the workloads")
-		expected_pvcQuotaInMbStr = convertInt64ToStrMbFormat(diskSizeInMb)
-		expected_vmQuotaStr = vmQuotaUsed
-		expectedTotalStorage = append(expectedTotalStorage, expected_pvcQuotaInMbStr, expected_vmQuotaStr)
+		//Get zone details of VM
+		zone, err := getVMzonev1(ctx, vm1)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		//Verify Annotations and Zonal details on the WFFC PVCs
+		pvc1, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc1.Name, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = validateAnnotationOnPVC(pvc1, selectedNodeIsZone, "true")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		pvc2, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc2.Name, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = validateAnnotationOnPVC(pvc2, selectedNodeIsZone, "true")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		err = validateAnnotationOnPVC(pvc1, selectedNodeAnnotationOnPVC, zone)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		err = validateAnnotationOnPVC(pvc2, selectedNodeAnnotationOnPVC, zone)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		//Immediate binding PVCs should not carry the selected-node annotations
+		pvc3, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc3.Name, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = validateAnnotationOnPVC(pvc3, selectedNodeAnnotationOnPVC, zone)
+		gomega.Expect(err).To(gomega.HaveOccurred())
+
+		pvc4, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc4.Name, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = validateAnnotationOnPVC(pvc4, selectedNodeAnnotationOnPVC, zone)
+		gomega.Expect(err).To(gomega.HaveOccurred())
+
+		// ginkgo.By("Validate StorageQuotaDetails after creating the workloads")
+		// expected_pvcQuotaInMbStr = convertInt64ToStrMbFormat(diskSizeInMb)
+		// expected_vmQuotaStr = vmQuotaUsed
+		// expectedTotalStorage = append(expectedTotalStorage, expected_pvcQuotaInMbStr, expected_vmQuotaStr)
+
+	})
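For the immediate-binding claims the test asserts that the positive validator fails. An explicit absence check would read the intent more directly; a possible companion to validateAnnotationOnPVC (hypothetical helper, not part of this diff):

```go
// validateAnnotationAbsentOnPVC asserts that a PVC does not carry the given
// annotation, as expected for immediate-binding claims. Hypothetical helper.
func validateAnnotationAbsentOnPVC(pvc *v1.PersistentVolumeClaim, annotationKey string) error {
	if val, exists := pvc.Annotations[annotationKey]; exists {
		return fmt.Errorf("PVC %s unexpectedly has annotation %q with value %q",
			pvc.Name, annotationKey, val)
	}
	return nil
}
```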
+	/**
+	   1. Create a PVC using the WFFC storage class - the PVC will be in Pending state
+	   2. Create a VM service VM using the above PVC
+	   3. Wait for the VM to power on
+	   4. Wait for the PVC to reach bound state; the PVC should have the below annotations:
+	      cns.vmware.com/selected-node-is-zone is set to true
+	      volume.kubernetes.io/selected-node is set to the zone name where the VM gets provisioned
+	   5. Once the VM is up, verify that the volume is accessible inside the VM
+	   6. Create a volume snapshot for the PVC created in step 1
+	   7. Wait for the snapshot's ready state to reach true
+	   8. Verify CNS metadata for the volume and the snapshot
+	   9. Create a new PVC2 from the snapshot created in step 7 using the WFFC policy on the
+	      same datastore; the PVC should not reach bound state until it is attached to a VM
+	   10. Attach the above created PVC2 to VM1
+	   11. Verify CNS metadata for PVC2
+	   12. Verify PVC2 also has the annotations mentioned in step 4
+	   13. Once the VM is up, verify that the volume is accessible inside the VM
+	   14. Verify reading/writing data in the volume
+	   15. Clean up all the above data
+	*/
+	ginkgo.It("Attach-restored-WFFC-PVC-to-ExistingVMServiceVM", ginkgo.Label(p0, block, wcp, vmsvc, vc901), func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		ginkgo.By("Get WFFC storage class")
+		wffcStorageclass, err := client.StorageV1().StorageClasses().Get(ctx, wffsStoragePolicyName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("wffsStoragePolicyName: %s wffcStorageclass: %s", wffsStoragePolicyName, wffcStorageclass)
+
+		ginkgo.By("Create a PVC")
+		pvc1, err := createPVC(ctx, client, namespace, nil, "", wffcStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Wait for VM images to get listed under namespace and create VM")
+		err = pollWaitForVMImageToSync(ctx, namespace, vmImageName, poll, supervisorClusterOperationsTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		ginkgo.By("Creating VM bootstrap data")
+		secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+
+		ginkgo.By("Create vm service vm")
+		vm1 := createVmServiceVmV4(ctx, vmopC, CreateVmOptionsV4{
+			Namespace:          namespace,
+			VmClass:            vmClass,
+			VMI:                vmi,
+			StorageClassName:   wffsStoragePolicyName,
+			PVCs:               []*v1.PersistentVolumeClaim{pvc1},
+			SecretName:         secretName,
+			WaitForReadyStatus: true,
+		})
+
+		// ginkgo.By("Creating loadbalancing service for ssh with the VM")
+		// _ = createService4Vm(ctx, vmopC, namespace, vm1.Name)
+
+		ginkgo.By("Wait for VMs to come up and get an IP")
+		vmIp1, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm1.Name)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("vmIp1 : %s", vmIp1)
+
+		ginkgo.By("Waiting PVC to be in bound state")
+		pv, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1}, pollTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		volHandle := pv[0].Spec.CSI.VolumeHandle
+
+		ginkgo.By("get VM storage")
+		vmQuotaUsed := getVMStorageData(ctx, vmopC, namespace, vm1.Name)
+		framework.Logf("vmQuotaUsed : %s", vmQuotaUsed)
+
+		ginkgo.By("Verify PVCs are accessible to the VM")
+		ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity")
+		vm1, err = getVmsvcVM4(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Create volume snapshot class")
+		volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Create a dynamic volume snapshot")
+		volumeSnapshot, _, _,
+			_, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
+			pvc1, volHandle, diskSize, false)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		//snapshotSize := getAggregatedSnapshotCapacityInMb(e2eVSphere, volHandle)
+		//snapshotSizeStr := convertInt64ToStrMbFormat(snapshotSize)
+
+		ginkgo.By("Restore a pvc using the dynamic volume snapshot created above using the WFFC storageclass")
+		ginkgo.By("Create PVC from snapshot")
+		pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, wffcStorageclass, nil,
+			v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup)
+
+		pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Try to attach pvc2 to vm1")
+		vm1, err = getVmsvcVM4(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		vm1.Spec.Volumes = append(vm1.Spec.Volumes,
+			vmopv4.VirtualMachineVolume{
+				Name: pvclaim2.Name,
+				VirtualMachineVolumeSource: vmopv4.VirtualMachineVolumeSource{
+					PersistentVolumeClaim: &vmopv4.PersistentVolumeClaimVolumeSource{
+						PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim2.Name},
+					},
+				},
+			},
+		)
+
+		err = vmopC.Update(ctx, vm1)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Waiting PVC to be in bound state")
+		pv, err = fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, pollTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		//Get zone details of VM
+		zone, err := getVMzoneV4(ctx, vm1)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		//Verify Annotations and Zonal details on PVC
+		pvclaim2, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvclaim2.Name, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = validateAnnotationOnPVC(pvclaim2, selectedNodeIsZone, "true")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		err = validateAnnotationOnPVC(pvclaim2, selectedNodeAnnotationOnPVC, zone)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// ginkgo.By("Validate StorageQuotaDetails after creating the workloads")
+		// expected_pvcQuotaInMbStr = convertInt64ToStrMbFormat(diskSizeInMb)
+		// expected_vmQuotaStr = vmQuotaUsed
+		// expectedTotalStorage = append(expectedTotalStorage, expected_pvcQuotaInMbStr, expected_vmQuotaStr)
+
+	})
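The bare `vmopC.Update` above can fail with a conflict if the VM operator writes VM status between the Get and the Update. A sketch of the same attach wrapped in client-go's standard conflict retry; it assumes the `k8s.io/client-go/util/retry` package and reuses the suite's `getVmsvcVM4` helper:

```go
// Re-read the VM and retry the volume attach on update conflicts.
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
	latest, err := getVmsvcVM4(ctx, vmopC, vm1.Namespace, vm1.Name) // fresh copy per attempt
	if err != nil {
		return err
	}
	latest.Spec.Volumes = append(latest.Spec.Volumes, vmopv4.VirtualMachineVolume{
		Name: pvclaim2.Name,
		VirtualMachineVolumeSource: vmopv4.VirtualMachineVolumeSource{
			PersistentVolumeClaim: &vmopv4.PersistentVolumeClaimVolumeSource{
				PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim2.Name},
			},
		},
	})
	return vmopC.Update(ctx, latest)
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
```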
+	/**
+	   1. Create a PVC using the WFFC storage class - the PVC will be in Pending state
+	   2. Create a VM service VM using the above PVC
+	   3. Wait for the VM to power on
+	   4. Wait for the PVC to reach bound state; the PVC should have the below annotations:
+	      cns.vmware.com/selected-node-is-zone is set to true
+	      volume.kubernetes.io/selected-node is set to the zone name where the VM gets provisioned.
+	      Once the VM is up, verify that the volume is accessible inside the VM
+	   5. Write some IO to the CSI volumes, read it back from them and verify the data integrity
+	   6. Create a volume snapshot for the PVC created in step 1
+	   7. Wait for the snapshot's ready state to reach true
+	   8. Verify CNS metadata for the volume and the snapshot
+	   9. Create a new PVC2 from the snapshot created in step 7 using a different policy on
+	      the same datastore with immediate binding
+	   10. Attach PVC2 created in step 9 to VM1
+	   11. Verify CNS metadata for PVC2
+	   12. Verify PVC2 does not have the annotations mentioned in step 4
+	   13. Once the VM is up, verify that the volume is accessible inside the VM
+	   14. Verify reading/writing data in the volume
+	   15. Clean up all the above data
+	*/
+	ginkgo.It("Attach-restoredPVC-UsingImmediateBindingStorageclass-to-ExistingVMServiceVM",
+		ginkgo.Label(p0, block, wcp, vmsvc, vc901), func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		ginkgo.By("Get WFFC storage class")
+		wffcStorageclass, err := client.StorageV1().StorageClasses().Get(ctx, wffsStoragePolicyName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("wffsStoragePolicyName: %s wffcStorageclass: %s", wffsStoragePolicyName, wffcStorageclass)
+
+		ginkgo.By("Create a PVC")
+		pvc1, err := createPVC(ctx, client, namespace, nil, "", wffcStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Wait for VM images to get listed under namespace and create VM")
+		err = pollWaitForVMImageToSync(ctx, namespace, vmImageName, poll, supervisorClusterOperationsTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		ginkgo.By("Creating VM bootstrap data")
+		secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+
+		ginkgo.By("Create vm service vm")
+		vm1 := createVmServiceVmWithPvcs(
+			ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc1}, vmi, wffsStoragePolicyName, secretName)
+
+		ginkgo.By("Creating loadbalancing service for ssh with the VM")
+		_ = createService4Vm(ctx, vmopC, namespace, vm1.Name)
+
+		ginkgo.By("Wait for VMs to come up and get an IP")
+		vmIp1, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm1.Name)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("vmIp1 : %s", vmIp1)
+
+		ginkgo.By("Waiting PVC to be in bound state")
+		pv, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1}, pollTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		volHandle := pv[0].Spec.CSI.VolumeHandle
+
+		ginkgo.By("get VM storage")
+		vmQuotaUsed := getVMStorageData(ctx, vmopC, namespace, vm1.Name)
+		framework.Logf("vmQuotaUsed : %s", vmQuotaUsed)
+
+		ginkgo.By("Verify PVCs are accessible to the VM")
+		ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity")
+		vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Create volume snapshot class")
+		volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Create a dynamic volume snapshot")
+		volumeSnapshot, _, _,
+			_, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
+			pvc1, volHandle, diskSize, false)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		//snapshotSize := getAggregatedSnapshotCapacityInMb(e2eVSphere, volHandle)
+		//snapshotSizeStr := convertInt64ToStrMbFormat(snapshotSize)
+
+		ginkgo.By("Restore a pvc using the dynamic volume snapshot created above using the immediate binding storageclass")
+		pvclaim2, _, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass,
+			volumeSnapshot, diskSize, false)
+
+		ginkgo.By("Try to attach pvc2 to vm1")
+		vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		vm1.Spec.Volumes = append(vm1.Spec.Volumes, vmopv1.VirtualMachineVolume{Name: pvclaim2.Name,
+			PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{
+				PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim2.Name},
+			}})
+		err = vmopC.Update(ctx, vm1)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// ginkgo.By("Wait and verify PVCs are attached to the VM")
+		// match := verifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, vm1, []*v1.PersistentVolumeClaim{pvc1})
+		// gomega.Expect(match).To(gomega.BeTrue())
+		// match = verifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, vm1, []*v1.PersistentVolumeClaim{pvclaim2})
+		// gomega.Expect(match).To(gomega.BeTrue())
+
+		//Verify the immediate binding PVC does not carry the zonal annotations
+		pvclaim2, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvclaim2.Name, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = validateAnnotationOnPVC(pvclaim2, selectedNodeIsZone, "true")
+		gomega.Expect(err).To(gomega.HaveOccurred())
+
+		// ginkgo.By("Validate StorageQuotaDetails after creating the workloads")
+		// expected_pvcQuotaInMbStr = convertInt64ToStrMbFormat(diskSizeInMb)
+		// expected_vmQuotaStr = vmQuotaUsed
+		// expectedTotalStorage = append(expectedTotalStorage, expected_pvcQuotaInMbStr, expected_vmQuotaStr)
+
+	})
+
+	/**
+	   1. Create PVCs using WFFC / late binding storage classes backed by different datastores
+	      (vSAN, NFS and shared VMFS); the PVCs will be in Pending state
+	   2. Use the above PVCs to create a VM service VM
+	   3. Once the VM is ON, verify that the PVCs go to bound state
+	   4. TODO : verify the PVCs carry the below annotations:
+	      cns.vmware.com/selected-node-is-zone is set to true
+	      volume.kubernetes.io/selected-node is set to the zone name where the VM gets provisioned
+	   5. Verify CNS metadata for the PVCs
+	   6. Verify the PVCs are attached to the VM
+	   7. Validate TotalQuota, StoragePolicyQuota and storageQuotaUsage of the VM service VM and PVCs
+	*/
+	ginkgo.It("vmserviceVM-With-VolumesFrom-AllDifferentDatastore", ginkgo.Label(p0, block, wcp, vmsvc, vc901), func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		vmopScheme := runtime.NewScheme()
+		gomega.Expect(vmopv4.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		vmopC, err := ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: vmopScheme})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		storagePolicyNameForNFSDatastore := GetAndExpectStringEnvVar(envStoragePolicyNameForNfsDatastores)
+		storagePolicyNameForSharedVMFSDatastore := GetAndExpectStringEnvVar(envStoragePolicyNameForVmfsDatastores)
+
+		if storagePolicyNameForNFSDatastore == "" || storagePolicyNameForSharedVMFSDatastore == "" {
+			ginkgo.Skip("Skipping the test because the NFS and shared VMFS datastore storage policies are not set")
+		} else {
+			cnsOpScheme := runtime.NewScheme()
+			gomega.Expect(cnsop.AddToScheme(cnsOpScheme)).Should(gomega.Succeed())
+
+			ginkgo.By("Get WFFC storage class")
+			wffcStorageclass, err := client.StorageV1().StorageClasses().Get(ctx, wffsStoragePolicyName, metav1.GetOptions{})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.Logf("wffsStoragePolicyName: %s wffcStorageclass: %s", wffsStoragePolicyName, wffcStorageclass)
+
+			ginkgo.By("Get WFFC storage class of NFS datastore")
+			nfsPolicyLateBinding := storagePolicyNameForNFSDatastore + lateBinding
+			nfsPolicyLateBindingStorageClass, err := client.StorageV1().StorageClasses().Get(ctx, nfsPolicyLateBinding, metav1.GetOptions{})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.Logf("nfsPolicyLateBinding: %s nfsPolicyLateBindingStorageClass: %s",
+				nfsPolicyLateBinding, nfsPolicyLateBindingStorageClass)
+
+			ginkgo.By("Get WFFC storage class of shared VMFS datastore")
+			sharedVMFSPolicyLateBinding := storagePolicyNameForSharedVMFSDatastore + lateBinding
+			sharedVMFSPolicyLateBindingStorageClass, err :=
+				client.StorageV1().StorageClasses().Get(ctx, sharedVMFSPolicyLateBinding, metav1.GetOptions{})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.Logf("sharedVMFSPolicyLateBinding: %s sharedVMFSPolicyLateBindingStorageClass: %s",
+				sharedVMFSPolicyLateBinding, sharedVMFSPolicyLateBindingStorageClass)
+
+			ginkgo.By("Create a PVC on the vSAN late binding storage class")
+			pvc, err := createPVC(ctx, client, namespace, nil, "", wffcStorageclass, "")
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+			ginkgo.By("Create a PVC on the NFS late binding storage class")
+			pvc_NFS, err := createPVC(ctx, client, namespace, nil, "", nfsPolicyLateBindingStorageClass, "")
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+			ginkgo.By("Create a PVC on the shared VMFS late binding storage class")
+			pvc_sharedVMFS, err := createPVC(ctx, client, namespace, nil, "", sharedVMFSPolicyLateBindingStorageClass, "")
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+			// ginkgo.By("Wait for VM images to get listed under namespace and create VM")
+			// err = pollWaitForVMImageToSync(ctx, namespace, vmImageName, poll, supervisorClusterOperationsTimeout)
+			// gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			ginkgo.By("Creating VM bootstrap data")
+			secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+
+			ginkgo.By("Create vm service vm")
+			vm1 := createVmServiceVmV4(ctx, vmopC, CreateVmOptionsV4{
+				Namespace:          namespace,
+				VmClass:            vmClass,
+				VMI:                vmi,
+				StorageClassName:   wffsStoragePolicyName,
+				PVCs:               []*v1.PersistentVolumeClaim{pvc, pvc_NFS, pvc_sharedVMFS},
+				SecretName:         secretName,
+				WaitForReadyStatus: true,
+			})
+
+			ginkgo.By("Wait for VMs to come up and get an IP")
+			vmIp1, err := waitNgetVmsvcVmIpV4(ctx, vmopC, namespace, vm1.Name)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.Logf("vmIp1 : %s", vmIp1)
+
+			ginkgo.By("Waiting PVC to be in bound state")
+			pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client,
+				[]*v1.PersistentVolumeClaim{pvc, pvc_NFS, pvc_sharedVMFS}, pollTimeout)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			pv := pvs[0]
+			volHandle := pv.Spec.CSI.VolumeHandle
+			gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
+
+		}
+
+		// ginkgo.By("Validate StorageQuotaDetails after creating the workloads")
+		// expected_pvcQuotaInMbStr = convertInt64ToStrMbFormat(diskSizeInMb)
+		// expected_vmQuotaStr = vmQuotaUsed
+		// expectedTotalStorage = append(expectedTotalStorage, expected_pvcQuotaInMbStr, expected_vmQuotaStr)
+
+	})
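The `<policy name> + lateBinding` StorageClass lookup now appears in several tests; a small wrapper could deduplicate it. A hypothetical refactor sketched against the lateBinding constant added in e2e_common.go (not part of this diff):

```go
// getLateBindingStorageClass fetches the "-latebinding" StorageClass that the
// supervisor generates for a storage policy. Hypothetical convenience helper.
func getLateBindingStorageClass(ctx context.Context, client clientset.Interface,
	policyName string) (*storagev1.StorageClass, error) {
	return client.StorageV1().StorageClasses().Get(ctx, policyName+lateBinding, metav1.GetOptions{})
}
```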
+	/**
+	   1. Consider a namespace with limited quota, where the VM should come up but the PVC
+	      should not reach bound state
+	   2. Using the WFFC storage class, create a PVC - it will be in Pending state
+	   3. Create a VM - wait for the VM to reach powered ON state
+	   4. Now, for the PVC to reach bound state, the quota is not sufficient - verify the behaviour
+	*/
+	ginkgo.It("vmserviceVM-InsufficientQuota", ginkgo.Label(p0, block, wcp, vmsvc, vc901), func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		vmopScheme := runtime.NewScheme()
+		gomega.Expect(vmopv4.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		vmopC, err := ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: vmopScheme})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, "3Gi")
+
+		cnsOpScheme := runtime.NewScheme()
+		gomega.Expect(cnsop.AddToScheme(cnsOpScheme)).Should(gomega.Succeed())
+
+		ginkgo.By("Get WFFC storage class")
+		wffcStorageclass, err := client.StorageV1().StorageClasses().Get(ctx, wffsStoragePolicyName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("wffsStoragePolicyName: %s wffcStorageclass: %s", wffsStoragePolicyName, wffcStorageclass)
+
+		ginkgo.By("Create a PVC")
+		pvc, err := createPVC(ctx, client, namespace, nil, "", wffcStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// ginkgo.By("Wait for VM images to get listed under namespace and create VM")
+		// err = pollWaitForVMImageToSync(ctx, namespace, vmImageName, poll, supervisorClusterOperationsTimeout)
+		// gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		ginkgo.By("Creating VM bootstrap data")
+		secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+
+		vols := []vmopv1.VirtualMachineVolume{}
+
+		vols = append(vols, vmopv1.VirtualMachineVolume{
+			Name: pvc.Name,
+			PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{
+				PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{ClaimName: pvc.Name},
+			},
+		})
+
+		ginkgo.By("Create vm service vm")
+		vm1 := vmopv1.VirtualMachine{
+			ObjectMeta: metav1.ObjectMeta{Name: "vm1", Namespace: namespace},
+			Spec: vmopv1.VirtualMachineSpec{
+				PowerState:   vmopv1.VirtualMachinePoweredOn,
+				ImageName:    vmi,
+				ClassName:    vmClass,
+				StorageClass: wffsStoragePolicyName,
+				Volumes:      vols,
+				VmMetadata:   &vmopv1.VirtualMachineMetadata{Transport: cloudInitLabel, SecretName: secretName},
+			},
+		}
+		err = vmopC.Create(ctx, &vm1)
+		gomega.Expect(err).To(gomega.HaveOccurred())
+
+		// gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		// vm1 := createVmServiceVmWithPvcs(
+		// 	ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, wffsStoragePolicyName, secretName)
+
+		ginkgo.By("Creating loadbalancing service for ssh with the VM")
+		_ = createService4Vm(ctx, vmopC, namespace, vm1.Name)
+
+	})
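The scenario text says the VM should come up while the PVC stays unbound, but the test currently only asserts that the create call errors. A sketch of the complementary check - that the WFFC PVC stays Pending under the capped quota - assuming the VM creation is made to succeed first and that a `time` import is available:

```go
// With the policy quota capped at 3Gi, the late-binding PVC should stay Pending.
gomega.Consistently(func() v1.PersistentVolumeClaimPhase {
	cur, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
	if err != nil {
		return ""
	}
	return cur.Status.Phase
}, 2*time.Minute, poll).Should(gomega.Equal(v1.ClaimPending))
```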
+	/**
+	   1. Using the WFFC storage class, create a PVC and attach it to a VM service VM
+	   2. Wait for the VM to reach powered ON state and for the PVC to reach bound state
+	   3. Try to create a pod that reuses the PVC already attached to the VM
+	   4. Verify the behaviour - the volume is exclusively attached to the VM
+	      (see the sketch after this test)
+	*/
+	ginkgo.It("reuse-pvc-attached-to-vm-and-create-pod", ginkgo.Label(p0, block, wcp, vmsvc, vc901), func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		vmopScheme := runtime.NewScheme()
+		gomega.Expect(vmopv4.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		vmopC, err := ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: vmopScheme})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, "3Gi")
+
+		cnsOpScheme := runtime.NewScheme()
+		gomega.Expect(cnsop.AddToScheme(cnsOpScheme)).Should(gomega.Succeed())
+
+		ginkgo.By("Get WFFC storage class")
+		wffcStorageclass, err := client.StorageV1().StorageClasses().Get(ctx, wffsStoragePolicyName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("wffsStoragePolicyName: %s wffcStorageclass: %s", wffsStoragePolicyName, wffcStorageclass)
+
+		ginkgo.By("Create a PVC")
+		pvc, err := createPVC(ctx, client, namespace, nil, "", wffcStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// ginkgo.By("Wait for VM images to get listed under namespace and create VM")
+		// err = pollWaitForVMImageToSync(ctx, namespace, vmImageName, poll, supervisorClusterOperationsTimeout)
+		// gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		ginkgo.By("Creating VM bootstrap data")
+		secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+
+		vols := []vmopv1.VirtualMachineVolume{}
+
+		vols = append(vols, vmopv1.VirtualMachineVolume{
+			Name: pvc.Name,
+			PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{
+				PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{ClaimName: pvc.Name},
+			},
+		})
+
+		ginkgo.By("Create vm service vm")
+		vm1 := vmopv1.VirtualMachine{
+			ObjectMeta: metav1.ObjectMeta{Name: "vm1", Namespace: namespace},
+			Spec: vmopv1.VirtualMachineSpec{
+				PowerState:   vmopv1.VirtualMachinePoweredOn,
+				ImageName:    vmi,
+				ClassName:    vmClass,
+				StorageClass: wffsStoragePolicyName,
+				Volumes:      vols,
+				VmMetadata:   &vmopv1.VirtualMachineMetadata{Transport: cloudInitLabel, SecretName: secretName},
+			},
+		}
+		err = vmopC.Create(ctx, &vm1)
+		gomega.Expect(err).To(gomega.HaveOccurred())
+
+		// gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		// vm1 := createVmServiceVmWithPvcs(
+		// 	ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, wffsStoragePolicyName, secretName)
+
+		ginkgo.By("Creating loadbalancing service for ssh with the VM")
+		_ = createService4Vm(ctx, vmopC, namespace, vm1.Name)
+
+	})
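The test name implies reusing a PVC already attached to a VM for a pod, but the body above still mirrors the quota test. A sketch of the check the name suggests - the pod should not reach Running while the disk is exclusively attached to the VM. The pod spec and the expectation here are assumptions, not the suite's existing helpers:

```go
// Try to reuse the VM-attached PVC in a pod; it should not reach Running.
pod := &v1.Pod{
	ObjectMeta: metav1.ObjectMeta{GenerateName: "reuse-pvc-", Namespace: namespace},
	Spec: v1.PodSpec{
		Containers: []v1.Container{{
			Name:         "busybox",
			Image:        "busybox",
			Command:      []string{"sleep", "3600"},
			VolumeMounts: []v1.VolumeMount{{Name: "vol", MountPath: "/mnt/vol"}},
		}},
		Volumes: []v1.Volume{{
			Name: "vol",
			VolumeSource: v1.VolumeSource{
				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvc.Name},
			},
		}},
	},
}
pod, err = client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Consistently(func() v1.PodPhase {
	cur, err := client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
	if err != nil {
		return ""
	}
	return cur.Status.Phase
}, 2*time.Minute, poll).ShouldNot(gomega.Equal(v1.PodRunning))
```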
+	/**
+	   1. Using the WFFC storage class, create a PVC and attach it to a pod
+	   2. Wait for the pod to be running and for the PVC to reach bound state
+	   3. Try to create a VM service VM that reuses the PVC already attached to the pod
+	   4. Verify the behaviour - the volume is exclusively attached to the pod
+	*/
+	ginkgo.It("Reuse-PVC-attachedToPod-to-create-VMserviceVM", ginkgo.Label(p0, block, wcp, vmsvc, vc901), func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		vmopScheme := runtime.NewScheme()
+		gomega.Expect(vmopv4.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		vmopC, err := ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: vmopScheme})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, "3Gi")
+
+		cnsOpScheme := runtime.NewScheme()
+		gomega.Expect(cnsop.AddToScheme(cnsOpScheme)).Should(gomega.Succeed())
+
+		ginkgo.By("Get WFFC storage class")
+		wffcStorageclass, err := client.StorageV1().StorageClasses().Get(ctx, wffsStoragePolicyName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("wffsStoragePolicyName: %s wffcStorageclass: %s", wffsStoragePolicyName, wffcStorageclass)
+
+		ginkgo.By("Create a PVC")
+		pvc, err := createPVC(ctx, client, namespace, nil, "", wffcStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// ginkgo.By("Wait for VM images to get listed under namespace and create VM")
+		// err = pollWaitForVMImageToSync(ctx, namespace, vmImageName, poll, supervisorClusterOperationsTimeout)
+		// gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		ginkgo.By("Creating VM bootstrap data")
+		secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+
+		vols := []vmopv1.VirtualMachineVolume{}
+
+		vols = append(vols, vmopv1.VirtualMachineVolume{
+			Name: pvc.Name,
+			PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{
+				PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{ClaimName: pvc.Name},
+			},
+		})
+
+		ginkgo.By("Create vm service vm")
+		vm1 := vmopv1.VirtualMachine{
+			ObjectMeta: metav1.ObjectMeta{Name: "vm1", Namespace: namespace},
+			Spec: vmopv1.VirtualMachineSpec{
+				PowerState:   vmopv1.VirtualMachinePoweredOn,
+				ImageName:    vmi,
+				ClassName:    vmClass,
+				StorageClass: wffsStoragePolicyName,
+				Volumes:      vols,
+				VmMetadata:   &vmopv1.VirtualMachineMetadata{Transport: cloudInitLabel, SecretName: secretName},
+			},
+		}
+		err = vmopC.Create(ctx, &vm1)
+		gomega.Expect(err).To(gomega.HaveOccurred())
+
+		// gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		// vm1 := createVmServiceVmWithPvcs(
+		// 	ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, wffsStoragePolicyName, secretName)
+
+		ginkgo.By("Creating loadbalancing service for ssh with the VM")
+		_ = createService4Vm(ctx, vmopC, namespace, vm1.Name)
+	})
 })

diff --git a/tests/e2e/vmservice_stretched_svc.go b/tests/e2e/vmservice_stretched_svc.go
new file mode 100644
index 0000000000..c62889695f
--- /dev/null
+++ b/tests/e2e/vmservice_stretched_svc.go
@@ -0,0 +1,784 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"context"
+	"os"
+	"strings"
+
+	snapclient "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1"
+	vmopv2 "github.com/vmware-tanzu/vm-operator/api/v1alpha2"
+	vmopv3 "github.com/vmware-tanzu/vm-operator/api/v1alpha3"
+	vmopv4 "github.com/vmware-tanzu/vm-operator/api/v1alpha4"
+	"github.com/vmware/govmomi/find"
+	"github.com/vmware/govmomi/object"
+	v1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
+	fpv "k8s.io/kubernetes/test/e2e/framework/pv"
+	admissionapi "k8s.io/pod-security-admission/api"
+	ctlrclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+	cnsop "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator"
+)
+
+var _ bool = ginkgo.Describe("[vmsvc] VM-Service-VM-Stretched-SVC", func() {
+	f := framework.NewDefaultFramework("vmsvc")
+	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
+	f.SkipNamespaceCreation = true // tests will create their own namespaces
+	var (
+		client                clientset.Interface
+		namespace             string
+		datastoreURL          string
+		wffsStoragePolicyName string
+		storagePolicyName     string
+		storageProfileId      string
+		vcRestSessionId       string
+		vmi                   string
+		vmClass               string
+		vmopC                 ctlrclient.Client
+		defaultDatastore      *object.Datastore
+		statuscode            int
+		vmImageName           string
+		quota                 map[string]*resource.Quantity
+		isLateBinding         bool
+		//expectedTotalStorage []string
+		expected_pvcQuotaInMbStr string
+		storageclass             *storagev1.StorageClass
+		expected_vmQuotaStr      string
+		snapc                    *snapclient.Clientset
+	)
+
+	ginkgo.BeforeEach(func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		client = f.ClientSet
+		bootstrap()
+
+		var err error
+		storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores)
+		wffsStoragePolicyName = storagePolicyName + lateBinding
+		isLateBinding = true
+
+		//datastoreURL is required to get dsRef ID which is used to get contentLibId
+		datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL)
+		dsRef := getDsMoRefFromURL(ctx, datastoreURL)
+		framework.Logf("dsmoId: %v", dsRef.Value)
+
+		storageProfileId = e2eVSphere.GetSpbmPolicyID(storagePolicyName)
+		framework.Logf("storageProfileId: %s", storageProfileId)
+		vcAddress = e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort
+
+		vcRestSessionId = createVcSession4RestApis(ctx)
+		contentLibId, err := createAndOrGetContentlibId4Url(vcRestSessionId, GetAndExpectStringEnvVar(envContentLibraryUrl),
+			dsRef.Value, &e2eVSphere)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		framework.Logf("Create a WCP namespace for the test")
+		vmClass = os.Getenv(envVMClass)
+		if vmClass == "" {
+			vmClass = vmClassBestEffortSmall
+		}
+
+		// Create SVC namespace and assign storage policy and vmContent Library
+		namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId,
+			[]string{storageProfileId}, getSvcId(vcRestSessionId, &e2eVSphere),
+			nil, vmClass, contentLibId)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		gomega.Expect(statuscode).To(gomega.Equal(status_code_success))
+
+		vmopScheme := runtime.NewScheme()
+		gomega.Expect(vmopv1.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		gomega.Expect(vmopv2.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		gomega.Expect(vmopv3.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		gomega.Expect(vmopv4.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		vmopC, err = ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: vmopScheme})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		cnsOpScheme := runtime.NewScheme()
+		gomega.Expect(cnsop.AddToScheme(cnsOpScheme)).Should(gomega.Succeed())
+
+		vmImageName = GetAndExpectStringEnvVar(envVmsvcVmImageName)
+		framework.Logf("Waiting for virtual machine image list to be available in namespace '%s' for image '%s'",
+			namespace, vmImageName)
+		ginkgo.By("Wait for VM images to get listed under namespace and create VM")
+		err = pollWaitForVMImageToSync(ctx, namespace, vmImageName, poll, supervisorClusterOperationsTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		vmi = waitNGetVmiForImageName(ctx, vmopC, vmImageName)
+		gomega.Expect(vmi).NotTo(gomega.BeEmpty())
+		framework.Logf("vm image: %s", vmi)
+
+		var datacenters []string
+		datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL)
+
+		finder := find.NewFinder(e2eVSphere.Client.Client, false)
+		cfg, err := getConfig()
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		dcList := strings.Split(cfg.Global.Datacenters, ",")
+		for _, dc := range dcList {
+			dcName := strings.TrimSpace(dc)
+			if dcName != "" {
+				datacenters = append(datacenters, dcName)
+			}
+		}
+		for _, dc := range datacenters {
+			defaultDatacenter, err := finder.Datacenter(ctx, dc)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			finder.SetDatacenter(defaultDatacenter)
+			defaultDatastore, err = getDatastoreByURL(ctx, datastoreURL, defaultDatacenter)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.Logf("DefaultDatastore: %s", defaultDatastore)
+		}
+
+		ginkgo.By("Set Storage Quota on namespace")
+		restConfig = getRestConfigClient()
+		setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimit)
+
+		ginkgo.By("Get immediate binding storage class")
+		storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Read QuotaDetails Before creating workload")
+		quota = make(map[string]*resource.Quantity)
+		//PVCQuota Details Before creating workload
+		quota["totalQuotaUsedBefore"], _, quota["pvc_storagePolicyQuotaBefore"], _,
+			quota["pvc_storagePolicyUsageBefore"], _ = getStoragePolicyUsedAndReservedQuotaDetails(ctx, restConfig,
+			storageclass.Name, namespace, pvcUsage, volExtensionName, isLateBinding)
+
+		framework.Logf("quota[totalQuotaUsedBefore] : %s", quota["totalQuotaUsedBefore"])
+
+		//VMQuota Details Before creating workload
+		quota["vm_totalQuotaUsedBefore"], _, quota["vm_storagePolicyQuotaBefore"], _,
+			quota["vm_storagePolicyUsageBefore"], _ = getStoragePolicyUsedAndReservedQuotaDetails(ctx, restConfig,
+			storageclass.Name, namespace, vmUsage, vmServiceExtensionName, isLateBinding)
+		framework.Logf("quota[vm_storagePolicyQuotaBefore] : %s", quota["vm_storagePolicyQuotaBefore"])
+
+		restConfig = getRestConfigClient()
+		snapc, err = snapclient.NewForConfig(restConfig)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	})
+
+	ginkgo.AfterEach(func() {
+		// ctx, cancel := context.WithCancel(context.Background())
+		// defer cancel()
+
+		framework.Logf("expected_pvcQuotaInMbStr: %s",
expected_pvcQuotaInMbStr) + framework.Logf("expected_vmQuotaStr: %s", expected_vmQuotaStr) + + //Validate TotalQuota + // _, quotavalidationStatus := validateTotalQuota(ctx, restConfig, storageclass.Name, namespace, + // expectedTotalStorage, quota["totalQuotaUsedBefore"], true) + // gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + + //Validates PVC quota in both StoragePolicyQuota and StoragePolicyUsage CR + // sp_quota_status_pvc, sp_usage_status_pvc := validateQuotaUsageAfterResourceCreation(ctx, restConfig, + // storageclass.Name, namespace, pvcUsage, volExtensionName, []string{expected_pvcQuotaInMbStr}, + // quota["totalQuotaUsedBefore"], quota["pvc_storagePolicyQuotaBefore"], quota["pvc_storagePolicyUsageBefore"], true) + // gomega.Expect(sp_quota_status_pvc && sp_usage_status_pvc).NotTo(gomega.BeFalse()) + + // //Validates VM quota in both StoragePolicyQuota and StoragePolicyUsage CR + // sp_quota_status_vm, sp_usage_status_vm := validateQuotaUsageAfterResourceCreation(ctx, restConfig, + // storageclass.Name, namespace, vmUsage, vmServiceExtensionName, []string{expected_vmQuotaStr}, + // quota["vm_totalQuotaUsedBefore"], quota["vm_storagePolicyQuotaBefore"], + // quota["vm_storagePolicyUsageBefore"], true) + // gomega.Expect(sp_quota_status_vm && sp_usage_status_vm).NotTo(gomega.BeFalse()) + + // delTestWcpNs(vcRestSessionId, namespace) + // gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + + dumpSvcNsEventsOnTestFailure(client, namespace) + }) + + /** + 1. Create a PVC using WFFC / Late binding storage class + 2. PVC's with WFFC will be in pending state + 3. Use the above PVC and create a VmService VM + 4. Once the VM is on Verify that the PVC should go to bound state + 5. TODO : verify PVC with the below annotations cns.vmware.com/selected-node-is-zone is set to true + volume.kubernetes.io/selected-node is set to zone - This should have the zone name where the VM gets provisioned + 6. Verify CNS metadata for PVC + 7. Verify PVC's attached to VM + 8. 
+	ginkgo.It("vmserviceVM-WFFC-stretched-SVC-zonal", ginkgo.Label(p0, block, wcp, vmsvc, vc901), func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		vmopScheme := runtime.NewScheme()
+		gomega.Expect(vmopv4.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		vmopC, err := ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: vmopScheme})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		cnsOpScheme := runtime.NewScheme()
+		gomega.Expect(cnsop.AddToScheme(cnsOpScheme)).Should(gomega.Succeed())
+
+		ginkgo.By("Get WFFC storage class")
+		wffcStorageclass, err := client.StorageV1().StorageClasses().Get(ctx, wffsStoragePolicyName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("wffsStoragePolicyName: %s wffcStorageclass: %v", wffsStoragePolicyName, wffcStorageclass)
+
+		ginkgo.By("Create a PVC")
+		pvc, err := createPVC(ctx, client, namespace, nil, "", wffcStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Creating VM bootstrap data")
+		secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+
+		ginkgo.By("Create vm service vm")
+		vm1 := createVmServiceVmV4(ctx, vmopC, CreateVmOptionsV4{
+			Namespace:          namespace,
+			VmClass:            vmClass,
+			VMI:                vmi,
+			StorageClassName:   wffsStoragePolicyName,
+			PVCs:               []*v1.PersistentVolumeClaim{pvc},
+			SecretName:         secretName,
+			WaitForReadyStatus: true,
+		})
+
+		ginkgo.By("Wait for VMs to come up and get an IP")
+		time.Sleep(200 * time.Second)
+		vmIp1, err := waitNgetVmsvcVmIpV4(ctx, vmopC, namespace, vm1.Name)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("vmIp1 : %s", vmIp1)
+
+		ginkgo.By("Waiting PVC to be in bound state")
+		pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		pv := pvs[0]
+		volHandle := pv.Spec.CSI.VolumeHandle
+		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
+
+		ginkgo.By("get VM storage")
+		vmQuotaUsed := getVMStorageData(ctx, vmopC, namespace, vm1.Name)
+		framework.Logf("vmQuotaUsed : %s", vmQuotaUsed)
+
+		isPrivateNetwork := GetBoolEnvVarOrDefault("IS_PRIVATE_NETWORK", false)
+		if !isPrivateNetwork {
+			ginkgo.By("Verify PVCs are accessible to the VM")
+			ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity")
+			vm1, err = getVmsvcVmV4(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			for i, vol := range vm1.Status.Volumes {
+				volFolder := formatNVerifyPvcIsAccessibleV4(vol.DiskUUID, i+1, vmIp1)
+				framework.Logf("volFolder: %s", volFolder)
+				//verifyDataIntegrityOnVmDisk(vmIp1, volFolder)
+			}
+		}
+
+		// Get zone details of the VM
+		zone, err := getVMzoneV4(ctx, vm1)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
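+
+		// With late binding, the CSI driver is expected to set
+		// cns.vmware.com/selected-node-is-zone to "true" and to record the VM's
+		// zone in volume.kubernetes.io/selected-node, so the PVC annotations are
+		// compared against the zone label read from the VM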
+		// Verify annotations and zonal details on the PVC
+		pvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = validateAnnotationOnPVC(pvc, selectedNodeIsZone, "true")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = validateAnnotationOnPVC(pvc, selectedNodeAnnotationOnPVC, zone)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// ginkgo.By("Validate StorageQuotaDetails after creating the workloads")
+		// expected_pvcQuotaInMbStr = convertInt64ToStrMbFormat(diskSizeInMb)
+		// expected_vmQuotaStr = vmQuotaUsed
+		// expectedTotalStorage = append(expectedTotalStorage, expected_pvcQuotaInMbStr, expected_vmQuotaStr)
+	})
+
+	/**
+	1. Create multiple PVCs: 2 PVCs with WFFC and 2 PVCs with immediate binding
+	2. Use the above PVCs and create a VM service VM
+	3. Wait for the VM to get powered on
+	4. Once the VM is on, verify that the PVCs with WFFC go to Bound state.
+	   Describe the PVCs and verify the below annotations:
+	   cns.vmware.com/selected-node-is-zone is set to true
+	   volume.kubernetes.io/selected-node is set to the zone name where the VM gets provisioned
+	5. PVCs with immediate binding will not have any annotation
+	6. Verify CNS metadata for the PVCs
+	7. Verify the storagePolicyQuotaUsage and VMstorageQuotaUsage CRs on the late binding
+	   storage class; quota should have the appropriate consumption details
+	8. Clean up all the above data
+	*/
+	ginkgo.It("vmservice-crosszonal-stretched-SVC", ginkgo.Label(p0, block, wcp, vmsvc, vc901), func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		ginkgo.By("Get WFFC storage class")
+		wffcStorageclass, err := client.StorageV1().StorageClasses().Get(ctx, wffsStoragePolicyName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("wffsStoragePolicyName: %s wffcStorageclass: %v", wffsStoragePolicyName, wffcStorageclass)
+
+		ginkgo.By("Get immediate binding storage class")
+		immediateBindingStorageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("storagePolicyName: %s immediateBindingStorageclass: %v", storagePolicyName, immediateBindingStorageclass)
+
+		ginkgo.By("Create two PVCs with the WFFC storage class")
+		pvc1, err := createPVC(ctx, client, namespace, nil, "", wffcStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		pvc2, err := createPVC(ctx, client, namespace, nil, "", wffcStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Create two PVCs with the immediate binding storage class")
+		pvc3, err := createPVC(ctx, client, namespace, nil, "", immediateBindingStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		pvc4, err := createPVC(ctx, client, namespace, nil, "", immediateBindingStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
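+
+		// pvc1 and pvc2 (WFFC) remain Pending until the VM consumes them; pvc3 and
+		// pvc4 (immediate binding) bind right away and are expected to carry no
+		// selected-node annotations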
+
+		ginkgo.By("Wait for VM images to get listed under namespace")
+		err = pollWaitForVMImageToSync(ctx, namespace, vmImageName, poll, supervisorClusterOperationsTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Creating VM bootstrap data")
+		secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+
+		ginkgo.By("Create vm service vm")
+		vm1 := createVmServiceVmWithPvcs(
+			ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc1, pvc2, pvc3, pvc4}, vmi,
+			wffsStoragePolicyName, secretName)
+
+		ginkgo.By("Creating loadbalancing service for ssh with the VM")
+		_ = createService4Vm(ctx, vmopC, namespace, vm1.Name)
+
+		ginkgo.By("Wait for VMs to come up and get an IP")
+		vmIp1, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm1.Name)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Waiting PVCs to be in bound state")
+		_, err = fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1, pvc2, pvc3, pvc4}, pollTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("get VM storage")
+		vmQuotaUsed := getVMStorageData(ctx, vmopC, namespace, vm1.Name)
+		framework.Logf("vmQuotaUsed : %s", vmQuotaUsed)
+
+		ginkgo.By("Verify PVCs are accessible to the VM")
+		ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity")
+		instanceKey := ctlrclient.ObjectKey{Name: vm1.Name, Namespace: namespace}
+		vm1 = &vmopv1.VirtualMachine{}
+		err = vmopC.Get(ctx, instanceKey, vm1) // refresh vm info
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		for i, vol := range vm1.Status.Volumes {
+			volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp1)
+			verifyDataIntegrityOnVmDisk(vmIp1, volFolder)
+		}
+
+		// Get zone details of the VM
+		zone, err := getVMzonev1(ctx, vm1)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// Verify annotations and zonal details on the late-bound PVCs
+		pvc1, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc1.Name, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = validateAnnotationOnPVC(pvc1, selectedNodeIsZone, "true")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		pvc2, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc2.Name, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = validateAnnotationOnPVC(pvc2, selectedNodeIsZone, "true")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		err = validateAnnotationOnPVC(pvc1, selectedNodeAnnotationOnPVC, zone)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		err = validateAnnotationOnPVC(pvc2, selectedNodeAnnotationOnPVC, zone)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// pvc3 uses immediate binding, so the selected-node annotation must be absent
+		pvc3, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc3.Name, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = validateAnnotationOnPVC(pvc3, selectedNodeAnnotationOnPVC, zone)
+		gomega.Expect(err).To(gomega.HaveOccurred())
+
+		// ginkgo.By("Validate StorageQuotaDetails after creating the workloads")
+		// expected_pvcQuotaInMbStr = convertInt64ToStrMbFormat(diskSizeInMb)
+		// expected_vmQuotaStr = vmQuotaUsed
+		// expectedTotalStorage = append(expectedTotalStorage, expected_pvcQuotaInMbStr, expected_vmQuotaStr)
+	})
+
+	/**
+	1. Create PVC using WFFC storage class - PVC will be in Pending state
+	2. Create a VM service VM using the above PVC
+	3. Wait for the VM to power on
+	4. Wait for the PVC to reach Bound state; the PVC should have the below annotations:
+	   cns.vmware.com/selected-node-is-zone is set to true
+	   volume.kubernetes.io/selected-node is set to the zone name where the VM gets provisioned
+	5. Once the VM is up, verify that the volume is accessible inside the VM
+	6. Create a volume snapshot for the PVC created in step 1
+	7. Wait for the snapshot's ready state to become true
+	8. Verify CNS metadata for the volume and the snapshot
+	9. Create a new PVC2 from the snapshot created in step 6 using the WFFC policy on the same
+	   datastore; the PVC should not reach Bound state until it is attached to a VM
+	10. Attach the above created PVC2 to VM1
+	11. Verify CNS metadata for PVC2
+	12. Verify PVC2 also has the annotations mentioned in step 4
+	13. Once the VM is up, verify that the volume is accessible inside the VM
+	14. Verify reading/writing data in the volume
+	15. Clean up all the above data
+	*/
+	ginkgo.It("PVC-Policy-VmPolicy-are-notsame", ginkgo.Label(p0, block, wcp, vmsvc, vc901), func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		ginkgo.By("Get WFFC storage class")
+		wffcStorageclass, err := client.StorageV1().StorageClasses().Get(ctx, wffsStoragePolicyName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("wffsStoragePolicyName: %s wffcStorageclass: %v", wffsStoragePolicyName, wffcStorageclass)
+
+		ginkgo.By("Create a PVC")
+		pvc1, err := createPVC(ctx, client, namespace, nil, "", wffcStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Wait for VM images to get listed under namespace")
+		err = pollWaitForVMImageToSync(ctx, namespace, vmImageName, poll, supervisorClusterOperationsTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Creating VM bootstrap data")
+		secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+
+		ginkgo.By("Create vm service vm")
+		vm1 := createVmServiceVmV4(ctx, vmopC, CreateVmOptionsV4{
+			Namespace:          namespace,
+			VmClass:            vmClass,
+			VMI:                vmi,
+			StorageClassName:   wffsStoragePolicyName,
+			PVCs:               []*v1.PersistentVolumeClaim{pvc1},
+			SecretName:         secretName,
+			WaitForReadyStatus: true,
+		})
+
+		ginkgo.By("Wait for VMs to come up and get an IP")
+		vmIp1, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm1.Name)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("vmIp1 : %s", vmIp1)
+
+		ginkgo.By("Waiting PVC to be in bound state")
+		pv, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1}, pollTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		volHandle := pv[0].Spec.CSI.VolumeHandle
+
+		ginkgo.By("get VM storage")
+		vmQuotaUsed := getVMStorageData(ctx, vmopC, namespace, vm1.Name)
+		framework.Logf("vmQuotaUsed : %s", vmQuotaUsed)
+
+		ginkgo.By("Verify PVCs are accessible to the VM")
+		vm1, err = getVmsvcVM4(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Create volume snapshot class")
+		volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Create a dynamic volume snapshot")
+		volumeSnapshot, _, _,
+			_, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
+			pvc1, volHandle, diskSize, false)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		//snapshotSize := getAggregatedSnapshotCapacityInMb(e2eVSphere, volHandle)
+		//snapshotSizeStr := convertInt64ToStrMbFormat(snapshotSize)
+
+		ginkgo.By("Restore a PVC from the dynamic volume snapshot created above using the WFFC storageclass")
+		pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, wffcStorageclass, nil,
+			v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup)
+		pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Try to attach pvc2 to vm1")
+		vm1, err = getVmsvcVM4(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
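+
+		// The restored PVC also uses the WFFC storage class, so it stays Pending
+		// until it is attached to a VM; attaching it to vm1 below is what triggers
+		// binding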
+		vm1.Spec.Volumes = append(vm1.Spec.Volumes,
+			vmopv4.VirtualMachineVolume{
+				Name: pvclaim2.Name,
+				VirtualMachineVolumeSource: vmopv4.VirtualMachineVolumeSource{
+					PersistentVolumeClaim: &vmopv4.PersistentVolumeClaimVolumeSource{
+						PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim2.Name},
+					},
+				},
+			},
+		)
+		err = vmopC.Update(ctx, vm1)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Waiting PVC to be in bound state")
+		pv, err = fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, pollTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// Get zone details of the VM
+		zone, err := getVMzone(ctx, vm1)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// Verify annotations and zonal details on the restored PVC
+		pvclaim2, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvclaim2.Name, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = validateAnnotationOnPVC(pvclaim2, selectedNodeIsZone, "true")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		err = validateAnnotationOnPVC(pvclaim2, selectedNodeAnnotationOnPVC, zone)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// ginkgo.By("Validate StorageQuotaDetails after creating the workloads")
+		// expected_pvcQuotaInMbStr = convertInt64ToStrMbFormat(diskSizeInMb)
+		// expected_vmQuotaStr = vmQuotaUsed
+		// expectedTotalStorage = append(expectedTotalStorage, expected_pvcQuotaInMbStr, expected_vmQuotaStr)
+	})
+
+	/**
+	1. Create PVC using WFFC storage class - PVC will be in Pending state
+	2. Create a VM service VM using the above PVC
+	3. Wait for the VM to power on
+	4. Wait for the PVC to reach Bound state; the PVC should have the below annotations:
+	   cns.vmware.com/selected-node-is-zone is set to true
+	   volume.kubernetes.io/selected-node is set to the zone name where the VM gets provisioned.
+	   Once the VM is up, verify that the volume is accessible inside the VM
+	5. Write some IO to the CSI volumes, read it back and verify the data integrity
+	6. Create a volume snapshot for the PVC created in step 1
+	7. Wait for the snapshot's ready state to become true
+	8. Verify CNS metadata for the volume and the snapshot
+	9. Create a new PVC2 from the snapshot created in step 6 using a different policy on the
+	   same datastore with immediate binding
+	10. Create VM2 from the PVC2 created in step 9
+	11. Verify CNS metadata for PVC2
+	12. Verify PVC2 also has the annotations mentioned in step 4
+	13. Once the VM is up, verify that the volume is accessible inside the VM
+	14. Verify reading/writing data in the volume
+	15. Clean up all the above data
+	*/
+	ginkgo.It("stop-wcp-duringVMcreation", ginkgo.Label(p0, block, wcp, vmsvc, vc901), func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		ginkgo.By("Get WFFC storage class")
+		wffcStorageclass, err := client.StorageV1().StorageClasses().Get(ctx, wffsStoragePolicyName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("wffsStoragePolicyName: %s wffcStorageclass: %v", wffsStoragePolicyName, wffcStorageclass)
+
+		ginkgo.By("Create a PVC")
+		pvc1, err := createPVC(ctx, client, namespace, nil, "", wffcStorageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Wait for VM images to get listed under namespace")
+		err = pollWaitForVMImageToSync(ctx, namespace, vmImageName, poll, supervisorClusterOperationsTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Creating VM bootstrap data")
+		secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+
+		ginkgo.By("Create vm service vm")
+		vm1 := createVmServiceVmWithPvcs(
+			ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc1}, vmi, wffsStoragePolicyName, secretName)
+
+		ginkgo.By("Creating loadbalancing service for ssh with the VM")
+		_ = createService4Vm(ctx, vmopC, namespace, vm1.Name)
+
+		ginkgo.By("Wait for VMs to come up and get an IP")
+		vmIp1, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm1.Name)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.Logf("vmIp1 : %s", vmIp1)
+
+		ginkgo.By("Waiting PVC to be in bound state")
+		pv, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc1}, pollTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		volHandle := pv[0].Spec.CSI.VolumeHandle
+
+		ginkgo.By("get VM storage")
+		vmQuotaUsed := getVMStorageData(ctx, vmopC, namespace, vm1.Name)
+		framework.Logf("vmQuotaUsed : %s", vmQuotaUsed)
+
+		ginkgo.By("Verify PVCs are accessible to the VM")
+		vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Create volume snapshot class")
+		volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Create a dynamic volume snapshot")
+		volumeSnapshot, _, _,
+			_, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
+			pvc1, volHandle, diskSize, false)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		//snapshotSize := getAggregatedSnapshotCapacityInMb(e2eVSphere, volHandle)
+		//snapshotSizeStr := convertInt64ToStrMbFormat(snapshotSize)
+
+		ginkgo.By("Restore a PVC from the dynamic volume snapshot using the immediate binding storageclass")
+		pvclaim2, _, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass,
+			volumeSnapshot, diskSize, false)
+
+		ginkgo.By("Try to attach pvc2 to vm1")
+		vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		vm1.Spec.Volumes = append(vm1.Spec.Volumes, vmopv1.VirtualMachineVolume{Name: pvclaim2.Name,
+			PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{
+				PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim2.Name},
+			}})
+		err = vmopC.Update(ctx, vm1)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
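+
+		// pvclaim2 was restored with the immediate binding storage class, so it is
+		// expected to carry no selected-node-is-zone annotation; the check below
+		// asserts that validateAnnotationOnPVC returns an error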
+
+		// ginkgo.By("Wait and verify PVCs are attached to the VM")
+		// match := verifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, vm1, []*v1.PersistentVolumeClaim{pvc1})
+		// gomega.Expect(match).To(gomega.BeTrue())
+		// match = verifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, vm1, []*v1.PersistentVolumeClaim{pvclaim2})
+		// gomega.Expect(match).To(gomega.BeTrue())
+
+		// Verify annotations and zonal details on the restored PVC
+		pvclaim2, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvclaim2.Name, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = validateAnnotationOnPVC(pvclaim2, selectedNodeIsZone, "true")
+		gomega.Expect(err).To(gomega.HaveOccurred())
+
+		// ginkgo.By("Validate StorageQuotaDetails after creating the workloads")
+		// expected_pvcQuotaInMbStr = convertInt64ToStrMbFormat(diskSizeInMb)
+		// expected_vmQuotaStr = vmQuotaUsed
+		// expectedTotalStorage = append(expectedTotalStorage, expected_pvcQuotaInMbStr, expected_vmQuotaStr)
+	})
+
+	/**
+	1. Create a PVC using a WFFC / late binding storage class
+	2. PVCs with WFFC will be in Pending state
+	3. Use the above PVC and create a VM service VM
+	4. Once the VM is on, verify that the PVC goes to Bound state
+	5. Verify the below annotations on the PVC:
+	   cns.vmware.com/selected-node-is-zone is set to true
+	   volume.kubernetes.io/selected-node is set to the zone name where the VM gets provisioned
+	6. Verify CNS metadata for the PVC
+	7. Verify the PVC is attached to the VM
+	8. Validate TotalQuota, StoragePolicyQuota and storageQuotaUsage of VM service VMs and PVCs
+	*/
+	ginkgo.It("Bring-down-CSI-replica-to-0-during-VM-creation", ginkgo.Label(p0, block, wcp, vmsvc, vc901), func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		vmopScheme := runtime.NewScheme()
+		gomega.Expect(vmopv4.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		vmopC, err := ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: vmopScheme})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		storagePolicyNameForNFSDatastore := GetAndExpectStringEnvVar(envStoragePolicyNameForNfsDatastores)
+		storagePolicyNameForSharedVMFSDatastore := GetAndExpectStringEnvVar(envStoragePolicyNameForVmfsDatastores)
+
+		if storagePolicyNameForNFSDatastore == "" || storagePolicyNameForSharedVMFSDatastore == "" {
+			ginkgo.Skip("Skipping the test because the NFS and shared VMFS datastore storage policies are not set")
+		} else {
+			cnsOpScheme := runtime.NewScheme()
+			gomega.Expect(cnsop.AddToScheme(cnsOpScheme)).Should(gomega.Succeed())
+
+			ginkgo.By("Get WFFC storage class")
+			wffcStorageclass, err := client.StorageV1().StorageClasses().Get(ctx, wffsStoragePolicyName, metav1.GetOptions{})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.Logf("wffsStoragePolicyName: %s wffcStorageclass: %v", wffsStoragePolicyName, wffcStorageclass)
+
+			// Late binding storage classes are named "<policy>-latebinding" on the
+			// supervisor, so the NFS and shared VMFS variants are looked up by
+			// appending the lateBinding suffix
+			ginkgo.By("Get WFFC storage class of NFS datastore")
+			nfsPolicyLateBinding := storagePolicyNameForNFSDatastore + lateBinding
+			nfsPolicyLateBindingStorageClass, err := client.StorageV1().StorageClasses().Get(ctx,
+				nfsPolicyLateBinding, metav1.GetOptions{})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.Logf("nfsPolicyLateBinding: %s nfsPolicyLateBindingStorageClass: %v",
+				nfsPolicyLateBinding, nfsPolicyLateBindingStorageClass)
+
+			ginkgo.By("Get WFFC storage class of shared VMFS datastore")
+			sharedVMFSPolicyLateBinding := storagePolicyNameForSharedVMFSDatastore + lateBinding
+			sharedVMFSPolicyLateBindingStorageClass, err := client.StorageV1().StorageClasses().Get(ctx,
+				sharedVMFSPolicyLateBinding, metav1.GetOptions{})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.Logf("sharedVMFSPolicyLateBinding: %s sharedVMFSPolicyLateBindingStorageClass: %v",
+				sharedVMFSPolicyLateBinding, sharedVMFSPolicyLateBindingStorageClass)
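+
+			// One late-bound PVC per datastore type (default WFFC policy, NFS and
+			// shared VMFS variants); all three stay Pending until the VM consumes them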
+			ginkgo.By("Create a PVC on each late binding storage class")
+			pvc, err := createPVC(ctx, client, namespace, nil, "", wffcStorageclass, "")
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+			pvc_NFS, err := createPVC(ctx, client, namespace, nil, "", nfsPolicyLateBindingStorageClass, "")
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+			pvc_sharedVMFS, err := createPVC(ctx, client, namespace, nil, "", sharedVMFSPolicyLateBindingStorageClass, "")
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+			ginkgo.By("Creating VM bootstrap data")
+			secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+
+			ginkgo.By("Create vm service vm")
+			vm1 := createVmServiceVmV4(ctx, vmopC, CreateVmOptionsV4{
+				Namespace:          namespace,
+				VmClass:            vmClass,
+				VMI:                vmi,
+				StorageClassName:   wffsStoragePolicyName,
+				PVCs:               []*v1.PersistentVolumeClaim{pvc, pvc_NFS, pvc_sharedVMFS},
+				SecretName:         secretName,
+				WaitForReadyStatus: true,
+			})
+
+			ginkgo.By("Wait for VMs to come up and get an IP")
+			vmIp1, err := waitNgetVmsvcVmIpV4(ctx, vmopC, namespace, vm1.Name)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.Logf("vmIp1 : %s", vmIp1)
+
+			ginkgo.By("Waiting PVCs to be in bound state")
+			pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client,
+				[]*v1.PersistentVolumeClaim{pvc, pvc_NFS, pvc_sharedVMFS}, pollTimeout)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			pv := pvs[0]
+			volHandle := pv.Spec.CSI.VolumeHandle
+			gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
+		}
+
+		// ginkgo.By("Validate StorageQuotaDetails after creating the workloads")
+		// expected_pvcQuotaInMbStr = convertInt64ToStrMbFormat(diskSizeInMb)
+		// expected_vmQuotaStr = vmQuotaUsed
+		// expectedTotalStorage = append(expectedTotalStorage, expected_pvcQuotaInMbStr, expected_vmQuotaStr)
+	})
+
+})
diff --git a/tests/e2e/vmservice_utils.go b/tests/e2e/vmservice_utils.go
index 51d25f4ae4..255de68282 100644
--- a/tests/e2e/vmservice_utils.go
+++ b/tests/e2e/vmservice_utils.go
@@ -40,6 +40,8 @@ import (
 	vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1"
 	vmopv3 "github.com/vmware-tanzu/vm-operator/api/v1alpha3"
 	vmopv3common "github.com/vmware-tanzu/vm-operator/api/v1alpha3/common"
+	vmopv4 "github.com/vmware-tanzu/vm-operator/api/v1alpha4"
+	vmopv4common "github.com/vmware-tanzu/vm-operator/api/v1alpha4/common"
 	"golang.org/x/crypto/ssh"
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
@@ -365,6 +367,7 @@ func waitNGetVmiForImageName(ctx context.Context, c ctlrclient.Client, imageName
 		func(ctx context.Context) (bool, error) {
+			defer ginkgo.GinkgoRecover()
 			vmImagesList := &vmopv1.VirtualMachineImageList{}
 			err := c.List(ctx, vmImagesList)
 			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			for _, instance := range vmImagesList.Items {
 				if instance.Status.ImageName == imageName {
@@ -415,6 +418,7 @@ func createVmServiceVmV3(ctx context.Context, c ctlrclient.Client, opts CreateVm
 			},
 		})
 	}
+
 	vm := &vmopv3.VirtualMachine{
 		ObjectMeta: metav1.ObjectMeta{Name: vmName, Namespace: opts.Namespace},
 		Spec: vmopv3.VirtualMachineSpec{
@@ -1447,11 +1451,11 @@ func verifyVmServiceVMNodeLocation(vm *vmopv1.VirtualMachine, nodeList *v1.NodeL
 // getVmsvcVmDetailedOutput gets the detailed status output of the vm
 func getVmsvcVmDetailedOutput(ctx context.Context, c ctlrclient.Client, namespace string, name string) string {
-	vm, _ := getVmsvcVM(ctx, c, namespace, name)
+	vm, _ := getVmsvcVM4(ctx, c, namespace, name)
 	// Command to write data and sync it
 	cmd := []string{"get", "vm", vm.Name, "-o", "yaml"}
 	output := e2ekubectl.RunKubectlOrDie(namespace, cmd...)
-	framework.Logf("StatusCode of addContentLibToNamespace : %s", output)
+	framework.Logf("Describe vm : %s", output)
 	return output
 }
@@ -1510,3 +1514,349 @@ func pollWaitForVMImageToSync(ctx context.Context, namespace string, expectedIma
 	return fmt.Errorf("failed to load vm-image timed out after %v", timeout)
 }
+
+// getVMzone returns the zone (topology.kubernetes.io/zone label) on which the
+// given v1alpha4 VM is scheduled
+func getVMzone(ctx context.Context, vm *vmopv4.VirtualMachine) (string, error) {
+	vmlabel := vm.GetLabels()
+	val, labelOk := vmlabel[vmZoneLabel]
+	framework.Logf("val %v, labelOk: %v", val, labelOk)
+	if !labelOk {
+		return val, fmt.Errorf("zone is not present on vm: %s", vm.Name)
+	}
+	// Log all VM labels for debugging
+	for k, v := range vmlabel {
+		framework.Logf("%s = %s", k, v)
+	}
+	return val, nil
+}
+
+// CreateVmOptionsV4 holds the options used to create a VM via the v1alpha4
+// VM service API
+type CreateVmOptionsV4 struct {
+	Namespace          string
+	VmClass            string
+	VMI                string
+	StorageClassName   string
+	PVCs               []*v1.PersistentVolumeClaim
+	SecretName         string
+	WaitForReadyStatus bool
+}
+
+// createVmServiceVmV4 creates a VM via the v1alpha4 VM service API with the
+// given options
+func createVmServiceVmV4(ctx context.Context, c ctlrclient.Client, opts CreateVmOptionsV4) *vmopv4.VirtualMachine {
+	r := rand.New(rand.NewSource(time.Now().UnixNano()))
+	vols := []vmopv4.VirtualMachineVolume{}
+	vmName := fmt.Sprintf("csi-test-vm-%d", r.Intn(10000))
+
+	if opts.VmClass == "" {
+		opts.VmClass = vmClassBestEffortSmall
+	}
+
+	for _, pvc := range opts.PVCs {
+		vols = append(vols, vmopv4.VirtualMachineVolume{
+			Name: pvc.Name,
+			VirtualMachineVolumeSource: vmopv4.VirtualMachineVolumeSource{
+				PersistentVolumeClaim: &vmopv4.PersistentVolumeClaimVolumeSource{
+					PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{ClaimName: pvc.Name},
+				},
+			},
+		})
+	}
+
+	vm := &vmopv4.VirtualMachine{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      vmName,
+			Namespace: opts.Namespace,
+			// Label the VM so the loadbalancer service selector below matches it
+			Labels: map[string]string{"app": vmName},
+		},
+		Spec: vmopv4.VirtualMachineSpec{
+			PowerState:   vmopv4.VirtualMachinePowerStateOn,
+			ImageName:    opts.VMI,
+			ClassName:    opts.VmClass,
+			StorageClass: opts.StorageClassName,
+			Volumes:      vols,
+		},
+	}
+
+	if opts.SecretName != "" {
+		vm.Spec.Bootstrap = &vmopv4.VirtualMachineBootstrapSpec{
+			CloudInit: &vmopv4.VirtualMachineBootstrapCloudInitSpec{
+				RawCloudConfig: &vmopv4common.SecretKeySelector{
+					Name: opts.SecretName,
+					Key:  opts.SecretName,
+				},
+			},
+		}
+	}
+
+	err := c.Create(ctx, vm)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
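+
+	// Wait for the VM object to appear and, when WaitForReadyStatus is set, for
+	// its VirtualMachineReconcileReady condition to turn true before returning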
+	vmKey := ctlrclient.ObjectKey{Name: vmName, Namespace: opts.Namespace}
+	err = wait.PollUntilContextTimeout(ctx, poll*5, pollTimeout, true,
+		func(ctx context.Context) (bool, error) {
+			err := c.Get(ctx, vmKey, vm)
+			if err != nil {
+				if !apierrors.IsNotFound(err) {
+					return false, err
+				}
+				return false, nil
+			}
+
+			if opts.WaitForReadyStatus &&
+				!slices.ContainsFunc(vm.GetConditions(), func(c metav1.Condition) bool {
+					return c.Type == vmopv4.VirtualMachineReconcileReady && c.Status == metav1.ConditionTrue
+				}) {
+				return false, nil
+			}
+
+			return true, nil
+		})
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.Logf("Found VM %s in namespace %s", vmName, opts.Namespace)
+
+	r = rand.New(rand.NewSource(time.Now().UnixNano()))
+	svcName := fmt.Sprintf("%s-svc-%d", vmName, r.Intn(10000))
+	framework.Logf("Creating loadbalancer service: %s for vm: %s", svcName, vmName)
+	vmService := vmopv4.VirtualMachineService{
+		ObjectMeta: metav1.ObjectMeta{Name: svcName, Namespace: opts.Namespace},
+		Spec: vmopv4.VirtualMachineServiceSpec{
+			Ports:    []vmopv4.VirtualMachineServicePort{{Name: "ssh", Port: 22, Protocol: "TCP", TargetPort: 22}},
+			Type:     "LoadBalancer",
+			Selector: map[string]string{"app": vmName},
+		},
+	}
+	err = c.Create(ctx, &vmService)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	vm, err = getVmsvcVmV4(ctx, c, opts.Namespace, vmName)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	return vm
+}
+
+// getVmsvcVmV4 fetches the v1alpha4 VM from the specified namespace
+func getVmsvcVmV4(
+	ctx context.Context, c ctlrclient.Client, namespace string, vmName string) (*vmopv4.VirtualMachine, error) {
+	instanceKey := ctlrclient.ObjectKey{Name: vmName, Namespace: namespace}
+	vm := &vmopv4.VirtualMachine{}
+	err := c.Get(ctx, instanceKey, vm)
+	return vm, err
+}
+
+// getVMzoneV4 is an alias for getVMzone, kept for existing call sites
+func getVMzoneV4(ctx context.Context, vm *vmopv4.VirtualMachine) (string, error) {
+	return getVMzone(ctx, vm)
+}
+
+// waitNgetVmLbSvcV4 waits for and fetches the virtualmachineservice
+// (loadbalancer) for the given vm in the specified namespace
+func waitNgetVmLbSvcV4(
+	ctx context.Context, c ctlrclient.Client, namespace string, name string) *vmopv4.VirtualMachineService {
+	vmLbSvc := &vmopv4.VirtualMachineService{}
+	var err error
+	err = wait.PollUntilContextTimeout(ctx, poll*5, pollTimeout, true,
+		func(ctx context.Context) (bool, error) {
+			vmLbSvc, err = getVmsvcVmLbSvcV4(ctx, c, namespace, name)
+			if err != nil {
+				if !apierrors.IsNotFound(err) {
+					return false, err
+				}
+				return false, nil
+			}
+			return true, nil
+		})
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	return vmLbSvc
+}
+
+// getVmsvcVmLbSvcV4 fetches the virtualmachineservice (loadbalancer) for the
+// given vm in the specified namespace
+func getVmsvcVmLbSvcV4(ctx context.Context, c ctlrclient.Client, namespace string, name string) (
+	*vmopv4.VirtualMachineService, error) {
+	instanceKey := ctlrclient.ObjectKey{Name: name, Namespace: namespace}
+	svc := &vmopv4.VirtualMachineService{}
+	err := c.Get(ctx, instanceKey, svc)
+	return svc, err
+}
+
+// waitNgetVmsvcVmIpV4 waits for and fetches the primary IP of the named VM in
+// the given namespace
+func waitNgetVmsvcVmIpV4(ctx context.Context, c ctlrclient.Client, namespace string, name string) (string, error) {
+	ip := ""
+	err := wait.PollUntilContextTimeout(ctx, poll*10, pollTimeout*4, true,
+		func(ctx context.Context) (bool, error) {
+			vm, err := getVmsvcVmV4(ctx, c, namespace, name)
+			if err != nil {
+				if !apierrors.IsNotFound(err) {
+					return false, err
+				}
+				return false, nil
+			}
+			if vm.Status.Network.PrimaryIP4 == "" {
+				return false, nil
+			}
+			ip = vm.Status.Network.PrimaryIP4
+			return true, nil
+		})
+	framework.Logf("Found IP '%s' for VM '%s'", ip, name)
+	return ip, err
+}
+
+// getVmsvcVM4 is an alias for getVmsvcVmV4, kept for existing call sites
+func getVmsvcVM4(
+	ctx context.Context, c ctlrclient.Client, namespace string, vmName string) (*vmopv4.VirtualMachine, error) {
+	return getVmsvcVmV4(ctx, c, namespace, vmName)
+}
+
+// waitNGetVmiForImageName4 waits for and fetches the VM image CR for the given
+// image name in the supervisor
+func waitNGetVmiForImageName4(ctx context.Context, c ctlrclient.Client, imageName string) string {
+	vmi := ""
+	err := wait.PollUntilContextTimeout(ctx, poll*5, pollTimeout, true,
+		func(ctx context.Context) (bool, error) {
+			defer ginkgo.GinkgoRecover()
+			vmImagesList := &vmopv4.VirtualMachineImageList{}
+			err := c.List(ctx, vmImagesList)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			for _, instance := range vmImagesList.Items {
+				if instance.Spec.ProviderRef.Name == imageName {
+					framework.Logf("Found vmi %v for image name %v", instance.Name, imageName)
+					vmi = instance.Name
+					return true, nil
+				}
+			}
+			return false, nil
+		})
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	return vmi
+}
+
+// formatNVerifyPvcIsAccessibleV4 formats the pvc inside the vm, creates a file
+// system on it and returns a folder with 777 permissions under the mount point
+func formatNVerifyPvcIsAccessibleV4(diskUuid string, mountIndex int, vmIp string) string {
+	// Construct the disk path from the UUID; a vSphere disk UUID such as
+	// "6000C29B-..." maps to the guest device /dev/disk/by-id/wwn-0x6000c29b...
+	// (lowercased, dashes stripped)
+	p := "/dev/disk/by-id/wwn-0x" + strings.ReplaceAll(strings.ToLower(diskUuid), "-", "")
+	framework.Logf("Checking disk path: %s", p)
+
+	// List the available disks
+	results := execSshOnVmThroughGatewayVm(vmIp, []string{
+		"ls -l /dev/disk/by-uuid/",
+	})
+	framework.Logf("Disk list results: %v", results)
+
+	// Check if the desired disk exists
+	diskCheckResults := execSshOnVmThroughGatewayVm(vmIp, []string{
+		"ls -l " + p,
+	})
+
+	// If the disk is not found, try rescanning SCSI devices
+	if strings.Contains(diskCheckResults[0].Stderr, "No such file or directory") {
+		framework.Logf("Disk %s not found. Rescanning SCSI devices.", p)
+		rescanResults := execSshOnVmThroughGatewayVm(vmIp, []string{
+			"echo '- - -' | sudo tee /sys/class/scsi_host/host*/scan",
+			"ls -l /dev/disk/by-uuid/",
+			"ls -l " + p,
+		})
+		framework.Logf("Rescan results: %v", rescanResults)
+
+		// Check again if the disk is available after rescanning
+		diskCheckResults = execSshOnVmThroughGatewayVm(vmIp, []string{
+			"ls -l " + p,
+		})
+	}
+
+	// If the disk is still not found, fail the test
+	if strings.Contains(diskCheckResults[0].Stderr, "No such file or directory") {
+		framework.Failf("Disk %s not found on VM %s after rescanning.", p, vmIp)
+	}
+
+	// Extract the device name from the symlink target, e.g.
+	// "wwn-0x... -> ../../sdb" yields /dev/sdb
+	parts := strings.Split(strings.TrimSpace(diskCheckResults[0].Stdout), "/")
+	if len(parts) < 7 {
+		framework.Failf("Unexpected ls output: %s", diskCheckResults[0].Stdout)
+	}
+	dev := "/dev/" + parts[6]
+	framework.Logf("Device: %s", dev)
+
+	gomega.Expect(dev).ShouldNot(gomega.Equal("/dev/"))
+	framework.Logf("Found device %s for disk with UUID %s", dev, diskUuid)
+
+	partitionDev := dev + "1"
+	framework.Logf("Partition device: %s", partitionDev)
+
+	// Unmount any existing partitions on the device
+	unmountCommands := []string{
+		fmt.Sprintf("sudo umount %s* || true", dev),
+	}
+	res := execSshOnVmThroughGatewayVm(vmIp, unmountCommands)
+	framework.Logf("Unmount results: %v", res)
+
+	// Partition and format the disk
+	partitionCommands := []string{
+		fmt.Sprintf("sudo parted --script %s mklabel gpt", dev),
+		fmt.Sprintf("sudo parted --script -a optimal %s mkpart primary 0%% 100%%", dev),
+		"lsblk -l",
+		fmt.Sprintf("sudo mkfs.ext4 %s", partitionDev),
+	}
+	res = execSshOnVmThroughGatewayVm(vmIp, partitionCommands)
+	framework.Logf("Partitioning results: %v", res)
+
+	// Mount the new partition
+	volMountPath := "/mnt/volume" + strconv.Itoa(mountIndex)
+	volFolder := volMountPath + "/data"
+	mountCommands := []string{
+		fmt.Sprintf("sudo mkdir -p %s", volMountPath),
+		fmt.Sprintf("sudo mount %s %s", partitionDev, volMountPath),
+		fmt.Sprintf("sudo mkdir -p %s", volFolder),
+		fmt.Sprintf("sudo chmod -R 777 %s", volFolder),
+		fmt.Sprintf("bash -c 'df -Th %s | tee %s/fstype'", partitionDev, volFolder),
+		fmt.Sprintf("grep -c ext4 %s/fstype", volFolder),
+		"sync",
+	}
+	results = execSshOnVmThroughGatewayVm(vmIp, mountCommands)
+	framework.Logf("Mounting results: %v", results)
+
+	// Verify the filesystem type (output of the "grep -c ext4" command above)
+	gomega.Expect(strings.TrimSpace(results[5].Stdout)).To(gomega.Equal("1"), "Filesystem type is not ext4")
+
+	return volFolder
+}
+
+// getVMzonev1 returns the zone (topology.kubernetes.io/zone label) on which the
+// given v1alpha1 VM is scheduled
+func getVMzonev1(ctx context.Context, vm *vmopv1.VirtualMachine) (string, error) {
+	vmlabel := vm.GetLabels()
+	val, labelOk := vmlabel[vmZoneLabel]
+	framework.Logf("val %v, labelOk: %v", val, labelOk)
+	if !labelOk {
+		return val, fmt.Errorf("zone is not present on vm: %s", vm.Name)
+	}
+	// Log all VM labels for debugging
+	for k, v := range vmlabel {
+		framework.Logf("%s = %s", k, v)
+	}
+	return val, nil
+}