diff --git a/tests/e2e/constants/kubernetes.go b/tests/e2e/constants/kubernetes.go
index 9d78cc8c29..1bb7a8217a 100644
--- a/tests/e2e/constants/kubernetes.go
+++ b/tests/e2e/constants/kubernetes.go
@@ -138,6 +138,7 @@ const (
 	StorageQuotaWebhookPrefix = "storage-quota-webhook"
 	DevopsKubeConf            = "DEV_OPS_USER_KUBECONFIG"
 	QuotaSupportedVCVersion   = "9.0.0"
+	Latebinding               = "-latebinding"
 )
 
 // For busybox pod image
diff --git a/tests/e2e/csisnapshot/util.go b/tests/e2e/csisnapshot/util.go
index 7d5ae0343e..e08b61053d 100644
--- a/tests/e2e/csisnapshot/util.go
+++ b/tests/e2e/csisnapshot/util.go
@@ -27,6 +27,7 @@ import (
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	v1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -389,3 +390,79 @@ func WaitForVolumeSnapshotContentToBeDeleted(client snapclient.Clientset, ctx co
 	})
 	return waitErr
 }
+
+// GetPersistentVolumeClaimSpecWithDatasource returns a PersistentVolumeClaim spec
+// that uses the specified storage class and a VolumeSnapshot as its data source.
+func GetPersistentVolumeClaimSpecWithDatasource(namespace string, ds string, storageclass *storagev1.StorageClass,
+	pvclaimlabels map[string]string, accessMode v1.PersistentVolumeAccessMode,
+	datasourceName string, snapshotapigroup string) *v1.PersistentVolumeClaim {
+	disksize := constants.DiskSize
+	if ds != "" {
+		disksize = ds
+	}
+	if accessMode == "" {
+		// If accessMode is not specified, set the default accessMode.
+		accessMode = v1.ReadWriteOnce
+	}
+	claim := &v1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: "pvc-",
+			Namespace:    namespace,
+		},
+		Spec: v1.PersistentVolumeClaimSpec{
+			AccessModes: []v1.PersistentVolumeAccessMode{
+				accessMode,
+			},
+			Resources: v1.VolumeResourceRequirements{
+				Requests: v1.ResourceList{
+					v1.ResourceName(v1.ResourceStorage): resource.MustParse(disksize),
+				},
+			},
+			StorageClassName: &(storageclass.Name),
+			DataSource: &v1.TypedLocalObjectReference{
+				APIGroup: &snapshotapigroup,
+				Kind:     "VolumeSnapshot",
+				Name:     datasourceName,
+			},
+		},
+	}
+
+	if pvclaimlabels != nil {
+		claim.Labels = pvclaimlabels
+	}
+
+	return claim
+}
+
+// DeleteVolumeSnapshotWithPollWait requests deletion of a VolumeSnapshot and waits until it is deleted.
+func DeleteVolumeSnapshotWithPollWait(ctx context.Context, snapc *snapclient.Clientset,
+	namespace string, name string) {
+
+	err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, name, metav1.DeleteOptions{})
+	if !apierrors.IsNotFound(err) {
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	}
+
+	err = WaitForVolumeSnapshotToBeDeleted(ctx, snapc, namespace, name)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+}
+
+// WaitForVolumeSnapshotToBeDeleted waits until the given volume snapshot is deleted.
+func WaitForVolumeSnapshotToBeDeleted(ctx context.Context, client *snapclient.Clientset,
+	namespace string, name string) error {
+
+	waitErr := wait.PollUntilContextTimeout(ctx, constants.Poll, 2*constants.PollTimeout, true,
+		func(ctx context.Context) (bool, error) {
+			_, err := client.SnapshotV1().VolumeSnapshots(namespace).Get(ctx, name, metav1.GetOptions{})
+			if err != nil {
+				if apierrors.IsNotFound(err) {
+					framework.Logf("VolumeSnapshot: %s is deleted", name)
+					return true, nil
+				}
+				return false, fmt.Errorf("error fetching volumesnapshot details: %v", err)
+			}
+			return false, nil
+		})
+	return waitErr
+}
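Reviewer note, not part of the change: a minimal usage sketch of the new csisnapshot helpers above. The function name restoreFromSnapshotSketch and its parameters are illustrative; the clients, target storage class, and source snapshot are assumed to come from the surrounding e2e test, and the only new calls are the helpers added in this diff.

// restoreFromSnapshotSketch shows the intended flow: build a restore PVC spec from a
// VolumeSnapshot data source, create it, wait for it to bind, then delete the snapshot
// with the poll-based helper. Illustrative only; not part of this change.
func restoreFromSnapshotSketch(ctx context.Context, client clientset.Interface,
	snapc *snapclient.Clientset, namespace string,
	targetSC *storagev1.StorageClass, snap *snapV1.VolumeSnapshot) {

	// Empty disk size falls back to constants.DiskSize inside the helper.
	pvcSpec := csisnapshot.GetPersistentVolumeClaimSpecWithDatasource(
		namespace, "", targetSC, nil, v1.ReadWriteOnce, snap.Name, constants.Snapshotapigroup)

	restoredPVC, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	_, err = fpv.WaitForPVClaimBoundPhase(ctx, client,
		[]*v1.PersistentVolumeClaim{restoredPVC}, framework.ClaimProvisionTimeout)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	// Once the restored volume has been validated, remove the snapshot; the helper
	// blocks until the VolumeSnapshot object is actually gone.
	csisnapshot.DeleteVolumeSnapshotWithPollWait(ctx, snapc, namespace, snap.Name)
}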
diff --git a/tests/e2e/restore_snapshot/basic_test.go b/tests/e2e/restore_snapshot/basic_test.go
new file mode 100644
index 0000000000..74ec10e12d
--- /dev/null
+++ b/tests/e2e/restore_snapshot/basic_test.go
@@ -0,0 +1,84 @@
+package restore_snapshot
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	"k8s.io/kubernetes/test/e2e/framework"
+	fpv "k8s.io/kubernetes/test/e2e/framework/pv"
+	admissionapi "k8s.io/pod-security-admission/api"
+	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/constants"
+	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/csisnapshot"
+	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/vcutil"
+)
+
+var _ bool = ginkgo.Describe("[restore-snapshot-other-ds] restore Snapshot on different Datastore-Basic", func() {
+
+	f := framework.NewDefaultFramework("restore-snapshot")
+	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
+	var (
+		preSetupData *PreSetupTest
+	)
+	ginkgo.Context("Snapshot restore on different datastores", func() {
+		sharedDatastoreType := "VSAN"
+		// Generate entries dynamically from the JSON file at test construction time.
+		var entries []ginkgo.TableEntry
+		testCases, _ := LoadRestoreMatrix(sharedDatastoreType)
+		for _, tc := range testCases {
+			entries = append(entries, ginkgo.Entry(fmt.Sprintf("%s → %v", tc.SourceSC, tc.TargetSCs), tc))
+		}
+
+		ginkgo.DescribeTableSubtree("Restore-Snapshot-On-Different-Datastore-Basic-Test",
+			func(tc RestoreMatrixEntry) {
+
+				ginkgo.BeforeEach(func() {
+					ginkgo.By("In BeforeEach")
+					preSetupData = PreSetup(f, tc.SourceSC)
+				})
+
+				ginkgo.AfterEach(func() {
+					ctx, cancel := context.WithCancel(context.Background())
+					defer cancel()
+					ginkgo.By("Cleaning up preSetup PVC and snapshot")
+					if preSetupData != nil && preSetupData.VolumeSnapshot != nil {
+						csisnapshot.DeleteVolumeSnapshotWithPandoraWait(ctx, preSetupData.SnapC, namespace,
+							preSetupData.VolumeSnapshot.Name, pandoraSyncWaitTime)
+					}
+					if preSetupData != nil && preSetupData.PVC != nil {
+						err := fpv.DeletePersistentVolumeClaim(ctx, client, preSetupData.PVC.Name, namespace)
+						gomega.Expect(err).NotTo(gomega.HaveOccurred())
+						err = vcutil.WaitForCNSVolumeToBeDeleted(e2eTestConfig, preSetupData.VolHandle)
+						gomega.Expect(err).NotTo(gomega.HaveOccurred())
+					}
+				})
+
+				/*
+					CreatePvcFromSnapshotOnTargetDs
+					Steps:
+					1. Create a PVC on a SC that has a datastore, say ds1, in its storage-compatible list.
+					2. Create a snapshot from the volume.
+					3. Create a PVC from the snapshot by passing a storage class that shares a common host
+					   with the datastore where the snapshot was created.
+					4. Verify the PVC gets created on the datastore passed in step 3.
+					5. Run the volume usability test (attach - detach - relocate - data integrity).
+					6. Run cleanup.
+				*/
+				ginkgo.It("[csi-supervisor] Restore snapshot on a different datastore", ginkgo.Label(constants.P0,
+					constants.VmServiceVm, constants.Block, constants.Wcp, constants.Vc901), func() {
+					ctx, cancel := context.WithCancel(context.Background())
+					defer cancel()
+
+					ginkgo.By(fmt.Sprintf("Restoring PVC from snapshot: %s → %s", tc.SourceSC, tc.TargetSCs))
+					VerifyVolumeRestoreOperationOnDifferentDatastore(ctx, e2eTestConfig, client, preSetupData.Namespace,
+						tc.TargetSCs, preSetupData.VolumeSnapshot, constants.DiskSize, true)
+				})
+
+			},
+			entries,
+		)
+	})
+
+})
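Reviewer note, not part of the change: because the subtree above is generated at construction time, a quick way to see what the matrix expands into is to log the combinations. logRestoreCombinations is an illustrative name; LoadRestoreMatrix and RestoreMatrixEntry are the helpers defined in pre_setup_test.go below.

// logRestoreCombinations sketches the fan-out: one subtree is generated per source
// storage class, and the single It inside it restores the snapshot onto every target
// storage class listed for that source.
func logRestoreCombinations(sharedDatastoreType string) {
	testCases, err := LoadRestoreMatrix(sharedDatastoreType)
	if err != nil {
		framework.Failf("could not load restore matrix: %v", err)
	}
	for _, tc := range testCases {
		framework.Logf("source %q restores onto targets %v", tc.SourceSC, tc.TargetSCs)
	}
}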
diff --git a/tests/e2e/restore_snapshot/pre_setup_test.go b/tests/e2e/restore_snapshot/pre_setup_test.go
new file mode 100644
index 0000000000..54e168deca
--- /dev/null
+++ b/tests/e2e/restore_snapshot/pre_setup_test.go
@@ -0,0 +1,137 @@
+package restore_snapshot
+
+import (
+	"context"
+	"encoding/json"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
+	snapclient "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
+	fpv "k8s.io/kubernetes/test/e2e/framework/pv"
+	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/bootstrap"
+	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/config"
+	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/constants"
+	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/csisnapshot"
+	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/k8testutil"
+	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/vcutil"
+)
+
+var (
+	ctx                 context.Context
+	cancel              context.CancelFunc
+	e2eTestConfig       *config.E2eTestConfig
+	client              clientset.Interface
+	volumeSnapshot      *snapV1.VolumeSnapshot
+	namespace           string
+	snapc               *snapclient.Clientset
+	pandoraSyncWaitTime int
+)
+
+// PreSetupTest carries the objects created by PreSetup so specs and AfterEach
+// blocks can reference them directly.
+type PreSetupTest struct {
+	Namespace      string
+	StorageClass   *storagev1.StorageClass
+	PVC            *v1.PersistentVolumeClaim
+	VolumeSnapshot *snapV1.VolumeSnapshot
+	VolHandle      string
+	SnapC          *snapclient.Clientset
+}
+
+// PreSetup creates the source storage class, PVC and dynamic volume snapshot
+// that every restore test starts from.
+func PreSetup(f *framework.Framework, storagePolicyName string) *PreSetupTest {
+	ctx, cancel = context.WithCancel(context.Background())
+	defer cancel()
+	client = f.ClientSet
+	e2eTestConfig = bootstrap.Bootstrap()
+	namespace = vcutil.GetNamespaceToRunTests(f, e2eTestConfig)
+
+	scParameters := make(map[string]string)
+	storagePolicyName = strings.ToLower(strings.ReplaceAll(storagePolicyName, " ", "-"))
+	profileID := vcutil.GetSpbmPolicyID(storagePolicyName, e2eTestConfig)
+	scParameters[constants.ScParamStoragePolicyID] = profileID
+
+	labelsMap := map[string]string{"app": "test"}
+
+	ginkgo.By("Create storage class")
+	storageclass, err := k8testutil.CreateStorageClass(client, e2eTestConfig, scParameters, nil, "", "", false, storagePolicyName)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	defer func() {
+		ginkgo.By("Delete Storage Class")
+		err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	}()
+
+	ginkgo.By("Create PVC")
+	pvclaim, persistentVolumes, err := k8testutil.CreatePVCAndQueryVolumeInCNS(ctx, client, e2eTestConfig,
+		namespace, labelsMap, "", constants.DiskSize, storageclass, true)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle
+	if e2eTestConfig.TestInput.ClusterFlavor.GuestCluster {
+		volHandle = k8testutil.GetVolumeIDFromSupervisorCluster(volHandle)
+	}
+	gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
+	ginkgo.DeferCleanup(func() {
+		cleanupCtx, cleanupCancel := context.WithTimeout(context.Background(), time.Minute)
+		defer cleanupCancel()
+		err := fpv.DeletePersistentVolumeClaim(cleanupCtx, client, pvclaim.Name, namespace)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		err = vcutil.WaitForCNSVolumeToBeDeleted(e2eTestConfig, volHandle)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	})
+
+	// Read the Pandora full-sync wait time, falling back to the default.
+	if os.Getenv(constants.EnvPandoraSyncWaitTime) != "" {
+		pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(constants.EnvPandoraSyncWaitTime))
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	} else {
+		pandoraSyncWaitTime = constants.DefaultPandoraSyncWaitTime
+	}
+
+	restConfig := k8testutil.GetRestConfigClient(e2eTestConfig)
+	snapc, err = snapclient.NewForConfig(restConfig)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("Create volume snapshot class")
+	volumeSnapshotClass, err := csisnapshot.CreateVolumeSnapshotClass(ctx, e2eTestConfig, snapc, constants.DeletionPolicy)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("Create a dynamic volume snapshot")
+	volumeSnapshot, _, _, _, _, _, err = csisnapshot.CreateDynamicVolumeSnapshot(
+		ctx, e2eTestConfig, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, constants.DiskSize, true)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	return &PreSetupTest{
+		Namespace:      namespace,
+		StorageClass:   storageclass,
+		PVC:            pvclaim,
+		VolumeSnapshot: volumeSnapshot,
+		VolHandle:      volHandle,
+		SnapC:          snapc,
+	}
+}
+
+// RestoreMatrixEntry maps one source storage class to the target storage
+// classes its snapshot should be restored onto.
+type RestoreMatrixEntry struct {
+	SourceSC  string   `json:"sourceSC"`
+	TargetSCs []string `json:"targetSCs"`
+}
+
+// LoadRestoreMatrix reads the restore matrix for the given shared datastore type.
+func LoadRestoreMatrix(sharedDatastoreType string) ([]RestoreMatrixEntry, error) {
+	matrixFile := "restoreMatrix-OSA.json"
+	if sharedDatastoreType == "VSAN2" {
+		matrixFile = "restoreMatrix-ESA.json"
+	}
+	data, err := os.ReadFile(matrixFile)
+	if err != nil {
+		return nil, err
+	}
+
+	var matrix []RestoreMatrixEntry
+	err = json.Unmarshal(data, &matrix)
+	return matrix, err
+}
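Reviewer note, not part of the change: PreSetup above and VerifyVolumeRestoreOperationOnDifferentDatastore later in this diff build a policy-backed storage class with the same few lines; a shared helper along these lines could remove the duplication. createPolicyBackedSC is a hypothetical name, and the return type of k8testutil.CreateStorageClass is assumed to be *storagev1.StorageClass, as its other call sites imply.

// createPolicyBackedSC is a sketch of a shared helper: normalize the SPBM policy
// name, resolve its profile ID, and create a storage class parameterized with it.
func createPolicyBackedSC(client clientset.Interface, vs *config.E2eTestConfig,
	storagePolicyName string) (*storagev1.StorageClass, error) {

	storagePolicyName = strings.ToLower(strings.ReplaceAll(storagePolicyName, " ", "-"))
	scParameters := map[string]string{
		constants.ScParamStoragePolicyID: vcutil.GetSpbmPolicyID(storagePolicyName, vs),
	}
	return k8testutil.CreateStorageClass(client, vs, scParameters, nil, "", "", false, storagePolicyName)
}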
diff --git a/tests/e2e/restore_snapshot/restoreMatrix-ESA.json b/tests/e2e/restore_snapshot/restoreMatrix-ESA.json
new file mode 100644
index 0000000000..b6bba13f62
--- /dev/null
+++ b/tests/e2e/restore_snapshot/restoreMatrix-ESA.json
@@ -0,0 +1,30 @@
+[
+  {
+    "sourceSC": "vsan2-thin-policy",
+    "targetSCs": ["vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy", "nfs-policy"]
+  },
+  {
+    "sourceSC": "vsan2-thick-policy",
+    "targetSCs": ["vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy", "nfs-policy"]
+  },
+  {
+    "sourceSC": "vsan2-50-res-policy",
+    "targetSCs": ["vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy", "nfs-policy"]
+  },
+  {
+    "sourceSC": "vmfs-thin-policy",
+    "targetSCs": ["vsan2-thin-policy", "vsan2-thick-policy", "vsan2-50-res-policy", "nfs-policy"]
+  },
+  {
+    "sourceSC": "vmfs-ezt-policy",
+    "targetSCs": ["vsan2-thin-policy", "vsan2-thick-policy", "vsan2-50-res-policy", "nfs-policy"]
+  },
+  {
+    "sourceSC": "vmfs-lzt-policy",
+    "targetSCs": ["vsan2-thin-policy", "vsan2-thick-policy", "vsan2-50-res-policy", "nfs-policy"]
+  },
+  {
+    "sourceSC": "nfs-policy",
+    "targetSCs": ["vsan2-thin-policy", "vsan2-thick-policy", "vsan2-50-res-policy", "vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy"]
+  }
+]
diff --git a/tests/e2e/restore_snapshot/restoreMatrix-OSA.json b/tests/e2e/restore_snapshot/restoreMatrix-OSA.json
new file mode 100644
index 0000000000..e5f4d601be
--- /dev/null
+++ b/tests/e2e/restore_snapshot/restoreMatrix-OSA.json
@@ -0,0 +1,30 @@
+[
+  {
+    "sourceSC": "vsan-thin-policy",
+    "targetSCs": ["vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy", "nfs-policy"]
+  },
+  {
+    "sourceSC": "vsan-thick-policy",
+    "targetSCs": ["vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy", "nfs-policy"]
+  },
+  {
+    "sourceSC": "vsan-50-res-policy",
+    "targetSCs": ["vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy", "nfs-policy"]
+  },
+  {
+    "sourceSC": "vmfs-thin-policy",
+    "targetSCs": ["vsan-thin-policy", "vsan-thick-policy", "vsan-50-res-policy", "nfs-policy"]
+  },
+  {
+    "sourceSC": "vmfs-ezt-policy",
+    "targetSCs": ["vsan-thin-policy", "vsan-thick-policy", "vsan-50-res-policy", "nfs-policy"]
+  },
+  {
+    "sourceSC": "vmfs-lzt-policy",
+    "targetSCs": ["vsan-thin-policy", "vsan-thick-policy", "vsan-50-res-policy", "nfs-policy"]
+  },
+  {
+    "sourceSC": "nfs-policy",
+    "targetSCs": ["vsan-thin-policy", "vsan-thick-policy", "vsan-50-res-policy", "vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy"]
+  }
+]
diff --git a/tests/e2e/restore_snapshot/restore_snapshot_util.go b/tests/e2e/restore_snapshot/restore_snapshot_util.go
new file mode 100644
index 0000000000..cd90525fd6
--- /dev/null
+++ b/tests/e2e/restore_snapshot/restore_snapshot_util.go
@@ -0,0 +1,186 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restore_snapshot
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
+	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/config"
+	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/constants"
+	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/csisnapshot"
+	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/k8testutil"
+	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/vcutil"
+
+	fpod "k8s.io/kubernetes/test/e2e/framework/pod"
+	fpv "k8s.io/kubernetes/test/e2e/framework/pv"
+)
+
+// VerifyVolumeRestoreOperationOnDifferentDatastore restores the given snapshot into a new
+// PVC on each of the target storage policies and, if verifyPodCreation is set to true,
+// creates a pod and verifies the volume attach operation.
+func VerifyVolumeRestoreOperationOnDifferentDatastore(ctx context.Context, vs *config.E2eTestConfig,
+	client clientset.Interface, namespace string, storagePolicyNames []string,
+	volumeSnapshot *snapV1.VolumeSnapshot, diskSize string, verifyPodCreation bool) {
+
+	for _, storagePolicyName := range storagePolicyNames {
+		ginkgo.By("Create storage class")
+		scParameters := make(map[string]string)
+		storagePolicyName = strings.ToLower(strings.ReplaceAll(storagePolicyName, " ", "-"))
+		profileID := vcutil.GetSpbmPolicyID(storagePolicyName, vs)
+		scParameters[constants.ScParamStoragePolicyID] = profileID
+		storageclass, err := k8testutil.CreateStorageClass(client, vs, scParameters, nil, "", "", false, storagePolicyName)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		defer func() {
+			ginkgo.By("Delete Storage Class")
+			err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+
+		ginkgo.By("Create PVC from snapshot on datastore")
+		pvcSpec := csisnapshot.GetPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil,
+			v1.ReadWriteOnce, volumeSnapshot.Name, constants.Snapshotapigroup)
+
+		pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client,
+			[]*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle
+		if vs.TestInput.ClusterFlavor.GuestCluster {
+			volHandle2 = k8testutil.GetVolumeIDFromSupervisorCluster(volHandle2)
+		}
+		gomega.Expect(volHandle2).NotTo(gomega.BeEmpty())
+		ginkgo.DeferCleanup(func() {
+			cleanupCtx, cleanupCancel := context.WithTimeout(context.Background(), time.Minute)
+			defer cleanupCancel()
+			err := fpv.DeletePersistentVolumeClaim(cleanupCtx, client, pvclaim2.Name, namespace)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			err = vcutil.WaitForCNSVolumeToBeDeleted(vs, volHandle2)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		})
+
+		var pod *v1.Pod
+		if verifyPodCreation {
+			// Create a Pod to use this PVC, and verify the volume has been attached.
+			ginkgo.By("Creating pod to attach PV to the node")
+			pod, err = k8testutil.CreatePod(ctx, vs, client, namespace, nil,
+				[]*v1.PersistentVolumeClaim{pvclaim2}, false, constants.ExecRWXCommandPod1)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			defer func() {
+				// Delete POD
+				ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace))
+				err = fpod.DeletePodWithWait(ctx, client, pod)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}()
+			var vmUUID string
+			var exists bool
+			nodeName := pod.Spec.NodeName
+
+			if vs.TestInput.ClusterFlavor.GuestCluster {
+				vmUUID, err = vcutil.GetVMUUIDFromNodeName(vs, pod.Spec.NodeName)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			} else if vs.TestInput.ClusterFlavor.SupervisorCluster {
+				annotations := pod.Annotations
+				vmUUID, exists = annotations[constants.VmUUIDLabel]
+				gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", constants.VmUUIDLabel))
+				_, err := vcutil.GetVMByUUID(ctx, vs, vmUUID)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
+
+			ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle2, nodeName))
+			isDiskAttached, err := vcutil.IsVolumeAttachedToVM(client, vs, volHandle2, vmUUID)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node")
+			/* TODO- test Bug
+			ginkgo.By("Verify the volume is accessible and Read/write is possible")
+			var cmd []string
+			var wrtiecmd []string
+
+			wrtiecmd = []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c",
+				"echo 'Hello message from test into Pod1' >> /mnt/volume1/Pod1.html"}
+
+			cmd = []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c",
+				"cat ", constants.FilePathPod1}
+
+			e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...)
+			output := e2ekubectl.RunKubectlOrDie(namespace, cmd...)
+			gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse())
+			*/
+
+		}
+	}
+}
+
+// func testCleanUp(ctx context.Context, vs *config.E2eTestConfig, client clientset.Interface, namespace string) {
+// 	adminClient, c := initializeClusterClientsByUserRoles(c)
+// 	pvNames := sets.NewString()
+// 	pvcPollErr := wait.PollUntilContextTimeout(ctx, constants.PollTimeoutShort, StatefulSetTimeout, true,
+// 		func(ctx context.Context) (bool, error) {
+// 			pvcList, err := c.CoreV1().PersistentVolumeClaims(namespace).List(context.TODO(),
+// 				metav1.ListOptions{LabelSelector: labels.Everything().String()})
+// 			if err != nil {
+// 				framework.Logf("WARNING: Failed to list pvcs, retrying %v", err)
+// 				return false, nil
+// 			}
+// 			for _, pvc := range pvcList.Items {
+// 				pvNames.Insert(pvc.Spec.VolumeName)
+// 				framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
+// 				if err := c.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvc.Name,
+// 					metav1.DeleteOptions{}); err != nil {
+// 					return false, nil
+// 				}
+// 			}
+// 			return true, nil
+// 		})
+// 	if pvcPollErr != nil {
+// 		errList = append(errList, "Timeout waiting for pvc deletion.")
+// 	}
+
+// 	pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true,
+// 		func(ctx context.Context) (bool, error) {
+// 			pvList, err := client.CoreV1().PersistentVolumes().List(context.TODO(),
+// 				metav1.ListOptions{LabelSelector: labels.Everything().String()})
+// 			if err != nil {
+// 				framework.Logf("WARNING: Failed to list pvs, retrying %v", err)
+// 				return false, nil
+// 			}
+// 			waitingFor := []string{}
+// 			for _, pv := range pvList.Items {
+// 				if pvNames.Has(pv.Name) {
+// 					waitingFor = append(waitingFor, fmt.Sprintf("%v: %+v", pv.Name, pv.Status))
+// 				}
+// 			}
+// 			if len(waitingFor) == 0 {
+// 				return true, nil
+// 			}
+// 			framework.Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
+// 			return false, nil
+// 		})
+// 	if pollErr != nil {
+// 		errList = append(errList, "Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs.")
+// 	}
+// }
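Reviewer note, not part of the change: the read/write verification inside VerifyVolumeRestoreOperationOnDifferentDatastore is currently commented out with a TODO. A sketch of what it might look like once re-enabled, assuming the e2ekubectl helper from the Kubernetes e2e framework (as in the commented code) and that constants.FilePathPod1 points at the file written below; verifyDataIntegritySketch is a hypothetical name.

// verifyDataIntegritySketch writes a marker file into the restored volume through the
// pod and reads it back, failing the spec if the content does not round-trip.
func verifyDataIntegritySketch(namespace string, pod *v1.Pod) {
	writeCmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c",
		"echo 'Hello message from test into Pod1' >> /mnt/volume1/Pod1.html"}
	readCmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c",
		"cat " + constants.FilePathPod1}

	e2ekubectl.RunKubectlOrDie(namespace, writeCmd...)
	output := e2ekubectl.RunKubectlOrDie(namespace, readCmd...)
	gomega.Expect(output).To(gomega.ContainSubstring("Hello message from test into Pod1"))
}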
diff --git a/tests/e2e/restore_snapshot/suite_test.go b/tests/e2e/restore_snapshot/suite_test.go
new file mode 100644
index 0000000000..1f400e67c7
--- /dev/null
+++ b/tests/e2e/restore_snapshot/suite_test.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restore_snapshot
+
+import (
+	"flag"
+	"os"
+	"strings"
+	"testing"
+
+	ginkgo "github.com/onsi/ginkgo/v2"
+	gomega "github.com/onsi/gomega"
+	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/config"
+	_ "k8s.io/kubernetes/test/e2e/framework/debug/init"
+	"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/constants"
+)
+
+func init() {
+	framework.AfterReadingAllFlags(&framework.TestContext)
+}
+
+func TestE2E(t *testing.T) {
+	handleFlags()
+	gomega.RegisterFailHandler(ginkgo.Fail)
+	_, reporterConfig := ginkgo.GinkgoConfiguration()
+	reporterConfig.JUnitReport = "junit.xml"
+	ginkgo.RunSpecs(t, "Restore-Snapshot-On-Different-Datastore-Tests", reporterConfig)
+}
+
+func handleFlags() {
+	config.CopyFlags(config.Flags, flag.CommandLine)
+	framework.RegisterCommonFlags(flag.CommandLine)
+	framework.TestContext.KubeConfig = os.Getenv(constants.KubeconfigEnvVar)
+	mydir, err := os.Getwd()
+	framework.ExpectNoError(err)
+	framework.TestContext.RepoRoot = strings.ReplaceAll(mydir, "/tests/e2e/restore_snapshot", "")
+	flag.Parse()
+}