Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions tests/e2e/constants/kubernetes.go
Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,7 @@ const (
StorageQuotaWebhookPrefix = "storage-quota-webhook"
DevopsKubeConf = "DEV_OPS_USER_KUBECONFIG"
QuotaSupportedVCVersion = "9.0.0"
Latebinding = "-latebinding"
)

// For busybox pod image
Expand Down
77 changes: 77 additions & 0 deletions tests/e2e/csisnapshot/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand Down Expand Up @@ -389,3 +390,79 @@ func WaitForVolumeSnapshotContentToBeDeleted(client snapclient.Clientset, ctx co
})
return waitErr
}

// GetPersistentVolumeClaimSpecWithDatasource returns a PersistentVolumeClaim
// spec that restores from the named datasource (a VolumeSnapshot) using the
// given storage class.
//
// Parameters:
//   - namespace: namespace the claim is created in.
//   - ds: requested disk size; falls back to constants.DiskSize when empty.
//   - storageclass: storage class to provision from (must be non-nil).
//   - pvclaimlabels: optional labels for the claim; nil leaves Labels unset.
//   - accessMode: access mode; defaults to ReadWriteOnce when empty.
//   - datasourceName: name of the VolumeSnapshot to restore from.
//   - snapshotapigroup: API group of the snapshot kind
//     (e.g. "snapshot.storage.k8s.io").
func GetPersistentVolumeClaimSpecWithDatasource(namespace string, ds string, storageclass *storagev1.StorageClass,
	pvclaimlabels map[string]string, accessMode v1.PersistentVolumeAccessMode,
	datasourceName string, snapshotapigroup string) *v1.PersistentVolumeClaim {
	disksize := constants.DiskSize
	if ds != "" {
		disksize = ds
	}
	if accessMode == "" {
		// If accessMode is not specified, set the default accessMode.
		accessMode = v1.ReadWriteOnce
	}
	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-",
			Namespace:    namespace,
			// A nil map here is equivalent to the field being unset, so no
			// explicit nil-check is needed.
			Labels: pvclaimlabels,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{
				accessMode,
			},
			Resources: v1.VolumeResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse(disksize),
				},
			},
			StorageClassName: &storageclass.Name,
			DataSource: &v1.TypedLocalObjectReference{
				APIGroup: &snapshotapigroup,
				Kind:     "VolumeSnapshot",
				Name:     datasourceName,
			},
		},
	}

	return claim
}

// DeleteVolumeSnapshotWithPollWait requests deletion of the named
// VolumeSnapshot and blocks until it is no longer present in the namespace.
// A snapshot that is already gone (NotFound on delete) is treated as success;
// any other delete or wait failure fails the spec.
func DeleteVolumeSnapshotWithPollWait(ctx context.Context, snapc *snapclient.Clientset,
	namespace string, name string) {

	delErr := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, name, metav1.DeleteOptions{})
	if !apierrors.IsNotFound(delErr) {
		gomega.Expect(delErr).NotTo(gomega.HaveOccurred())
	}

	waitErr := WaitForVolumeSnapshotToBeDeleted(ctx, snapc, namespace, name)
	gomega.Expect(waitErr).NotTo(gomega.HaveOccurred())
}

// WaitForVolumeSnapshotToBeDeleted polls until the named VolumeSnapshot is
// gone from the given namespace. It returns nil once the Get reports
// NotFound, and an error on timeout or on any other failure while fetching
// the snapshot.
func WaitForVolumeSnapshotToBeDeleted(ctx context.Context, client *snapclient.Clientset,
	namespace string, name string) error {

	return wait.PollUntilContextTimeout(ctx, constants.Poll, 2*constants.PollTimeout, true,
		func(ctx context.Context) (bool, error) {
			_, err := client.SnapshotV1().VolumeSnapshots(namespace).Get(ctx, name, metav1.GetOptions{})
			switch {
			case err == nil:
				// Snapshot still exists; keep polling.
				return false, nil
			case apierrors.IsNotFound(err):
				framework.Logf("VolumeSnapshot: %s is deleted", name)
				return true, nil
			default:
				return false, fmt.Errorf("error fetching volumesnapshot details : %v", err)
			}
		})
}
84 changes: 84 additions & 0 deletions tests/e2e/restore_snapshot/basic_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
package restore_snapshot

import (
"context"
"fmt"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"k8s.io/kubernetes/test/e2e/framework"
fpv "k8s.io/kubernetes/test/e2e/framework/pv"
admissionapi "k8s.io/pod-security-admission/api"
"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/constants"
"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/csisnapshot"
"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/vcutil"
)

// Restore-snapshot matrix suite: for each (source SC -> target SCs) pair
// loaded from the JSON matrix, PreSetup provisions a PVC plus snapshot on the
// source storage class and the spec restores it onto each target class.
var _ bool = ginkgo.Describe("[restore-snapshot-other-ds] restore Snapshot on different Datastore-Basic", func() {

	f := framework.NewDefaultFramework("restore-snapshot")
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
	var (
		preSetupData *PreSetupTest
	)
	ginkgo.Context("Snapshot restore on different datastores", func() {
		sharedDatastoreType := "VSAN"
		// Generate entries dynamically from the JSON file at test construction time.
		var entries []ginkgo.TableEntry
		testCases, err := LoadRestoreMatrix(sharedDatastoreType)
		if err != nil {
			// Surface matrix-load failures instead of silently building an
			// empty table (which would report zero specs run).
			framework.Logf("failed to load restore matrix for %q: %v", sharedDatastoreType, err)
		}
		for _, tc := range testCases {
			entries = append(entries, ginkgo.Entry(fmt.Sprintf("%s → %v", tc.SourceSC, tc.TargetSCs), tc))
		}

		ginkgo.DescribeTableSubtree("Restore-Snapshot-On-Different-Datastore-Basic-Test",
			func(tc RestoreMatrixEntry) {

				ginkgo.BeforeEach(func() {
					ginkgo.By("In BeforeEach")
					preSetupData = PreSetup(f, tc.SourceSC)
				})

				ginkgo.AfterEach(func() {
					ctx, cancel := context.WithCancel(context.Background())
					defer cancel()
					ginkgo.By("Cleaning up preSetup PVC,Snapshot,SnapshotClass")
					// Clean up using the state captured in preSetupData rather
					// than package-level globals, so the guard and the values
					// it protects always refer to the same objects.
					if preSetupData != nil && preSetupData.VolumeSnapshot != nil {
						csisnapshot.DeleteVolumeSnapshotWithPandoraWait(ctx, preSetupData.SnapC,
							preSetupData.Namespace, preSetupData.VolumeSnapshot.Name, pandoraSyncWaitTime)
					}
					if preSetupData != nil && preSetupData.PVC != nil {
						err := fpv.DeletePersistentVolumeClaim(ctx, client, preSetupData.PVC.Name, preSetupData.Namespace)
						gomega.Expect(err).NotTo(gomega.HaveOccurred())
						err = vcutil.WaitForCNSVolumeToBeDeleted(e2eTestConfig, preSetupData.VolHandle)
						gomega.Expect(err).NotTo(gomega.HaveOccurred())
					}

				})

				/*
					CreatePvcFromSnapshotOnTargetDs
					Steps:
					1. Create a PVC on a SC having a datastore say ds1 in storage compatible list.
					2. Create a snapshot from the volume
					3. Create a PVC from a snapshot by passing a storage class such that it has a common host as of datastore where
					the snapshot is created
					4. Verify the PVC gets created in the datastore passed in step#3
					5. Run Volume usability test (attach - detach - relocate - data integrity)
					6. Run cleanup.
				*/
				ginkgo.It("[csi-supervisor] Restore snapshot on a different datastore", ginkgo.Label(constants.P0,
					constants.VmServiceVm, constants.Block, constants.Wcp, constants.Vc901), func() {
					ctx, cancel := context.WithCancel(context.Background())
					defer cancel()

					ginkgo.By(fmt.Sprintf("Restoring PVC from snapshot: %s → %s", tc.SourceSC, tc.TargetSCs))
					VerifyVolumeRestoreOperationOnDifferentDatastore(ctx, e2eTestConfig, client, preSetupData.Namespace,
						tc.TargetSCs, preSetupData.VolumeSnapshot, constants.DiskSize, true)

				})

			},
			entries,
		)
	})

})
137 changes: 137 additions & 0 deletions tests/e2e/restore_snapshot/pre_setup_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,137 @@
package restore_snapshot

import (
"context"
"encoding/json"
"os"
"strconv"
"strings"
"time"

snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
snapclient "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
fpv "k8s.io/kubernetes/test/e2e/framework/pv"
"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/bootstrap"
"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/config"
"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/constants"
"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/csisnapshot"
"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/k8testutil"
"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/vcutil"
)

// Package-level state shared between PreSetup and the specs in this package.
// NOTE(review): this is mutable global state written by PreSetup (ctx, client,
// namespace, volumeSnapshot, pandoraSyncWaitTime); specs appear to assume
// serial execution — confirm before enabling parallel runs.
var (
	ctx                 context.Context
	cancel              context.CancelFunc
	e2eTestConfig       *config.E2eTestConfig
	client              clientset.Interface
	volumeSnapshot      *snapV1.VolumeSnapshot
	namespace           string
	snapc               *snapclient.Clientset
	pandoraSyncWaitTime int
)

// PreSetupTest bundles the resources created by PreSetup so specs can
// reference them during the test body and clean them up afterwards.
type PreSetupTest struct {
	Namespace      string                          // namespace the test resources were created in
	StorageClass   *storagev1.StorageClass         // source storage class the PVC was provisioned from
	PVC            *v1.PersistentVolumeClaim       // dynamically provisioned source claim
	VolumeSnapshot *snapV1.VolumeSnapshot          // dynamic snapshot taken of PVC
	VolHandle      string                          // CNS volume handle backing the PVC
	SnapC          *snapclient.Clientset           // snapshot client used to create/delete snapshots
}

// PreSetup provisions the prerequisites for a restore-snapshot spec: a storage
// class built from the given storage policy, a PVC on it (verified in CNS),
// a volume snapshot class, and a dynamic volume snapshot of the PVC. PVC and
// CNS-volume cleanup is registered via ginkgo.DeferCleanup; snapshot cleanup
// is left to the caller (see AfterEach in basic_test.go).
func PreSetup(f *framework.Framework, storagePolicyName string) *PreSetupTest {
	ctx, cancel = context.WithCancel(context.Background())
	defer cancel()
	client = f.ClientSet
	e2eTestConfig = bootstrap.Bootstrap()
	namespace = vcutil.GetNamespaceToRunTests(f, e2eTestConfig)

	scParameters := make(map[string]string)
	// Policy names from the matrix may contain spaces/mixed case; normalize
	// them to the form expected by the SPBM policy lookup.
	storagePolicyName = strings.ToLower(strings.ReplaceAll(storagePolicyName, " ", "-"))
	profileID := vcutil.GetSpbmPolicyID(storagePolicyName, e2eTestConfig)
	scParameters[constants.ScParamStoragePolicyID] = profileID

	labelsMap := map[string]string{"app": "test"}

	ginkgo.By("Create storage class")
	storageclass, err := k8testutil.CreateStorageClass(client, e2eTestConfig, scParameters, nil, "", "", false, storagePolicyName)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	defer func() {
		// NOTE(review): this defer deletes the storage class when PreSetup
		// returns, i.e. before the spec body runs. Bound PVCs are unaffected,
		// but confirm the restore flow never needs the source SC object later.
		ginkgo.By("Delete Storage Class")
		err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	}()

	ginkgo.By("Create PVC")
	pvclaim, persistentVolumes, err := k8testutil.CreatePVCAndQueryVolumeInCNS(ctx, client, e2eTestConfig, namespace, labelsMap, "", constants.DiskSize, storageclass, true)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle
	if e2eTestConfig.TestInput.ClusterFlavor.GuestCluster {
		// In a guest cluster the PV handle points at a supervisor PVC; map it
		// to the underlying CNS volume ID.
		volHandle = k8testutil.GetVolumeIDFromSupervisorCluster(volHandle)
	}
	gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
	ginkgo.DeferCleanup(func() {
		cleanupCtx, cleanupCancel := context.WithTimeout(context.Background(), time.Minute)
		defer cleanupCancel()
		err := fpv.DeletePersistentVolumeClaim(cleanupCtx, client, pvclaim.Name, namespace)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		err = vcutil.WaitForCNSVolumeToBeDeleted(e2eTestConfig, volHandle)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	})
	// reading fullsync wait time
	if os.Getenv(constants.EnvPandoraSyncWaitTime) != "" {
		pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(constants.EnvPandoraSyncWaitTime))
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	} else {
		pandoraSyncWaitTime = constants.DefaultPandoraSyncWaitTime
	}
	restConfig := k8testutil.GetRestConfigClient(e2eTestConfig)
	snapc, err := snapclient.NewForConfig(restConfig)
	// Previously this error was silently dropped; a bad rest config would
	// have surfaced later as a nil-pointer panic.
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	ginkgo.By("Create volume snapshot class")
	volumeSnapshotClass, err := csisnapshot.CreateVolumeSnapshotClass(ctx, e2eTestConfig, snapc, constants.DeletionPolicy)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	ginkgo.By("Create a dynamic volume snapshot")
	volumeSnapshot, _, _, _, _, _, err = csisnapshot.CreateDynamicVolumeSnapshot(
		ctx, e2eTestConfig, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, constants.DiskSize, true)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	return &PreSetupTest{
		Namespace:      namespace,
		StorageClass:   storageclass,
		PVC:            pvclaim,
		VolumeSnapshot: volumeSnapshot,
		VolHandle:      volHandle,
		SnapC:          snapc,
	}
}

type RestoreMatrixEntry struct {
SourceSC string `json:"sourceSC"`
TargetSCs []string `json:"targetSCs"`
}

func LoadRestoreMatrix(sharedDatastoreType string) ([]RestoreMatrixEntry, error) {
data, err := os.ReadFile("restoreMatrix-OSA.json")
if sharedDatastoreType == "VSAN2" {
data, err = os.ReadFile("restoreMatrix-ESA.json")
if err != nil {
return nil, err
}
}

var matrix []RestoreMatrixEntry
err = json.Unmarshal(data, &matrix)
return matrix, err
}
30 changes: 30 additions & 0 deletions tests/e2e/restore_snapshot/restoreMatrix-ESA.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
[
{
"sourceSC": "vsan2-thin-policy",
"targetSCs": ["vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy", "nfs-policy"]
},
{
"sourceSC": "vsan2-thick-policy",
"targetSCs": ["vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy", "nfs-policy"]
},
{
"sourceSC": "vsan2-50-res-policy",
"targetSCs": ["vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy", "nfs-policy"]
},
{
"sourceSC": "vmfs-thin-policy",
"targetSCs": ["vsan2-thin-policy", "vsan2-thick-policy", "vsan2-50-res-policy", "nfs-policy"]
},
{
"sourceSC": "vmfs-ezt-policy",
"targetSCs": ["vsan2-thin-policy", "vsan2-thick-policy", "vsan2-50-res-policy", "nfs-policy"]
},
{
"sourceSC": "vmfs-lzt-policy",
"targetSCs": ["vsan2-thin-policy", "vsan2-thick-policy", "vsan2-50-res-policy", "nfs-policy"]
},
{
"sourceSC": "nfs-policy",
"targetSCs": ["vsan2-thin-policy", "vsan2-thick-policy", "vsan2-50-res-policy", "vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy"]
}
]
30 changes: 30 additions & 0 deletions tests/e2e/restore_snapshot/restoreMatrix-OSA.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
[
{
"sourceSC": "vsan-thin-policy",
"targetSCs": ["vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy", "nfs-policy"]
},
{
"sourceSC": "vsan-thick-policy",
"targetSCs": ["vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy", "nfs-policy"]
},
{
"sourceSC": "vsan-50-res-policy",
"targetSCs": ["vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy", "nfs-policy"]
},
{
"sourceSC": "vmfs-thin-policy",
"targetSCs": ["vsan-thin-policy", "vsan-thick-policy", "vsan-50-res-policy", "nfs-policy"]
},
{
"sourceSC": "vmfs-ezt-policy",
"targetSCs": ["vsan-thin-policy", "vsan-thick-policy", "vsan-50-res-policy", "nfs-policy"]
},
{
"sourceSC": "vmfs-lzt-policy",
"targetSCs": ["vsan-thin-policy", "vsan-thick-policy", "vsan-50-res-policy", "nfs-policy"]
},
{
"sourceSC": "nfs-policy",
"targetSCs": ["vsan-thin-policy", "vsan-thick-policy", "vsan-50-res-policy", "vmfs-thin-policy", "vmfs-ezt-policy", "vmfs-lzt-policy"]
}
]
Loading