3 changes: 3 additions & 0 deletions tests/e2e/bootstrap/bootstrap.go
@@ -26,6 +26,7 @@ import (
    cnstypes "github.com/vmware/govmomi/cns/types"
    "k8s.io/kubernetes/test/e2e/framework"
    "k8s.io/kubernetes/test/e2e/framework/testfiles"
    "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/clients/cns"
    "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/clients/vc"
    "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/config"
    "sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/constants"
@@ -53,6 +54,8 @@ func Bootstrap(optionArgs ...bool) *config.E2eTestConfig {
        e2eTestConfig.TestInput.Global.VCenterPort, e2eTestConfig.TestInput.Global.User,
        e2eTestConfig.TestInput.Global.Password)

    e2eTestConfig.CnsClient, _ = cns.NewCnsClient(ctx, e2eTestConfig.VcClient.Client)

    if framework.TestContext.RepoRoot != "" {
        testfiles.AddFileSource(testfiles.RootFileSource{Root: framework.TestContext.RepoRoot})
    }
107 changes: 54 additions & 53 deletions tests/e2e/config/test_config.go
@@ -106,59 +106,60 @@ type TestInputData struct {

// TestBedConfig holds all the testbed-related information.
type TestBedConfig struct {
    EsxIp1               string
    EsxIp2               string
    EsxIp3               string
    EsxIp4               string
    EsxIp5               string
    EsxIp6               string
    EsxIp7               string
    EsxIp8               string
    EsxIp9               string
    EsxIp10              string
    VcAddress            string
    VcAddress2           string
    VcAddress3           string
    MasterIP1            string
    MasterIP2            string
    MasterIP3            string
    IpPortMap            map[string]string
    MissingEnvVars       []string
    DefaultlocalhostIP   string //= "127.0.0.1"
    VcIp2SshPortNum      string
    VcIp3SshPortNum      string
    RwxAccessMode        bool
    Vcptocsi             bool
    MultipleSvc          bool
    WindowsEnv           bool
    Multivc              bool
    StretchedSVC         bool
    IsPrivateNetwork     bool
    K8sMasterIp1PortNum  string
    K8sMasterIp2PortNum  string
    K8sMasterIp3PortNum  string
    VcIp1SshPortNum      string
    EsxIp1PortNum        string
    EsxIp2PortNum        string
    EsxIp3PortNum        string
    EsxIp4PortNum        string
    EsxIp5PortNum        string
    EsxIp6PortNum        string
    EsxIp7PortNum        string
    EsxIp8PortNum        string
    EsxIp9PortNum        string
    EsxIp10PortNum       string
    VcVersion            string
    DefaultCluster       *object.ClusterComputeResource
    DefaultDatastore     *object.Datastore
    Name                 string `default:"worker"`
    User                 string
    Location             string
    VcIp                 string
    VcVmName             string
    EsxHosts             []map[string]string
    Podname              string
    Datastores           []map[string]string
    WcpVsanDirectCluster bool
}

// NetPermissionConfig consists of information used to restrict the
6 changes: 6 additions & 0 deletions tests/e2e/constants/env_constants.go
@@ -225,3 +225,9 @@ const (
    EnvIsDevopsUser   = "IS_DEVOPS_USER"
    EnvDevopsUserName = "DEVOPS_USERNAME"
)

// Environment variables used for creating a disk on the target datastore.
var (
    EnvStoragePolicy        = "STORAGE_POLICY_NAME"
    EnvRestoreStoragePolicy = "RESTORE_STORAGE_POLICY"
)
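A minimal usage sketch (hypothetical, not part of this PR) of how a test could read these variables through the env helper already used by the snapshot utilities below; per its name, the helper is assumed to fail the test when the variable is unset:

// Hypothetical example: fetch the storage policy names for disk creation
// and restore tests; env.GetAndExpectStringEnvVar is the same helper used
// for KUBECONFIG in tests/e2e/csisnapshot/util.go.
storagePolicyName := env.GetAndExpectStringEnvVar(constants.EnvStoragePolicy)
restorePolicyName := env.GetAndExpectStringEnvVar(constants.EnvRestoreStoragePolicy)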
180 changes: 180 additions & 0 deletions tests/e2e/csisnapshot/util.go
@@ -27,15 +27,21 @@ import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
fpv "k8s.io/kubernetes/test/e2e/framework/pv"
"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/config"
"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/constants"
"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/env"
"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/k8testutil"
"sigs.k8s.io/vsphere-csi-driver/v3/tests/e2e/vcutil"
)

@@ -389,3 +395,177 @@ func WaitForVolumeSnapshotContentToBeDeleted(client snapclient.Clientset, ctx co
    })
    return waitErr
}

// CreateVolumeSnapshot creates a dynamic volume snapshot for the given PVC
// and returns the snapshot along with its bound VolumeSnapshotContent.
func CreateVolumeSnapshot(ctx context.Context, e2eTestConfig *config.E2eTestConfig, namespace string,
    pvclaim *v1.PersistentVolumeClaim, pv []*v1.PersistentVolume,
    diskSize string) (*snapV1.VolumeSnapshot, *snapV1.VolumeSnapshotContent) {
    // Create or get the volume snapshot class.
    ginkgo.By("Get or create volume snapshot class")
    snapc := GetSnapshotClientSet(e2eTestConfig)
    volumeSnapshotClass, err := CreateVolumeSnapshotClass(ctx, e2eTestConfig, snapc, constants.DeletionPolicy)
    gomega.Expect(err).NotTo(gomega.HaveOccurred())

    // Create the volume snapshot. CNS-side snapshot verification is only
    // performed on supervisor clusters.
    ginkgo.By("Create a volume snapshot")
    performCnsQueryVolumeSnapshot := e2eTestConfig.TestInput.ClusterFlavor.SupervisorCluster
    volumeSnapshot, snapshotContent, _,
        _, _, _, err := CreateDynamicVolumeSnapshot(ctx, e2eTestConfig, namespace, snapc, volumeSnapshotClass,
        pvclaim, pv[0].Spec.CSI.VolumeHandle, diskSize, performCnsQueryVolumeSnapshot)
    gomega.Expect(err).NotTo(gomega.HaveOccurred())

    return volumeSnapshot, snapshotContent
}
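For context, a hypothetical call site (pvclaim and persistentVolumes are assumed to come from earlier provisioning steps in a test):

// Hypothetical example: snapshot a bound PVC; the returned snapshot and
// content can then be fed to the restore and delete helpers in this file.
volumeSnapshot, snapshotContent := CreateVolumeSnapshot(ctx, e2eTestConfig,
    namespace, pvclaim, persistentVolumes, constants.DiskSize)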

// GetSnapshotClientSet returns a snapshot client set for the cluster under
// test, building the rest config from the KUBECONFIG environment variable on
// guest clusters and using the vCenter rest config otherwise.
func GetSnapshotClientSet(e2eTestConfig *config.E2eTestConfig) *snapclient.Clientset {
    var restConfig *rest.Config
    if e2eTestConfig.TestInput.ClusterFlavor.GuestCluster {
        restConfig = GetRestConfigClientForGuestCluster(nil)
    } else {
        restConfig = vcutil.GetRestConfigClient(e2eTestConfig)
    }
    snapc, err := snapclient.NewForConfig(restConfig)
    gomega.Expect(err).NotTo(gomega.HaveOccurred())
    return snapc
}

// GetRestConfigClientForGuestCluster returns the given rest config if it is
// non-nil; otherwise it builds one from the KUBECONFIG environment variable.
func GetRestConfigClientForGuestCluster(guestClusterRestConfig *rest.Config) *rest.Config {
    if guestClusterRestConfig == nil {
        var err error
        if k8senv := env.GetAndExpectStringEnvVar("KUBECONFIG"); k8senv != "" {
            guestClusterRestConfig, err = clientcmd.BuildConfigFromFlags("", k8senv)
        }
        gomega.Expect(err).NotTo(gomega.HaveOccurred())
    }
    return guestClusterRestConfig
}

// VerifyVolumeRestoreOperation restores a volume (PVC) from the given
// snapshot and, if verifyPodCreation is set to true, creates a pod and checks
// that the restored volume attaches to it and is readable and writable.
func VerifyVolumeRestoreOperation(ctx context.Context, vs *config.E2eTestConfig, client kubernetes.Interface,
    namespace string, storageclass *storagev1.StorageClass,
    volumeSnapshot *snapV1.VolumeSnapshot, diskSize string,
    verifyPodCreation bool) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume, *v1.Pod) {

    ginkgo.By("Create PVC from snapshot")
    pvcSpec := GetPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil,
        v1.ReadWriteOnce, volumeSnapshot.Name, constants.Snapshotapigroup)

    pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec)
    gomega.Expect(err).NotTo(gomega.HaveOccurred())

    persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client,
        []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout)
    gomega.Expect(err).NotTo(gomega.HaveOccurred())
    volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle
    if vs.TestInput.ClusterFlavor.GuestCluster {
        volHandle2 = k8testutil.GetVolumeIDFromSupervisorCluster(volHandle2)
    }
    gomega.Expect(volHandle2).NotTo(gomega.BeEmpty())

    var pod *v1.Pod
    if verifyPodCreation {
        // Create a pod to use this PVC, and verify the volume has been attached.
        ginkgo.By("Creating pod to attach PV to the node")
        pod, err = k8testutil.CreatePod(ctx, vs, client, namespace, nil,
            []*v1.PersistentVolumeClaim{pvclaim2}, false, constants.ExecRWXCommandPod1)
        gomega.Expect(err).NotTo(gomega.HaveOccurred())

        var vmUUID string
        var exists bool
        nodeName := pod.Spec.NodeName

        if vs.TestInput.ClusterFlavor.VanillaCluster {
            vmUUID = k8testutil.GetNodeUUID(ctx, client, pod.Spec.NodeName)
        } else if vs.TestInput.ClusterFlavor.GuestCluster {
            vmUUID, err = vcutil.GetVMUUIDFromNodeName(vs, pod.Spec.NodeName)
            gomega.Expect(err).NotTo(gomega.HaveOccurred())
        } else if vs.TestInput.ClusterFlavor.SupervisorCluster {
            annotations := pod.Annotations
            vmUUID, exists = annotations[constants.VmUUIDLabel]
            gomega.Expect(exists).To(gomega.BeTrue(),
                fmt.Sprintf("Pod doesn't have %s annotation", constants.VmUUIDLabel))
            _, err := vcutil.GetVMByUUID(ctx, vs, vmUUID)
            gomega.Expect(err).NotTo(gomega.HaveOccurred())
        }

        ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle2, nodeName))
        isDiskAttached, err := vcutil.IsVolumeAttachedToVM(client, vs, volHandle2, vmUUID)
        gomega.Expect(err).NotTo(gomega.HaveOccurred())
        gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node")

        ginkgo.By("Verify the volume is accessible and read/write is possible")
        var readCmd []string
        if vs.TestInput.TestBedInfo.WindowsEnv {
            readCmd = []string{"exec", pod.Name, "--namespace=" + namespace, "powershell.exe",
                "cat ", constants.FilePathPod1}
        } else {
            readCmd = []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c",
                "cat ", constants.FilePathPod1}
        }
        _ = e2ekubectl.RunKubectlOrDie(namespace, readCmd...)
        // gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse())

        var writeCmd []string
        if vs.TestInput.TestBedInfo.WindowsEnv {
            writeCmd = []string{"exec", pod.Name, "--namespace=" + namespace, "powershell.exe",
                "Add-Content /mnt/volume1/Pod1.html 'Hello message from test into Pod1'"}
        } else {
            writeCmd = []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c",
                "echo 'Hello message from test into Pod1' >> /mnt/volume1/Pod1.html"}
        }
        e2ekubectl.RunKubectlOrDie(namespace, writeCmd...)
        _ = e2ekubectl.RunKubectlOrDie(namespace, readCmd...)
        // gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse())
    }
    return pvclaim2, persistentvolumes2, pod
}
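A hypothetical end-to-end flow stitched from the helpers above (client, storageclass, and volumeSnapshot are assumed to come from test setup and a prior CreateVolumeSnapshot call):

// Hypothetical example: restore a new PVC from an existing snapshot and
// verify the restored volume attaches to a pod and accepts writes.
restoredPvc, restoredPvs, pod := VerifyVolumeRestoreOperation(ctx, e2eTestConfig,
    client, namespace, storageclass, volumeSnapshot, constants.DiskSize, true)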

// GetPersistentVolumeClaimSpecWithDatasource returns a PersistentVolumeClaim
// spec with the specified storage class and a VolumeSnapshot data source.
func GetPersistentVolumeClaimSpecWithDatasource(namespace string, ds string, storageclass *storagev1.StorageClass,
    pvclaimlabels map[string]string, accessMode v1.PersistentVolumeAccessMode,
    datasourceName string, snapshotapigroup string) *v1.PersistentVolumeClaim {
    disksize := constants.DiskSize
    if ds != "" {
        disksize = ds
    }
    if accessMode == "" {
        // If accessMode is not specified, set the default access mode.
        accessMode = v1.ReadWriteOnce
    }
    claim := &v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: "pvc-",
            Namespace:    namespace,
        },
        Spec: v1.PersistentVolumeClaimSpec{
            AccessModes: []v1.PersistentVolumeAccessMode{
                accessMode,
            },
            Resources: v1.VolumeResourceRequirements{
                Requests: v1.ResourceList{
                    v1.ResourceName(v1.ResourceStorage): resource.MustParse(disksize),
                },
            },
            StorageClassName: &(storageclass.Name),
            DataSource: &v1.TypedLocalObjectReference{
                APIGroup: &snapshotapigroup,
                Kind:     "VolumeSnapshot",
                Name:     datasourceName,
            },
        },
    }

    if pvclaimlabels != nil {
        claim.Labels = pvclaimlabels
    }

    return claim
}
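As a usage note, a hypothetical standalone restore built on this spec helper (the "2Gi" size is illustrative):

// Hypothetical example: build a PVC spec sourced from a VolumeSnapshot and
// submit it with the framework helper used elsewhere in this file.
pvcSpec := GetPersistentVolumeClaimSpecWithDatasource(namespace, "2Gi",
    storageclass, nil, v1.ReadWriteOnce, volumeSnapshot.Name,
    constants.Snapshotapigroup)
restoredPvc, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec)
gomega.Expect(err).NotTo(gomega.HaveOccurred())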