617 changes: 312 additions & 305 deletions tests/e2e/csi_snapshot_basic.go

Large diffs are not rendered by default.

108 changes: 63 additions & 45 deletions tests/e2e/csi_static_provisioning_basic.go

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions tests/e2e/e2e_common.go
@@ -517,6 +517,7 @@ var (
audienceForSvcAccountName = "https://kubernetes.default.svc.cluster.local"
envIsDevopsUser = "IS_DEVOPS_USER"
serviceAccountKeyword = "ServiceAccount"
envUserName = "USERNAME"
)

// storage policy usages for storage quota validation
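Reviewer note: the new `envUserName` key is only declared in this hunk; its consumer is in one of the larger diffs not rendered above. Assuming it is read like the neighbouring env constants, a spec would resolve it through the suite's existing `GetAndExpectStringEnvVar` helper — sketch only, not part of this diff:

```go
// Sketch only — not part of this PR. Assuming the new constant is consumed like
// the other env keys in e2e_common.go, a spec would resolve it via the suite's
// existing helper, which fails the test if USERNAME is unset.
func getTestUserName() string {
	return GetAndExpectStringEnvVar(envUserName)
}
```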
11 changes: 6 additions & 5 deletions tests/e2e/snapshot_stretched_supervisor.go
@@ -68,13 +68,14 @@ var _ = ginkgo.Describe("Stretched-Supervisor-Snapshot", func() {

bootstrap()
client = f.ClientSet
var err error
var nodeList *v1.NodeList
namespace = getNamespaceToRunTests(f)

// parameters set for storage policy
scParameters = make(map[string]string)

// fetching node list and checking node status
nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet)
nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet)
framework.ExpectNoError(err, "Unable to find ready and schedulable Node")
if !(len(nodeList.Items) > 0) {
framework.Failf("Unable to find ready and schedulable Node")
@@ -155,7 +156,7 @@ var _ = ginkgo.Describe("Stretched-Supervisor-Snapshot", func() {
defer cancel()

ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace))
fss.DeleteAllStatefulSets(ctx, client, namespace)
deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace)
ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace))
err := client.CoreV1().Services(namespace).Delete(ctx, servicename, *metav1.NewDeleteOptions(0))
if !apierrors.IsNotFound(err) {
@@ -488,7 +489,7 @@ var _ = ginkgo.Describe("Stretched-Supervisor-Snapshot", func() {
int32(stsReplicas), true, allowedTopologies, true, true,
zonalPolicy, "", storageclass, zonalPolicy)
defer func() {
fss.DeleteAllStatefulSets(ctx, client, namespace)
deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace)
}()

framework.Logf("Verify PV node affinity and that the PODS are running on appropriate node")
@@ -544,7 +545,7 @@ var _ = ginkgo.Describe("Stretched-Supervisor-Snapshot", func() {
v1.ReadWriteOnce, volumeSnapshot1.Name, snapshotapigroup)
restoreVol1, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
restorepv1, err := fpv.WaitForPVClaimBoundPhase(ctx, client,
restorepv1, err := WaitForPVClaimBoundPhase(ctx, client,
[]*v1.PersistentVolumeClaim{restoreVol1}, framework.ClaimProvisionTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
restoreVolHandle1 := restorepv1[0].Spec.CSI.VolumeHandle
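Reviewer note: `deleteAllStsAndPodsPVCsInNamespace`, which replaces the plain `fss.DeleteAllStatefulSets` calls in the hunks above, is defined in the suite's utility files and its body is not visible in this view. A minimal hedged sketch of what a helper with that signature is assumed to cover — statefulset teardown plus leftover pod and PVC cleanup, reusing packages these test files already import — might look roughly like this; the PR's actual implementation may differ:

```go
// Hedged sketch only: statefulset teardown followed by pod and PVC cleanup so
// storage-policy quota is released between specs.
func deleteAllStsAndPodsPVCsInNamespaceSketch(ctx context.Context, c clientset.Interface, ns string) {
	// Tear down all statefulsets (and the pods they own) via the e2e framework helper.
	fss.DeleteAllStatefulSets(ctx, c, ns)

	// Force-delete any pods still left in the namespace.
	pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
	framework.ExpectNoError(err)
	for _, pod := range pods.Items {
		delErr := c.CoreV1().Pods(ns).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
		if delErr != nil && !apierrors.IsNotFound(delErr) {
			framework.ExpectNoError(delErr)
		}
	}

	// Delete leftover PVCs as well.
	pvcs, err := c.CoreV1().PersistentVolumeClaims(ns).List(ctx, metav1.ListOptions{})
	framework.ExpectNoError(err)
	for _, pvc := range pvcs.Items {
		delErr := c.CoreV1().PersistentVolumeClaims(ns).Delete(ctx, pvc.Name, *metav1.NewDeleteOptions(0))
		if delErr != nil && !apierrors.IsNotFound(delErr) {
			framework.ExpectNoError(delErr)
		}
	}
}
```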
263 changes: 141 additions & 122 deletions tests/e2e/snapshot_vmservice_vm.go

Large diffs are not rendered by default.

84 changes: 49 additions & 35 deletions tests/e2e/statefulsets.go
@@ -82,18 +82,25 @@ var _ = ginkgo.Describe("statefulset", func() {
stsReplicas int32
allowedTopologies []v1.TopologySelectorLabelRequirement
isQuotaValidationSupported bool
adminClient clientset.Interface
)

ginkgo.BeforeEach(func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
namespace = getNamespaceToRunTests(f)
var err error
client = f.ClientSet
namespace = getNamespaceToRunTests(f)
adminClient, client = initializeClusterClientsByUserRoles(client)

bootstrap()
sc, err := client.StorageV1().StorageClasses().Get(ctx, defaultNginxStorageClassName, metav1.GetOptions{})
if err == nil && sc != nil {
gomega.Expect(client.StorageV1().StorageClasses().Delete(ctx, sc.Name,
*metav1.NewDeleteOptions(0))).NotTo(gomega.HaveOccurred())

if vanillaCluster {
sc, err := client.StorageV1().StorageClasses().Get(ctx, defaultNginxStorageClassName, metav1.GetOptions{})
if err == nil && sc != nil {
gomega.Expect(client.StorageV1().StorageClasses().Delete(ctx, sc.Name,
*metav1.NewDeleteOptions(0))).NotTo(gomega.HaveOccurred())
}
}

scParameters = make(map[string]string)
@@ -127,7 +134,7 @@ var _ = ginkgo.Describe("statefulset", func() {
}

if stretchedSVC {
nodeList, err = fnodes.GetReadySchedulableNodes(ctx, client)
nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet)
framework.ExpectNoError(err, "Unable to find ready and schedulable Node")
}

@@ -142,12 +149,13 @@ var _ = ginkgo.Describe("statefulset", func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace))
fss.DeleteAllStatefulSets(ctx, client, namespace)
deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace)
ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace))
err := client.CoreV1().Services(namespace).Delete(ctx, servicename, *metav1.NewDeleteOptions(0))
if !apierrors.IsNotFound(err) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}

if supervisorCluster {
dumpSvcNsEventsOnTestFailure(client, namespace)
}
@@ -195,8 +203,6 @@ var _ = ginkgo.Describe("statefulset", func() {
} else {
storageClassName = storagePolicyName
ginkgo.By("Running for WCP setup")
profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName)
scParameters[scParamStoragePolicyID] = profileID
}

restConfig := getRestConfigClient()
@@ -220,7 +226,7 @@ var _ = ginkgo.Describe("statefulset", func() {

if stretchedSVC {
scParameters[svStorageClassName] = zonalPolicy
storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{})
storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{})
if !apierrors.IsNotFound(err) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
@@ -232,6 +238,10 @@ var _ = ginkgo.Describe("statefulset", func() {

CreateStatefulSet(namespace, statefulset, client)
replicas := *(statefulset.Spec.Replicas)

defer func() {
deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace)
}()
// Waiting for pods status to be Ready
fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas)
gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred())
@@ -438,8 +448,6 @@ var _ = ginkgo.Describe("statefulset", func() {
} else {
storageClassName = storagePolicyName
ginkgo.By("Running for WCP setup")
profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName)
scParameters[scParamStoragePolicyID] = profileID
}

ginkgo.By("Creating service")
@@ -456,6 +464,9 @@ var _ = ginkgo.Describe("statefulset", func() {
ginkgo.By("Creating statefulset")
CreateStatefulSet(namespace, statefulset, client)
replicas := *(statefulset.Spec.Replicas)
defer func() {
deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace)
}()
// Waiting for pods status to be Ready
fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas)
gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred())
@@ -637,8 +648,6 @@ var _ = ginkgo.Describe("statefulset", func() {
storageClassName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores)
framework.Logf("storageClassName %v", storageClassName)
ginkgo.By("CNS_TEST: Running for WCP setup")
profileID := e2eVSphere.GetSpbmPolicyID(storageClassName)
scParameters[scParamStoragePolicyID] = profileID
}

if !vcptocsi {
@@ -668,6 +677,10 @@ var _ = ginkgo.Describe("statefulset", func() {
Spec.StorageClassName = &storageClassName
CreateStatefulSet(namespace, statefulset, client)
replicas := *(statefulset.Spec.Replicas)
defer func() {
deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace)
}()

// Waiting for pods status to be Ready
fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas)
gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred())
@@ -827,7 +840,7 @@ var _ = ginkgo.Describe("statefulset", func() {
12. Increase the CSI driver replica to 3

*/
ginkgo.It("[ef-wcp][csi-block-vanilla][csi-supervisor][pq-vanilla-block] ListVolumeResponse "+
ginkgo.It("[ef-wcp][csi-block-vanilla] [csi-supervisor] ListVolumeResponse "+
"Validation", ginkgo.Label(p1, listVolume, block, vanilla, wcp, vc70), func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -843,21 +856,18 @@ var _ = ginkgo.Describe("statefulset", func() {
storageClassName = "nginx-sc-default"
} else {
ginkgo.By("Running for WCP setup")

profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName)
scParameters[scParamStoragePolicyID] = profileID
// create resource quota
storageClassName = storagePolicyName
}

ginkgo.By("scale down CSI driver POD to 1 , so that it will" +
"be easy to validate all Listvolume response on one driver POD")
collectPodLogs(ctx, client, csiSystemNamespace)
scaledownCSIDriver, err := scaleCSIDriver(ctx, client, namespace, 1)
collectPodLogs(ctx, adminClient, csiSystemNamespace)
scaledownCSIDriver, err := scaleCSIDriver(ctx, adminClient, namespace, 1)
gomega.Expect(scaledownCSIDriver).To(gomega.BeTrue(), "csi driver scaledown is not successful")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
ginkgo.By("Scale up the csi-driver replica to 3")
success, err := scaleCSIDriver(ctx, client, namespace, 3)
success, err := scaleCSIDriver(ctx, adminClient, namespace, 3)
gomega.Expect(success).To(gomega.BeTrue(), "csi driver scale up to 3 replica not successful")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
@@ -884,6 +894,10 @@ var _ = ginkgo.Describe("statefulset", func() {
Spec.StorageClassName = &storageClassName
CreateStatefulSet(namespace, statefulset, client)
replicas := *(statefulset.Spec.Replicas)

defer func() {
deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace)
}()
// Waiting for pods status to be Ready
fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas)
gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred())
@@ -937,7 +951,7 @@ var _ = ginkgo.Describe("statefulset", func() {
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
}
_, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig,
_, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, adminClient, sshClientConfig,
containerName, logMessage, volumesBeforeScaleUp)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

Expand All @@ -957,7 +971,7 @@ var _ = ginkgo.Describe("statefulset", func() {

ginkgo.By("Validate pagination")
logMessage = "token for next set: 3"
_, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, containerName, logMessage, nil)
_, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, adminClient, sshClientConfig, containerName, logMessage, nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

if vanillaCluster {
@@ -970,7 +984,7 @@ var _ = ginkgo.Describe("statefulset", func() {
// List volume responses will show up in the interval of every 1 minute.
// To see the error, it is required to wait for 1 min after deleting a few volumes.
time.Sleep(pollTimeoutShort)
_, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, containerName, logMessage, nil)
_, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, adminClient, sshClientConfig, containerName, logMessage, nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}

@@ -996,7 +1010,7 @@ var _ = ginkgo.Describe("statefulset", func() {
ginkgo.By("Validate ListVolume Response when no volumes are present")
logMessage = "ListVolumes served 0 results"

_, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, containerName, logMessage, nil)
_, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, adminClient, sshClientConfig, containerName, logMessage, nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})

@@ -1009,7 +1023,7 @@ var _ = ginkgo.Describe("statefulset", func() {
5. Scale up replica to 5.
6. Exit MM and clean up all pods and PVs.
*/
ginkgo.It("[ef-f-wcp][csi-supervisor] Test MM workflow on statefulset", ginkgo.Label(p1, block, wcp,
ginkgo.It("[ef-wcp][csi-supervisor] Test MM workflow on statefulset", ginkgo.Label(p1, block, wcp,
disruptive, vc70), func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -1021,7 +1035,7 @@ var _ = ginkgo.Describe("statefulset", func() {
setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimit)

ginkgo.By("Get the storageclass from Supervisor")
sc, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{})
sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())

ginkgo.By("Creating service")
@@ -1117,7 +1131,7 @@ var _ = ginkgo.Describe("statefulset", func() {
7. clean up the data
*/

ginkgo.It("[ef-f-stretched-svc][stretched-svc] Statefulset-parallel-podManagementPolicy-wffc",
ginkgo.It("[ef-stretched-svc][stretched-svc] Statefulset-parallel-podManagementPolicy-wffc",
ginkgo.Label(p0, block, stretchedSvc, vc70), func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -1130,7 +1144,7 @@ var _ = ginkgo.Describe("statefulset", func() {
parallelStatefulSetCreation := false

scParameters[svStorageClassName] = zonalWffcPolicy
storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{})
storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{})
if !apierrors.IsNotFound(err) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
Expand All @@ -1147,7 +1161,7 @@ var _ = ginkgo.Describe("statefulset", func() {
stsReplicas, nodeAffinityToSet, nil, podAntiAffinityToSet, true,
"", "", storageclass, storageClassName)
defer func() {
fss.DeleteAllStatefulSets(ctx, client, namespace)
deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace)
}()

framework.Logf("Verify PV node affinity and that the PODS are running on appropriate node")
@@ -1205,7 +1219,7 @@ var _ = ginkgo.Describe("statefulset", func() {
parallelStatefulSetCreation := false

scParameters[svStorageClassName] = zonalPolicy
storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{})
storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{})
if !apierrors.IsNotFound(err) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
@@ -1222,7 +1236,7 @@ var _ = ginkgo.Describe("statefulset", func() {
stsReplicas, nodeAffinityToSet, allowedTopologies, podAntiAffinityToSet, true,
"", "", storageclass, storageClassName)
defer func() {
fss.DeleteAllStatefulSets(ctx, client, namespace)
deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace)
}()

framework.Logf("Verify PV node affinity and that the PODS are running on appropriate node")
@@ -1278,7 +1292,7 @@ var _ = ginkgo.Describe("statefulset", func() {
parallelStatefulSetCreation := false

scParameters[svStorageClassName] = zonalPolicy
storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{})
storageclass, err := adminClient.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{})
if !apierrors.IsNotFound(err) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
@@ -1295,7 +1309,7 @@ var _ = ginkgo.Describe("statefulset", func() {
stsReplicas, nodeAffinityToSet, allowedTopologies, podAntiAffinityToSet, true,
"", "", storageclass, storageClassName)
defer func() {
fss.DeleteAllStatefulSets(ctx, client, namespace)
deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace)
}()

ginkgo.By("Verify all volumes are attached to Nodes after Statefulsets is scaled up/down")
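Reviewer note: the recurring switch from `client` to `adminClient` in the hunks above comes from `initializeClusterClientsByUserRoles`, whose definition is not shown in this view; its return semantics are assumed from usage. Condensed into one hedged sketch of the pattern as it appears in this file — cluster-scoped objects go through `adminClient`, namespaced test workloads keep using `client`:

```go
// Hedged restatement of the split introduced above — not new code in this PR,
// just the pattern as it appears in the hunks.
adminClient, client = initializeClusterClientsByUserRoles(f.ClientSet)

// Cluster-scoped objects (StorageClasses, CSI driver pods in csiSystemNamespace)
// are read and scaled through adminClient ...
sc, err := adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Using StorageClass %q", sc.Name)

// ... while namespaced workloads and their cleanup keep using client.
CreateStatefulSet(namespace, statefulset, client)
defer func() {
	deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace)
}()
```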