Skip to content

Commit 92a416c

Browse files
CSI testing with different personas
1 parent e338da4 commit 92a416c

File tree

69 files changed

+1809
-950
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

69 files changed

+1809
-950
lines changed

tests/e2e/config_change_test.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@ var _ bool = ginkgo.Describe("[csi-supervisor] config-change-test", func() {
3535
namespace = getNamespaceToRunTests(f)
3636
ctx, cancel = context.WithCancel(context.Background())
3737
defer cancel()
38+
3839
nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet)
3940
framework.ExpectNoError(err, "Unable to find ready and schedulable Node")
4041
if !(len(nodeList.Items) > 0) {
@@ -81,7 +82,7 @@ var _ bool = ginkgo.Describe("[csi-supervisor] config-change-test", func() {
8182
gomega.Expect(err).NotTo(gomega.HaveOccurred())
8283

8384
ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name))
84-
pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client,
85+
pvs, err := WaitForPVClaimBoundPhase(ctx, client,
8586
[]*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout)
8687
gomega.Expect(err).NotTo(gomega.HaveOccurred())
8788
gomega.Expect(pvs).NotTo(gomega.BeEmpty())

tests/e2e/csi_snapshot_basic.go

Lines changed: 47 additions & 30 deletions
Large diffs are not rendered by default.

tests/e2e/csi_snapshot_negative.go

Lines changed: 41 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -59,13 +59,17 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti
5959
serviceName string
6060
pandoraSyncWaitTime int
6161
storagePolicyName string
62+
adminClient clientset.Interface
6263
)
6364

6465
ginkgo.BeforeEach(func() {
6566
bootstrap()
6667
ctx, cancel := context.WithCancel(context.Background())
6768
defer cancel()
6869
client = f.ClientSet
70+
var err error
71+
72+
adminClient, client = initializeClusterClientsByUserRoles(client)
6973
namespace = getNamespaceToRunTests(f)
7074
scParameters = make(map[string]string)
7175
isServiceStopped = false
@@ -114,12 +118,13 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti
114118
ctx, vSphereCSIControllerPodNamePrefix, metav1.GetOptions{})
115119
gomega.Expect(err).NotTo(gomega.HaveOccurred())
116120
svcCsiReplicas = *csiDeployment.Spec.Replicas
117-
}
121+
} else {
118122

119-
csiDeployment, err := client.AppsV1().Deployments(csiNamespace).Get(
120-
ctx, vSphereCSIControllerPodNamePrefix, metav1.GetOptions{})
121-
gomega.Expect(err).NotTo(gomega.HaveOccurred())
122-
csiReplicas = *csiDeployment.Spec.Replicas
123+
csiDeployment, err := client.AppsV1().Deployments(csiNamespace).Get(
124+
ctx, vSphereCSIControllerPodNamePrefix, metav1.GetOptions{})
125+
gomega.Expect(err).NotTo(gomega.HaveOccurred())
126+
csiReplicas = *csiDeployment.Spec.Replicas
127+
}
123128

124129
if os.Getenv(envPandoraSyncWaitTime) != "" {
125130
pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime))
@@ -143,15 +148,15 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti
143148
if serviceName == "CSI" {
144149
framework.Logf("Starting CSI driver")
145150
ignoreLabels := make(map[string]string)
146-
err := updateDeploymentReplicawithWait(client, csiReplicas, vSphereCSIControllerPodNamePrefix,
151+
err := updateDeploymentReplicawithWait(adminClient, csiReplicas, vSphereCSIControllerPodNamePrefix,
147152
csiSystemNamespace)
148153
gomega.Expect(err).NotTo(gomega.HaveOccurred())
149154

150155
// Wait for the CSI Pods to be up and Running
151-
list_of_pods, err := fpod.GetPodsInNamespace(ctx, client, csiSystemNamespace, ignoreLabels)
156+
list_of_pods, err := fpod.GetPodsInNamespace(ctx, adminClient, csiSystemNamespace, ignoreLabels)
152157
gomega.Expect(err).NotTo(gomega.HaveOccurred())
153158
num_csi_pods := len(list_of_pods)
154-
err = fpod.WaitForPodsRunningReady(ctx, client, csiSystemNamespace, int(num_csi_pods),
159+
err = fpod.WaitForPodsRunningReady(ctx, adminClient, csiSystemNamespace, int(num_csi_pods),
155160
time.Duration(pollTimeout))
156161
gomega.Expect(err).NotTo(gomega.HaveOccurred())
157162
} else if serviceName == hostdServiceName {
@@ -170,7 +175,15 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti
170175
}
171176

172177
ginkgo.By(fmt.Sprintf("Resetting provisioner time interval to %s sec", defaultProvisionerTimeInSec))
173-
updateCSIDeploymentProvisionerTimeout(client, csiSystemNamespace, defaultProvisionerTimeInSec)
178+
updateCSIDeploymentProvisionerTimeout(adminClient, csiSystemNamespace, defaultProvisionerTimeInSec)
179+
180+
if supervisorCluster {
181+
dumpSvcNsEventsOnTestFailure(client, namespace)
182+
}
183+
if guestCluster {
184+
svcClient, svNamespace := getSvcClientAndNamespace()
185+
dumpSvcNsEventsOnTestFailure(svcClient, svNamespace)
186+
}
174187
})
175188

176189
/*
@@ -318,6 +331,13 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string,
318331

319332
storagePolicyName := GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores)
320333

334+
/*var testClient clientset.Interface
335+
if vanillaCluster {
336+
testClient = client
337+
} else {
338+
testClient = adminClient
339+
}*/
340+
adminClient, _ := initializeClusterClientsByUserRoles(client)
321341
if vanillaCluster {
322342
ginkgo.By("Create storage class")
323343
scParameters[scParamDatastoreURL] = datastoreURL
@@ -329,7 +349,7 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string,
329349
}()
330350
} else if supervisorCluster {
331351
ginkgo.By("Get storage class")
332-
storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{})
352+
storageclass, err = adminClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{})
333353
if !apierrors.IsNotFound(err) {
334354
gomega.Expect(err).NotTo(gomega.HaveOccurred())
335355
}
@@ -340,7 +360,7 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string,
340360
storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, "")
341361
gomega.Expect(err).NotTo(gomega.HaveOccurred())
342362
defer func() {
343-
err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
363+
err := adminClient.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
344364
gomega.Expect(err).NotTo(gomega.HaveOccurred())
345365
}()
346366
}
@@ -404,18 +424,18 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string,
404424

405425
if serviceName == "CSI" {
406426
ginkgo.By("Stopping CSI driver")
407-
isServiceStopped, err = stopCSIPods(ctx, client, csiNamespace)
427+
isServiceStopped, err = stopCSIPods(ctx, adminClient, csiNamespace)
408428
gomega.Expect(err).NotTo(gomega.HaveOccurred())
409429

410430
defer func() {
411431
if isServiceStopped {
412432
framework.Logf("Starting CSI driver")
413-
isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiNamespace)
433+
isServiceStopped, err = startCSIPods(ctx, adminClient, csiReplicas, csiNamespace)
414434
gomega.Expect(err).NotTo(gomega.HaveOccurred())
415435
}
416436
}()
417437
framework.Logf("Starting CSI driver")
418-
isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiNamespace)
438+
isServiceStopped, err = startCSIPods(ctx, adminClient, csiReplicas, csiNamespace)
419439
gomega.Expect(err).NotTo(gomega.HaveOccurred())
420440

421441
ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime))
@@ -531,17 +551,17 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string,
531551

532552
if serviceName == "CSI" {
533553
ginkgo.By("Stopping CSI driver")
534-
isServiceStopped, err = stopCSIPods(ctx, client, csiNamespace)
554+
isServiceStopped, err = stopCSIPods(ctx, adminClient, csiNamespace)
535555
gomega.Expect(err).NotTo(gomega.HaveOccurred())
536556
defer func() {
537557
if isServiceStopped {
538558
framework.Logf("Starting CSI driver")
539-
isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiNamespace)
559+
isServiceStopped, err = startCSIPods(ctx, adminClient, csiReplicas, csiNamespace)
540560
gomega.Expect(err).NotTo(gomega.HaveOccurred())
541561
}
542562
}()
543563
framework.Logf("Starting CSI driver")
544-
isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiNamespace)
564+
isServiceStopped, err = startCSIPods(ctx, adminClient, csiReplicas, csiNamespace)
545565
gomega.Expect(err).NotTo(gomega.HaveOccurred())
546566

547567
ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime))
@@ -633,7 +653,7 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string,
633653

634654
//After service restart
635655
bootstrap()
636-
persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client,
656+
persistentvolumes2, err := WaitForPVClaimBoundPhase(ctx, client,
637657
[]*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout)
638658
gomega.Expect(err).NotTo(gomega.HaveOccurred())
639659
volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle
@@ -657,18 +677,18 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string,
657677
if isSnapshotCreated {
658678
if serviceName == "CSI" {
659679
ginkgo.By("Stopping CSI driver")
660-
isServiceStopped, err = stopCSIPods(ctx, client, csiNamespace)
680+
isServiceStopped, err = stopCSIPods(ctx, adminClient, csiNamespace)
661681
gomega.Expect(err).NotTo(gomega.HaveOccurred())
662682

663683
defer func() {
664684
if isServiceStopped {
665685
framework.Logf("Starting CSI driver")
666-
isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiNamespace)
686+
isServiceStopped, err = startCSIPods(ctx, adminClient, csiReplicas, csiNamespace)
667687
gomega.Expect(err).NotTo(gomega.HaveOccurred())
668688
}
669689
}()
670690
framework.Logf("Starting CSI driver")
671-
isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiNamespace)
691+
isServiceStopped, err = startCSIPods(ctx, adminClient, csiReplicas, csiNamespace)
672692
gomega.Expect(err).NotTo(gomega.HaveOccurred())
673693

674694
ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime))

tests/e2e/csi_snapshot_utils.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -691,7 +691,7 @@ func verifyVolumeRestoreOperation(ctx context.Context, client clientset.Interfac
691691
pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec)
692692
gomega.Expect(err).NotTo(gomega.HaveOccurred())
693693

694-
persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(ctx, client,
694+
persistentvolumes2, err := WaitForPVClaimBoundPhase(ctx, client,
695695
[]*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout)
696696
gomega.Expect(err).NotTo(gomega.HaveOccurred())
697697
volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle
@@ -759,8 +759,8 @@ func verifyVolumeRestoreOperation(ctx context.Context, client clientset.Interfac
759759

760760
// createPVCAndQueryVolumeInCNS creates PVc with a given storage class on a given namespace
761761
// and verifies cns metadata of that volume if verifyCNSVolume is set to true
762-
func createPVCAndQueryVolumeInCNS(ctx context.Context, client clientset.Interface, namespace string,
763-
pvclaimLabels map[string]string, accessMode v1.PersistentVolumeAccessMode,
762+
func createPVCAndQueryVolumeInCNS(ctx context.Context, client clientset.Interface,
763+
namespace string, pvclaimLabels map[string]string, accessMode v1.PersistentVolumeAccessMode,
764764
ds string, storageclass *storagev1.StorageClass,
765765
verifyCNSVolume bool) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume, error) {
766766

@@ -771,7 +771,7 @@ func createPVCAndQueryVolumeInCNS(ctx context.Context, client clientset.Interfac
771771
}
772772

773773
// Wait for PVC to be bound to a PV
774-
persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client,
774+
persistentvolumes, err := WaitForPVClaimBoundPhase(ctx, client,
775775
[]*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout*2)
776776
if err != nil {
777777
return pvclaim, persistentvolumes, fmt.Errorf("failed to wait for PVC to bind to a PV: %w", err)

0 commit comments

Comments (0)