Skip to content

Commit 97c1408

Browse files
e2e: handle ceph-csi-operator deployment changes
This commit adds e2e/operator.go containing utility methods specific to the operator. Signed-off-by: Praveen M <m.praveen@ibm.com>
1 parent aa90d55 commit 97c1408

File tree

13 files changed

+448
-169
lines changed

13 files changed

+448
-169
lines changed

e2e/cephfs.go

Lines changed: 61 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -46,8 +46,19 @@ var (
4646
subvolumegroup = "e2e"
4747
fileSystemName = "myfs"
4848
fileSystemPoolName = "myfs-replicated"
49+
50+
helmCephFSPodsLabel = "ceph-csi-cephfs"
51+
52+
operatorCephFSDeploymentName = "cephfs.csi.ceph.com-ctrlplugin"
53+
operatorCephFSDaemonsetName = "cephfs.csi.ceph.com-nodeplugin"
54+
55+
cephFSDeployment CephFSDeploymentMethod
4956
)
5057

58+
type CephFSDeployment struct {
59+
DriverInfo
60+
}
61+
5162
func deployCephfsPlugin() {
5263
// delete objects deployed by rook
5364

@@ -175,13 +186,35 @@ var _ = Describe(cephfsType, func() {
175186
Skip("Skipping CephFS E2E")
176187
}
177188
c = f.ClientSet
178-
if deployCephFS {
179-
if cephCSINamespace != defaultNs {
180-
err := createNamespace(c, cephCSINamespace)
181-
if err != nil {
182-
framework.Failf("failed to create namespace %s: %v", cephCSINamespace, err)
183-
}
189+
cephFSDeployment = &CephFSDeployment{
190+
DriverInfo: DriverInfo{
191+
clientSet: c,
192+
deploymentName: cephFSDeploymentName,
193+
daemonsetName: cephFSDeamonSetName,
194+
helmPodLabelName: helmCephFSPodsLabel,
195+
driverContainers: []string{cephFSContainerName},
196+
},
197+
}
198+
if operatorDeployment {
199+
cephFSDeployment = &OperatorDeployment{
200+
DriverInfo: DriverInfo{
201+
clientSet: c,
202+
deploymentName: operatorCephFSDeploymentName,
203+
daemonsetName: operatorCephFSDaemonsetName,
204+
driverContainers: []string{cephFSContainerName},
205+
},
206+
}
207+
}
208+
209+
// No need to create the namespace if ceph-csi is deployed via helm or operator.
210+
if cephCSINamespace != defaultNs && !(helmTest || operatorDeployment) {
211+
err := createNamespace(c, cephCSINamespace)
212+
if err != nil {
213+
framework.Failf("failed to create namespace %s: %v", cephCSINamespace, err)
184214
}
215+
}
216+
217+
if deployCephFS {
185218
deployCephfsPlugin()
186219
}
187220
err := createConfigMap(cephFSDirPath, f.ClientSet, f)
@@ -208,12 +241,9 @@ var _ = Describe(cephfsType, func() {
208241
}
209242
deployVault(f.ClientSet, deployTimeout)
210243

211-
// wait for cluster name update in deployment
212-
containers := []string{cephFSContainerName}
213-
err = waitForContainersArgsUpdate(c, cephCSINamespace, cephFSDeploymentName,
214-
"clustername", defaultClusterName, containers, deployTimeout)
244+
err = cephFSDeployment.setClusterName(defaultClusterName)
215245
if err != nil {
216-
framework.Failf("timeout waiting for deployment update %s/%s: %v", cephCSINamespace, cephFSDeploymentName, err)
246+
framework.Failf("failed to set cluster name: %v", err)
217247
}
218248

219249
err = createSubvolumegroup(f, fileSystemName, subvolumegroup)
@@ -226,13 +256,14 @@ var _ = Describe(cephfsType, func() {
226256
if !testCephFS || upgradeTesting {
227257
Skip("Skipping CephFS E2E")
228258
}
259+
229260
if CurrentSpecReport().Failed() {
230261
// log pods created by helm chart
231-
logsCSIPods("app=ceph-csi-cephfs", c)
262+
logsCSIPods("app="+helmCephFSPodsLabel, c)
232263
// log provisioner
233-
logsCSIPods("app=csi-cephfsplugin-provisioner", c)
264+
logsCSIPods("app="+cephFSDeployment.getDeploymentName(), c)
234265
// log node plugin
235-
logsCSIPods("app=csi-cephfsplugin", c)
266+
logsCSIPods("app="+cephFSDeployment.getDaemonsetName(), c)
236267

237268
// log all details from the namespace where Ceph-CSI is deployed
238269
e2edebug.DumpAllNamespaceInfo(context.TODO(), c, cephCSINamespace)
@@ -266,11 +297,12 @@ var _ = Describe(cephfsType, func() {
266297

267298
if deployCephFS {
268299
deleteCephfsPlugin()
269-
if cephCSINamespace != defaultNs {
270-
err = deleteNamespace(c, cephCSINamespace)
271-
if err != nil {
272-
framework.Failf("failed to delete namespace %s: %v", cephCSINamespace, err)
273-
}
300+
}
301+
// No need to delete the namespace if ceph-csi is deployed via helm or operator.
302+
if cephCSINamespace != defaultNs && !(helmTest || operatorDeployment) {
303+
err = deleteNamespace(c, cephCSINamespace)
304+
if err != nil {
305+
framework.Failf("failed to delete namespace %s: %v", cephCSINamespace, err)
274306
}
275307
}
276308
})
@@ -299,16 +331,16 @@ var _ = Describe(cephfsType, func() {
299331
}
300332

301333
By("checking provisioner deployment is running", func() {
302-
err := waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout)
334+
err := waitForDeploymentComplete(f.ClientSet, cephFSDeployment.getDeploymentName(), cephCSINamespace, deployTimeout)
303335
if err != nil {
304-
framework.Failf("timeout waiting for deployment %s: %v", cephFSDeploymentName, err)
336+
framework.Failf("timeout waiting for deployment %s: %v", cephFSDeployment.getDeploymentName(), err)
305337
}
306338
})
307339

308340
By("checking nodeplugin daemonset pods are running", func() {
309-
err := waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
341+
err := waitForDaemonSets(cephFSDeployment.getDaemonsetName(), cephCSINamespace, f.ClientSet, deployTimeout)
310342
if err != nil {
311-
framework.Failf("timeout waiting for daemonset %s: %v", cephFSDeamonSetName, err)
343+
framework.Failf("timeout waiting for daemonset %s: %v", cephFSDeployment.getDaemonsetName(), err)
312344
}
313345
})
314346

@@ -338,7 +370,7 @@ var _ = Describe(cephfsType, func() {
338370
}
339371

340372
err = verifySeLinuxMountOption(f, pvcPath, appPath,
341-
cephFSDeamonSetName, cephFSContainerName, cephCSINamespace)
373+
cephFSDeployment.getDaemonsetName(), cephFSContainerName, cephCSINamespace)
342374
if err != nil {
343375
framework.Failf("failed to verify mount options: %v", err)
344376
}
@@ -772,7 +804,7 @@ var _ = Describe(cephfsType, func() {
772804
}
773805
}
774806
// Kill ceph-fuse in cephfs-csi node plugin Pods.
775-
nodePluginSelector, err := getDaemonSetLabelSelector(f, cephCSINamespace, cephFSDeamonSetName)
807+
nodePluginSelector, err := getDaemonSetLabelSelector(f, cephCSINamespace, cephFSDeployment.getDaemonsetName())
776808
if err != nil {
777809
framework.Failf("failed to get node plugin DaemonSet label selector: %v", err)
778810
}
@@ -2506,20 +2538,11 @@ var _ = Describe(cephfsType, func() {
25062538
framework.Failf("failed to create configmap: %v", err)
25072539
}
25082540

2509-
// delete csi pods
2510-
err = deletePodWithLabel("app in (ceph-csi-cephfs, csi-cephfsplugin, csi-cephfsplugin-provisioner)",
2511-
cephCSINamespace, false)
2512-
if err != nil {
2513-
framework.Failf("failed to delete pods with labels: %v", err)
2514-
}
2515-
// wait for csi pods to come up
2516-
err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
2517-
if err != nil {
2518-
framework.Failf("timeout waiting for daemonset pods: %v", err)
2519-
}
2520-
err = waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout)
2541+
// restart csi pods for the configmap to take effect.
2542+
err = recreateCSIPods(f, cephFSDeployment.getPodSelector(),
2543+
cephFSDeployment.getDaemonsetName(), cephFSDeployment.getDeploymentName())
25212544
if err != nil {
2522-
framework.Failf("timeout waiting for deployment pods: %v", err)
2545+
framework.Failf("failed to recreate cephfs csi pods: %v", err)
25232546
}
25242547
}
25252548

e2e/cephfs_helper.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -161,7 +161,7 @@ func unmountCephFSVolume(f *framework.Framework, appName, pvcName string) error
161161
stdErr, err := execCommandInDaemonsetPod(
162162
f,
163163
cmd,
164-
cephFSDeamonSetName,
164+
cephFSDeployment.getDaemonsetName(),
165165
pod.Spec.NodeName,
166166
cephFSContainerName,
167167
cephCSINamespace)
@@ -396,7 +396,7 @@ func validateEncryptedCephfs(f *framework.Framework, pvName, appName string) err
396396
pod.UID,
397397
pvName)
398398

399-
selector, err := getDaemonSetLabelSelector(f, cephCSINamespace, cephFSDeamonSetName)
399+
selector, err := getDaemonSetLabelSelector(f, cephCSINamespace, cephFSDeployment.getDaemonsetName())
400400
if err != nil {
401401
return fmt.Errorf("failed to get labels: %w", err)
402402
}

e2e/e2e_test.go

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,7 @@ func init() {
5252
flag.StringVar(&fileSystemName, "filesystem", "myfs", "CephFS filesystem to use")
5353
flag.StringVar(&clusterID, "clusterid", "", "Ceph cluster ID to use (defaults to `ceph fsid` detection)")
5454
flag.StringVar(&nfsDriverName, "nfs-driver", "nfs.csi.ceph.com", "name of the driver for NFS-volumes")
55+
flag.BoolVar(&operatorDeployment, "operator-deployment", false, "test running on deployment via operator")
5556
setDefaultKubeconfig()
5657

5758
// Register framework flags, then handle flags
@@ -91,4 +92,8 @@ func handleFlags() {
9192
testNFS = testCephFS
9293
deployNFS = deployCephFS
9394
}
95+
96+
if operatorDeployment {
97+
cephCSINamespace = "ceph-csi-operator-system"
98+
}
9499
}

e2e/migration.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,8 @@ func generateClusterIDConfigMapForMigration(f *framework.Framework, c kubernetes
6363
return fmt.Errorf("failed to create configmap: %w", err)
6464
}
6565
// restart csi pods for the configmap to take effect.
66-
err = recreateCSIPods(f, rbdPodLabels, rbdDaemonsetName, rbdDeploymentName)
66+
err = recreateCSIPods(f,
67+
rbdDeployment.getPodSelector(), rbdDeployment.getDaemonsetName(), rbdDeployment.getDeploymentName())
6768
if err != nil {
6869
return fmt.Errorf("failed to recreate rbd csi pods: %w", err)
6970
}

0 commit comments

Comments
 (0)