Commit 4841e5b (parent: d881c0d)

use framework.ExpectNoError() for daemon_set.go and deployment.go in e2e/apps

File tree: 2 files changed, +157 -141 lines

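The substance of the change is mechanical: every bare gomega.Expect(err).NotTo(gomega.HaveOccurred()) becomes framework.ExpectNoError(err), and assertions that already carried a failure message keep it as the helper's trailing explain argument. For orientation, the adopted helper is roughly the following thin wrapper over gomega; this is a sketch based on the test/e2e/framework package of this era, not code from the diff below:

// Sketch of the adopted helper (assumption: test/e2e/framework of this era;
// not part of the diff shown below).
// ExpectNoError asserts that err is nil, failing the spec with the optional
// explanation arguments when it is not.
func ExpectNoError(err error, explain ...interface{}) {
	ExpectNoErrorWithOffset(1, err, explain...)
}

// ExpectNoErrorWithOffset performs the same assertion with a caller-supplied
// stack offset, so the reported failure points at the test line that called
// the wrapper rather than at the wrapper itself.
func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}

Besides being shorter, the wrapper centralizes error reporting, so failures are logged consistently and attributed to the caller's line instead of a helper frame.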

test/e2e/apps/daemon_set.go (36 additions, 36 deletions)

@@ -89,7 +89,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 			e2elog.Logf("unable to dump pods: %v", err)
 		}
 		err = clearDaemonSetNodeLabels(f.ClientSet)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 	})
 
 	f = framework.NewDefaultFramework("daemonsets")
@@ -106,12 +106,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		c = f.ClientSet
 
 		updatedNS, err := updateNamespaceAnnotations(c, ns)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		ns = updatedNS.Name
 
 		err = clearDaemonSetNodeLabels(c)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 	})
 
 	/*
@@ -124,19 +124,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 
 		ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
 		ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 		err = checkDaemonStatus(f, dsName)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.")
 		podList := listDaemonPods(c, ns, label)
 		pod := podList.Items[0]
 		err = c.CoreV1().Pods(ns).Delete(pod.Name, nil)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to revive")
 	})
@@ -153,7 +153,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ds := newDaemonSet(dsName, image, complexLabel)
 		ds.Spec.Template.Spec.NodeSelector = nodeSelector
 		ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		ginkgo.By("Initially, daemon pods should not be running on any nodes.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
@@ -169,14 +169,14 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
 		err = checkDaemonStatus(f, dsName)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled")
 		nodeSelector[daemonsetColorLabel] = "green"
 		greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
 		framework.ExpectNoError(err, "error removing labels on node")
-		gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
-			NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
+		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
+		framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
 
 		ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
 		patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
@@ -188,7 +188,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
 		err = checkDaemonStatus(f, dsName)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 	})
 
 	// We defer adding this test to conformance pending the disposition of moving DaemonSet scheduling logic to the
@@ -216,7 +216,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 			},
 		}
 		ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		ginkgo.By("Initially, daemon pods should not be running on any nodes.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
@@ -232,13 +232,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
 		err = checkDaemonStatus(f, dsName)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
 		_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
 		framework.ExpectNoError(err, "error removing labels on node")
-		gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
-			NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
+		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
+		framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
 	})
 
 	/*
@@ -250,13 +250,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 
 		ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
 		ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 		err = checkDaemonStatus(f, dsName)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		ginkgo.By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.")
 		podList := listDaemonPods(c, ns, label)
@@ -282,15 +282,15 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ds := newDaemonSet(dsName, image, label)
 		ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType}
 		ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 
 		// Check history and labels
 		ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		waitForHistoryCreated(c, ns, label, 1)
 		first := curHistory(listDaemonHistories(c, ns, label), ds)
 		firstHash := first.Labels[apps.DefaultDaemonSetUniqueLabelKey]
@@ -300,19 +300,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ginkgo.By("Update daemon pods image.")
 		patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
 		ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		ginkgo.By("Check that daemon pods images aren't updated.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 
 		// Check history and labels
 		ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		waitForHistoryCreated(c, ns, label, 2)
 		cur := curHistory(listDaemonHistories(c, ns, label), ds)
 		gomega.Expect(cur.Revision).To(gomega.Equal(int64(2)))
@@ -331,15 +331,15 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ds := newDaemonSet(dsName, image, label)
 		ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
 		ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 
 		// Check history and labels
 		ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		waitForHistoryCreated(c, ns, label, 1)
 		cur := curHistory(listDaemonHistories(c, ns, label), ds)
 		hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
@@ -349,26 +349,26 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ginkgo.By("Update daemon pods image.")
 		patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
 		ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		// Time to complete the rolling upgrade is proportional to the number of nodes in the cluster.
 		// Get the number of nodes, and set the timeout appropriately.
 		nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		nodeCount := len(nodes.Items)
 		retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second
 
 		ginkgo.By("Check that daemon pods images are updated.")
 		err = wait.PollImmediate(dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 
 		// Check history and labels
 		ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		waitForHistoryCreated(c, ns, label, 2)
 		cur = curHistory(listDaemonHistories(c, ns, label), ds)
 		hash = cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
@@ -389,7 +389,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ds := newDaemonSet(dsName, image, label)
 		ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
 		ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		e2elog.Logf("Check that daemon pods launch on every node of the cluster")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
@@ -401,11 +401,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
 			update.Spec.Template.Spec.Containers[0].Image = newImage
 		})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		// Make sure we're in the middle of a rollout
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage))
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		pods := listDaemonPods(c, ns, label)
 		var existingPods, newPods []*v1.Pod
@@ -433,11 +433,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
 			update.Spec.Template.Spec.Containers[0].Image = image
 		})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		e2elog.Logf("Make sure DaemonSet rollback is complete")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		// After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted
 		pods = listDaemonPods(c, ns, label)
@@ -487,7 +487,7 @@ func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *
 	selector := labels.Set(label).AsSelector()
 	options := metav1.ListOptions{LabelSelector: selector.String()}
 	podList, err := c.CoreV1().Pods(ns).List(options)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 	gomega.Expect(len(podList.Items)).To(gomega.BeNumerically(">", 0))
 	return podList
 }
@@ -748,7 +748,7 @@ func listDaemonHistories(c clientset.Interface, ns string, label map[string]stri
 	selector := labels.Set(label).AsSelector()
 	options := metav1.ListOptions{LabelSelector: selector.String()}
 	historyList, err := c.AppsV1().ControllerRevisions(ns).List(options)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 	gomega.Expect(len(historyList.Items)).To(gomega.BeNumerically(">", 0))
 	return historyList
 }
@@ -761,7 +761,7 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a
 		// Every history should have the hash label
 		gomega.Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(gomega.BeNumerically(">", 0))
 		match, err := daemon.Match(ds, history)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		if match {
			curHistory = history
			foundCurHistories++
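Most replacements above are one-for-one line swaps. The only structural edits are the two chained poll assertions (in the hunks starting at -169 and -232), where the expression wrapped in gomega.Expect(...) is unrolled into an assignment plus a framework.ExpectNoError call, so the error value is handed to the helper and the failure message is preserved:

	err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
	framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")

This also brings those two call sites in line with the poll-and-assert pattern used everywhere else in the file.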
