Commit 80fec73

Merge pull request kubernetes#77649 from atoato88/issue-77103-use-ExpectError-e2e-lifecycle
Use framework.ExpectNoError() for e2e/lifecycle
2 parents 274876e + be4af8f commit 80fec73
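
The change is mechanical: each gomega.Expect(err).NotTo(gomega.HaveOccurred(), ...) call becomes framework.ExpectNoError(err, ...). Besides being shorter, the framework helper logs the unexpected error and reports the failure at the test line that checked the error rather than inside the assertion helper. Below is a minimal sketch of roughly what the helper does, assuming the shape of test/e2e/framework/util.go from this era — treat it as an approximation, not the exact upstream code (Logf here is a stand-in for the framework's own logger):

package framework

import (
	"fmt"

	"github.com/onsi/gomega"
)

// Logf stands in for the framework's logger so this sketch is self-contained.
func Logf(format string, args ...interface{}) {
	fmt.Printf(format+"\n", args...)
}

// ExpectNoError fails the test if err is non-nil, logging the error and
// forwarding any optional explanation into the assertion failure message.
func ExpectNoError(err error, explain ...interface{}) {
	ExpectNoErrorWithOffset(1, err, explain...)
}

// ExpectNoErrorWithOffset reports a failure "offset" stack frames above its
// caller, so the failure is attributed to the test line that checked the
// error rather than to this helper.
func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	if err != nil {
		Logf("Unexpected error occurred: %v", err)
	}
	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}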

5 files changed: 30 additions, 32 deletions

test/e2e/lifecycle/addon_update.go

Lines changed: 6 additions & 5 deletions

@@ -227,7 +227,7 @@ var _ = SIGDescribe("Addon update", func() {
 
 		var err error
 		sshClient, err = getMasterSSHClient()
-		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get the master SSH client.")
+		framework.ExpectNoError(err, "Failed to get the master SSH client.")
 	})
 
 	ginkgo.AfterEach(func() {
@@ -275,7 +275,7 @@ var _ = SIGDescribe("Addon update", func() {
 
 		for _, p := range remoteFiles {
 			err := writeRemoteFile(sshClient, p.data, temporaryRemotePath, p.fileName, 0644)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to write file %q at remote path %q with ssh client %+v", p.fileName, temporaryRemotePath, sshClient)
+			framework.ExpectNoError(err, "Failed to write file %q at remote path %q with ssh client %+v", p.fileName, temporaryRemotePath, sshClient)
 		}
 
 		// directory on kubernetes-master
@@ -284,7 +284,7 @@ var _ = SIGDescribe("Addon update", func() {
 
 		// cleanup from previous tests
 		_, _, _, err := sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix))
-		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to remove remote dir %q with ssh client %+v", destinationDirPrefix, sshClient)
+		framework.ExpectNoError(err, "Failed to remove remote dir %q with ssh client %+v", destinationDirPrefix, sshClient)
 
 		defer sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix)) // ignore result in cleanup
 		sshExecAndVerify(sshClient, fmt.Sprintf("sudo mkdir -p %s", destinationDir))
@@ -300,7 +300,8 @@ var _ = SIGDescribe("Addon update", func() {
 		// Delete the "ensure exist class" addon at the end.
 		defer func() {
 			e2elog.Logf("Cleaning up ensure exist class addon.")
-			gomega.Expect(f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)).NotTo(gomega.HaveOccurred())
+			err := f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)
+			framework.ExpectNoError(err)
 		}()
 
 		waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-reconcile-test", true)
@@ -386,7 +387,7 @@ func getMasterSSHClient() (*ssh.Client, error) {
 
 func sshExecAndVerify(client *ssh.Client, cmd string) {
 	_, _, rc, err := sshExec(client, cmd)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to execute %q with ssh client %+v", cmd, client)
+	framework.ExpectNoError(err, "Failed to execute %q with ssh client %+v", cmd, client)
 	gomega.Expect(rc).To(gomega.Equal(0), "error return code from executing command on the cluster: %s", cmd)
 }
 

test/e2e/lifecycle/kubelet_security.go

Lines changed: 2 additions & 2 deletions

@@ -46,15 +46,15 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
 	// make sure kubelet readonly (10255) and cadvisor (4194) ports are disabled via API server proxy
 	ginkgo.It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func() {
 		result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		var statusCode int
 		result.StatusCode(&statusCode)
 		gomega.Expect(statusCode).NotTo(gomega.Equal(http.StatusOK))
 	})
 	ginkgo.It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func() {
 		result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "containers/", 4194)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		var statusCode int
 		result.StatusCode(&statusCode)

test/e2e/lifecycle/reboot.go

Lines changed: 1 addition & 2 deletions

@@ -35,7 +35,6 @@ import (
 	testutils "k8s.io/kubernetes/test/utils"
 
 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 )
 
 const (
@@ -70,7 +69,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 			namespaceName := metav1.NamespaceSystem
 			ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName))
 			events, err := f.ClientSet.CoreV1().Events(namespaceName).List(metav1.ListOptions{})
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 
 			for _, e := range events.Items {
 				e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)

test/e2e/lifecycle/resize_nodes.go

Lines changed: 15 additions & 16 deletions

@@ -27,7 +27,6 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 
 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 )
 
 func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
@@ -51,7 +50,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 		c = f.ClientSet
 		ns = f.Namespace.Name
 		systemPods, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		systemPodsNo = int32(len(systemPods))
 		if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
 			framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
@@ -104,36 +103,36 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 			// the cluster is restored to health.
 			ginkgo.By("waiting for system pods to successfully restart")
 			err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 		})
 
 		ginkgo.It("should be able to delete nodes", func() {
 			// Create a replication controller for a service that serves its hostname.
 			// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
 			name := "my-hostname-delete-node"
 			numNodes, err := framework.NumberOfRegisteredNodes(c)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 			originalNodeCount = int32(numNodes)
 			common.NewRCByName(c, ns, name, originalNodeCount, nil)
 			err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 
 			targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)
 			ginkgo.By(fmt.Sprintf("decreasing cluster size to %d", targetNumNodes))
 			err = framework.ResizeGroup(group, targetNumNodes)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 			err = framework.WaitForGroupSize(group, targetNumNodes)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 			err = framework.WaitForReadyNodes(c, int(originalNodeCount-1), 10*time.Minute)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 
 			ginkgo.By("waiting 1 minute for the watch in the podGC to catch up, remove any pods scheduled on " +
 				"the now non-existent node and the RC to recreate it")
 			time.Sleep(time.Minute)
 
 			ginkgo.By("verifying whether the pods from the removed node are recreated")
 			err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 		})
 
 		// TODO: Bug here - testName is not correct
@@ -143,26 +142,26 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 			name := "my-hostname-add-node"
 			common.NewSVCByName(c, ns, name)
 			numNodes, err := framework.NumberOfRegisteredNodes(c)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 			originalNodeCount = int32(numNodes)
 			common.NewRCByName(c, ns, name, originalNodeCount, nil)
 			err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 
 			targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes + 1)
 			ginkgo.By(fmt.Sprintf("increasing cluster size to %d", targetNumNodes))
 			err = framework.ResizeGroup(group, targetNumNodes)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 			err = framework.WaitForGroupSize(group, targetNumNodes)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 			err = framework.WaitForReadyNodes(c, int(originalNodeCount+1), 10*time.Minute)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 
 			ginkgo.By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", originalNodeCount+1))
 			err = resizeRC(c, ns, name, originalNodeCount+1)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 			err = framework.VerifyPods(c, ns, name, true, originalNodeCount+1)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 		})
 	})
 })

test/e2e/lifecycle/restart.go

Lines changed: 6 additions & 7 deletions

@@ -29,7 +29,6 @@ import (
 	testutils "k8s.io/kubernetes/test/utils"
 
 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 )
 
 func nodeNames(nodes []v1.Node) []string {
@@ -54,14 +53,14 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		framework.SkipUnlessProviderIs("gce", "gke")
 		var err error
 		ps, err = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		numNodes, err = framework.NumberOfRegisteredNodes(f.ClientSet)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		systemNamespace = metav1.NamespaceSystem
 
 		ginkgo.By("ensuring all nodes are ready")
 		originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		e2elog.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))
 
 		ginkgo.By("ensuring all pods are running and ready")
@@ -87,11 +86,11 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 	ginkgo.It("should restart all nodes and ensure all nodes and pods recover", func() {
 		ginkgo.By("restarting all of the nodes")
 		err := common.RestartNodes(f.ClientSet, originalNodes)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 
 		ginkgo.By("ensuring all nodes are ready after the restart")
 		nodesAfter, err := framework.CheckNodesReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		e2elog.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))
 
 		// Make sure that we have the same number of nodes. We're not checking
@@ -108,7 +107,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		ginkgo.By("ensuring the same number of pods are running and ready after restart")
 		podCheckStart := time.Now()
 		podNamesAfter, err := framework.WaitForNRestartablePods(ps, len(originalPodNames), framework.RestartPodReadyAgainTimeout)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
 		if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) {
 			pods := ps.List()
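
One consequence visible across the five files: reboot.go, resize_nodes.go, and restart.go drop their github.com/onsi/gomega import entirely, because error-nil checks were their only remaining use of gomega. addon_update.go and kubelet_security.go keep the import, since assertions on non-error values (the SSH return code in sshExecAndVerify and the HTTP status codes in the proxy tests) still go through gomega directly.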

0 commit comments
