
Commit a60d212

Merge pull request kubernetes#77715 from danielqsj/t2
fix golint error; make test error checking more readable in test/e2e/node
2 parents 91ba27e + 124efde

15 files changed: +209 −209 lines changed
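
Every file in this commit follows the same pattern: drop the Ginkgo/Gomega dot imports that golint flags and qualify each call with its package name. The following is an illustrative sketch only (not part of the diff; the spec body and error value are made up) of the converted style:

    package example

    import (
        "errors"

        "github.com/onsi/ginkgo"
        "github.com/onsi/gomega"
    )

    // With qualified imports every helper names its package at the call site;
    // under the old dot imports the same calls read Describe, It, By, Expect, HaveOccurred.
    var _ = ginkgo.Describe("qualified imports", func() {
        ginkgo.It("checks an error explicitly", func() {
            ginkgo.By("producing an error")
            err := errors.New("boom")
            gomega.Expect(err).To(gomega.HaveOccurred())
        })
    })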

hack/.golint_failures

Lines changed: 0 additions & 1 deletion
@@ -604,7 +604,6 @@ test/e2e/chaosmonkey
 test/e2e/common
 test/e2e/framework
 test/e2e/lifecycle/bootstrap
-test/e2e/node
 test/e2e/scalability
 test/e2e/scheduling
 test/e2e/storage/drivers

test/e2e/node/apparmor.go

Lines changed: 7 additions & 7 deletions
@@ -21,29 +21,29 @@ import (
     "k8s.io/kubernetes/test/e2e/framework"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"

-    . "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo"
 )

 var _ = SIGDescribe("AppArmor", func() {
     f := framework.NewDefaultFramework("apparmor")

-    Context("load AppArmor profiles", func() {
-        BeforeEach(func() {
+    ginkgo.Context("load AppArmor profiles", func() {
+        ginkgo.BeforeEach(func() {
             common.SkipIfAppArmorNotSupported()
             common.LoadAppArmorProfiles(f)
         })
-        AfterEach(func() {
-            if !CurrentGinkgoTestDescription().Failed {
+        ginkgo.AfterEach(func() {
+            if !ginkgo.CurrentGinkgoTestDescription().Failed {
                 return
             }
             framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
         })

-        It("should enforce an AppArmor profile", func() {
+        ginkgo.It("should enforce an AppArmor profile", func() {
             common.CreateAppArmorTestPod(f, false, true)
         })

-        It("can disable an AppArmor profile, using unconfined", func() {
+        ginkgo.It("can disable an AppArmor profile, using unconfined", func() {
             common.CreateAppArmorTestPod(f, true, true)
         })
     })

test/e2e/node/crictl.go

Lines changed: 5 additions & 5 deletions
@@ -24,22 +24,22 @@ import (
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"

-    . "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo"
 )

 var _ = SIGDescribe("crictl", func() {
     f := framework.NewDefaultFramework("crictl")

-    BeforeEach(func() {
+    ginkgo.BeforeEach(func() {
         // `crictl` is not available on all cloud providers.
         framework.SkipUnlessProviderIs("gce", "gke")
         // The test requires $HOME/.ssh/id_rsa key to be present.
         framework.SkipUnlessSSHKeyPresent()
     })

-    It("should be able to run crictl on the node", func() {
+    ginkgo.It("should be able to run crictl on the node", func() {
         // Get all nodes' external IPs.
-        By("Getting all nodes' SSH-able IP addresses")
+        ginkgo.By("Getting all nodes' SSH-able IP addresses")
         hosts, err := e2essh.NodeSSHHosts(f.ClientSet)
         if err != nil {
             framework.Failf("Error getting node hostnames: %v", err)
@@ -55,7 +55,7 @@ var _ = SIGDescribe("crictl", func() {
         for _, testCase := range testCases {
             // Choose an arbitrary node to test.
             host := hosts[0]
-            By(fmt.Sprintf("SSH'ing to node %q to run %q", host, testCase.cmd))
+            ginkgo.By(fmt.Sprintf("SSH'ing to node %q to run %q", host, testCase.cmd))

             result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider)
             stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr)

test/e2e/node/events.go

Lines changed: 14 additions & 14 deletions
@@ -29,8 +29,8 @@ import (
     "k8s.io/kubernetes/test/e2e/framework"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"

-    . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
+    "github.com/onsi/ginkgo"
+    "github.com/onsi/gomega"
 )

 var _ = SIGDescribe("Events", func() {
@@ -45,7 +45,7 @@ var _ = SIGDescribe("Events", func() {

         podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)

-        By("creating the pod")
+        ginkgo.By("creating the pod")
         name := "send-events-" + string(uuid.NewUUID())
         value := strconv.Itoa(time.Now().Nanosecond())
         pod := &v1.Pod{
@@ -67,9 +67,9 @@ var _ = SIGDescribe("Events", func() {
             },
         }

-        By("submitting the pod to kubernetes")
+        ginkgo.By("submitting the pod to kubernetes")
         defer func() {
-            By("deleting the pod")
+            ginkgo.By("deleting the pod")
             podClient.Delete(pod.Name, nil)
         }()
         if _, err := podClient.Create(pod); err != nil {
@@ -78,25 +78,25 @@ var _ = SIGDescribe("Events", func() {

         framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

-        By("verifying the pod is in kubernetes")
+        ginkgo.By("verifying the pod is in kubernetes")
         selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
         options := metav1.ListOptions{LabelSelector: selector.String()}
         pods, err := podClient.List(options)
-        Expect(len(pods.Items)).To(Equal(1))
+        gomega.Expect(len(pods.Items)).To(gomega.Equal(1))

-        By("retrieving the pod")
-        podWithUid, err := podClient.Get(pod.Name, metav1.GetOptions{})
+        ginkgo.By("retrieving the pod")
+        podWithUID, err := podClient.Get(pod.Name, metav1.GetOptions{})
         if err != nil {
             framework.Failf("Failed to get pod: %v", err)
         }
-        e2elog.Logf("%+v\n", podWithUid)
+        e2elog.Logf("%+v\n", podWithUID)
         var events *v1.EventList
         // Check for scheduler event about the pod.
-        By("checking for scheduler event about the pod")
+        ginkgo.By("checking for scheduler event about the pod")
         framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
             selector := fields.Set{
                 "involvedObject.kind": "Pod",
-                "involvedObject.uid": string(podWithUid.UID),
+                "involvedObject.uid": string(podWithUID.UID),
                 "involvedObject.namespace": f.Namespace.Name,
                 "source": v1.DefaultSchedulerName,
             }.AsSelector().String()
@@ -112,10 +112,10 @@ var _ = SIGDescribe("Events", func() {
             return false, nil
         }))
         // Check for kubelet event about the pod.
-        By("checking for kubelet event about the pod")
+        ginkgo.By("checking for kubelet event about the pod")
         framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
             selector := fields.Set{
-                "involvedObject.uid": string(podWithUid.UID),
+                "involvedObject.uid": string(podWithUID.UID),
                 "involvedObject.kind": "Pod",
                 "involvedObject.namespace": f.Namespace.Name,
                 "source": "kubelet",

test/e2e/node/framework.go

Lines changed: 1 addition & 0 deletions
@@ -18,6 +18,7 @@ package node

 import "k8s.io/kubernetes/test/e2e/framework"

+// SIGDescribe annotates the test with the SIG label.
 func SIGDescribe(text string, body func()) bool {
     return framework.KubeDescribe("[sig-node] "+text, body)
 }
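
The new comment documents the only exported helper in this package. As a hypothetical usage sketch (not part of the diff): any spec registered through SIGDescribe is labelled "[sig-node] <text>", so a run can target this SIG with a Ginkgo focus regex.

    // Hypothetical example: the [sig-node] prefix added by SIGDescribe lets a
    // run select these specs, e.g. with --ginkgo.focus='\[sig-node\]'.
    var _ = SIGDescribe("Example", func() {
        ginkgo.It("carries the [sig-node] label", func() {
            // test body elided
        })
    })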

test/e2e/node/kubelet.go

Lines changed: 34 additions & 34 deletions
@@ -35,8 +35,8 @@ import (
     testutils "k8s.io/kubernetes/test/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"

-    . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
+    "github.com/onsi/ginkgo"
+    "github.com/onsi/gomega"
 )

 const (
@@ -119,7 +119,7 @@ func stopNfsServer(serverPod *v1.Pod) {
 // will execute the passed in shell cmd. Waits for the pod to start.
 // Note: the nfs plugin is defined inline, no PV or PVC.
 func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP, cmd string) *v1.Pod {
-    By("create pod using nfs volume")
+    ginkgo.By("create pod using nfs volume")

     isPrivileged := true
     cmdLine := []string{"-c", cmd}
@@ -166,13 +166,13 @@ func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP,
         },
     }
     rtnPod, err := c.CoreV1().Pods(ns).Create(pod)
-    Expect(err).NotTo(HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())

     err = f.WaitForPodReady(rtnPod.Name) // running & ready
-    Expect(err).NotTo(HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())

     rtnPod, err = c.CoreV1().Pods(ns).Get(rtnPod.Name, metav1.GetOptions{}) // return fresh pod
-    Expect(err).NotTo(HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
     return rtnPod
 }

@@ -189,7 +189,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
     mountDir := filepath.Join(podDir, "volumes", "kubernetes.io~nfs")
     // use ip rather than hostname in GCE
     nodeIP, err := framework.GetHostExternalAddress(c, pod)
-    Expect(err).NotTo(HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())

     condMsg := "deleted"
     if !expectClean {
@@ -216,7 +216,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
         e2elog.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
         err = wait.Poll(poll, timeout, func() (bool, error) {
             result, err := e2essh.NodeExec(nodeIP, test.cmd, framework.TestContext.Provider)
-            Expect(err).NotTo(HaveOccurred())
+            gomega.Expect(err).NotTo(gomega.HaveOccurred())
             e2essh.LogResult(result)
             ok := (result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0)
             if expectClean && ok { // keep trying
@@ -227,7 +227,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
             }
             return true, nil // done, host is as expected
         })
-        Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
+        gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
     }

     if expectClean {
@@ -244,7 +244,7 @@ var _ = SIGDescribe("kubelet", func() {
     )
     f := framework.NewDefaultFramework("kubelet")

-    BeforeEach(func() {
+    ginkgo.BeforeEach(func() {
         c = f.ClientSet
         ns = f.Namespace.Name
     })
@@ -265,14 +265,14 @@ var _ = SIGDescribe("kubelet", func() {
            {podsPerNode: 10, timeout: 1 * time.Minute},
         }

-        BeforeEach(func() {
+        ginkgo.BeforeEach(func() {
             // Use node labels to restrict the pods to be assigned only to the
             // nodes we observe initially.
             nodeLabels = make(map[string]string)
             nodeLabels["kubelet_cleanup"] = "true"
             nodes := framework.GetReadySchedulableNodesOrDie(c)
             numNodes = len(nodes.Items)
-            Expect(numNodes).NotTo(BeZero())
+            gomega.Expect(numNodes).NotTo(gomega.BeZero())
             nodeNames = sets.NewString()
             // If there are a lot of nodes, we don't want to use all of them
             // (if there are 1000 nodes in the cluster, starting 10 pods/node
@@ -297,7 +297,7 @@ var _ = SIGDescribe("kubelet", func() {
             }
         })

-        AfterEach(func() {
+        ginkgo.AfterEach(func() {
             if resourceMonitor != nil {
                 resourceMonitor.Stop()
             }
@@ -312,30 +312,30 @@ var _ = SIGDescribe("kubelet", func() {
         for _, itArg := range deleteTests {
             name := fmt.Sprintf(
                 "kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
-            It(name, func() {
+            ginkgo.It(name, func() {
                 totalPods := itArg.podsPerNode * numNodes
-                By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
+                ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
                 rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))

-                Expect(framework.RunRC(testutils.RCConfig{
+                gomega.Expect(framework.RunRC(testutils.RCConfig{
                     Client: f.ClientSet,
                     Name: rcName,
                     Namespace: f.Namespace.Name,
                     Image: imageutils.GetPauseImageName(),
                     Replicas: totalPods,
                     NodeSelector: nodeLabels,
-                })).NotTo(HaveOccurred())
+                })).NotTo(gomega.HaveOccurred())
                 // Perform a sanity check so that we know all desired pods are
                 // running on the nodes according to kubelet. The timeout is set to
                 // only 30 seconds here because framework.RunRC already waited for all pods to
                 // transition to the running status.
-                Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods,
-                    time.Second*30)).NotTo(HaveOccurred())
+                gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods,
+                    time.Second*30)).NotTo(gomega.HaveOccurred())
                 if resourceMonitor != nil {
                     resourceMonitor.LogLatest()
                 }

-                By("Deleting the RC")
+                ginkgo.By("Deleting the RC")
                 framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
                 // Check that the pods really are gone by querying /runningpods on the
                 // node. The /runningpods handler checks the container runtime (or its
@@ -345,8 +345,8 @@ var _ = SIGDescribe("kubelet", func() {
                 // - a bug in graceful termination (if it is enabled)
                 // - docker slow to delete pods (or resource problems causing slowness)
                 start := time.Now()
-                Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0,
-                    itArg.timeout)).NotTo(HaveOccurred())
+                gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0,
+                    itArg.timeout)).NotTo(gomega.HaveOccurred())
                 e2elog.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
                     time.Since(start))
                 if resourceMonitor != nil {
@@ -369,7 +369,7 @@ var _ = SIGDescribe("kubelet", func() {
     // If the nfs-server pod is deleted the client pod's mount can not be unmounted.
     // If the nfs-server pod is deleted and re-created, due to having a different ip
     // addr, the client pod's mount still cannot be unmounted.
-    Context("Host cleanup after disrupting NFS volume [NFS]", func() {
+    ginkgo.Context("Host cleanup after disrupting NFS volume [NFS]", func() {
         // issue #31272
         var (
             nfsServerPod *v1.Pod
@@ -389,38 +389,38 @@ var _ = SIGDescribe("kubelet", func() {
             },
         }

-        BeforeEach(func() {
+        ginkgo.BeforeEach(func() {
             framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
             _, nfsServerPod, nfsIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
         })

-        AfterEach(func() {
+        ginkgo.AfterEach(func() {
             err := framework.DeletePodWithWait(f, c, pod)
-            Expect(err).NotTo(HaveOccurred(), "AfterEach: Failed to delete client pod ", pod.Name)
+            gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete client pod ", pod.Name)
             err = framework.DeletePodWithWait(f, c, nfsServerPod)
-            Expect(err).NotTo(HaveOccurred(), "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
+            gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
         })

         // execute It blocks from above table of tests
         for _, t := range testTbl {
-            It(t.itDescr, func() {
+            ginkgo.It(t.itDescr, func() {
                 pod = createPodUsingNfs(f, c, ns, nfsIP, t.podCmd)

-                By("Stop the NFS server")
+                ginkgo.By("Stop the NFS server")
                 stopNfsServer(nfsServerPod)

-                By("Delete the pod mounted to the NFS volume -- expect failure")
+                ginkgo.By("Delete the pod mounted to the NFS volume -- expect failure")
                 err := framework.DeletePodWithWait(f, c, pod)
-                Expect(err).To(HaveOccurred())
+                framework.ExpectError(err)
                 // pod object is now stale, but is intentionally not nil

-                By("Check if pod's host has been cleaned up -- expect not")
+                ginkgo.By("Check if pod's host has been cleaned up -- expect not")
                 checkPodCleanup(c, pod, false)

-                By("Restart the nfs server")
+                ginkgo.By("Restart the nfs server")
                 restartNfsServer(nfsServerPod)

-                By("Verify that the deleted client pod is now cleaned up")
+                ginkgo.By("Verify that the deleted client pod is now cleaned up")
                 checkPodCleanup(c, pod, true)
             })
         }
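
Most of the kubelet.go changes are mechanical qualification, but one assertion also changes form: the delete-failure check moves from a Gomega matcher to the e2e framework helper framework.ExpectError, which is where the "more readable error checking" in the commit title comes from. A sketch of the two equivalent forms, wrapped in a hypothetical helper function for context (err comes from framework.DeletePodWithWait exactly as in the diff above):

    // assertDeleteFails is a hypothetical helper, for illustration only.
    func assertDeleteFails(f *framework.Framework, c clientset.Interface, pod *v1.Pod) {
        err := framework.DeletePodWithWait(f, c, pod)

        // Old form, requiring the gomega import at the call site:
        //   gomega.Expect(err).To(gomega.HaveOccurred())

        // New form used in the diff above; reads as a direct statement of intent:
        framework.ExpectError(err)
    }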
