@@ -35,8 +35,8 @@ import (
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )
 
 const (
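For context on the import change above: dropping the dot-imports means every Ginkgo/Gomega helper in this file must now be package-qualified, which is what the rest of the diff does mechanically. A minimal, self-contained sketch of the resulting style (a hypothetical spec file, not code from this PR):

    package qualifiedimports_test

    import (
        "testing"

        "github.com/onsi/ginkgo"
        "github.com/onsi/gomega"
    )

    // TestSketch wires Gomega failures into Ginkgo and runs the specs.
    func TestSketch(t *testing.T) {
        gomega.RegisterFailHandler(ginkgo.Fail)
        ginkgo.RunSpecs(t, "qualified-import sketch")
    }

    var _ = ginkgo.Describe("qualified imports", func() {
        ginkgo.It("calls helpers through their package names", func() {
            ginkgo.By("asserting with gomega.Expect instead of a dot-imported Expect")
            gomega.Expect(1 + 1).To(gomega.Equal(2))
        })
    })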
@@ -119,7 +119,7 @@ func stopNfsServer(serverPod *v1.Pod) {
 // will execute the passed in shell cmd. Waits for the pod to start.
 // Note: the nfs plugin is defined inline, no PV or PVC.
 func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP, cmd string) *v1.Pod {
-	By("create pod using nfs volume")
+	ginkgo.By("create pod using nfs volume")
 
 	isPrivileged := true
 	cmdLine := []string{"-c", cmd}
@@ -166,13 +166,13 @@ func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP,
 		},
 	}
 	rtnPod, err := c.CoreV1().Pods(ns).Create(pod)
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 	err = f.WaitForPodReady(rtnPod.Name) // running & ready
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 	rtnPod, err = c.CoreV1().Pods(ns).Get(rtnPod.Name, metav1.GetOptions{}) // return fresh pod
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	return rtnPod
 }
 
@@ -189,7 +189,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 	mountDir := filepath.Join(podDir, "volumes", "kubernetes.io~nfs")
 	// use ip rather than hostname in GCE
 	nodeIP, err := framework.GetHostExternalAddress(c, pod)
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 	condMsg := "deleted"
 	if !expectClean {
@@ -216,7 +216,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 		e2elog.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
 		err = wait.Poll(poll, timeout, func() (bool, error) {
 			result, err := e2essh.NodeExec(nodeIP, test.cmd, framework.TestContext.Provider)
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			e2essh.LogResult(result)
 			ok := (result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0)
 			if expectClean && ok { // keep trying
@@ -227,7 +227,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 			}
 			return true, nil // done, host is as expected
 		})
-		Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
 	}
 
 	if expectClean {
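The retry loop in checkPodCleanup (the two hunks above) is built on wait.Poll from k8s.io/apimachinery/pkg/util/wait: the condition func returns (false, nil) to keep polling, (true, nil) once the host looks as expected, or a non-nil error to abort early. A minimal sketch of that pattern with made-up values (assumes only the apimachinery dependency):

    package main

    import (
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
        attempts := 0
        // Poll every 200ms for at most 2s until the condition reports done.
        err := wait.Poll(200*time.Millisecond, 2*time.Second, func() (bool, error) {
            attempts++
            // (false, nil) = keep trying; (true, nil) = done; non-nil error = abort.
            return attempts >= 3, nil
        })
        fmt.Printf("finished after %d attempts, err=%v\n", attempts, err)
    }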
@@ -244,7 +244,7 @@ var _ = SIGDescribe("kubelet", func() {
 	)
 	f := framework.NewDefaultFramework("kubelet")
 
-	BeforeEach(func() {
+	ginkgo.BeforeEach(func() {
 		c = f.ClientSet
 		ns = f.Namespace.Name
 	})
@@ -265,14 +265,14 @@ var _ = SIGDescribe("kubelet", func() {
 			{podsPerNode: 10, timeout: 1 * time.Minute},
 		}
 
-		BeforeEach(func() {
+		ginkgo.BeforeEach(func() {
 			// Use node labels to restrict the pods to be assigned only to the
 			// nodes we observe initially.
 			nodeLabels = make(map[string]string)
 			nodeLabels["kubelet_cleanup"] = "true"
 			nodes := framework.GetReadySchedulableNodesOrDie(c)
 			numNodes = len(nodes.Items)
-			Expect(numNodes).NotTo(BeZero())
+			gomega.Expect(numNodes).NotTo(gomega.BeZero())
 			nodeNames = sets.NewString()
 			// If there are a lot of nodes, we don't want to use all of them
 			// (if there are 1000 nodes in the cluster, starting 10 pods/node
@@ -297,7 +297,7 @@ var _ = SIGDescribe("kubelet", func() {
 			}
 		})
 
-		AfterEach(func() {
+		ginkgo.AfterEach(func() {
 			if resourceMonitor != nil {
 				resourceMonitor.Stop()
 			}
@@ -312,30 +312,30 @@ var _ = SIGDescribe("kubelet", func() {
 		for _, itArg := range deleteTests {
 			name := fmt.Sprintf(
 				"kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
-			It(name, func() {
+			ginkgo.It(name, func() {
 				totalPods := itArg.podsPerNode * numNodes
-				By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
+				ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
 				rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))
 
-				Expect(framework.RunRC(testutils.RCConfig{
+				gomega.Expect(framework.RunRC(testutils.RCConfig{
 					Client:       f.ClientSet,
 					Name:         rcName,
 					Namespace:    f.Namespace.Name,
 					Image:        imageutils.GetPauseImageName(),
 					Replicas:     totalPods,
 					NodeSelector: nodeLabels,
-				})).NotTo(HaveOccurred())
+				})).NotTo(gomega.HaveOccurred())
 				// Perform a sanity check so that we know all desired pods are
 				// running on the nodes according to kubelet. The timeout is set to
 				// only 30 seconds here because framework.RunRC already waited for all pods to
 				// transition to the running status.
-				Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods,
-					time.Second*30)).NotTo(HaveOccurred())
+				gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods,
+					time.Second*30)).NotTo(gomega.HaveOccurred())
 				if resourceMonitor != nil {
 					resourceMonitor.LogLatest()
 				}
 
-				By("Deleting the RC")
+				ginkgo.By("Deleting the RC")
 				framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
 				// Check that the pods really are gone by querying /runningpods on the
 				// node. The /runningpods handler checks the container runtime (or its
@@ -345,8 +345,8 @@ var _ = SIGDescribe("kubelet", func() {
 				// - a bug in graceful termination (if it is enabled)
 				// - docker slow to delete pods (or resource problems causing slowness)
 				start := time.Now()
-				Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0,
-					itArg.timeout)).NotTo(HaveOccurred())
+				gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0,
+					itArg.timeout)).NotTo(gomega.HaveOccurred())
 				e2elog.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
 					time.Since(start))
 				if resourceMonitor != nil {
@@ -369,7 +369,7 @@ var _ = SIGDescribe("kubelet", func() {
 	// If the nfs-server pod is deleted the client pod's mount can not be unmounted.
 	// If the nfs-server pod is deleted and re-created, due to having a different ip
 	// addr, the client pod's mount still cannot be unmounted.
-	Context("Host cleanup after disrupting NFS volume [NFS]", func() {
+	ginkgo.Context("Host cleanup after disrupting NFS volume [NFS]", func() {
 		// issue #31272
 		var (
 			nfsServerPod *v1.Pod
@@ -389,38 +389,38 @@ var _ = SIGDescribe("kubelet", func() {
 			},
 		}
 
-		BeforeEach(func() {
+		ginkgo.BeforeEach(func() {
 			framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
 			_, nfsServerPod, nfsIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
 		})
 
-		AfterEach(func() {
+		ginkgo.AfterEach(func() {
 			err := framework.DeletePodWithWait(f, c, pod)
-			Expect(err).NotTo(HaveOccurred(), "AfterEach: Failed to delete client pod ", pod.Name)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete client pod ", pod.Name)
 			err = framework.DeletePodWithWait(f, c, nfsServerPod)
-			Expect(err).NotTo(HaveOccurred(), "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
 		})
 
 		// execute It blocks from above table of tests
 		for _, t := range testTbl {
-			It(t.itDescr, func() {
+			ginkgo.It(t.itDescr, func() {
 				pod = createPodUsingNfs(f, c, ns, nfsIP, t.podCmd)
 
-				By("Stop the NFS server")
+				ginkgo.By("Stop the NFS server")
 				stopNfsServer(nfsServerPod)
 
-				By("Delete the pod mounted to the NFS volume -- expect failure")
+				ginkgo.By("Delete the pod mounted to the NFS volume -- expect failure")
 				err := framework.DeletePodWithWait(f, c, pod)
-				Expect(err).To(HaveOccurred())
+				framework.ExpectError(err)
 				// pod object is now stale, but is intentionally not nil
 
-				By("Check if pod's host has been cleaned up -- expect not")
+				ginkgo.By("Check if pod's host has been cleaned up -- expect not")
 				checkPodCleanup(c, pod, false)
 
-				By("Restart the nfs server")
+				ginkgo.By("Restart the nfs server")
 				restartNfsServer(nfsServerPod)
 
-				By("Verify that the deleted client pod is now cleaned up")
+				ginkgo.By("Verify that the deleted client pod is now cleaned up")
 				checkPodCleanup(c, pod, true)
 			})
 		}
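One substitution in the hunk above goes beyond adding a package qualifier: the expected-failure delete now uses framework.ExpectError(err) instead of Expect(err).To(HaveOccurred()). Both fail the spec when err is nil. A rough, self-contained sketch of that underlying assertion written with qualified Gomega only (the e2e framework helper itself is not imported here):

    package expecterror_test

    import (
        "errors"
        "testing"

        "github.com/onsi/gomega"
    )

    // TestExpectedError shows the check that the diff delegates to
    // framework.ExpectError: the test fails unless err is non-nil.
    func TestExpectedError(t *testing.T) {
        g := gomega.NewWithT(t)
        err := errors.New("delete is expected to fail while the NFS server is down")
        g.Expect(err).To(gomega.HaveOccurred())
    }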