Commit ea08d4d

Merge pull request kubernetes#128946 from jackfrancis/SetPVCVACName-eventually-gomega
test: don't panic during an Eventually retry loop
2 parents: 2579512 + 53499d9

6 files changed: 37 additions and 27 deletions

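Every file in this commit applies the same fix: framework.ExpectNoError is an assertion that panics when it fails, and a panic inside a gomega.Eventually callback aborts the whole test on the first transient error instead of letting the poll retry. Returning an error from the callback lets Eventually keep retrying until its timeout and report the last error if the condition never succeeds. A minimal before/after sketch of the pattern (the client variable and listItems call are hypothetical, not taken from this commit):

	// Before: an assertion inside the poll panics on a transient failure,
	// so the retry loop never gets another attempt.
	gomega.Eventually(ctx, func() bool {
		items, err := client.listItems(ctx) // hypothetical call
		framework.ExpectNoError(err)        // panics immediately on error
		return len(items) > 0
	}, 2*time.Minute, 10*time.Second).Should(gomega.BeTrueBecause("expected items to appear"))

	// After: errors are returned, so Eventually retries until the timeout
	// and includes the last returned error in its failure message.
	gomega.Eventually(ctx, func() error {
		items, err := client.listItems(ctx) // hypothetical call
		if err != nil {
			return fmt.Errorf("listing items: %w", err)
		}
		if len(items) == 0 {
			return fmt.Errorf("no items yet")
		}
		return nil
	}, 2*time.Minute, 10*time.Second).Should(gomega.Succeed())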

test/e2e/instrumentation/metrics.go

Lines changed: 1 addition & 4 deletions
@@ -44,10 +44,7 @@ var _ = common.SIGDescribe("Metrics", func() {
 		ec = f.KubemarkExternalClusterClientSet
 		gomega.Eventually(ctx, func() error {
 			grabber, err = e2emetrics.NewMetricsGrabber(ctx, c, ec, f.ClientConfig(), true, true, true, true, true, true)
-			if err != nil {
-				framework.ExpectNoError(err, "failed to create metrics grabber")
-			}
-			return nil
+			return err
 		}, 5*time.Minute, 10*time.Second).Should(gomega.BeNil())
 	})
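Note that this poll function returns a single error value but keeps gomega.BeNil() as the matcher, while the other files in this commit use gomega.Succeed(). For a func() error callback both pass on a nil error and retry otherwise; Succeed() tends to produce a clearer failure message for error-returning polls. A small illustration (fetch is a hypothetical helper):

	// Both forms pass once fetch returns nil and retry while it returns an error.
	gomega.Eventually(ctx, func() error { return fetch(ctx) }, time.Minute, time.Second).Should(gomega.BeNil())
	gomega.Eventually(ctx, func() error { return fetch(ctx) }, time.Minute, time.Second).Should(gomega.Succeed())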

test/e2e/node/gpu.go

Lines changed: 9 additions & 7 deletions
@@ -18,6 +18,7 @@ package node
 
 import (
 	"context"
+	"fmt"
 	"os"
 	"regexp"
 	"time"
@@ -292,10 +293,12 @@ func SetupEnvironmentAndSkipIfNeeded(ctx context.Context, f *framework.Framework
 	}
 }
 
-func areGPUsAvailableOnAllSchedulableNodes(ctx context.Context, clientSet clientset.Interface) bool {
+func areGPUsAvailableOnAllSchedulableNodes(ctx context.Context, clientSet clientset.Interface) error {
 	framework.Logf("Getting list of Nodes from API server")
 	nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
-	framework.ExpectNoError(err, "getting node list")
+	if err != nil {
+		return fmt.Errorf("unexpected error getting node list: %w", err)
+	}
 	for _, node := range nodeList.Items {
 		if node.Spec.Unschedulable {
 			continue
@@ -305,12 +308,11 @@ func areGPUsAvailableOnAllSchedulableNodes(ctx context.Context, clientSet client
 		}
 		framework.Logf("gpuResourceName %s", e2egpu.NVIDIAGPUResourceName)
 		if val, ok := node.Status.Capacity[e2egpu.NVIDIAGPUResourceName]; !ok || val.Value() == 0 {
-			framework.Logf("Nvidia GPUs not available on Node: %q", node.Name)
-			return false
+			return fmt.Errorf("nvidia GPUs not available on Node: %q", node.Name)
		}
 	}
 	framework.Logf("Nvidia GPUs exist on all schedulable nodes")
-	return true
+	return nil
 }
 
 func logOSImages(ctx context.Context, f *framework.Framework) {
@@ -386,9 +388,9 @@ func waitForGPUs(ctx context.Context, f *framework.Framework, namespace, name st
 
 	// Wait for Nvidia GPUs to be available on nodes
 	framework.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
-	gomega.Eventually(ctx, func(ctx context.Context) bool {
+	gomega.Eventually(ctx, func(ctx context.Context) error {
 		return areGPUsAvailableOnAllSchedulableNodes(ctx, f.ClientSet)
-	}, driverInstallTimeout, time.Second).Should(gomega.BeTrueBecause("expected GPU resources to be available within the timout"))
+	}, driverInstallTimeout, time.Second).Should(gomega.Succeed())
 }
 
 // StartJob starts a simple CUDA job that requests gpu and the specified number of completions
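A detail specific to this file: the polled function now has the signature func(ctx context.Context) error. When Eventually is given a context and the callback takes a context.Context as its first parameter, Gomega passes that context into each poll, so the API calls inside the callback are cancelled together with the test. A minimal sketch (nodesReady is a hypothetical helper):

	// Gomega forwards the ctx passed to Eventually into the callback.
	gomega.Eventually(ctx, func(ctx context.Context) error {
		return nodesReady(ctx) // hypothetical check; returns an error until the nodes are ready
	}, 5*time.Minute, 10*time.Second).Should(gomega.Succeed())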

test/e2e/storage/testsuites/volume_modify.go

Lines changed: 2 additions & 2 deletions
@@ -398,13 +398,13 @@ func SetPVCVACName(ctx context.Context, origPVC *v1.PersistentVolumeClaim, name
 	pvcName := origPVC.Name
 	var patchedPVC *v1.PersistentVolumeClaim
 
-	gomega.Eventually(ctx, func(g gomega.Gomega) {
+	gomega.Eventually(ctx, func() error {
 		var err error
 		patch := []map[string]interface{}{{"op": "replace", "path": "/spec/volumeAttributesClassName", "value": name}}
 		patchBytes, _ := json.Marshal(patch)
 
 		patchedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Patch(ctx, pvcName, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
-		framework.ExpectNoError(err, "While patching PVC to add VAC name")
+		return err
 	}, timeout, modifyPollInterval).Should(gomega.Succeed())
 
 	return patchedPVC

test/e2e/windows/eviction.go

Lines changed: 8 additions & 5 deletions
@@ -18,6 +18,7 @@ package windows
 
 import (
 	"context"
+	"fmt"
 	"strconv"
 	"strings"
 	"time"
@@ -176,18 +177,20 @@ var _ = sigDescribe(feature.Windows, "Eviction", framework.WithSerial(), framewo
 		framework.ExpectNoError(err)
 
 		framework.Logf("Waiting for pod2 to get evicted")
-		gomega.Eventually(ctx, func() bool {
+		gomega.Eventually(ctx, func() error {
 			eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
-			framework.ExpectNoError(err)
+			if err != nil {
+				return fmt.Errorf("error getting events: %w", err)
+			}
 			for _, e := range eventList.Items {
 				// Look for an event that shows FailedScheduling
 				if e.Type == "Warning" && e.Reason == "Evicted" && strings.Contains(e.Message, "pod2") {
 					framework.Logf("Found %+v event with message %+v", e.Reason, e.Message)
-					return true
+					return nil
 				}
 			}
-			return false
-		}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrueBecause("Eviction Event was not found"))
+			return fmt.Errorf("did not find any FailedScheduling event for pod %s", pod2.ObjectMeta.Name)
+		}, 10*time.Minute, 10*time.Second).Should(gomega.Succeed())
 
 		ginkgo.By("Waiting for node.kubernetes.io/memory-pressure taint to be removed")
 		// ensure e2e test framework catches the memory-pressure taint

test/e2e/windows/memory_limits.go

Lines changed: 8 additions & 5 deletions
@@ -19,6 +19,7 @@ package windows
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"time"
 
 	kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
@@ -162,18 +163,20 @@ func overrideAllocatableMemoryTest(ctx context.Context, f *framework.Framework,
 	framework.Logf("Ensuring that pod %s fails to schedule", podName)
 	failurePod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, failurePod, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
-	gomega.Eventually(ctx, func() bool {
+	gomega.Eventually(ctx, func() error {
 		eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
-		framework.ExpectNoError(err)
+		if err != nil {
+			return fmt.Errorf("error getting events: %w", err)
+		}
 		for _, e := range eventList.Items {
 			// Look for an event that shows FailedScheduling
 			if e.Type == "Warning" && e.Reason == "FailedScheduling" && e.InvolvedObject.Name == failurePod.ObjectMeta.Name {
 				framework.Logf("Found %+v event with message %+v", e.Reason, e.Message)
-				return true
+				return nil
 			}
 		}
-		return false
-	}, 3*time.Minute, 10*time.Second).Should(gomega.BeTrueBecause("Expected %s pod to be failed scheduling", podName))
+		return fmt.Errorf("did not find any FailedScheduling event for pod %s", failurePod.ObjectMeta.Name)
+	}, 3*time.Minute, 10*time.Second).Should(gomega.Succeed())
 }
 
 func getNodeMemory(ctx context.Context, f *framework.Framework, node v1.Node) nodeMemory {

test/e2e_node/util.go

Lines changed: 9 additions & 4 deletions
@@ -237,11 +237,16 @@ func waitForKubeletToStart(ctx context.Context, f *framework.Framework) {
 	}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
 
 	// Wait for the Kubelet to be ready.
-	gomega.Eventually(ctx, func(ctx context.Context) bool {
+	gomega.Eventually(ctx, func(ctx context.Context) error {
 		nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
-		framework.ExpectNoError(err)
-		return nodes == 1
-	}, time.Minute, time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in ready state"))
+		if err != nil {
+			return fmt.Errorf("error getting ready nodes: %w", err)
+		}
+		if nodes != 1 {
+			return fmt.Errorf("expected 1 ready node, got %d", nodes)
+		}
+		return nil
+	}, time.Minute, time.Second).Should(gomega.Succeed())
 }
 
 func deleteStateFile(stateFileName string) {
