Skip to content

Commit 53499d9

Browse files
committed
prefer error over bool, prefer Should(gomega.Succeed())
Signed-off-by: Jack Francis <[email protected]>
1 parent d54ff74 commit 53499d9

File tree

5 files changed

+27
-26
lines changed

5 files changed

+27
-26
lines changed

test/e2e/node/gpu.go

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ package node
1818

1919
import (
2020
"context"
21+
"fmt"
2122
"os"
2223
"regexp"
2324
"time"
@@ -292,12 +293,11 @@ func SetupEnvironmentAndSkipIfNeeded(ctx context.Context, f *framework.Framework
292293
}
293294
}
294295

295-
func areGPUsAvailableOnAllSchedulableNodes(ctx context.Context, clientSet clientset.Interface) bool {
296+
func areGPUsAvailableOnAllSchedulableNodes(ctx context.Context, clientSet clientset.Interface) error {
296297
framework.Logf("Getting list of Nodes from API server")
297298
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
298299
if err != nil {
299-
framework.Logf("Unexpected error getting node list: %v", err)
300-
return false
300+
return fmt.Errorf("unexpected error getting node list: %w", err)
301301
}
302302
for _, node := range nodeList.Items {
303303
if node.Spec.Unschedulable {
@@ -308,12 +308,11 @@ func areGPUsAvailableOnAllSchedulableNodes(ctx context.Context, clientSet client
308308
}
309309
framework.Logf("gpuResourceName %s", e2egpu.NVIDIAGPUResourceName)
310310
if val, ok := node.Status.Capacity[e2egpu.NVIDIAGPUResourceName]; !ok || val.Value() == 0 {
311-
framework.Logf("Nvidia GPUs not available on Node: %q", node.Name)
312-
return false
311+
return fmt.Errorf("nvidia GPUs not available on Node: %q", node.Name)
313312
}
314313
}
315314
framework.Logf("Nvidia GPUs exist on all schedulable nodes")
316-
return true
315+
return nil
317316
}
318317

319318
func logOSImages(ctx context.Context, f *framework.Framework) {
@@ -389,9 +388,9 @@ func waitForGPUs(ctx context.Context, f *framework.Framework, namespace, name st
389388

390389
// Wait for Nvidia GPUs to be available on nodes
391390
framework.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
392-
gomega.Eventually(ctx, func(ctx context.Context) bool {
391+
gomega.Eventually(ctx, func(ctx context.Context) error {
393392
return areGPUsAvailableOnAllSchedulableNodes(ctx, f.ClientSet)
394-
}, driverInstallTimeout, time.Second).Should(gomega.BeTrueBecause("expected GPU resources to be available within the timout"))
393+
}, driverInstallTimeout, time.Second).Should(gomega.Succeed())
395394
}
396395

397396
// StartJob starts a simple CUDA job that requests gpu and the specified number of completions

test/e2e/storage/testsuites/volume_modify.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -281,7 +281,7 @@ func SetPVCVACName(ctx context.Context, origPVC *v1.PersistentVolumeClaim, name
281281

282282
patchedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Patch(ctx, pvcName, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
283283
return err
284-
}, timeout, modifyPollInterval).Should(gomega.BeNil())
284+
}, timeout, modifyPollInterval).Should(gomega.Succeed())
285285

286286
return patchedPVC
287287
}

test/e2e/windows/eviction.go

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ package windows
1818

1919
import (
2020
"context"
21+
"fmt"
2122
"strconv"
2223
"strings"
2324
"time"
@@ -176,21 +177,20 @@ var _ = sigDescribe(feature.Windows, "Eviction", framework.WithSerial(), framewo
176177
framework.ExpectNoError(err)
177178

178179
framework.Logf("Waiting for pod2 to get evicted")
179-
gomega.Eventually(ctx, func() bool {
180+
gomega.Eventually(ctx, func() error {
180181
eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
181182
if err != nil {
182-
framework.Logf("Error getting events: %v", err)
183-
return false
183+
return fmt.Errorf("error getting events: %w", err)
184184
}
185185
for _, e := range eventList.Items {
186186
// Look for an event that shows FailedScheduling
187187
if e.Type == "Warning" && e.Reason == "Evicted" && strings.Contains(e.Message, "pod2") {
188188
framework.Logf("Found %+v event with message %+v", e.Reason, e.Message)
189-
return true
189+
return nil
190190
}
191191
}
192-
return false
193-
}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrueBecause("Eviction Event was not found"))
192+
return fmt.Errorf("did not find any FailedScheduling event for pod %s", pod2.ObjectMeta.Name)
193+
}, 10*time.Minute, 10*time.Second).Should(gomega.Succeed())
194194

195195
ginkgo.By("Waiting for node.kubernetes.io/memory-pressure taint to be removed")
196196
// ensure e2e test framework catches the memory-pressure taint

test/e2e/windows/memory_limits.go

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ package windows
1919
import (
2020
"context"
2121
"encoding/json"
22+
"fmt"
2223
"time"
2324

2425
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
@@ -162,21 +163,20 @@ func overrideAllocatableMemoryTest(ctx context.Context, f *framework.Framework,
162163
framework.Logf("Ensuring that pod %s fails to schedule", podName)
163164
failurePod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, failurePod, metav1.CreateOptions{})
164165
framework.ExpectNoError(err)
165-
gomega.Eventually(ctx, func() bool {
166+
gomega.Eventually(ctx, func() error {
166167
eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
167168
if err != nil {
168-
framework.Logf("Error getting events: %v", err)
169-
return false
169+
return fmt.Errorf("error getting events: %w", err)
170170
}
171171
for _, e := range eventList.Items {
172172
// Look for an event that shows FailedScheduling
173173
if e.Type == "Warning" && e.Reason == "FailedScheduling" && e.InvolvedObject.Name == failurePod.ObjectMeta.Name {
174174
framework.Logf("Found %+v event with message %+v", e.Reason, e.Message)
175-
return true
175+
return nil
176176
}
177177
}
178-
return false
179-
}, 3*time.Minute, 10*time.Second).Should(gomega.BeTrueBecause("Expected %s pod to be failed scheduling", podName))
178+
return fmt.Errorf("did not find any FailedScheduling event for pod %s", failurePod.ObjectMeta.Name)
179+
}, 3*time.Minute, 10*time.Second).Should(gomega.Succeed())
180180
}
181181

182182
func getNodeMemory(ctx context.Context, f *framework.Framework, node v1.Node) nodeMemory {

test/e2e_node/util.go

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -237,14 +237,16 @@ func waitForKubeletToStart(ctx context.Context, f *framework.Framework) {
237237
}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
238238

239239
// Wait for the Kubelet to be ready.
240-
gomega.Eventually(ctx, func(ctx context.Context) bool {
240+
gomega.Eventually(ctx, func(ctx context.Context) error {
241241
nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
242242
if err != nil {
243-
framework.Logf("Error getting ready nodes: %v", err)
244-
return false
243+
return fmt.Errorf("error getting ready nodes: %w", err)
244+
}
245+
if nodes != 1 {
246+
return fmt.Errorf("expected 1 ready node, got %d", nodes)
245247
}
246-
return nodes == 1
247-
}, time.Minute, time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in ready state"))
248+
return nil
249+
}, time.Minute, time.Second).Should(gomega.Succeed())
248250
}
249251

250252
func deleteStateFile(stateFileName string) {

0 commit comments

Comments (0)