Commit f47e4ae

Merge pull request kubernetes#85787 from tanjunchen/use-ExpectEqual-test-e2e_node

Use ExpectEqual test/e2e_node

2 parents: 99e4f65 + 561ee6e
8 files changed: +26 −29 lines
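The change is mechanical: raw gomega matcher chains are replaced with the e2e framework's ExpectEqual/ExpectNotEqual helpers, and gomega imports that become unused are dropped. For context, those helpers are thin wrappers over gomega; a minimal sketch of their likely shape (paraphrasing the test/e2e/framework expect helpers of this era, not a verbatim copy):

package framework

import "github.com/onsi/gomega"

// ExpectEqual fails the test if actual does not equal extra
// (gomega.Equal semantics, i.e. reflect.DeepEqual).
func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
}

// ExpectNotEqual fails the test if actual equals extra.
func ExpectNotEqual(actual interface{}, extra interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).NotTo(gomega.Equal(extra), explain...)
}

The offset of 1 in ExpectWithOffset keeps failure messages pointing at the calling test line rather than at the helper itself.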

test/e2e_node/apparmor_test.go
Lines changed: 5 additions & 6 deletions

@@ -27,7 +27,7 @@ import (
 	"strconv"
 	"strings"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -63,9 +63,8 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor
 				return
 			}
 			state := status.ContainerStatuses[0].State.Terminated
-			gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
-			gomega.Expect(state.ExitCode).To(gomega.Not(gomega.BeZero()), "ContainerStateTerminated: %+v", state)
-
+			framework.ExpectNotEqual(state, nil, "ContainerState: %+v", status.ContainerStatuses[0].State)
+			framework.ExpectNotEqual(state.ExitCode, 0, "ContainerStateTerminated: %+v", state)
 		})
 		ginkgo.It("should enforce a permissive profile", func() {
 			status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"audit-write")
@@ -74,8 +73,8 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor
 				return
 			}
 			state := status.ContainerStatuses[0].State.Terminated
-			gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
-			gomega.Expect(state.ExitCode).To(gomega.BeZero(), "ContainerStateTerminated: %+v", state)
+			framework.ExpectNotEqual(state, nil, "ContainerState: %+v", status.ContainerStatuses[0].State)
+			framework.ExpectEqual(state.ExitCode, 0, "ContainerStateTerminated: %+v", state)
 		})
 	})
 } else {

test/e2e_node/container_manager_test.go
Lines changed: 3 additions & 3 deletions

@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 	ginkgo.Context("once the node is setup", func() {
 		ginkgo.It("container runtime's oom-score-adj should be -999", func() {
 			runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
-			gomega.Expect(err).To(gomega.BeNil(), "failed to get list of container runtime pids")
+			framework.ExpectEqual(err, nil, "failed to get list of container runtime pids")
 			for _, pid := range runtimePids {
 				gomega.Eventually(func() error {
 					return validateOOMScoreAdjSetting(pid, -999)

@@ -88,7 +88,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 		})
 		ginkgo.It("Kubelet's oom-score-adj should be -999", func() {
 			kubeletPids, err := getPidsForProcess(kubeletProcessName, "")
-			gomega.Expect(err).To(gomega.BeNil(), "failed to get list of kubelet pids")
+			framework.ExpectEqual(err, nil, "failed to get list of kubelet pids")
 			framework.ExpectEqual(len(kubeletPids), 1, "expected only one kubelet process; found %d", len(kubeletPids))
 			gomega.Eventually(func() error {
 				return validateOOMScoreAdjSetting(kubeletPids[0], -999)

@@ -100,7 +100,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 			// created before this test, and may not be infra
 			// containers. They should be excluded from the test.
 			existingPausePIDs, err := getPidsForProcess("pause", "")
-			gomega.Expect(err).To(gomega.BeNil(), "failed to list all pause processes on the node")
+			framework.ExpectEqual(err, nil, "failed to list all pause processes on the node")
 			existingPausePIDSet := sets.NewInt(existingPausePIDs...)

 			podClient := f.PodClient()
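All three conversions in this file assert only that no error occurred. As an aside, the framework also ships ExpectNoError, which expresses that intent more directly; an equivalent sketch of the first assertion above (illustration only, not what this PR does):

	runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
	// Same intent as framework.ExpectEqual(err, nil, ...), but purpose-built for errors.
	framework.ExpectNoError(err, "failed to get list of container runtime pids")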

test/e2e_node/critical_pod_test.go
Lines changed: 3 additions & 4 deletions

@@ -32,7 +32,6 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"

 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 )

 const (

@@ -86,7 +85,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 		})

 		_, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(systemCriticalPriority)
-		gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue(), "failed to create PriorityClasses with an error: %v", err)
+		framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true, "failed to create PriorityClasses with an error: %v", err)

 		// Create pods, starting with non-critical so that the critical preempts the other pods.
 		f.PodClient().CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed})

@@ -157,9 +156,9 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *
 		pod.Spec.PriorityClassName = systemCriticalPriorityName
 		pod.Spec.Priority = &value

-		gomega.Expect(kubelettypes.IsCriticalPod(pod)).To(gomega.BeTrue(), "pod should be a critical pod")
+		framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), true, "pod should be a critical pod")
 	} else {
-		gomega.Expect(kubelettypes.IsCriticalPod(pod)).To(gomega.BeFalse(), "pod should not be a critical pod")
+		framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), false, "pod should not be a critical pod")
 	}
 	return pod
 }

test/e2e_node/device_plugin_test.go
Lines changed: 5 additions & 5 deletions

@@ -22,7 +22,7 @@ import (

 	"regexp"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"

@@ -97,20 +97,20 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 		pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
 		deviceIDRE := "stub devices: (Dev-[0-9]+)"
 		devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
-		gomega.Expect(devID1).To(gomega.Not(gomega.Equal("")))
+		framework.ExpectNotEqual(devID1, "")

 		podResources, err := getNodeDevices()
 		var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
 		framework.Logf("pod resources %v", podResources)
-		gomega.Expect(err).To(gomega.BeNil())
+		framework.ExpectEqual(err, nil)
 		framework.ExpectEqual(len(podResources.PodResources), 2)
 		for _, res := range podResources.GetPodResources() {
 			if res.Name == pod1.Name {
 				resourcesForOurPod = res
 			}
 		}
 		framework.Logf("resourcesForOurPod %v", resourcesForOurPod)
-		gomega.Expect(resourcesForOurPod).NotTo(gomega.BeNil())
+		framework.ExpectNotEqual(resourcesForOurPod, nil)
 		framework.ExpectEqual(resourcesForOurPod.Name, pod1.Name)
 		framework.ExpectEqual(resourcesForOurPod.Namespace, pod1.Namespace)
 		framework.ExpectEqual(len(resourcesForOurPod.Containers), 1)

@@ -181,7 +181,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 		ginkgo.By("Checking that pod got a different fake device")
 		devID2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)

-		gomega.Expect(devID1).To(gomega.Not(gomega.Equal(devID2)))
+		framework.ExpectNotEqual(devID1, devID2)

 		ginkgo.By("By deleting the pods and waiting for container removal")
 		err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions)

test/e2e_node/e2e_node_suite_test.go
Lines changed: 1 addition & 1 deletion

@@ -307,7 +307,7 @@ func getNode(c *clientset.Clientset) (*v1.Node, error) {
 	if nodes == nil {
 		return nil, fmt.Errorf("the node list is nil")
 	}
-	gomega.Expect(len(nodes.Items) > 1).NotTo(gomega.BeTrue(), "the number of nodes is more than 1.")
+	framework.ExpectNotEqual(len(nodes.Items) > 1, true, "the number of nodes is more than 1.")
 	if len(nodes.Items) == 0 {
 		return nil, fmt.Errorf("empty node list: %+v", nodes)
 	}
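Boolean conditions follow the same mapping: a matcher chain like NotTo(gomega.BeTrue()) becomes an inequality against true. An equivalent, arguably more direct spelling of the changed line (a sketch for illustration, not part of this PR):

	// getNode expects a single-node cluster; fail if more than one node is listed.
	framework.ExpectEqual(len(nodes.Items) <= 1, true, "the number of nodes is more than 1.")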

test/e2e_node/eviction_test.go
Lines changed: 6 additions & 6 deletions

@@ -23,7 +23,7 @@ import (
 	"strings"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	schedulingv1 "k8s.io/api/scheduling/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"

@@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 	})
 	ginkgo.BeforeEach(func() {
 		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-		gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
+		framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
 	})
 	ginkgo.AfterEach(func() {
 		err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})

@@ -359,7 +359,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
 	})
 	ginkgo.BeforeEach(func() {
 		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-		gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
+		framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
 	})
 	ginkgo.AfterEach(func() {
 		err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})

@@ -412,7 +412,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
 	})
 	ginkgo.BeforeEach(func() {
 		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-		gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
+		framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
 	})
 	ginkgo.AfterEach(func() {
 		err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})

@@ -661,7 +661,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
 		if expectedStarvedResource != noStarvedResource {
 			// Check the eviction.StarvedResourceKey
 			starved, found := event.Annotations[eviction.StarvedResourceKey]
-			gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
+			framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
 				pod.Name, expectedStarvedResource)
 			starvedResource := v1.ResourceName(starved)
 			framework.ExpectEqual(starvedResource, expectedStarvedResource, "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",

@@ -671,7 +671,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
 			if expectedStarvedResource == v1.ResourceMemory {
 				// Check the eviction.OffendingContainersKey
 				offendersString, found := event.Annotations[eviction.OffendingContainersKey]
-				gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
+				framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
 					pod.Name)
 				offendingContainers := strings.Split(offendersString, ",")
 				framework.ExpectEqual(len(offendingContainers), 1, "Expected to find the offending container's usage in the %s annotation, but no container was found",

test/e2e_node/node_problem_detector_linux.go
Lines changed: 2 additions & 2 deletions

@@ -24,7 +24,7 @@ import (
 	"path"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"

@@ -104,7 +104,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete

 		nodeTime = time.Now()
 		bootTime, err = util.GetBootTime()
-		gomega.Expect(err).To(gomega.BeNil())
+		framework.ExpectEqual(err, nil)

 		// Set lookback duration longer than node up time.
 		// Assume the test won't take more than 1 hour, in fact it usually only takes 90 seconds.

test/e2e_node/startup_probe_test.go
Lines changed: 1 addition & 2 deletions

@@ -30,7 +30,6 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"

 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 )

 const (

@@ -179,7 +178,7 @@ var _ = framework.KubeDescribe("StartupProbe [Serial] [Disruptive] [NodeAlphaFea

 		isReady, err := testutils.PodRunningReady(p)
 		framework.ExpectNoError(err)
-		gomega.Expect(isReady).To(gomega.BeTrue(), "pod should be ready")
+		framework.ExpectEqual(isReady, true, "pod should be ready")

 		// We assume the pod became ready when the container became ready. This
 		// is true for a single container pod.