
Commit 0a3884e

Merge pull request kubernetes#77714 from danielqsj/t1
cleanup dot imports and make test error checking more readable in test/e2e/scheduling
2 parents 116f06e + ccecc67 commit 0a3884e

12 files changed: +390 -381 lines
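
The change is mechanical: dot imports of ginkgo and gomega become qualified imports, calls such as By, It, BeforeEach, Expect, and HaveOccurred become ginkgo.* and gomega.*, and bare Expect(err).To(HaveOccurred()) checks move to framework.ExpectError(err). Below is a minimal, self-contained sketch of the resulting style, assuming a hypothetical standalone test suite; it is not code from this commit.

// A minimal sketch (not code from this commit) of the style the cleanup
// produces: qualified ginkgo/gomega imports and explicit error assertions.
// The package, suite, and test names here are hypothetical.
package scheduling_test

import (
	"errors"
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestCleanupStyle bootstraps the Ginkgo suite for this sketch.
func TestCleanupStyle(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "qualified-import style sketch")
}

var _ = ginkgo.Describe("error checking without dot imports", func() {
	ginkgo.It("uses package-qualified matchers", func() {
		var err error
		// Before the cleanup: Expect(err).NotTo(HaveOccurred())
		gomega.Expect(err).NotTo(gomega.HaveOccurred())

		err = errors.New("expected failure")
		// Before the cleanup: Expect(err).To(HaveOccurred())
		// The in-tree e2e tests now call framework.ExpectError(err) instead;
		// the plain gomega form is used here to keep the sketch standalone.
		gomega.Expect(err).To(gomega.HaveOccurred())
	})
})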

hack/.golint_failures

Lines changed: 0 additions & 1 deletion
@@ -605,7 +605,6 @@ test/e2e/common
 test/e2e/framework
 test/e2e/lifecycle/bootstrap
 test/e2e/scalability
-test/e2e/scheduling
 test/e2e/storage/drivers
 test/e2e/storage/testsuites
 test/e2e/storage/utils

test/e2e/scheduling/equivalence_cache_predicates.go

Lines changed: 24 additions & 22 deletions
@@ -31,8 +31,9 @@ import (
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
+	// ensure libs have a chance to initialize
 	_ "github.com/stretchr/testify/assert"
 )
 
@@ -48,7 +49,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 	var ns string
 	f := framework.NewDefaultFramework("equivalence-cache")
 
-	BeforeEach(func() {
+	ginkgo.BeforeEach(func() {
 		cs = f.ClientSet
 		ns = f.Namespace.Name
 
@@ -61,7 +62,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 		// cannot be run in parallel with any other test that touches Nodes or Pods.
 		// It is so because we need to have precise control on what's running in the cluster.
 		systemPods, err := framework.GetPodsInNamespace(cs, ns, map[string]string{})
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		systemPodsNo = 0
 		for _, pod := range systemPods {
 			if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
@@ -70,7 +71,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 		}
 
 		err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{})
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 		for _, node := range nodeList.Items {
 			e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
@@ -83,16 +84,16 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 	// When a replica pod (with HostPorts) is scheduled to a node, it will invalidate GeneralPredicates cache on this node,
 	// so that subsequent replica pods with same host port claim will be rejected.
 	// We enforce all replica pods bind to the same node so there will always be conflicts.
-	It("validates GeneralPredicates is properly invalidated when a pod is scheduled [Slow]", func() {
-		By("Launching a RC with two replica pods with HostPorts")
+	ginkgo.It("validates GeneralPredicates is properly invalidated when a pod is scheduled [Slow]", func() {
+		ginkgo.By("Launching a RC with two replica pods with HostPorts")
 		nodeName := getNodeThatCanRunPodWithoutToleration(f)
 		rcName := "host-port"
 
 		// bind all replicas to same node
 		nodeSelector := map[string]string{"kubernetes.io/hostname": nodeName}
 
-		By("One pod should be scheduled, the other should be rejected")
-		// CreateNodeSelectorPods creates RC with host port 4312
+		ginkgo.By("One pod should be scheduled, the other should be rejected")
+		// CreateNodeSelectorPods creates RC with host port 4321
 		WaitForSchedulerAfterAction(f, func() error {
 			err := CreateNodeSelectorPods(f, rcName, 2, nodeSelector, false)
 			return err
@@ -105,11 +106,11 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 	// This test verifies that MatchInterPodAffinity works as expected.
 	// In equivalence cache, it does not handle inter pod affinity (anti-affinity) specially (unless node label changed),
 	// because current predicates algorithm will ensure newly scheduled pod does not break existing affinity in cluster.
-	It("validates pod affinity works properly when new replica pod is scheduled", func() {
+	ginkgo.It("validates pod affinity works properly when new replica pod is scheduled", func() {
 		// create a pod running with label {security: S1}, and choose this node
 		nodeName, _ := runAndKeepPodWithLabelAndGetNodeName(f)
 
-		By("Trying to apply a random label on the found node.")
+		ginkgo.By("Trying to apply a random label on the found node.")
 		// we need to use real failure domains, since scheduler only know them
 		k := "failure-domain.beta.kubernetes.io/zone"
 		v := "equivalence-e2e-test"
@@ -118,7 +119,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 		// restore the node label
 		defer framework.AddOrUpdateLabelOnNode(cs, nodeName, k, oldValue)
 
-		By("Trying to schedule RC with Pod Affinity should success.")
+		ginkgo.By("Trying to schedule RC with Pod Affinity should success.")
 		framework.WaitForStableCluster(cs, masterNodes)
 		affinityRCName := "with-pod-affinity-" + string(uuid.NewUUID())
 		replica := 2
@@ -154,10 +155,10 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 		framework.ExpectNoError(err)
 		framework.ExpectNoError(framework.WaitForControlledPodsRunning(cs, ns, affinityRCName, api.Kind("ReplicationController")))
 
-		By("Remove node failure domain label")
+		ginkgo.By("Remove node failure domain label")
 		framework.RemoveLabelOffNode(cs, nodeName, k)
 
-		By("Trying to schedule another equivalent Pod should fail due to node label has been removed.")
+		ginkgo.By("Trying to schedule another equivalent Pod should fail due to node label has been removed.")
 		// use scale to create another equivalent pod and wait for failure event
 		WaitForSchedulerAfterAction(f, func() error {
 			err := framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false)
@@ -168,17 +169,17 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 	})
 
 	// This test verifies that MatchInterPodAffinity (anti-affinity) is respected as expected.
-	It("validates pod anti-affinity works properly when new replica pod is scheduled", func() {
-		By("Launching two pods on two distinct nodes to get two node names")
+	ginkgo.It("validates pod anti-affinity works properly when new replica pod is scheduled", func() {
+		ginkgo.By("Launching two pods on two distinct nodes to get two node names")
 		CreateHostPortPods(f, "host-port", 2, true)
 		defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "host-port")
 		podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
 		framework.ExpectNoError(err)
-		Expect(len(podList.Items)).To(Equal(2))
+		gomega.Expect(len(podList.Items)).To(gomega.Equal(2))
 		nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName}
-		Expect(nodeNames[0]).ToNot(Equal(nodeNames[1]))
+		gomega.Expect(nodeNames[0]).ToNot(gomega.Equal(nodeNames[1]))
 
-		By("Applying a random label to both nodes.")
+		ginkgo.By("Applying a random label to both nodes.")
 		k := "e2e.inter-pod-affinity.kubernetes.io/zone"
 		v := "equivalence-e2etest"
 		for _, nodeName := range nodeNames {
@@ -187,15 +188,15 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 			defer framework.RemoveLabelOffNode(cs, nodeName, k)
 		}
 
-		By("Trying to launch a pod with the service label on the selected nodes.")
+		ginkgo.By("Trying to launch a pod with the service label on the selected nodes.")
 		// run a pod with label {"service": "S1"} and expect it to be running
 		runPausePod(f, pausePodConfig{
 			Name:         "with-label-" + string(uuid.NewUUID()),
 			Labels:       map[string]string{"service": "S1"},
 			NodeSelector: map[string]string{k: v}, // only launch on our two nodes
 		})
 
-		By("Trying to launch RC with podAntiAffinity on these two nodes should be rejected.")
+		ginkgo.By("Trying to launch RC with podAntiAffinity on these two nodes should be rejected.")
 		labelRCName := "with-podantiaffinity-" + string(uuid.NewUUID())
 		replica := 2
 		labelsMap := map[string]string{
@@ -269,8 +270,9 @@ func getRCWithInterPodAffinityNodeSelector(name string, labelsMap map[string]str
 	}
 }
 
+// CreateNodeSelectorPods creates RC with host port 4321 and defines node selector
 func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) error {
-	By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
+	ginkgo.By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
 
 	config := &testutils.RCConfig{
 		Client: f.ClientSet,

test/e2e/scheduling/framework.go

Lines changed: 1 addition & 0 deletions
@@ -18,6 +18,7 @@ package scheduling
 
 import "github.com/onsi/ginkgo"
 
+// SIGDescribe annotates the test with the SIG label.
 func SIGDescribe(text string, body func()) bool {
 	return ginkgo.Describe("[sig-scheduling] "+text, body)
 }

test/e2e/scheduling/limit_range.go

Lines changed: 45 additions & 45 deletions
@@ -30,8 +30,8 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )
 
 const (
@@ -41,8 +41,8 @@ const (
 var _ = SIGDescribe("LimitRange", func() {
 	f := framework.NewDefaultFramework("limitrange")
 
-	It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
-		By("Creating a LimitRange")
+	ginkgo.It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
+		ginkgo.By("Creating a LimitRange")
 
 		min := getResourceList("50m", "100Mi", "100Gi")
 		max := getResourceList("500m", "500Mi", "500Gi")
@@ -54,24 +54,24 @@ var _ = SIGDescribe("LimitRange", func() {
 			defaultLimit, defaultRequest,
 			maxLimitRequestRatio)
 
-		By("Setting up watch")
+		ginkgo.By("Setting up watch")
 		selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
 		options := metav1.ListOptions{LabelSelector: selector.String()}
 		limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
-		Expect(err).NotTo(HaveOccurred(), "failed to query for limitRanges")
-		Expect(len(limitRanges.Items)).To(Equal(0))
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for limitRanges")
+		gomega.Expect(len(limitRanges.Items)).To(gomega.Equal(0))
 		options = metav1.ListOptions{
 			LabelSelector:   selector.String(),
 			ResourceVersion: limitRanges.ListMeta.ResourceVersion,
 		}
 		w, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(metav1.ListOptions{})
-		Expect(err).NotTo(HaveOccurred(), "failed to set up watch")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch")
 
-		By("Submitting a LimitRange")
+		ginkgo.By("Submitting a LimitRange")
 		limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(limitRange)
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
-		By("Verifying LimitRange creation was observed")
+		ginkgo.By("Verifying LimitRange creation was observed")
 		select {
 		case event, _ := <-w.ResultChan():
 			if event.Type != watch.Added {
@@ -81,39 +81,39 @@ var _ = SIGDescribe("LimitRange", func() {
 			framework.Failf("Timeout while waiting for LimitRange creation")
 		}
 
-		By("Fetching the LimitRange to ensure it has proper values")
+		ginkgo.By("Fetching the LimitRange to ensure it has proper values")
 		limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
 		actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
 		err = equalResourceRequirement(expected, actual)
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
-		By("Creating a Pod with no resource requirements")
+		ginkgo.By("Creating a Pod with no resource requirements")
 		pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
-		By("Ensuring Pod has resource requirements applied from LimitRange")
+		ginkgo.By("Ensuring Pod has resource requirements applied from LimitRange")
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		for i := range pod.Spec.Containers {
 			err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
 			if err != nil {
 				// Print the pod to help in debugging.
 				e2elog.Logf("Pod %+v does not have the expected requirements", pod)
-				Expect(err).NotTo(HaveOccurred())
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			}
 		}
 
-		By("Creating a Pod with partial resource requirements")
+		ginkgo.By("Creating a Pod with partial resource requirements")
 		pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi", "150Gi"), getResourceList("300m", "", ""))
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
-		By("Ensuring Pod has merged resource requirements applied from LimitRange")
+		ginkgo.By("Ensuring Pod has merged resource requirements applied from LimitRange")
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		// This is an interesting case, so it's worth a comment
 		// If you specify a Limit, and no Request, the Limit will default to the Request
 		// This means that the LimitRange.DefaultRequest will ONLY take affect if a container.resources.limit is not supplied
@@ -123,49 +123,49 @@ var _ = SIGDescribe("LimitRange", func() {
 			if err != nil {
 				// Print the pod to help in debugging.
 				e2elog.Logf("Pod %+v does not have the expected requirements", pod)
-				Expect(err).NotTo(HaveOccurred())
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			}
 		}
 
-		By("Failing to create a Pod with less than min resources")
+		ginkgo.By("Failing to create a Pod with less than min resources")
 		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).To(HaveOccurred())
+		framework.ExpectError(err)
 
-		By("Failing to create a Pod with more than max resources")
+		ginkgo.By("Failing to create a Pod with more than max resources")
 		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).To(HaveOccurred())
+		framework.ExpectError(err)
 
-		By("Updating a LimitRange")
+		ginkgo.By("Updating a LimitRange")
 		newMin := getResourceList("9m", "49Mi", "49Gi")
 		limitRange.Spec.Limits[0].Min = newMin
 		limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(limitRange)
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
-		By("Verifying LimitRange updating is effective")
-		Expect(wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
+		ginkgo.By("Verifying LimitRange updating is effective")
+		gomega.Expect(wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
 			limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil
-		})).NotTo(HaveOccurred())
+		})).NotTo(gomega.HaveOccurred())
 
-		By("Creating a Pod with less than former min resources")
+		ginkgo.By("Creating a Pod with less than former min resources")
 		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
-		By("Failing to create a Pod with more than max resources")
+		ginkgo.By("Failing to create a Pod with more than max resources")
 		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).To(HaveOccurred())
+		framework.ExpectError(err)
 
-		By("Deleting a LimitRange")
+		ginkgo.By("Deleting a LimitRange")
 		err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(limitRange.Name, metav1.NewDeleteOptions(30))
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
-		By("Verifying the LimitRange was deleted")
-		Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
+		ginkgo.By("Verifying the LimitRange was deleted")
+		gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
 			selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
 			options := metav1.ListOptions{LabelSelector: selector.String()}
 			limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
@@ -190,12 +190,12 @@ var _ = SIGDescribe("LimitRange", func() {
 
 			return false, nil
 
-		})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
+		})).NotTo(gomega.HaveOccurred(), "kubelet never observed the termination notice")
 
-		By("Creating a Pod with more than former max resources")
+		ginkgo.By("Creating a Pod with more than former max resources")
 		pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})
 
 })

0 commit comments
