
Commit 15a4342

remove dot imports in e2e/scheduling

1 parent 4430194 · commit 15a4342

10 files changed (+303, -303 lines)
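
The change is mechanical across the touched files: the dot imports of ginkgo and gomega become plain imports, and every call site picks up its package qualifier. A minimal sketch of the resulting ginkgo style; the spec body below is hypothetical and not taken from the repository:

package scheduling

import (
	"github.com/onsi/ginkgo"
)

// Hypothetical spec illustrating the qualified ginkgo style this commit converges on.
var _ = ginkgo.Describe("qualified imports", func() {
	ginkgo.BeforeEach(func() {
		// per-spec setup; previously this called the dot-imported BeforeEach directly
	})

	ginkgo.It("reads the same, only with explicit package names", func() {
		ginkgo.By("doing the work under an explicit step label")
	})
})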

test/e2e/scheduling/equivalence_cache_predicates.go

Lines changed: 21 additions & 21 deletions
@@ -31,8 +31,8 @@ import (
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"

-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 	_ "github.com/stretchr/testify/assert"
 )

@@ -48,7 +48,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 	var ns string
 	f := framework.NewDefaultFramework("equivalence-cache")

-	BeforeEach(func() {
+	ginkgo.BeforeEach(func() {
 		cs = f.ClientSet
 		ns = f.Namespace.Name

@@ -61,7 +61,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 		// cannot be run in parallel with any other test that touches Nodes or Pods.
 		// It is so because we need to have precise control on what's running in the cluster.
 		systemPods, err := framework.GetPodsInNamespace(cs, ns, map[string]string{})
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		systemPodsNo = 0
 		for _, pod := range systemPods {
 			if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
@@ -70,7 +70,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 		}

 		err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{})
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())

 		for _, node := range nodeList.Items {
 			e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
@@ -83,15 +83,15 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 	// When a replica pod (with HostPorts) is scheduled to a node, it will invalidate GeneralPredicates cache on this node,
 	// so that subsequent replica pods with same host port claim will be rejected.
 	// We enforce all replica pods bind to the same node so there will always be conflicts.
-	It("validates GeneralPredicates is properly invalidated when a pod is scheduled [Slow]", func() {
-		By("Launching a RC with two replica pods with HostPorts")
+	ginkgo.It("validates GeneralPredicates is properly invalidated when a pod is scheduled [Slow]", func() {
+		ginkgo.By("Launching a RC with two replica pods with HostPorts")
 		nodeName := getNodeThatCanRunPodWithoutToleration(f)
 		rcName := "host-port"

 		// bind all replicas to same node
 		nodeSelector := map[string]string{"kubernetes.io/hostname": nodeName}

-		By("One pod should be scheduled, the other should be rejected")
+		ginkgo.By("One pod should be scheduled, the other should be rejected")
 		// CreateNodeSelectorPods creates RC with host port 4312
 		WaitForSchedulerAfterAction(f, func() error {
 			err := CreateNodeSelectorPods(f, rcName, 2, nodeSelector, false)
@@ -105,11 +105,11 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 	// This test verifies that MatchInterPodAffinity works as expected.
 	// In equivalence cache, it does not handle inter pod affinity (anti-affinity) specially (unless node label changed),
 	// because current predicates algorithm will ensure newly scheduled pod does not break existing affinity in cluster.
-	It("validates pod affinity works properly when new replica pod is scheduled", func() {
+	ginkgo.It("validates pod affinity works properly when new replica pod is scheduled", func() {
 		// create a pod running with label {security: S1}, and choose this node
 		nodeName, _ := runAndKeepPodWithLabelAndGetNodeName(f)

-		By("Trying to apply a random label on the found node.")
+		ginkgo.By("Trying to apply a random label on the found node.")
 		// we need to use real failure domains, since scheduler only know them
 		k := "failure-domain.beta.kubernetes.io/zone"
 		v := "equivalence-e2e-test"
@@ -118,7 +118,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 		// restore the node label
 		defer framework.AddOrUpdateLabelOnNode(cs, nodeName, k, oldValue)

-		By("Trying to schedule RC with Pod Affinity should success.")
+		ginkgo.By("Trying to schedule RC with Pod Affinity should success.")
 		framework.WaitForStableCluster(cs, masterNodes)
 		affinityRCName := "with-pod-affinity-" + string(uuid.NewUUID())
 		replica := 2
@@ -154,10 +154,10 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 		framework.ExpectNoError(err)
 		framework.ExpectNoError(framework.WaitForControlledPodsRunning(cs, ns, affinityRCName, api.Kind("ReplicationController")))

-		By("Remove node failure domain label")
+		ginkgo.By("Remove node failure domain label")
 		framework.RemoveLabelOffNode(cs, nodeName, k)

-		By("Trying to schedule another equivalent Pod should fail due to node label has been removed.")
+		ginkgo.By("Trying to schedule another equivalent Pod should fail due to node label has been removed.")
 		// use scale to create another equivalent pod and wait for failure event
 		WaitForSchedulerAfterAction(f, func() error {
 			err := framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false)
@@ -168,17 +168,17 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 	})

 	// This test verifies that MatchInterPodAffinity (anti-affinity) is respected as expected.
-	It("validates pod anti-affinity works properly when new replica pod is scheduled", func() {
-		By("Launching two pods on two distinct nodes to get two node names")
+	ginkgo.It("validates pod anti-affinity works properly when new replica pod is scheduled", func() {
+		ginkgo.By("Launching two pods on two distinct nodes to get two node names")
 		CreateHostPortPods(f, "host-port", 2, true)
 		defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "host-port")
 		podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
 		framework.ExpectNoError(err)
-		Expect(len(podList.Items)).To(Equal(2))
+		gomega.Expect(len(podList.Items)).To(gomega.Equal(2))
 		nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName}
-		Expect(nodeNames[0]).ToNot(Equal(nodeNames[1]))
+		gomega.Expect(nodeNames[0]).ToNot(gomega.Equal(nodeNames[1]))

-		By("Applying a random label to both nodes.")
+		ginkgo.By("Applying a random label to both nodes.")
 		k := "e2e.inter-pod-affinity.kubernetes.io/zone"
 		v := "equivalence-e2etest"
 		for _, nodeName := range nodeNames {
@@ -187,15 +187,15 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 			defer framework.RemoveLabelOffNode(cs, nodeName, k)
 		}

-		By("Trying to launch a pod with the service label on the selected nodes.")
+		ginkgo.By("Trying to launch a pod with the service label on the selected nodes.")
 		// run a pod with label {"service": "S1"} and expect it to be running
 		runPausePod(f, pausePodConfig{
 			Name:         "with-label-" + string(uuid.NewUUID()),
 			Labels:       map[string]string{"service": "S1"},
 			NodeSelector: map[string]string{k: v}, // only launch on our two nodes
 		})

-		By("Trying to launch RC with podAntiAffinity on these two nodes should be rejected.")
+		ginkgo.By("Trying to launch RC with podAntiAffinity on these two nodes should be rejected.")
 		labelRCName := "with-podantiaffinity-" + string(uuid.NewUUID())
 		replica := 2
 		labelsMap := map[string]string{
@@ -270,7 +270,7 @@ func getRCWithInterPodAffinityNodeSelector(name string, labelsMap map[string]str
 }

 func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) error {
-	By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
+	ginkgo.By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))

 	config := &testutils.RCConfig{
 		Client: f.ClientSet,
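
The gomega conversions in this file are a one-to-one mapping: Expect, HaveOccurred, and Equal simply gain the gomega. prefix. A self-contained sketch of the qualified assertion idiom; the helper and its inputs are hypothetical stand-ins, not code from this file:

package scheduling

import (
	"github.com/onsi/gomega"
)

// assertTwoDistinctNodes mirrors the anti-affinity test's assertions in qualified form.
// listNodeNames is a hypothetical stand-in for listing the node names of the RC's pods.
func assertTwoDistinctNodes(listNodeNames func() ([]string, error)) {
	nodeNames, err := listNodeNames()
	// Fail the spec if the listing returned an error.
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	// Exactly two pods, scheduled onto two different nodes.
	gomega.Expect(len(nodeNames)).To(gomega.Equal(2))
	gomega.Expect(nodeNames[0]).ToNot(gomega.Equal(nodeNames[1]))
}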

test/e2e/scheduling/limit_range.go

Lines changed: 45 additions & 45 deletions
@@ -30,8 +30,8 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"

-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )

 const (
@@ -41,8 +41,8 @@ const (
 var _ = SIGDescribe("LimitRange", func() {
 	f := framework.NewDefaultFramework("limitrange")

-	It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
-		By("Creating a LimitRange")
+	ginkgo.It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
+		ginkgo.By("Creating a LimitRange")

 		min := getResourceList("50m", "100Mi", "100Gi")
 		max := getResourceList("500m", "500Mi", "500Gi")
@@ -54,24 +54,24 @@ var _ = SIGDescribe("LimitRange", func() {
 			defaultLimit, defaultRequest,
 			maxLimitRequestRatio)

-		By("Setting up watch")
+		ginkgo.By("Setting up watch")
 		selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
 		options := metav1.ListOptions{LabelSelector: selector.String()}
 		limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
-		Expect(err).NotTo(HaveOccurred(), "failed to query for limitRanges")
-		Expect(len(limitRanges.Items)).To(Equal(0))
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for limitRanges")
+		gomega.Expect(len(limitRanges.Items)).To(gomega.Equal(0))
 		options = metav1.ListOptions{
 			LabelSelector:   selector.String(),
 			ResourceVersion: limitRanges.ListMeta.ResourceVersion,
 		}
 		w, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(metav1.ListOptions{})
-		Expect(err).NotTo(HaveOccurred(), "failed to set up watch")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch")

-		By("Submitting a LimitRange")
+		ginkgo.By("Submitting a LimitRange")
 		limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(limitRange)
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())

-		By("Verifying LimitRange creation was observed")
+		ginkgo.By("Verifying LimitRange creation was observed")
 		select {
 		case event, _ := <-w.ResultChan():
 			if event.Type != watch.Added {
@@ -81,39 +81,39 @@ var _ = SIGDescribe("LimitRange", func() {
 			framework.Failf("Timeout while waiting for LimitRange creation")
 		}

-		By("Fetching the LimitRange to ensure it has proper values")
+		ginkgo.By("Fetching the LimitRange to ensure it has proper values")
 		limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
 		actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
 		err = equalResourceRequirement(expected, actual)
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())

-		By("Creating a Pod with no resource requirements")
+		ginkgo.By("Creating a Pod with no resource requirements")
 		pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())

-		By("Ensuring Pod has resource requirements applied from LimitRange")
+		ginkgo.By("Ensuring Pod has resource requirements applied from LimitRange")
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		for i := range pod.Spec.Containers {
 			err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
 			if err != nil {
 				// Print the pod to help in debugging.
 				e2elog.Logf("Pod %+v does not have the expected requirements", pod)
-				Expect(err).NotTo(HaveOccurred())
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			}
 		}

-		By("Creating a Pod with partial resource requirements")
+		ginkgo.By("Creating a Pod with partial resource requirements")
 		pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi", "150Gi"), getResourceList("300m", "", ""))
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())

-		By("Ensuring Pod has merged resource requirements applied from LimitRange")
+		ginkgo.By("Ensuring Pod has merged resource requirements applied from LimitRange")
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		// This is an interesting case, so it's worth a comment
 		// If you specify a Limit, and no Request, the Limit will default to the Request
 		// This means that the LimitRange.DefaultRequest will ONLY take affect if a container.resources.limit is not supplied
@@ -123,49 +123,49 @@ var _ = SIGDescribe("LimitRange", func() {
 			if err != nil {
 				// Print the pod to help in debugging.
 				e2elog.Logf("Pod %+v does not have the expected requirements", pod)
-				Expect(err).NotTo(HaveOccurred())
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			}
 		}

-		By("Failing to create a Pod with less than min resources")
+		ginkgo.By("Failing to create a Pod with less than min resources")
 		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).To(HaveOccurred())
+		gomega.Expect(err).To(gomega.HaveOccurred())

-		By("Failing to create a Pod with more than max resources")
+		ginkgo.By("Failing to create a Pod with more than max resources")
 		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).To(HaveOccurred())
+		gomega.Expect(err).To(gomega.HaveOccurred())

-		By("Updating a LimitRange")
+		ginkgo.By("Updating a LimitRange")
 		newMin := getResourceList("9m", "49Mi", "49Gi")
 		limitRange.Spec.Limits[0].Min = newMin
 		limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(limitRange)
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())

-		By("Verifying LimitRange updating is effective")
-		Expect(wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
+		ginkgo.By("Verifying LimitRange updating is effective")
+		gomega.Expect(wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
 			limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil
-		})).NotTo(HaveOccurred())
+		})).NotTo(gomega.HaveOccurred())

-		By("Creating a Pod with less than former min resources")
+		ginkgo.By("Creating a Pod with less than former min resources")
 		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())

-		By("Failing to create a Pod with more than max resources")
+		ginkgo.By("Failing to create a Pod with more than max resources")
 		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).To(HaveOccurred())
+		gomega.Expect(err).To(gomega.HaveOccurred())

-		By("Deleting a LimitRange")
+		ginkgo.By("Deleting a LimitRange")
 		err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(limitRange.Name, metav1.NewDeleteOptions(30))
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())

-		By("Verifying the LimitRange was deleted")
-		Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
+		ginkgo.By("Verifying the LimitRange was deleted")
+		gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
 			selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
 			options := metav1.ListOptions{LabelSelector: selector.String()}
 			limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
@@ -190,12 +190,12 @@ var _ = SIGDescribe("LimitRange", func() {

 			return false, nil

-		})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
+		})).NotTo(gomega.HaveOccurred(), "kubelet never observed the termination notice")

-		By("Creating a Pod with more than former max resources")
+		ginkgo.By("Creating a Pod with more than former max resources")
 		pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})

 })
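
The only non-trivial conversions in this file are the two polling assertions, where the outer Expect wraps the error returned by wait.Poll rather than an API object. A sketch of that pattern under hypothetical names (checkUpdated stands in for re-reading the LimitRange and comparing its minimum):

package scheduling

import (
	"time"

	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/util/wait"
)

// expectEventuallyUpdated polls checkUpdated every two seconds for up to twenty seconds.
// wait.Poll returns a timeout error if the condition never reports done, so asserting
// NotTo(HaveOccurred()) on its result fails the spec when the update is never observed.
func expectEventuallyUpdated(checkUpdated func() bool) {
	gomega.Expect(wait.Poll(2*time.Second, 20*time.Second, func() (bool, error) {
		return checkUpdated(), nil
	})).NotTo(gomega.HaveOccurred())
}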

test/e2e/scheduling/nvidia-gpus.go

Lines changed: 6 additions & 6 deletions
@@ -30,8 +30,8 @@ import (
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	imageutils "k8s.io/kubernetes/test/utils/image"

-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )

 const (
@@ -130,7 +130,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
 	e2elog.Logf("Using %v", dsYamlUrl)
 	// Creates the DaemonSet that installs Nvidia Drivers.
 	ds, err := framework.DsFromManifest(dsYamlUrl)
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	ds.Namespace = f.Namespace.Name
 	_, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds)
 	framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset")
@@ -155,9 +155,9 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra

 	// Wait for Nvidia GPUs to be available on nodes
 	e2elog.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
-	Eventually(func() bool {
+	gomega.Eventually(func() bool {
 		return areGPUsAvailableOnAllSchedulableNodes(f)
-	}, driverInstallTimeout, time.Second).Should(BeTrue())
+	}, driverInstallTimeout, time.Second).Should(gomega.BeTrue())

 	return rsgather
 }
@@ -185,7 +185,7 @@ func testNvidiaGPUs(f *framework.Framework) {

 var _ = SIGDescribe("[Feature:GPUDevicePlugin]", func() {
 	f := framework.NewDefaultFramework("device-plugin-gpus")
-	It("run Nvidia GPU Device Plugin tests", func() {
+	ginkgo.It("run Nvidia GPU Device Plugin tests", func() {
 		testNvidiaGPUs(f)
 	})
 })
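
The driver-install wait above is the one place in these files that uses gomega's asynchronous assertion. A sketch of the qualified Eventually form, with a hypothetical readiness function and timeout standing in for areGPUsAvailableOnAllSchedulableNodes and driverInstallTimeout:

package scheduling

import (
	"time"

	"github.com/onsi/gomega"
)

// waitForGPUs re-evaluates gpusReady once per second until it returns true
// or installTimeout elapses; the spec fails on timeout.
func waitForGPUs(gpusReady func() bool, installTimeout time.Duration) {
	gomega.Eventually(func() bool {
		return gpusReady()
	}, installTimeout, time.Second).Should(gomega.BeTrue())
}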
