Skip to content

Commit b12fcd9

Browse files
E2Es for net seg: remove KinD-specific Node selector
Signed-off-by: Martin Kennelly <[email protected]>
1 parent f4205de commit b12fcd9

File tree

1 file changed

+40
-27
lines changed

1 file changed

+40
-27
lines changed

test/e2e/network_segmentation.go

Lines changed: 40 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -57,8 +57,6 @@ var _ = Describe("Network Segmentation", func() {
5757
userDefinedNetworkIPv6Subnet = "2014:100:200::0/60"
5858
userDefinedNetworkName = "hogwarts"
5959
nadName = "gryffindor"
60-
workerOneNodeName = "ovn-worker"
61-
workerTwoNodeName = "ovn-worker2"
6260
)
6361

6462
BeforeEach(func() {
@@ -146,13 +144,18 @@ var _ = Describe("Network Segmentation", func() {
146144
netConfig.namespace = f.Namespace.Name
147145
Expect(createNetworkFn(netConfig)).To(Succeed())
148146

147+
nodes, err := e2enode.GetBoundedReadySchedulableNodes(context.Background(), cs, 2)
148+
Expect(err).NotTo(HaveOccurred())
149+
Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "must be at least 2 Nodes to schedule pods")
150+
149151
By("creating client/server pods")
150152
serverPodConfig.namespace = f.Namespace.Name
153+
serverPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].Name}
151154
clientPodConfig.namespace = f.Namespace.Name
155+
clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[1].Name}
152156
runUDNPod(cs, f.Namespace.Name, serverPodConfig, nil)
153157
runUDNPod(cs, f.Namespace.Name, clientPodConfig, nil)
154158

155-
var err error
156159
var serverIP string
157160
for i, cidr := range strings.Split(netConfig.cidr, ",") {
158161
if cidr != "" {
@@ -188,14 +191,12 @@ var _ = Describe("Network Segmentation", func() {
188191
},
189192
*podConfig(
190193
"client-pod",
191-
withNodeSelector(map[string]string{nodeHostnameKey: workerOneNodeName}),
192194
),
193195
*podConfig(
194196
"server-pod",
195197
withCommand(func() []string {
196198
return httpServerContainerCmd(port)
197199
}),
198-
withNodeSelector(map[string]string{nodeHostnameKey: workerTwoNodeName}),
199200
),
200201
),
201202
Entry(
@@ -208,14 +209,12 @@ var _ = Describe("Network Segmentation", func() {
208209
},
209210
*podConfig(
210211
"client-pod",
211-
withNodeSelector(map[string]string{nodeHostnameKey: workerOneNodeName}),
212212
),
213213
*podConfig(
214214
"server-pod",
215215
withCommand(func() []string {
216216
return httpServerContainerCmd(port)
217217
}),
218-
withNodeSelector(map[string]string{nodeHostnameKey: workerTwoNodeName}),
219218
),
220219
),
221220
)
@@ -250,7 +249,13 @@ var _ = Describe("Network Segmentation", func() {
250249
netConfigParams.namespace = f.Namespace.Name
251250
Expect(createNetworkFn(netConfigParams)).To(Succeed())
252251

252+
nodes, err := e2enode.GetBoundedReadySchedulableNodes(context.Background(), cs, 1)
253+
Expect(err).NotTo(HaveOccurred())
254+
Expect(len(nodes.Items)).To(BeNumerically(">=", 1), "must be at least one Node to schedule pods")
255+
nodeName := nodes.Items[0].Name
253256
udnPodConfig.namespace = f.Namespace.Name
257+
udnPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].Name}
258+
254259
udnPod := runUDNPod(cs, f.Namespace.Name, udnPodConfig, func(pod *v1.Pod) {
255260
pod.Spec.Containers[0].ReadinessProbe = &v1.Probe{
256261
ProbeHandler: v1.ProbeHandler{
@@ -294,11 +299,11 @@ var _ = Describe("Network Segmentation", func() {
294299
})
295300

296301
By("creating default network pod")
297-
defaultPod, err := createPod(f, "default-net-pod", udnPodConfig.nodeSelector[nodeHostnameKey],
302+
defaultPod, err := createPod(f, "default-net-pod", nodeName,
298303
defaultNetNamespace, []string{"/agnhost", "netexec"}, nil)
299304
Expect(err).NotTo(HaveOccurred())
300305
By("creating default network client pod")
301-
defaultClientPod, err := createPod(f, "default-net-client-pod", udnPodConfig.nodeSelector[nodeHostnameKey],
306+
defaultClientPod, err := createPod(f, "default-net-client-pod", nodeName,
302307
defaultNetNamespace, []string{}, nil)
303308
Expect(err).NotTo(HaveOccurred())
304309

@@ -353,7 +358,7 @@ var _ = Describe("Network Segmentation", func() {
353358
Expect(udnPod.Status.ContainerStatuses[0].RestartCount).To(Equal(int32(0)))
354359

355360
By("restarting kubelet, pod should stay ready")
356-
_, err = runCommand(containerRuntime, "exec", workerOneNodeName,
361+
_, err = runCommand(containerRuntime, "exec", nodeName,
357362
"systemctl", "restart", "kubelet")
358363
Expect(err).NotTo(HaveOccurred())
359364

@@ -366,7 +371,7 @@ var _ = Describe("Network Segmentation", func() {
366371

367372
if !isUDNHostIsolationDisabled() {
368373
By("checking default network hostNetwork pod and non-kubelet host process can't reach the UDN pod")
369-
hostNetPod, err := createPod(f, "host-net-pod", udnPodConfig.nodeSelector[nodeHostnameKey],
374+
hostNetPod, err := createPod(f, "host-net-pod", nodeName,
370375
defaultNetNamespace, []string{}, nil, func(pod *v1.Pod) {
371376
pod.Spec.HostNetwork = true
372377
})
@@ -383,7 +388,7 @@ var _ = Describe("Network Segmentation", func() {
383388
}).Should(BeTrue())
384389
By("checking the non-kubelet host process can reach default pod on IP " + destIP)
385390
Eventually(func() bool {
386-
_, err = runCommand(containerRuntime, "exec", workerOneNodeName,
391+
_, err = runCommand(containerRuntime, "exec", nodeName,
387392
"curl", "--connect-timeout", "2",
388393
net.JoinHostPort(destIP, fmt.Sprintf("%d", defaultPort)))
389394
return err == nil
@@ -402,7 +407,7 @@ var _ = Describe("Network Segmentation", func() {
402407

403408
By("checking the non-kubelet host process can't reach UDN pod on IP " + destIP)
404409
Consistently(func() bool {
405-
_, err = runCommand(containerRuntime, "exec", workerOneNodeName,
410+
_, err = runCommand(containerRuntime, "exec", nodeName,
406411
"curl", "--connect-timeout", "2",
407412
net.JoinHostPort(destIP, fmt.Sprintf("%d", port)))
408413
return err != nil
@@ -478,7 +483,6 @@ var _ = Describe("Network Segmentation", func() {
478483
withCommand(func() []string {
479484
return httpServerContainerCmd(port)
480485
}),
481-
withNodeSelector(map[string]string{nodeHostnameKey: workerOneNodeName}),
482486
),
483487
),
484488
Entry(
@@ -494,7 +498,6 @@ var _ = Describe("Network Segmentation", func() {
494498
withCommand(func() []string {
495499
return httpServerContainerCmd(port)
496500
}),
497-
withNodeSelector(map[string]string{nodeHostnameKey: workerOneNodeName}),
498501
),
499502
),
500503
)
@@ -514,6 +517,12 @@ var _ = Describe("Network Segmentation", func() {
514517
namespaceRed := f.Namespace.Name + "-" + red
515518
namespaceBlue := f.Namespace.Name + "-" + blue
516519

520+
nodes, err := e2enode.GetBoundedReadySchedulableNodes(context.TODO(), cs, 2)
521+
framework.ExpectNoError(err)
522+
523+
node1Name := nodes.Items[0].Name
524+
node2Name := nodes.Items[1].Name
525+
517526
for _, namespace := range []string{namespaceRed, namespaceBlue} {
518527
By("Creating namespace " + namespace)
519528
_, err := cs.CoreV1().Namespaces().Create(context.Background(), &v1.Namespace{
@@ -569,11 +578,11 @@ var _ = Describe("Network Segmentation", func() {
569578
podConfig.namespace = namespace
570579
// ensure testing across nodes
571580
if i%2 == 0 {
572-
podConfig.nodeSelector = map[string]string{nodeHostnameKey: workerOneNodeName}
581+
podConfig.nodeSelector = map[string]string{nodeHostnameKey: node1Name}
573582

574583
} else {
575584

576-
podConfig.nodeSelector = map[string]string{nodeHostnameKey: workerTwoNodeName}
585+
podConfig.nodeSelector = map[string]string{nodeHostnameKey: node2Name}
577586
}
578587
By("creating pod " + podConfig.name + " in " + podConfig.namespace)
579588
pod := runUDNPod(cs, podConfig.namespace, podConfig, nil)
@@ -695,20 +704,23 @@ var _ = Describe("Network Segmentation", func() {
695704
cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet),
696705
role: "primary",
697706
}
707+
nodes, err := e2enode.GetBoundedReadySchedulableNodes(context.TODO(), cs, 2)
708+
framework.ExpectNoError(err)
709+
node1Name, node2Name := nodes.Items[0].Name, nodes.Items[1].Name
698710
clientPodConfig := *podConfig(
699711
"client-pod",
700-
withNodeSelector(map[string]string{nodeHostnameKey: workerOneNodeName}),
712+
withNodeSelector(map[string]string{nodeHostnameKey: node1Name}),
701713
)
702714
serverPodConfig := *podConfig(
703715
"server-pod",
704716
withCommand(func() []string {
705717
return httpServerContainerCmd(port)
706718
}),
707-
withNodeSelector(map[string]string{nodeHostnameKey: workerTwoNodeName}),
719+
withNodeSelector(map[string]string{nodeHostnameKey: node2Name}),
708720
)
709721

710722
By("creating second namespace")
711-
_, err := cs.CoreV1().Namespaces().Create(context.Background(), &v1.Namespace{
723+
_, err = cs.CoreV1().Namespaces().Create(context.Background(), &v1.Namespace{
712724
ObjectMeta: metav1.ObjectMeta{
713725
Name: netConfig2.namespace,
714726
Labels: map[string]string{RequiredUDNNamespaceLabel: ""},
@@ -1556,9 +1568,12 @@ spec:
15561568
})
15571569

15581570
It("should react to k8s.ovn.org/open-default-ports annotations changes", func() {
1571+
nodes, err := e2enode.GetBoundedReadySchedulableNodes(context.TODO(), cs, 1)
1572+
framework.ExpectNoError(err)
1573+
node1Name := nodes.Items[0].Name
15591574
By("Creating second namespace for default network pod")
15601575
defaultNetNamespace := f.Namespace.Name + "-default"
1561-
_, err := cs.CoreV1().Namespaces().Create(context.Background(), &v1.Namespace{
1576+
_, err = cs.CoreV1().Namespaces().Create(context.Background(), &v1.Namespace{
15621577
ObjectMeta: metav1.ObjectMeta{
15631578
Name: defaultNetNamespace,
15641579
},
@@ -1569,12 +1584,12 @@ spec:
15691584
}()
15701585

15711586
By("creating default network client pod")
1572-
defaultClientPod, err := createPod(f, "default-net-client-pod", workerOneNodeName,
1587+
defaultClientPod, err := createPod(f, "default-net-client-pod", node1Name,
15731588
defaultNetNamespace, []string{}, nil)
15741589
Expect(err).NotTo(HaveOccurred())
15751590

15761591
By("creating default network hostNetwork client pod")
1577-
hostNetPod, err := createPod(f, "host-net-client-pod", workerOneNodeName,
1592+
hostNetPod, err := createPod(f, "host-net-client-pod", node1Name,
15781593
defaultNetNamespace, []string{}, nil, func(pod *v1.Pod) {
15791594
pod.Spec.HostNetwork = true
15801595
})
@@ -1692,7 +1707,9 @@ spec:
16921707
Expect(err).ShouldNot(HaveOccurred(), "test requires at least two schedulable nodes")
16931708
Expect(len(nodes.Items)).Should(BeNumerically(">=", 2), "test requires >= 2 Ready nodes")
16941709
serverPodConfig.namespace = f.Namespace.Name
1710+
serverPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].Name}
16951711
clientPodConfig.namespace = f.Namespace.Name
1712+
clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[1].Name}
16961713
runUDNPod(cs, f.Namespace.Name, serverPodConfig, nil)
16971714
runUDNPod(cs, f.Namespace.Name, clientPodConfig, nil)
16981715
serverIP, err := podIPsForUserDefinedPrimaryNetwork(cs, f.Namespace.Name, serverPodConfig.name, namespacedName(f.Namespace.Name, netConfig.name), 0)
@@ -1719,14 +1736,12 @@ spec:
17191736
},
17201737
*podConfig(
17211738
"client-pod",
1722-
withNodeSelector(map[string]string{nodeHostnameKey: workerOneNodeName}),
17231739
),
17241740
*podConfig(
17251741
"server-pod",
17261742
withCommand(func() []string {
17271743
return httpServerContainerCmd(port)
17281744
}),
1729-
withNodeSelector(map[string]string{nodeHostnameKey: workerTwoNodeName}),
17301745
),
17311746
),
17321747
Entry(
@@ -1739,14 +1754,12 @@ spec:
17391754
},
17401755
*podConfig(
17411756
"client-pod",
1742-
withNodeSelector(map[string]string{nodeHostnameKey: workerOneNodeName}),
17431757
),
17441758
*podConfig(
17451759
"server-pod",
17461760
withCommand(func() []string {
17471761
return httpServerContainerCmd(port)
17481762
}),
1749-
withNodeSelector(map[string]string{nodeHostnameKey: workerTwoNodeName}),
17501763
),
17511764
),
17521765
)

0 commit comments

Comments
 (0)