
Commit 8e8bef6

Refactor endpoint/execpod antiaffinity
Also make launchHostExecPod return a fully-filled-in Pod object.
1 parent d62b797 commit 8e8bef6

1 file changed: +39 -40 lines changed

test/e2e/network/service.go

Lines changed: 39 additions & 40 deletions
@@ -333,7 +333,7 @@ func StopServeHostnameService(ctx context.Context, clientset clientset.Interface
 // given expectedPods list after a sort | uniq.
 func verifyServeHostnameServiceUp(ctx context.Context, c clientset.Interface, ns string, expectedPods []string, serviceIP string, servicePort int) error {
 	// to verify from host network
-	hostExecPod := launchHostExecPod(ctx, c, ns, "verify-service-up-host-exec-pod")
+	hostExecPod := launchHostExecPod(ctx, c, ns, "verify-service-up-host-exec-pod", nil)

 	// to verify from container's network
 	execPod := e2epod.CreateExecPodOrFail(ctx, c, ns, "verify-service-up-exec-pod-", nil)
@@ -403,7 +403,7 @@ func verifyServeHostnameServiceUp(ctx context.Context, c clientset.Interface, ns
 // verifyServeHostnameServiceDown verifies that the given service isn't served.
 func verifyServeHostnameServiceDown(ctx context.Context, c clientset.Interface, ns string, serviceIP string, servicePort int) error {
 	// verify from host network
-	hostExecPod := launchHostExecPod(ctx, c, ns, "verify-service-down-host-exec-pod")
+	hostExecPod := launchHostExecPod(ctx, c, ns, "verify-service-down-host-exec-pod", nil)
 	defer func() {
 		e2epod.DeletePodOrFail(ctx, c, ns, hostExecPod.Name)
 	}()
@@ -1706,7 +1706,7 @@ var _ = common.SIGDescribe("Services", func() {
 		err = t.DeleteService(serviceName)
 		framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)

-		hostExec := launchHostExecPod(ctx, f.ClientSet, f.Namespace.Name, "hostexec")
+		hostExec := launchHostExecPod(ctx, f.ClientSet, f.Namespace.Name, "hostexec", nil)
 		cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort)
 		var stdout string
 		if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
@@ -2705,51 +2705,31 @@ var _ = common.SIGDescribe("Services", func() {
 		jig := e2eservice.NewTestJig(cs, namespace, serviceName)

 		ginkgo.By("creating the service")
-		svc, err := jig.CreateOnlyLocalNodePortService(ctx, false)
+		svc, err := jig.CreateOnlyLocalNodePortService(ctx, true)
 		framework.ExpectNoError(err, "creating the service")
 		tcpNodePort := int(svc.Spec.Ports[0].NodePort)
 		nodePortStr := fmt.Sprintf("%d", tcpNodePort)
 		framework.Logf("NodePort is %s", nodePortStr)

-		ginkgo.By("creating a HostNetwork exec pod")
-		execPod := launchHostExecPod(ctx, cs, namespace, "hostexec")
-		execPod, err = cs.CoreV1().Pods(namespace).Get(ctx, execPod.Name, metav1.GetOptions{})
-		framework.ExpectNoError(err, "getting podIP of execPod")
-		framework.Logf("execPod IP is %q", execPod.Status.PodIP)
-
-		ginkgo.By("creating an endpoint for the service on a different node from the execPod")
-		_, err = jig.Run(ctx, func(rc *v1.ReplicationController) {
-			rc.Spec.Template.Spec.Affinity = &v1.Affinity{
-				// We need to ensure the endpoint is on a different node
-				// from the exec pod, to ensure that the source IP of the
-				// traffic is the node's "public" IP. For
-				// node-to-pod-on-same-node traffic, it might end up using
-				// the "docker0" IP or something like that.
-				NodeAffinity: &v1.NodeAffinity{
-					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
-						NodeSelectorTerms: []v1.NodeSelectorTerm{{
-							MatchFields: []v1.NodeSelectorRequirement{{
-								Key:      "metadata.name",
-								Operator: "NotIn",
-								Values:   []string{execPod.Spec.NodeName},
-							}},
-						}},
-					},
-				},
-			}
-		})
-		framework.ExpectNoError(err, "creating the endpoint pod")
-
-		// Extract the single endpoint node IP from a map of endpoint node IPs
-		var endpointNodeIP string
+		// Get the (single) endpoint's node name and IP
+		var endpointNodeName, endpointNodeIP string
 		endpointsNodeMap, err := getEndpointNodesWithInternalIP(ctx, jig)
 		framework.ExpectNoError(err, "fetching endpoint node IPs")
 		for node, nodeIP := range endpointsNodeMap {
 			framework.Logf("endpoint is on node %s (%s)", node, nodeIP)
+			endpointNodeName = node
 			endpointNodeIP = nodeIP
 			break
 		}

+		// We need to ensure the endpoint is on a different node from the exec
+		// pod, to ensure that the source IP of the traffic is the node's "public"
+		// IP. For node-to-pod-on-same-node traffic, it might end up using the
+		// "docker0" IP or something like that.
+		ginkgo.By("creating a HostNetwork exec pod on a different node")
+		execPod := launchHostExecPod(ctx, cs, namespace, "hostexec", &endpointNodeName)
+		framework.Logf("execPod IP is %q", execPod.Status.PodIP)
+
 		ginkgo.By("connecting from the execpod to the NodePort on the endpoint's node")
 		cmd := fmt.Sprintf("curl -g -q -s --connect-timeout 3 http://%s/clientip", net.JoinHostPort(endpointNodeIP, nodePortStr))
 		var clientIP string
@@ -4184,14 +4164,33 @@ func createPodOrFail(ctx context.Context, f *framework.Framework, ns, name strin
 }

 // launchHostExecPod launches a hostexec pod in the given namespace and waits
-// until it's Running
-func launchHostExecPod(ctx context.Context, client clientset.Interface, ns, name string) *v1.Pod {
+// until it's Running. If avoidNode is non-nil, it will ensure that the pod doesn't
+// land on that node.
+func launchHostExecPod(ctx context.Context, client clientset.Interface, ns, name string, avoidNode *string) *v1.Pod {
 	framework.Logf("Creating new host exec pod")
 	hostExecPod := e2epod.NewExecPodSpec(ns, name, true)
-	pod, err := client.CoreV1().Pods(ns).Create(ctx, hostExecPod, metav1.CreateOptions{})
-	framework.ExpectNoError(err)
+	if avoidNode != nil {
+		hostExecPod.Spec.Affinity = &v1.Affinity{
+			NodeAffinity: &v1.NodeAffinity{
+				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
+					NodeSelectorTerms: []v1.NodeSelectorTerm{{
+						MatchFields: []v1.NodeSelectorRequirement{{
+							Key:      "metadata.name",
+							Operator: "NotIn",
+							Values:   []string{*avoidNode},
+						}},
+					}},
+				},
+			},
+		}
+	}
+	_, err := client.CoreV1().Pods(ns).Create(ctx, hostExecPod, metav1.CreateOptions{})
+	framework.ExpectNoError(err, "creating host exec pod")
 	err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, client, name, ns, framework.PodStartTimeout)
-	framework.ExpectNoError(err)
+	framework.ExpectNoError(err, "waiting for host exec pod")
+	// re-fetch to get PodIP, etc
+	pod, err := client.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
+	framework.ExpectNoError(err, "getting podIP of host exec pod")
 	return pod
 }

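Note: the core of the refactor is the required node anti-affinity rule that launchHostExecPod now builds when avoidNode is non-nil; it keeps the pod off a specific node by matching on the node object's metadata.name field via MatchFields (which, unlike MatchExpressions, can key on the node name rather than a label). Below is a standalone sketch of that rule using only k8s.io/api/core/v1; the avoidNodeAffinity helper name and the "worker-1" node are illustrative and not part of the commit.

// Sketch (not from the commit): the anti-affinity rule built by
// launchHostExecPod when avoidNode != nil, extracted into a helper.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// avoidNodeAffinity returns a required node-affinity rule that keeps a pod
// off the named node by matching on the node's metadata.name field.
func avoidNodeAffinity(nodeName string) *v1.Affinity {
	return &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{{
					// MatchFields (rather than MatchExpressions) is what lets
					// the selector key on metadata.name instead of a label.
					MatchFields: []v1.NodeSelectorRequirement{{
						Key:      "metadata.name",
						Operator: v1.NodeSelectorOpNotIn,
						Values:   []string{nodeName},
					}},
				}},
			},
		},
	}
}

func main() {
	pod := &v1.Pod{}
	pod.Spec.Affinity = avoidNodeAffinity("worker-1") // keep the pod off "worker-1"
	fmt.Printf("affinity: %+v\n", pod.Spec.Affinity.NodeAffinity)
}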