@@ -333,7 +333,7 @@ func StopServeHostnameService(ctx context.Context, clientset clientset.Interface
 // given expectedPods list after a sort | uniq.
 func verifyServeHostnameServiceUp(ctx context.Context, c clientset.Interface, ns string, expectedPods []string, serviceIP string, servicePort int) error {
 	// to verify from host network
-	hostExecPod := launchHostExecPod(ctx, c, ns, "verify-service-up-host-exec-pod")
+	hostExecPod := launchHostExecPod(ctx, c, ns, "verify-service-up-host-exec-pod", nil)
 
 	// to verify from container's network
 	execPod := e2epod.CreateExecPodOrFail(ctx, c, ns, "verify-service-up-exec-pod-", nil)
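
(Aside: the "sort | uniq" in the doc comment above refers to comparing the set of hostnames the service echoes back against expectedPods after deduplicating and sorting both sides. Below is a standalone, illustrative sketch of that comparison, not the file's actual implementation.)

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
)

// uniqSorted mimics `sort | uniq`: it returns the sorted set of distinct values.
func uniqSorted(in []string) []string {
	set := map[string]struct{}{}
	for _, s := range in {
		set[s] = struct{}{}
	}
	out := make([]string, 0, len(set))
	for s := range set {
		out = append(out, s)
	}
	sort.Strings(out)
	return out
}

func main() {
	// Hypothetical data: the pod names we expect, and the hostnames
	// returned by repeated requests against the service.
	expectedPods := []string{"pod-a", "pod-b", "pod-c"}
	gotHostnames := []string{"pod-b", "pod-a", "pod-b", "pod-c", "pod-a"}
	fmt.Println("all pods served:", reflect.DeepEqual(uniqSorted(gotHostnames), uniqSorted(expectedPods)))
}
```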
@@ -403,7 +403,7 @@ func verifyServeHostnameServiceUp(ctx context.Context, c clientset.Interface, ns
 // verifyServeHostnameServiceDown verifies that the given service isn't served.
 func verifyServeHostnameServiceDown(ctx context.Context, c clientset.Interface, ns string, serviceIP string, servicePort int) error {
 	// verify from host network
-	hostExecPod := launchHostExecPod(ctx, c, ns, "verify-service-down-host-exec-pod")
+	hostExecPod := launchHostExecPod(ctx, c, ns, "verify-service-down-host-exec-pod", nil)
 	defer func() {
 		e2epod.DeletePodOrFail(ctx, c, ns, hostExecPod.Name)
 	}()
@@ -1706,7 +1706,7 @@ var _ = common.SIGDescribe("Services", func() {
 		err = t.DeleteService(serviceName)
 		framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
 
-		hostExec := launchHostExecPod(ctx, f.ClientSet, f.Namespace.Name, "hostexec")
+		hostExec := launchHostExecPod(ctx, f.ClientSet, f.Namespace.Name, "hostexec", nil)
 		cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort)
 		var stdout string
 		if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
@@ -2705,51 +2705,31 @@ var _ = common.SIGDescribe("Services", func() {
 		jig := e2eservice.NewTestJig(cs, namespace, serviceName)
 
 		ginkgo.By("creating the service")
-		svc, err := jig.CreateOnlyLocalNodePortService(ctx, false)
+		svc, err := jig.CreateOnlyLocalNodePortService(ctx, true)
 		framework.ExpectNoError(err, "creating the service")
 		tcpNodePort := int(svc.Spec.Ports[0].NodePort)
 		nodePortStr := fmt.Sprintf("%d", tcpNodePort)
 		framework.Logf("NodePort is %s", nodePortStr)
 
-		ginkgo.By("creating a HostNetwork exec pod")
-		execPod := launchHostExecPod(ctx, cs, namespace, "hostexec")
-		execPod, err = cs.CoreV1().Pods(namespace).Get(ctx, execPod.Name, metav1.GetOptions{})
-		framework.ExpectNoError(err, "getting podIP of execPod")
-		framework.Logf("execPod IP is %q", execPod.Status.PodIP)
-
-		ginkgo.By("creating an endpoint for the service on a different node from the execPod")
-		_, err = jig.Run(ctx, func(rc *v1.ReplicationController) {
-			rc.Spec.Template.Spec.Affinity = &v1.Affinity{
-				// We need to ensure the endpoint is on a different node
-				// from the exec pod, to ensure that the source IP of the
-				// traffic is the node's "public" IP. For
-				// node-to-pod-on-same-node traffic, it might end up using
-				// the "docker0" IP or something like that.
-				NodeAffinity: &v1.NodeAffinity{
-					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
-						NodeSelectorTerms: []v1.NodeSelectorTerm{{
-							MatchFields: []v1.NodeSelectorRequirement{{
-								Key:      "metadata.name",
-								Operator: "NotIn",
-								Values:   []string{execPod.Spec.NodeName},
-							}},
-						}},
-					},
-				},
-			}
-		})
-		framework.ExpectNoError(err, "creating the endpoint pod")
-
-		// Extract the single endpoint node IP from a map of endpoint node IPs
-		var endpointNodeIP string
+		// Get the (single) endpoint's node name and IP
+		var endpointNodeName, endpointNodeIP string
 		endpointsNodeMap, err := getEndpointNodesWithInternalIP(ctx, jig)
 		framework.ExpectNoError(err, "fetching endpoint node IPs")
 		for node, nodeIP := range endpointsNodeMap {
 			framework.Logf("endpoint is on node %s (%s)", node, nodeIP)
+			endpointNodeName = node
 			endpointNodeIP = nodeIP
 			break
 		}
 
+		// We need to ensure the endpoint is on a different node from the exec
+		// pod, to ensure that the source IP of the traffic is the node's "public"
+		// IP. For node-to-pod-on-same-node traffic, it might end up using the
+		// "docker0" IP or something like that.
+		ginkgo.By("creating a HostNetwork exec pod on a different node")
+		execPod := launchHostExecPod(ctx, cs, namespace, "hostexec", &endpointNodeName)
+		framework.Logf("execPod IP is %q", execPod.Status.PodIP)
+
 		ginkgo.By("connecting from the execpod to the NodePort on the endpoint's node")
 		cmd := fmt.Sprintf("curl -g -q -s --connect-timeout 3 http://%s/clientip", net.JoinHostPort(endpointNodeIP, nodePortStr))
 		var clientIP string
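
(For context on the /clientip request above: the backend echoes the caller's remote address as "ip:port", and with an externalTrafficPolicy: Local service the IP part is expected to match the host-network exec pod's IP, i.e. its node IP. Below is a minimal, self-contained sketch of that comparison with hypothetical values; it is not the test's actual assertion code.)

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Hypothetical values: what /clientip might return, and the exec pod's
	// IP (equal to its node IP, since the exec pod uses host networking).
	clientIPPort := "10.0.0.5:52378" // response body from the /clientip endpoint
	execPodIP := "10.0.0.5"          // execPod.Status.PodIP

	clientIP, _, err := net.SplitHostPort(clientIPPort)
	if err != nil {
		panic(err)
	}
	// With externalTrafficPolicy: Local the source IP must be preserved,
	// so the client IP seen by the backend should equal the exec pod's IP.
	fmt.Println("source IP preserved:", clientIP == execPodIP)
}
```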
@@ -4184,14 +4164,33 @@ func createPodOrFail(ctx context.Context, f *framework.Framework, ns, name strin
 }
 
 // launchHostExecPod launches a hostexec pod in the given namespace and waits
-// until it's Running
-func launchHostExecPod(ctx context.Context, client clientset.Interface, ns, name string) *v1.Pod {
+// until it's Running. If avoidNode is non-nil, it will ensure that the pod doesn't
+// land on that node.
+func launchHostExecPod(ctx context.Context, client clientset.Interface, ns, name string, avoidNode *string) *v1.Pod {
 	framework.Logf("Creating new host exec pod")
 	hostExecPod := e2epod.NewExecPodSpec(ns, name, true)
-	pod, err := client.CoreV1().Pods(ns).Create(ctx, hostExecPod, metav1.CreateOptions{})
-	framework.ExpectNoError(err)
+	if avoidNode != nil {
+		hostExecPod.Spec.Affinity = &v1.Affinity{
+			NodeAffinity: &v1.NodeAffinity{
+				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
+					NodeSelectorTerms: []v1.NodeSelectorTerm{{
+						MatchFields: []v1.NodeSelectorRequirement{{
+							Key:      "metadata.name",
+							Operator: "NotIn",
+							Values:   []string{*avoidNode},
+						}},
+					}},
+				},
+			},
+		}
+	}
+	_, err := client.CoreV1().Pods(ns).Create(ctx, hostExecPod, metav1.CreateOptions{})
+	framework.ExpectNoError(err, "creating host exec pod")
 	err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, client, name, ns, framework.PodStartTimeout)
-	framework.ExpectNoError(err)
+	framework.ExpectNoError(err, "waiting for host exec pod")
+	// re-fetch to get PodIP, etc
+	pod, err := client.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
+	framework.ExpectNoError(err, "getting podIP of host exec pod")
 	return pod
 }
 
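(Usage sketch for the new signature, assuming the usual ctx, clientset cs, and namespace of these e2e tests are in scope; the node name below is hypothetical.)

```go
// Keep the host-network exec pod off the node hosting the service endpoint,
// so traffic to the NodePort arrives from a different node.
endpointNode := "worker-1" // assumed; e.g. taken from getEndpointNodesWithInternalIP
hostExec := launchHostExecPod(ctx, cs, namespace, "hostexec", &endpointNode)
framework.Logf("hostexec pod landed on %s (avoiding %s)", hostExec.Spec.NodeName, endpointNode)

// Passing nil keeps the previous behaviour: the scheduler may pick any node.
_ = launchHostExecPod(ctx, cs, namespace, "hostexec-anywhere", nil)
```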