@@ -303,56 +303,44 @@ func StopServeHostnameService(clientset clientset.Interface, ns, name string) er
}

// verifyServeHostnameServiceUp wgets the given serviceIP:servicePort from the
- // given host and from within a pod. The host is expected to be an SSH-able node
- // in the cluster. Each pod in the service is expected to echo its name. These
- // names are compared with the given expectedPods list after a sort | uniq.
- func verifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expectedPods []string, serviceIP string, servicePort int) error {
-     execPod := e2epod.CreateExecPodOrFail(c, ns, "execpod-", nil)
+ // host exec pod of host network type and from the exec pod of container network type.
+ // Each pod in the service is expected to echo its name. These names are compared with the
+ // given expectedPods list after a sort | uniq.
+ func verifyServeHostnameServiceUp(c clientset.Interface, ns string, expectedPods []string, serviceIP string, servicePort int) error {
+     // to verify from host network
+     hostExecPod := launchHostExecPod(c, ns, "verify-service-up-host-exec-pod")
+
+     // to verify from container's network
+     execPod := e2epod.CreateExecPodOrFail(c, ns, "verify-service-up-exec-pod-", nil)
    defer func() {
+         e2epod.DeletePodOrFail(c, ns, hostExecPod.Name)
        e2epod.DeletePodOrFail(c, ns, execPod.Name)
    }()

-     // Loop a bunch of times - the proxy is randomized, so we want a good
-     // chance of hitting each backend at least once.
-     buildCommand := func(wget string) string {
+     // verify service from pod
+     cmdFunc := func(podName string) string {
+         wgetCmd := "wget -q -T 1 -O -"
        serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
-         return fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s 2>&1 || true; echo; done",
-             50*len(expectedPods), wget, serviceIPPort)
-     }
-     commands := []func() string{
-         // verify service from node
-         func() string {
-             cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -")
-             framework.Logf("Executing cmd %q on host %v", cmd, host)
-             result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
-             if err != nil || result.Code != 0 {
-                 e2essh.LogResult(result)
-                 framework.Logf("error while SSH-ing to node: %v", err)
-             }
-             return result.Stdout
-         },
-         // verify service from pod
-         func() string {
-             cmd := buildCommand("wget -q -T 1 -O -")
-             framework.Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPod.Name)
-             // TODO: Use exec-over-http via the netexec pod instead of kubectl exec.
-             output, err := framework.RunHostCmd(ns, execPod.Name, cmd)
-             if err != nil {
-                 framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPod.Name, err, output)
-             }
-             return output
-         },
+         cmd := fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s 2>&1 || true; echo; done",
+             50*len(expectedPods), wgetCmd, serviceIPPort)
+         framework.Logf("Executing cmd %q in pod %v/%v", cmd, ns, podName)
+         // TODO: Use exec-over-http via the netexec pod instead of kubectl exec.
+         output, err := framework.RunHostCmd(ns, podName, cmd)
+         if err != nil {
+             framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, podName, err, output)
+         }
+         return output
    }

    expectedEndpoints := sets.NewString(expectedPods...)
    ginkgo.By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods)))
-     for _, cmdFunc := range commands {
+     for _, podName := range []string{hostExecPod.Name, execPod.Name} {
        passed := false
        gotEndpoints := sets.NewString()

        // Retry cmdFunc for a while
        for start := time.Now(); time.Since(start) < e2eservice.KubeProxyLagTimeout; time.Sleep(5 * time.Second) {
-             for _, endpoint := range strings.Split(cmdFunc(), "\n") {
+             for _, endpoint := range strings.Split(cmdFunc(podName), "\n") {
                trimmedEp := strings.TrimSpace(endpoint)
                if trimmedEp != "" {
                    gotEndpoints.Insert(trimmedEp)
@@ -384,25 +372,32 @@ func verifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expect
}

// verifyServeHostnameServiceDown verifies that the given service isn't served.
- func verifyServeHostnameServiceDown(c clientset.Interface, host string, serviceIP string, servicePort int) error {
+ func verifyServeHostnameServiceDown(c clientset.Interface, ns string, serviceIP string, servicePort int) error {
+     // verify from host network
+     hostExecPod := launchHostExecPod(c, ns, "verify-service-down-host-exec-pod")
+
+     defer func() {
+         e2epod.DeletePodOrFail(c, ns, hostExecPod.Name)
+     }()
+
    ipPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
    // The current versions of curl included in CentOS and RHEL distros
    // misinterpret square brackets around IPv6 as globbing, so use the -g
    // argument to disable globbing to handle the IPv6 case.
    command := fmt.Sprintf(
-         "curl -g -s --connect-timeout 2 http://%s && exit 99", ipPort)
+         "curl -g -s --connect-timeout 2 http://%s && echo service-down-failed", ipPort)

-     for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
-         result, err := e2essh.SSH(command, host, framework.TestContext.Provider)
+     for start := time.Now(); time.Since(start) < e2eservice.KubeProxyLagTimeout; time.Sleep(5 * time.Second) {
+         output, err := framework.RunHostCmd(ns, hostExecPod.Name, command)
        if err != nil {
-             e2essh.LogResult(result)
-             framework.Logf("error while SSH-ing to node: %v", err)
+             framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", command, ns, hostExecPod.Name, err, output)
        }
-         if result.Code != 99 {
+         if !strings.Contains(output, "service-down-failed") {
            return nil
        }
        framework.Logf("service still alive - still waiting")
    }
+
    return fmt.Errorf("waiting for service to be down timed out")
}

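For orientation, a minimal usage sketch of the refactored helpers, assuming a spec body where cs, ns, podNames, svcIP, and servicePort are already in scope (as in the tests below); the service name "up-down-1" is illustrative:

    // Reachability is checked from a host-network exec pod and from a regular
    // exec pod, so the test no longer needs SSH access to a node.
    framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames, svcIP, servicePort))
    framework.ExpectNoError(StopServeHostnameService(cs, ns, "up-down-1"))
    // The down-check looks for the marker string in curl's output because
    // framework.RunHostCmd returns the command's output text rather than a
    // numeric exit code like e2essh.SSH did.
    framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcIP, servicePort))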
@@ -1060,12 +1055,6 @@ var _ = SIGDescribe("Services", func() {
})

ginkgo.It("should be able to up and down services", func() {
-     // TODO: use the ServiceTestJig here
-     // this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP
-     e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-     // this test does not work if the Node does not support SSH Key
-     e2eskipper.SkipUnlessSSHKeyPresent()
-
    ns := f.Namespace.Name
    numPods, servicePort := 3, defaultServeHostnameServicePort

@@ -1080,27 +1069,20 @@ var _ = SIGDescribe("Services", func() {
    podNames2, svc2IP, err := StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods)
    framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns)

-     hosts, err := e2essh.NodeSSHHosts(cs)
-     framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
-     if len(hosts) == 0 {
-         framework.Failf("No ssh-able nodes")
-     }
-     host := hosts[0]
-
    ginkgo.By("verifying service " + svc1 + " is up")
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))

    ginkgo.By("verifying service " + svc2 + " is up")
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))

    // Stop service 1 and make sure it is gone.
    ginkgo.By("stopping service " + svc1)
    framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, svc1))

    ginkgo.By("verifying service " + svc1 + " is not up")
-     framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svc1IP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svc1IP, servicePort))
    ginkgo.By("verifying service " + svc2 + " is still up")
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))

    // Start another service and verify both are up.
    ginkgo.By("creating service " + svc3 + " in namespace " + ns)
@@ -1112,10 +1094,10 @@ var _ = SIGDescribe("Services", func() {
    }

    ginkgo.By("verifying service " + svc2 + " is still up")
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))

    ginkgo.By("verifying service " + svc3 + " is up")
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames3, svc3IP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames3, svc3IP, servicePort))
})

ginkgo.It("should work after restarting kube-proxy [Disruptive]", func() {
@@ -1152,15 +1134,15 @@ var _ = SIGDescribe("Services", func() {
    }
    host := hosts[0]

-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))

    ginkgo.By(fmt.Sprintf("Restarting kube-proxy on %v", host))
    if err := restartKubeProxy(host); err != nil {
        framework.Failf("error restarting kube-proxy: %v", err)
    }
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))
})

ginkgo.It("should work after restarting apiserver [Disruptive]", func() {
@@ -1180,14 +1162,7 @@ var _ = SIGDescribe("Services", func() {
    podNames1, svc1IP, err := StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods)
    framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns)

-     hosts, err := e2essh.NodeSSHHosts(cs)
-     framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
-     if len(hosts) == 0 {
-         framework.Failf("No ssh-able nodes")
-     }
-     host := hosts[0]
-
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))

    // Restart apiserver
    ginkgo.By("Restarting apiserver")
@@ -1198,7 +1173,7 @@ var _ = SIGDescribe("Services", func() {
    if err := waitForApiserverUp(cs); err != nil {
        framework.Failf("error while waiting for apiserver up: %v", err)
    }
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))

    // Create a new service and check if it's not reusing IP.
    defer func() {
@@ -1210,8 +1185,8 @@ var _ = SIGDescribe("Services", func() {
    if svc1IP == svc2IP {
        framework.Failf("VIPs conflict: %v", svc1IP)
    }
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))
})

/*
@@ -2590,11 +2565,6 @@ var _ = SIGDescribe("Services", func() {
})

ginkgo.It("should implement service.kubernetes.io/service-proxy-name", func() {
-     // this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP
-     e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-     // this test does not work if the Node does not support SSH Key
-     e2eskipper.SkipUnlessSSHKeyPresent()
-
    ns := f.Namespace.Name
    numPods, servicePort := 3, defaultServeHostnameServicePort
    serviceProxyNameLabels := map[string]string{"service.kubernetes.io/service-proxy-name": "foo-bar"}
@@ -2617,18 +2587,11 @@ var _ = SIGDescribe("Services", func() {

    jig := e2eservice.NewTestJig(cs, ns, svcToggled.ObjectMeta.Name)

-     hosts, err := e2essh.NodeSSHHosts(cs)
-     framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
-     if len(hosts) == 0 {
-         framework.Failf("No ssh-able nodes")
-     }
-     host := hosts[0]
-
    ginkgo.By("verifying service is up")
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podToggledNames, svcToggledIP, servicePort))

    ginkgo.By("verifying service-disabled is not up")
-     framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcDisabledIP, servicePort))

    ginkgo.By("adding service-proxy-name label")
    _, err = jig.UpdateService(func(svc *v1.Service) {
@@ -2637,7 +2600,7 @@ var _ = SIGDescribe("Services", func() {
    framework.ExpectNoError(err)

    ginkgo.By("verifying service is not up")
-     framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcToggledIP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcToggledIP, servicePort))

    ginkgo.By("removing service-proxy-name annotation")
    _, err = jig.UpdateService(func(svc *v1.Service) {
@@ -2646,18 +2609,13 @@ var _ = SIGDescribe("Services", func() {
    framework.ExpectNoError(err)

    ginkgo.By("verifying service is up")
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podToggledNames, svcToggledIP, servicePort))

    ginkgo.By("verifying service-disabled is still not up")
-     framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcDisabledIP, servicePort))
})

ginkgo.It("should implement service.kubernetes.io/headless", func() {
-     // this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP
-     e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-     // this test does not work if the Node does not support SSH Key
-     e2eskipper.SkipUnlessSSHKeyPresent()
-
    ns := f.Namespace.Name
    numPods, servicePort := 3, defaultServeHostnameServicePort
    serviceHeadlessLabels := map[string]string{v1.IsHeadlessService: ""}
@@ -2681,18 +2639,11 @@ var _ = SIGDescribe("Services", func() {

    jig := e2eservice.NewTestJig(cs, ns, svcHeadlessToggled.ObjectMeta.Name)

-     hosts, err := e2essh.NodeSSHHosts(cs)
-     framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
-     if len(hosts) == 0 {
-         framework.Failf("No ssh-able nodes")
-     }
-     host := hosts[0]
-
    ginkgo.By("verifying service is up")
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort))

    ginkgo.By("verifying service-headless is not up")
-     framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcHeadlessIP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcHeadlessIP, servicePort))

    ginkgo.By("adding service.kubernetes.io/headless label")
    _, err = jig.UpdateService(func(svc *v1.Service) {
@@ -2701,7 +2652,7 @@ var _ = SIGDescribe("Services", func() {
    framework.ExpectNoError(err)

    ginkgo.By("verifying service is not up")
-     framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcHeadlessToggledIP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcHeadlessToggledIP, servicePort))

    ginkgo.By("removing service.kubernetes.io/headless annotation")
    _, err = jig.UpdateService(func(svc *v1.Service) {
@@ -2710,10 +2661,10 @@ var _ = SIGDescribe("Services", func() {
    framework.ExpectNoError(err)

    ginkgo.By("verifying service is up")
-     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort))

    ginkgo.By("verifying service-headless is still not up")
-     framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcHeadlessIP, servicePort))
+     framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcHeadlessIP, servicePort))
})

ginkgo.It("should be rejected when no endpoints exist", func() {
@@ -3621,6 +3572,7 @@ func createPodOrFail(c clientset.Interface, ns, name string, labels map[string]s
// launchHostExecPod launches a hostexec pod in the given namespace and waits
// until it's Running
func launchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod {
+     framework.Logf("Creating new host exec pod")
    hostExecPod := e2epod.NewExecPodSpec(ns, name, true)
    pod, err := client.CoreV1().Pods(ns).Create(context.TODO(), hostExecPod, metav1.CreateOptions{})
    framework.ExpectNoError(err)
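As a hedged sketch of the pattern this helper enables (the pod name and surrounding variables below are illustrative, assuming the same file-scope imports used above): the host-network pod stands in for the old SSH session, and commands run inside it via the same kubectl-exec helper used elsewhere in this change.

    hostExecPod := launchHostExecPod(c, ns, "example-host-exec-pod")
    defer e2epod.DeletePodOrFail(c, ns, hostExecPod.Name)
    // Because the pod runs with host networking, this command sees the node's
    // network namespace, much like the removed e2essh.SSH calls did.
    serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
    output, err := framework.RunHostCmd(ns, hostExecPod.Name, "wget -q -T 1 -O - http://"+serviceIPPort)
    if err != nil {
        framework.Logf("host-network check failed: %v, output: %q", err, output)
    }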