Commit 9646d59

Merge pull request kubernetes#94822 from JornShen/replace_e2essh_e2e_service_tests
Replace e2essh in the e2e service tests
2 parents cd4ee6b + 40474d7 commit 9646d59
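In short, the two verification helpers lose their SSH `host` parameter: instead of wgetting from an SSH-able node, they now probe the service from a host-network exec pod and a pod-network exec pod. A sketch of the call-site change, with arguments as they appear in the diff below:

	// before: needed an SSH-able node picked via e2essh.NodeSSHHosts
	framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames, svcIP, servicePort))
	framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcIP, servicePort))

	// after: only the namespace is needed; probes run from exec pods
	framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames, svcIP, servicePort))
	framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcIP, servicePort))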

File tree

2 files changed: +64 additions, -112 deletions


test/e2e/network/networking.go

Lines changed: 1 addition & 1 deletion
@@ -491,7 +491,7 @@ var _ = SIGDescribe("Networking", func() {
 		}
 
 		ginkgo.By("verifying that kube-proxy rules are eventually recreated")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(f.ClientSet, ns, host, podNames, svcIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(f.ClientSet, ns, podNames, svcIP, servicePort))
 
 		ginkgo.By("verifying that kubelet rules are eventually recreated")
 		err = utilwait.PollImmediate(framework.Poll, framework.RestartNodeReadyAgainTimeout, func() (bool, error) {

test/e2e/network/service.go

Lines changed: 63 additions & 111 deletions
@@ -303,56 +303,44 @@ func StopServeHostnameService(clientset clientset.Interface, ns, name string) er
 }
 
 // verifyServeHostnameServiceUp wgets the given serviceIP:servicePort from the
-// given host and from within a pod. The host is expected to be an SSH-able node
-// in the cluster. Each pod in the service is expected to echo its name. These
-// names are compared with the given expectedPods list after a sort | uniq.
-func verifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expectedPods []string, serviceIP string, servicePort int) error {
-	execPod := e2epod.CreateExecPodOrFail(c, ns, "execpod-", nil)
+// the host exec pod of host network type and from the exec pod of container network type.
+// Each pod in the service is expected to echo its name. These names are compared with the
+// given expectedPods list after a sort | uniq.
+func verifyServeHostnameServiceUp(c clientset.Interface, ns string, expectedPods []string, serviceIP string, servicePort int) error {
+	// to verify from host network
+	hostExecPod := launchHostExecPod(c, ns, "verify-service-up-host-exec-pod")
+
+	// to verify from container's network
+	execPod := e2epod.CreateExecPodOrFail(c, ns, "verify-service-up-exec-pod-", nil)
 	defer func() {
+		e2epod.DeletePodOrFail(c, ns, hostExecPod.Name)
 		e2epod.DeletePodOrFail(c, ns, execPod.Name)
 	}()
 
-	// Loop a bunch of times - the proxy is randomized, so we want a good
-	// chance of hitting each backend at least once.
-	buildCommand := func(wget string) string {
+	// verify service from pod
+	cmdFunc := func(podName string) string {
+		wgetCmd := "wget -q -T 1 -O -"
 		serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
-		return fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s 2>&1 || true; echo; done",
-			50*len(expectedPods), wget, serviceIPPort)
-	}
-	commands := []func() string{
-		// verify service from node
-		func() string {
-			cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -")
-			framework.Logf("Executing cmd %q on host %v", cmd, host)
-			result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
-			if err != nil || result.Code != 0 {
-				e2essh.LogResult(result)
-				framework.Logf("error while SSH-ing to node: %v", err)
-			}
-			return result.Stdout
-		},
-		// verify service from pod
-		func() string {
-			cmd := buildCommand("wget -q -T 1 -O -")
-			framework.Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPod.Name)
-			// TODO: Use exec-over-http via the netexec pod instead of kubectl exec.
-			output, err := framework.RunHostCmd(ns, execPod.Name, cmd)
-			if err != nil {
-				framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPod.Name, err, output)
-			}
-			return output
-		},
+		cmd := fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s 2>&1 || true; echo; done",
+			50*len(expectedPods), wgetCmd, serviceIPPort)
+		framework.Logf("Executing cmd %q in pod %v/%v", cmd, ns, podName)
+		// TODO: Use exec-over-http via the netexec pod instead of kubectl exec.
+		output, err := framework.RunHostCmd(ns, podName, cmd)
+		if err != nil {
+			framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, podName, err, output)
+		}
+		return output
 	}
 
 	expectedEndpoints := sets.NewString(expectedPods...)
 	ginkgo.By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods)))
-	for _, cmdFunc := range commands {
+	for _, podName := range []string{hostExecPod.Name, execPod.Name} {
 		passed := false
 		gotEndpoints := sets.NewString()
 
 		// Retry cmdFunc for a while
		for start := time.Now(); time.Since(start) < e2eservice.KubeProxyLagTimeout; time.Sleep(5 * time.Second) {
-			for _, endpoint := range strings.Split(cmdFunc(), "\n") {
+			for _, endpoint := range strings.Split(cmdFunc(podName), "\n") {
 				trimmedEp := strings.TrimSpace(endpoint)
 				if trimmedEp != "" {
 					gotEndpoints.Insert(trimmedEp)
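For orientation, the probe loop that cmdFunc builds can be reproduced outside the test. The sketch below uses made-up values (10.0.0.10, port 80, three expected pods) and only demonstrates the command string construction, not the framework call:

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	serviceIP, servicePort, expectedPods := "10.0.0.10", 80, 3
	wgetCmd := "wget -q -T 1 -O -"
	serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
	// 50 probes per expected pod, so each backend is very likely hit at least once.
	cmd := fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s 2>&1 || true; echo; done",
		50*expectedPods, wgetCmd, serviceIPPort)
	fmt.Println(cmd)
	// Each probe prints the name of the pod that answered; the test collects the
	// unique names and compares them against expectedPods.
}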
@@ -384,25 +372,32 @@ func verifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expect
 }
 
 // verifyServeHostnameServiceDown verifies that the given service isn't served.
-func verifyServeHostnameServiceDown(c clientset.Interface, host string, serviceIP string, servicePort int) error {
+func verifyServeHostnameServiceDown(c clientset.Interface, ns string, serviceIP string, servicePort int) error {
+	// verify from host network
+	hostExecPod := launchHostExecPod(c, ns, "verify-service-down-host-exec-pod")
+
+	defer func() {
+		e2epod.DeletePodOrFail(c, ns, hostExecPod.Name)
+	}()
+
 	ipPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
 	// The current versions of curl included in CentOS and RHEL distros
 	// misinterpret square brackets around IPv6 as globbing, so use the -g
 	// argument to disable globbing to handle the IPv6 case.
 	command := fmt.Sprintf(
-		"curl -g -s --connect-timeout 2 http://%s && exit 99", ipPort)
+		"curl -g -s --connect-timeout 2 http://%s && echo service-down-failed", ipPort)
 
-	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
-		result, err := e2essh.SSH(command, host, framework.TestContext.Provider)
+	for start := time.Now(); time.Since(start) < e2eservice.KubeProxyLagTimeout; time.Sleep(5 * time.Second) {
+		output, err := framework.RunHostCmd(ns, hostExecPod.Name, command)
 		if err != nil {
-			e2essh.LogResult(result)
-			framework.Logf("error while SSH-ing to node: %v", err)
+			framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", command, ns, hostExecPod.Name, err, output)
 		}
-		if result.Code != 99 {
+		if !strings.Contains(output, "service-down-failed") {
 			return nil
 		}
 		framework.Logf("service still alive - still waiting")
 	}
+
 	return fmt.Errorf("waiting for service to be down timed out")
 }
 

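The down-check above replaces the old SSH exit-code trick (exit 99) with a marker string: curl only prints service-down-failed when it actually connects, so the helper treats the marker's presence as "service still up". A standalone sketch of that decision logic, with illustrative output strings:

package main

import (
	"fmt"
	"strings"
)

// stillServed mirrors the check in verifyServeHostnameServiceDown: the marker
// is only echoed when curl managed to reach the service.
func stillServed(output string) bool {
	return strings.Contains(output, "service-down-failed")
}

func main() {
	fmt.Println(stillServed("service-down-failed")) // true: curl connected, keep waiting
	fmt.Println(stillServed(""))                    // false: connection failed, service is down
}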
@@ -1060,12 +1055,6 @@ var _ = SIGDescribe("Services", func() {
 	})
 
 	ginkgo.It("should be able to up and down services", func() {
-		// TODO: use the ServiceTestJig here
-		// this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP
-		e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-		// this test does not work if the Node does not support SSH Key
-		e2eskipper.SkipUnlessSSHKeyPresent()
-
 		ns := f.Namespace.Name
 		numPods, servicePort := 3, defaultServeHostnameServicePort
 
@@ -1080,27 +1069,20 @@ var _ = SIGDescribe("Services", func() {
 		podNames2, svc2IP, err := StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods)
 		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns)
 
-		hosts, err := e2essh.NodeSSHHosts(cs)
-		framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
-		if len(hosts) == 0 {
-			framework.Failf("No ssh-able nodes")
-		}
-		host := hosts[0]
-
 		ginkgo.By("verifying service " + svc1 + " is up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
 
 		ginkgo.By("verifying service " + svc2 + " is up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))
 
 		// Stop service 1 and make sure it is gone.
 		ginkgo.By("stopping service " + svc1)
 		framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, svc1))
 
 		ginkgo.By("verifying service " + svc1 + " is not up")
-		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svc1IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svc1IP, servicePort))
 		ginkgo.By("verifying service " + svc2 + " is still up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))
 
 		// Start another service and verify both are up.
 		ginkgo.By("creating service " + svc3 + " in namespace " + ns)
@@ -1112,10 +1094,10 @@ var _ = SIGDescribe("Services", func() {
 		}
 
 		ginkgo.By("verifying service " + svc2 + " is still up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))
 
 		ginkgo.By("verifying service " + svc3 + " is up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames3, svc3IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames3, svc3IP, servicePort))
 	})
 
 	ginkgo.It("should work after restarting kube-proxy [Disruptive]", func() {
@@ -1152,15 +1134,15 @@ var _ = SIGDescribe("Services", func() {
 		}
 		host := hosts[0]
 
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))
 
 		ginkgo.By(fmt.Sprintf("Restarting kube-proxy on %v", host))
 		if err := restartKubeProxy(host); err != nil {
 			framework.Failf("error restarting kube-proxy: %v", err)
 		}
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))
 	})
 
 	ginkgo.It("should work after restarting apiserver [Disruptive]", func() {
@@ -1180,14 +1162,7 @@ var _ = SIGDescribe("Services", func() {
 		podNames1, svc1IP, err := StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods)
 		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns)
 
-		hosts, err := e2essh.NodeSSHHosts(cs)
-		framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
-		if len(hosts) == 0 {
-			framework.Failf("No ssh-able nodes")
-		}
-		host := hosts[0]
-
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
 
 		// Restart apiserver
 		ginkgo.By("Restarting apiserver")
@@ -1198,7 +1173,7 @@ var _ = SIGDescribe("Services", func() {
 		if err := waitForApiserverUp(cs); err != nil {
 			framework.Failf("error while waiting for apiserver up: %v", err)
 		}
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
 
 		// Create a new service and check if it's not reusing IP.
 		defer func() {
@@ -1210,8 +1185,8 @@ var _ = SIGDescribe("Services", func() {
 		if svc1IP == svc2IP {
 			framework.Failf("VIPs conflict: %v", svc1IP)
 		}
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort))
 	})
 
 	/*
@@ -2590,11 +2565,6 @@ var _ = SIGDescribe("Services", func() {
 	})
 
 	ginkgo.It("should implement service.kubernetes.io/service-proxy-name", func() {
-		// this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP
-		e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-		// this test does not work if the Node does not support SSH Key
-		e2eskipper.SkipUnlessSSHKeyPresent()
-
 		ns := f.Namespace.Name
 		numPods, servicePort := 3, defaultServeHostnameServicePort
 		serviceProxyNameLabels := map[string]string{"service.kubernetes.io/service-proxy-name": "foo-bar"}
@@ -2617,18 +2587,11 @@ var _ = SIGDescribe("Services", func() {
 
 		jig := e2eservice.NewTestJig(cs, ns, svcToggled.ObjectMeta.Name)
 
-		hosts, err := e2essh.NodeSSHHosts(cs)
-		framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
-		if len(hosts) == 0 {
-			framework.Failf("No ssh-able nodes")
-		}
-		host := hosts[0]
-
 		ginkgo.By("verifying service is up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podToggledNames, svcToggledIP, servicePort))
 
 		ginkgo.By("verifying service-disabled is not up")
-		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcDisabledIP, servicePort))
 
 		ginkgo.By("adding service-proxy-name label")
 		_, err = jig.UpdateService(func(svc *v1.Service) {
@@ -2637,7 +2600,7 @@ var _ = SIGDescribe("Services", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("verifying service is not up")
-		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcToggledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcToggledIP, servicePort))
 
 		ginkgo.By("removing service-proxy-name annotation")
 		_, err = jig.UpdateService(func(svc *v1.Service) {
@@ -2646,18 +2609,13 @@ var _ = SIGDescribe("Services", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("verifying service is up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podToggledNames, svcToggledIP, servicePort))
 
 		ginkgo.By("verifying service-disabled is still not up")
-		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcDisabledIP, servicePort))
 	})
 
 	ginkgo.It("should implement service.kubernetes.io/headless", func() {
-		// this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP
-		e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-		// this test does not work if the Node does not support SSH Key
-		e2eskipper.SkipUnlessSSHKeyPresent()
-
 		ns := f.Namespace.Name
 		numPods, servicePort := 3, defaultServeHostnameServicePort
 		serviceHeadlessLabels := map[string]string{v1.IsHeadlessService: ""}
@@ -2681,18 +2639,11 @@ var _ = SIGDescribe("Services", func() {
 
 		jig := e2eservice.NewTestJig(cs, ns, svcHeadlessToggled.ObjectMeta.Name)
 
-		hosts, err := e2essh.NodeSSHHosts(cs)
-		framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
-		if len(hosts) == 0 {
-			framework.Failf("No ssh-able nodes")
-		}
-		host := hosts[0]
-
 		ginkgo.By("verifying service is up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort))
 
 		ginkgo.By("verifying service-headless is not up")
-		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcHeadlessIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcHeadlessIP, servicePort))
 
 		ginkgo.By("adding service.kubernetes.io/headless label")
 		_, err = jig.UpdateService(func(svc *v1.Service) {
@@ -2701,7 +2652,7 @@ var _ = SIGDescribe("Services", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("verifying service is not up")
-		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcHeadlessToggledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcHeadlessToggledIP, servicePort))
 
 		ginkgo.By("removing service.kubernetes.io/headless annotation")
 		_, err = jig.UpdateService(func(svc *v1.Service) {
@@ -2710,10 +2661,10 @@ var _ = SIGDescribe("Services", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("verifying service is up")
-		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort))
 
 		ginkgo.By("verifying service-headless is still not up")
-		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svcHeadlessIP, servicePort))
+		framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcHeadlessIP, servicePort))
 	})
 
 	ginkgo.It("should be rejected when no endpoints exist", func() {
@@ -3621,6 +3572,7 @@ func createPodOrFail(c clientset.Interface, ns, name string, labels map[string]s
 // launchHostExecPod launches a hostexec pod in the given namespace and waits
 // until it's Running
 func launchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod {
+	framework.Logf("Creating new host exec pod")
 	hostExecPod := e2epod.NewExecPodSpec(ns, name, true)
 	pod, err := client.CoreV1().Pods(ns).Create(context.TODO(), hostExecPod, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
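For context, launchHostExecPod (which, per the diff's comments, creates an exec pod on the host network, presumably the true argument of e2epod.NewExecPodSpec) pairs with e2epod.DeletePodOrFail in the verify helpers above. A condensed restatement of that pattern, not new behavior beyond what this PR introduces:

	// host-network probe pod and pod-network probe pod, both cleaned up afterwards
	hostExecPod := launchHostExecPod(c, ns, "verify-service-up-host-exec-pod")
	execPod := e2epod.CreateExecPodOrFail(c, ns, "verify-service-up-exec-pod-", nil)
	defer func() {
		e2epod.DeletePodOrFail(c, ns, hostExecPod.Name)
		e2epod.DeletePodOrFail(c, ns, execPod.Name)
	}()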
