Skip to content

Commit a6e9953

Browse files
committed
Add test cases for Service with restartable init containers
* Add a test case to verify that specifying a port number as the target port of a Service works for restartable init containers
* Add a failing test case to verify that specifying a named port as the target port of a Service works for restartable init containers
1 parent cf480a3 commit a6e9953

File tree

1 file changed

+108
-0
lines changed

1 file changed

+108
-0
lines changed

test/e2e/network/service.go

Lines changed: 108 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4076,6 +4076,78 @@ var _ = common.SIGDescribe("Services", func() {
40764076
checkOneNodePort(hostExecPodNodeIP, true, v1.ServiceExternalTrafficPolicyLocal, deadline)
40774077
checkOneNodePort(thirdNodeIP, false, v1.ServiceExternalTrafficPolicyLocal, deadline)
40784078
})
4079+
4080+
ginkgo.It("should connect to the ports exposed by restartable init containers", func(ctx context.Context) {
4081+
serviceName := "sidecar-with-port"
4082+
ns := f.Namespace.Name
4083+
4084+
t := NewServerTest(cs, ns, serviceName)
4085+
defer func() {
4086+
defer ginkgo.GinkgoRecover()
4087+
errs := t.Cleanup()
4088+
if len(errs) != 0 {
4089+
framework.Failf("errors in cleanup: %v", errs)
4090+
}
4091+
}()
4092+
4093+
name := "sidecar-port"
4094+
port := int32(8080)
4095+
namedPort := "http-sidecar"
4096+
4097+
service := t.BuildServiceSpec()
4098+
service.Spec.Ports = []v1.ServicePort{
4099+
{
4100+
Name: namedPort,
4101+
Port: port,
4102+
TargetPort: intstr.FromInt(int(port)),
4103+
},
4104+
}
4105+
ports := []v1.ContainerPort{{Name: namedPort, ContainerPort: port, Protocol: v1.ProtocolTCP}}
4106+
args := []string{"netexec", fmt.Sprintf("--http-port=%d", port)}
4107+
createPodWithRestartableInitContainerOrFail(ctx, f, ns, name, t.Labels, ports, args...)
4108+
4109+
ginkgo.By(fmt.Sprintf("creating Service %v with selectors %v", service.Name, service.Spec.Selector))
4110+
service, err := t.CreateService(service)
4111+
framework.ExpectNoError(err)
4112+
4113+
checkServiceReachabilityFromExecPod(ctx, f.ClientSet, ns, service.Name, service.Spec.ClusterIP, port)
4114+
})
4115+
4116+
// Verifies that a Service whose target port is specified by name can route
// traffic to a port exposed by a restartable init container (sidecar).
ginkgo.It("should connect to the named ports exposed by restartable init containers", func(ctx context.Context) {
	ns := f.Namespace.Name
	serviceName := "sidecar-with-named-port"

	t := NewServerTest(cs, ns, serviceName)
	defer func() {
		defer ginkgo.GinkgoRecover()
		errs := t.Cleanup()
		if len(errs) != 0 {
			framework.Failf("errors in cleanup: %v", errs)
		}
	}()

	podName := "sidecar-port"
	portName := "http-sidecar"
	sidecarPort := int32(8080)

	// Here the Service resolves the target port by its name rather than its number.
	svc := t.BuildServiceSpec()
	svc.Spec.Ports = []v1.ServicePort{
		{
			Name:       portName,
			Port:       sidecarPort,
			TargetPort: intstr.FromString(portName),
		},
	}
	exposedPorts := []v1.ContainerPort{{Name: portName, ContainerPort: sidecarPort, Protocol: v1.ProtocolTCP}}
	agnhostArgs := []string{"netexec", fmt.Sprintf("--http-port=%d", sidecarPort)}
	createPodWithRestartableInitContainerOrFail(ctx, f, ns, podName, t.Labels, exposedPorts, agnhostArgs...)

	ginkgo.By(fmt.Sprintf("creating Service %v with selectors %v", svc.Name, svc.Spec.Selector))
	svc, err := t.CreateService(svc)
	framework.ExpectNoError(err)

	checkServiceReachabilityFromExecPod(ctx, f.ClientSet, ns, svc.Name, svc.Spec.ClusterIP, sidecarPort)
})
40794151
})
40804152

40814153
// execAffinityTestForSessionAffinityTimeout is a helper function that wrap the logic of
@@ -4325,6 +4397,18 @@ func createPodOrFail(ctx context.Context, f *framework.Framework, ns, name strin
43254397
e2epod.NewPodClient(f).CreateSync(ctx, pod)
43264398
}
43274399

4400+
// createPodWithRestartableInitContainerOrFail creates a pod with restartable init containers using the specified containerPorts.
4401+
func createPodWithRestartableInitContainerOrFail(ctx context.Context, f *framework.Framework, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort, args ...string) {
4402+
ginkgo.By(fmt.Sprintf("Creating pod %s with restartable init containers in namespace %s", name, ns))
4403+
pod := e2epod.NewAgnhostPod(ns, name, nil, nil, nil, "pause")
4404+
pod.ObjectMeta.Labels = labels
4405+
restartPolicyAlways := v1.ContainerRestartPolicyAlways
4406+
init := e2epod.NewAgnhostContainer(name, nil, containerPorts, args...)
4407+
init.RestartPolicy = &restartPolicyAlways
4408+
pod.Spec.InitContainers = []v1.Container{init}
4409+
e2epod.NewPodClient(f).CreateSync(ctx, pod)
4410+
}
4411+
43284412
// launchHostExecPod launches a hostexec pod in the given namespace and waits
43294413
// until it's Running. If avoidNode is non-nil, it will ensure that the pod doesn't
43304414
// land on that node.
@@ -4375,6 +4459,30 @@ func checkReachabilityFromPod(ctx context.Context, expectToBeReachable bool, tim
43754459
framework.ExpectNoError(err)
43764460
}
43774461

4462+
// checkServiceReachabilityFromExecPod creates a dedicated client pod, executes into it,
4463+
// and checks reachability to the specified target host and port.
4464+
func checkServiceReachabilityFromExecPod(ctx context.Context, client clientset.Interface, namespace, name, clusterIP string, port int32) {
4465+
// We avoid relying on DNS lookup with the service name here because
4466+
// we only want to test whether the named port is accessible from the service.
4467+
serverHost := net.JoinHostPort(clusterIP, strconv.Itoa(int(port)))
4468+
ginkgo.By("creating a dedicated client to send request to the http server " + serverHost)
4469+
execPod := e2epod.CreateExecPodOrFail(ctx, client, namespace, "execpod-", nil)
4470+
execPodName := execPod.Name
4471+
cmd := fmt.Sprintf("curl -q -s --connect-timeout 2 http://%s/", serverHost)
4472+
var stdout string
4473+
if pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, e2eservice.KubeProxyLagTimeout, true, func(ctx context.Context) (bool, error) {
4474+
var err error
4475+
stdout, err = e2eoutput.RunHostCmd(namespace, execPodName, cmd)
4476+
if err != nil {
4477+
framework.Logf("error trying to connect to service %s: %v, ... retrying", name, err)
4478+
return false, nil
4479+
}
4480+
return true, nil
4481+
}); pollErr != nil {
4482+
framework.Failf("connection to the Service %v within %v should be succeeded, stdout: %v", name, e2eservice.KubeProxyLagTimeout, stdout)
4483+
}
4484+
}
4485+
43784486
func validatePorts(ep, expectedEndpoints portsByPodUID) error {
43794487
if len(ep) != len(expectedEndpoints) {
43804488
// should not happen because we check this condition before

0 commit comments

Comments
 (0)