Skip to content

Commit beeb1d2

Browse files
authored
Merge pull request kubernetes#128850 from toVersus/fix/sidecar-container-named-port
Fix named ports of restartable init containers not propagating to EndpointSlice
2 parents 78ad9a7 + 4e21f53 commit beeb1d2

File tree

3 files changed

+202
-0
lines changed

3 files changed

+202
-0
lines changed

staging/src/k8s.io/endpointslice/utils.go

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -392,6 +392,17 @@ func findPort(pod *v1.Pod, svcPort *v1.ServicePort) (int, error) {
392392
}
393393
}
394394
}
395+
// also support sidecar container (initContainer with restartPolicy=Always)
396+
for _, container := range pod.Spec.InitContainers {
397+
if container.RestartPolicy == nil || *container.RestartPolicy != v1.ContainerRestartPolicyAlways {
398+
continue
399+
}
400+
for _, port := range container.Ports {
401+
if port.Name == name && port.Protocol == svcPort.Protocol {
402+
return int(port.ContainerPort), nil
403+
}
404+
}
405+
}
395406
case intstr.Int:
396407
return portName.IntValue(), nil
397408
}

staging/src/k8s.io/endpointslice/utils_test.go

Lines changed: 83 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -515,6 +515,7 @@ func TestServiceControllerKey(t *testing.T) {
515515

516516
func TestGetEndpointPorts(t *testing.T) {
517517
protoTCP := v1.ProtocolTCP
518+
restartPolicyAlways := v1.ContainerRestartPolicyAlways
518519

519520
testCases := map[string]struct {
520521
service *v1.Service
@@ -585,6 +586,88 @@ func TestGetEndpointPorts(t *testing.T) {
585586
AppProtocol: pointer.String("https"),
586587
}},
587588
},
589+
"service with named port for restartable init container": {
590+
service: &v1.Service{
591+
Spec: v1.ServiceSpec{
592+
Ports: []v1.ServicePort{{
593+
Name: "http-sidecar",
594+
Port: 8080,
595+
TargetPort: intstr.FromInt32(8080),
596+
Protocol: protoTCP,
597+
}, {
598+
Name: "http",
599+
Port: 8090,
600+
TargetPort: intstr.FromString("http"),
601+
Protocol: protoTCP,
602+
}},
603+
},
604+
},
605+
pod: &v1.Pod{
606+
Spec: v1.PodSpec{
607+
InitContainers: []v1.Container{{
608+
Ports: []v1.ContainerPort{{
609+
Name: "http-sidecar",
610+
ContainerPort: int32(8080),
611+
Protocol: protoTCP,
612+
}},
613+
RestartPolicy: &restartPolicyAlways,
614+
}},
615+
Containers: []v1.Container{{
616+
Ports: []v1.ContainerPort{{
617+
Name: "http",
618+
ContainerPort: int32(8090),
619+
Protocol: protoTCP,
620+
}},
621+
}},
622+
},
623+
},
624+
expectedPorts: []*discovery.EndpointPort{{
625+
Name: pointer.String("http-sidecar"),
626+
Port: pointer.Int32(8080),
627+
Protocol: &protoTCP,
628+
}, {
629+
Name: pointer.String("http"),
630+
Port: pointer.Int32(8090),
631+
Protocol: &protoTCP,
632+
}},
633+
},
634+
"service with same named port for regular and restartable init container": {
635+
service: &v1.Service{
636+
Spec: v1.ServiceSpec{
637+
Ports: []v1.ServicePort{
638+
{
639+
Name: "http",
640+
Port: 80,
641+
TargetPort: intstr.FromString("http"),
642+
Protocol: protoTCP,
643+
}},
644+
},
645+
},
646+
pod: &v1.Pod{
647+
Spec: v1.PodSpec{
648+
InitContainers: []v1.Container{{
649+
Ports: []v1.ContainerPort{{
650+
Name: "http",
651+
ContainerPort: int32(8080),
652+
Protocol: protoTCP,
653+
}},
654+
RestartPolicy: &restartPolicyAlways,
655+
}},
656+
Containers: []v1.Container{{
657+
Ports: []v1.ContainerPort{{
658+
Name: "http",
659+
ContainerPort: int32(8090),
660+
Protocol: protoTCP,
661+
}},
662+
}},
663+
},
664+
},
665+
expectedPorts: []*discovery.EndpointPort{{
666+
Name: pointer.String("http"),
667+
Port: pointer.Int32(8090),
668+
Protocol: &protoTCP,
669+
}},
670+
},
588671
}
589672

590673
for name, tc := range testCases {

test/e2e/network/service.go

Lines changed: 108 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4133,6 +4133,78 @@ var _ = common.SIGDescribe("Services", func() {
41334133
checkOneNodePort(hostExecPodNodeIP, true, v1.ServiceExternalTrafficPolicyLocal, deadline)
41344134
checkOneNodePort(thirdNodeIP, false, v1.ServiceExternalTrafficPolicyLocal, deadline)
41354135
})
4136+
4137+
ginkgo.It("should connect to the ports exposed by restartable init containers", func(ctx context.Context) {
4138+
serviceName := "sidecar-with-port"
4139+
ns := f.Namespace.Name
4140+
4141+
t := NewServerTest(cs, ns, serviceName)
4142+
defer func() {
4143+
defer ginkgo.GinkgoRecover()
4144+
errs := t.Cleanup()
4145+
if len(errs) != 0 {
4146+
framework.Failf("errors in cleanup: %v", errs)
4147+
}
4148+
}()
4149+
4150+
name := "sidecar-port"
4151+
port := int32(8080)
4152+
namedPort := "http-sidecar"
4153+
4154+
service := t.BuildServiceSpec()
4155+
service.Spec.Ports = []v1.ServicePort{
4156+
{
4157+
Name: namedPort,
4158+
Port: port,
4159+
TargetPort: intstr.FromInt(int(port)),
4160+
},
4161+
}
4162+
ports := []v1.ContainerPort{{Name: namedPort, ContainerPort: port, Protocol: v1.ProtocolTCP}}
4163+
args := []string{"netexec", fmt.Sprintf("--http-port=%d", port)}
4164+
createPodWithRestartableInitContainerOrFail(ctx, f, ns, name, t.Labels, ports, args...)
4165+
4166+
ginkgo.By(fmt.Sprintf("creating Service %v with selectors %v", service.Name, service.Spec.Selector))
4167+
service, err := t.CreateService(service)
4168+
framework.ExpectNoError(err)
4169+
4170+
checkServiceReachabilityFromExecPod(ctx, f.ClientSet, ns, service.Name, service.Spec.ClusterIP, port)
4171+
})
4172+
4173+
ginkgo.It("should connect to the named ports exposed by restartable init containers", func(ctx context.Context) {
4174+
serviceName := "sidecar-with-named-port"
4175+
ns := f.Namespace.Name
4176+
4177+
t := NewServerTest(cs, ns, serviceName)
4178+
defer func() {
4179+
defer ginkgo.GinkgoRecover()
4180+
errs := t.Cleanup()
4181+
if len(errs) != 0 {
4182+
framework.Failf("errors in cleanup: %v", errs)
4183+
}
4184+
}()
4185+
4186+
name := "sidecar-port"
4187+
port := int32(8080)
4188+
namedPort := "http-sidecar"
4189+
4190+
service := t.BuildServiceSpec()
4191+
service.Spec.Ports = []v1.ServicePort{
4192+
{
4193+
Name: namedPort,
4194+
Port: port,
4195+
TargetPort: intstr.FromString(namedPort),
4196+
},
4197+
}
4198+
ports := []v1.ContainerPort{{Name: namedPort, ContainerPort: port, Protocol: v1.ProtocolTCP}}
4199+
args := []string{"netexec", fmt.Sprintf("--http-port=%d", port)}
4200+
createPodWithRestartableInitContainerOrFail(ctx, f, ns, name, t.Labels, ports, args...)
4201+
4202+
ginkgo.By(fmt.Sprintf("creating Service %v with selectors %v", service.Name, service.Spec.Selector))
4203+
service, err := t.CreateService(service)
4204+
framework.ExpectNoError(err)
4205+
4206+
checkServiceReachabilityFromExecPod(ctx, f.ClientSet, ns, service.Name, service.Spec.ClusterIP, port)
4207+
})
41364208
})
41374209

41384210
// execAffinityTestForSessionAffinityTimeout is a helper function that wrap the logic of
@@ -4382,6 +4454,18 @@ func createPodOrFail(ctx context.Context, f *framework.Framework, ns, name strin
43824454
e2epod.NewPodClient(f).CreateSync(ctx, pod)
43834455
}
43844456

4457+
// createPodWithRestartableInitContainerOrFail creates a pod with restartable init containers using the specified containerPorts.
4458+
func createPodWithRestartableInitContainerOrFail(ctx context.Context, f *framework.Framework, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort, args ...string) {
4459+
ginkgo.By(fmt.Sprintf("Creating pod %s with restartable init containers in namespace %s", name, ns))
4460+
pod := e2epod.NewAgnhostPod(ns, name, nil, nil, nil, "pause")
4461+
pod.ObjectMeta.Labels = labels
4462+
restartPolicyAlways := v1.ContainerRestartPolicyAlways
4463+
init := e2epod.NewAgnhostContainer(name, nil, containerPorts, args...)
4464+
init.RestartPolicy = &restartPolicyAlways
4465+
pod.Spec.InitContainers = []v1.Container{init}
4466+
e2epod.NewPodClient(f).CreateSync(ctx, pod)
4467+
}
4468+
43854469
// launchHostExecPod launches a hostexec pod in the given namespace and waits
43864470
// until it's Running. If avoidNode is non-nil, it will ensure that the pod doesn't
43874471
// land on that node.
@@ -4432,6 +4516,30 @@ func checkReachabilityFromPod(ctx context.Context, expectToBeReachable bool, tim
44324516
framework.ExpectNoError(err)
44334517
}
44344518

4519+
// checkServiceReachabilityFromExecPod creates a dedicated client pod, executes into it,
4520+
// and checks reachability to the specified target host and port.
4521+
func checkServiceReachabilityFromExecPod(ctx context.Context, client clientset.Interface, namespace, name, clusterIP string, port int32) {
4522+
// We avoid relying on DNS lookup with the service name here because
4523+
// we only want to test whether the named port is accessible from the service.
4524+
serverHost := net.JoinHostPort(clusterIP, strconv.Itoa(int(port)))
4525+
ginkgo.By("creating a dedicated client to send request to the http server " + serverHost)
4526+
execPod := e2epod.CreateExecPodOrFail(ctx, client, namespace, "execpod-", nil)
4527+
execPodName := execPod.Name
4528+
cmd := fmt.Sprintf("curl -q -s --connect-timeout 2 http://%s/", serverHost)
4529+
var stdout string
4530+
if pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, e2eservice.KubeProxyLagTimeout, true, func(ctx context.Context) (bool, error) {
4531+
var err error
4532+
stdout, err = e2eoutput.RunHostCmd(namespace, execPodName, cmd)
4533+
if err != nil {
4534+
framework.Logf("error trying to connect to service %s: %v, ... retrying", name, err)
4535+
return false, nil
4536+
}
4537+
return true, nil
4538+
}); pollErr != nil {
4539+
framework.Failf("connection to the Service %v within %v should be succeeded, stdout: %v", name, e2eservice.KubeProxyLagTimeout, stdout)
4540+
}
4541+
}
4542+
44354543
func validatePorts(ep, expectedEndpoints portsByPodUID) error {
44364544
if len(ep) != len(expectedEndpoints) {
44374545
// should not happen because we check this condition before

0 commit comments

Comments (0)