@@ -4133,6 +4133,78 @@ var _ = common.SIGDescribe("Services", func() {
		checkOneNodePort(hostExecPodNodeIP, true, v1.ServiceExternalTrafficPolicyLocal, deadline)
		checkOneNodePort(thirdNodeIP, false, v1.ServiceExternalTrafficPolicyLocal, deadline)
	})
+
+	ginkgo.It("should connect to the ports exposed by restartable init containers", func(ctx context.Context) {
+		serviceName := "sidecar-with-port"
+		ns := f.Namespace.Name
+
+		t := NewServerTest(cs, ns, serviceName)
+		defer func() {
+			defer ginkgo.GinkgoRecover()
+			errs := t.Cleanup()
+			if len(errs) != 0 {
+				framework.Failf("errors in cleanup: %v", errs)
+			}
+		}()
+
+		name := "sidecar-port"
+		port := int32(8080)
+		namedPort := "http-sidecar"
+
+		service := t.BuildServiceSpec()
+		service.Spec.Ports = []v1.ServicePort{
+			{
+				Name:       namedPort,
+				Port:       port,
+				TargetPort: intstr.FromInt(int(port)),
+			},
+		}
+		ports := []v1.ContainerPort{{Name: namedPort, ContainerPort: port, Protocol: v1.ProtocolTCP}}
+		args := []string{"netexec", fmt.Sprintf("--http-port=%d", port)}
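+		// Run the agnhost netexec HTTP server in a restartable init container
+		// (sidecar) listening on the port the Service targets.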
+		createPodWithRestartableInitContainerOrFail(ctx, f, ns, name, t.Labels, ports, args...)
+
+		ginkgo.By(fmt.Sprintf("creating Service %v with selectors %v", service.Name, service.Spec.Selector))
+		service, err := t.CreateService(service)
+		framework.ExpectNoError(err)
+
+		checkServiceReachabilityFromExecPod(ctx, f.ClientSet, ns, service.Name, service.Spec.ClusterIP, port)
+	})
+
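+	// Same flow as the test above, except the Service targets the container
+	// port by name (intstr.FromString) rather than by number, exercising
+	// named-port resolution for restartable init containers.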
+	ginkgo.It("should connect to the named ports exposed by restartable init containers", func(ctx context.Context) {
+		serviceName := "sidecar-with-named-port"
+		ns := f.Namespace.Name
+
+		t := NewServerTest(cs, ns, serviceName)
+		defer func() {
+			defer ginkgo.GinkgoRecover()
+			errs := t.Cleanup()
+			if len(errs) != 0 {
+				framework.Failf("errors in cleanup: %v", errs)
+			}
+		}()
+
+		name := "sidecar-port"
+		port := int32(8080)
+		namedPort := "http-sidecar"
+
+		service := t.BuildServiceSpec()
+		service.Spec.Ports = []v1.ServicePort{
+			{
+				Name:       namedPort,
+				Port:       port,
+				TargetPort: intstr.FromString(namedPort),
+			},
+		}
+		ports := []v1.ContainerPort{{Name: namedPort, ContainerPort: port, Protocol: v1.ProtocolTCP}}
+		args := []string{"netexec", fmt.Sprintf("--http-port=%d", port)}
+		createPodWithRestartableInitContainerOrFail(ctx, f, ns, name, t.Labels, ports, args...)
+
+		ginkgo.By(fmt.Sprintf("creating Service %v with selectors %v", service.Name, service.Spec.Selector))
+		service, err := t.CreateService(service)
+		framework.ExpectNoError(err)
+
+		checkServiceReachabilityFromExecPod(ctx, f.ClientSet, ns, service.Name, service.Spec.ClusterIP, port)
+	})
})

// execAffinityTestForSessionAffinityTimeout is a helper function that wraps the logic of
@@ -4382,6 +4454,18 @@ func createPodOrFail(ctx context.Context, f *framework.Framework, ns, name strin
	e2epod.NewPodClient(f).CreateSync(ctx, pod)
}

+// createPodWithRestartableInitContainerOrFail creates a pod with restartable init containers using the specified containerPorts.
+func createPodWithRestartableInitContainerOrFail(ctx context.Context, f *framework.Framework, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort, args ...string) {
+	ginkgo.By(fmt.Sprintf("Creating pod %s with restartable init containers in namespace %s", name, ns))
+	pod := e2epod.NewAgnhostPod(ns, name, nil, nil, nil, "pause")
+	pod.ObjectMeta.Labels = labels
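+	// Mark the init container restartable (i.e. a sidecar) so it keeps running
+	// and serving on containerPorts for the pod's lifetime; the pod's regular
+	// container only runs "pause".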
+	restartPolicyAlways := v1.ContainerRestartPolicyAlways
+	init := e2epod.NewAgnhostContainer(name, nil, containerPorts, args...)
+	init.RestartPolicy = &restartPolicyAlways
+	pod.Spec.InitContainers = []v1.Container{init}
+	e2epod.NewPodClient(f).CreateSync(ctx, pod)
+}
+
// launchHostExecPod launches a hostexec pod in the given namespace and waits
// until it's Running. If avoidNode is non-nil, it will ensure that the pod doesn't
// land on that node.
@@ -4432,6 +4516,30 @@ func checkReachabilityFromPod(ctx context.Context, expectToBeReachable bool, tim
	framework.ExpectNoError(err)
}

+// checkServiceReachabilityFromExecPod creates a dedicated client pod, executes into it,
+// and checks reachability to the specified target host and port.
+func checkServiceReachabilityFromExecPod(ctx context.Context, client clientset.Interface, namespace, name, clusterIP string, port int32) {
+	// We avoid relying on DNS lookup with the service name here because
+	// we only want to test whether the named port is accessible from the service.
+	serverHost := net.JoinHostPort(clusterIP, strconv.Itoa(int(port)))
+	ginkgo.By("creating a dedicated client to send request to the http server " + serverHost)
+	execPod := e2epod.CreateExecPodOrFail(ctx, client, namespace, "execpod-", nil)
+	execPodName := execPod.Name
+	cmd := fmt.Sprintf("curl -q -s --connect-timeout 2 http://%s/", serverHost)
+	var stdout string
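+	// The Service may not be reachable immediately after creation, so retry
+	// until kube-proxy has had time to program its rules (KubeProxyLagTimeout).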
+	if pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, e2eservice.KubeProxyLagTimeout, true, func(ctx context.Context) (bool, error) {
+		var err error
+		stdout, err = e2eoutput.RunHostCmd(namespace, execPodName, cmd)
+		if err != nil {
+			framework.Logf("error trying to connect to service %s: %v, retrying...", name, err)
+			return false, nil
+		}
+		return true, nil
+	}); pollErr != nil {
+		framework.Failf("connection to the Service %v within %v should have succeeded, stdout: %v", name, e2eservice.KubeProxyLagTimeout, stdout)
+	}
+}
+
func validatePorts(ep, expectedEndpoints portsByPodUID) error {
	if len(ep) != len(expectedEndpoints) {
		// should not happen because we check this condition before