@@ -25,22 +25,25 @@ import (
 	"strings"
 	"time"
 
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
-
+	"k8s.io/kubernetes/pkg/cluster/ports"
+	"k8s.io/kubernetes/pkg/proxy/apis/config"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
-	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
+	e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/network/common"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	admissionapi "k8s.io/pod-security-admission/api"
 	netutils "k8s.io/utils/net"
-
-	"github.com/onsi/ginkgo/v2"
-	"github.com/onsi/gomega"
 )
 
 var kubeProxyE2eImage = imageutils.GetE2EImage(imageutils.Agnhost)
@@ -220,7 +223,7 @@ var _ = common.SIGDescribe("KubeProxy", func() {
 			"| grep -m 1 'CLOSE_WAIT.*dport=%v' ",
 			ipFamily, ip, testDaemonTCPPort)
 		if err := wait.PollImmediate(2*time.Second, epsilonSeconds*time.Second, func() (bool, error) {
-			result, err := e2eoutput.RunHostCmd(fr.Namespace.Name, "e2e-net-exec", cmd)
+			result, err := e2epodoutput.RunHostCmd(fr.Namespace.Name, "e2e-net-exec", cmd)
 			// retry if we can't obtain the conntrack entry
 			if err != nil {
 				framework.Logf("failed to obtain conntrack entry: %v %v", result, err)
@@ -242,7 +245,7 @@ var _ = common.SIGDescribe("KubeProxy", func() {
 			return false, fmt.Errorf("wrong TCP CLOSE_WAIT timeout: %v expected: %v", timeoutSeconds, expectedTimeoutSeconds)
 		}); err != nil {
 			// Dump all conntrack entries for debugging
-			result, err2 := e2eoutput.RunHostCmd(fr.Namespace.Name, "e2e-net-exec", "conntrack -L")
+			result, err2 := e2epodoutput.RunHostCmd(fr.Namespace.Name, "e2e-net-exec", "conntrack -L")
 			if err2 != nil {
 				framework.Logf("failed to obtain conntrack entry: %v %v", result, err2)
 			}
@@ -251,4 +254,115 @@ var _ = common.SIGDescribe("KubeProxy", func() {
 		}
 	})
 
+	ginkgo.It("should update metric for tracking accepted packets destined for localhost nodeports", func(ctx context.Context) {
+		if framework.TestContext.ClusterIsIPv6() {
+			e2eskipper.Skipf("test requires IPv4 cluster")
+		}
+
+		cs := fr.ClientSet
+		ns := fr.Namespace.Name
+
+		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 1)
+		framework.ExpectNoError(err)
+		if len(nodes.Items) < 1 {
+			e2eskipper.Skipf(
+				"Test requires >= 1 Ready nodes, but there are only %v nodes",
+				len(nodes.Items))
+		}
+		nodeName := nodes.Items[0].Name
+
+		metricName := "kubeproxy_iptables_localhost_nodeports_accepted_packets_total"
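+		// all optional component collectors are disabled; this grabber is only used for kube-proxy metrics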
+		metricsGrabber, err := e2emetrics.NewMetricsGrabber(ctx, fr.ClientSet, nil, fr.ClientConfig(), false, false, false, false, false, false)
+		framework.ExpectNoError(err)
+
+		// create a pod with host-network for execing
+		hostExecPodName := "host-exec-pod"
+		hostExecPod := e2epod.NewExecPodSpec(fr.Namespace.Name, hostExecPodName, true)
+		nodeSelection := e2epod.NodeSelection{Name: nodeName}
+		e2epod.SetNodeSelection(&hostExecPod.Spec, nodeSelection)
+		e2epod.NewPodClient(fr).CreateSync(ctx, hostExecPod)
+
+		// get proxyMode
+		stdout, err := e2epodoutput.RunHostCmd(fr.Namespace.Name, hostExecPodName, fmt.Sprintf("curl --silent 127.0.0.1:%d/proxyMode", ports.ProxyStatusPort))
+		framework.ExpectNoError(err)
+		proxyMode := strings.TrimSpace(stdout)
+
+		// get value of route_localnet
+		stdout, err = e2epodoutput.RunHostCmd(fr.Namespace.Name, hostExecPodName, "cat /proc/sys/net/ipv4/conf/all/route_localnet")
+		framework.ExpectNoError(err)
+		routeLocalnet := strings.TrimSpace(stdout)
+
+		if !(proxyMode == string(config.ProxyModeIPTables) && routeLocalnet == "1") {
+			e2eskipper.Skipf("test requires iptables proxy mode with route_localnet set")
+		}
+
+		// get value of target metric before accessing localhost nodeports
+		metrics, err := metricsGrabber.GrabFromKubeProxy(ctx, nodeName)
+		framework.ExpectNoError(err)
+		targetMetricBefore := metrics.GetCounterMetricValue(metricName)
+
+		// create pod
+		ginkgo.By("creating test pod")
+		label := map[string]string{
+			"app": "agnhost-localhost-nodeports",
+		}
+		httpPort := []v1.ContainerPort{
+			{
+				ContainerPort: 8080,
+				Protocol:      v1.ProtocolTCP,
+			},
+		}
+		pod := e2epod.NewAgnhostPod(ns, "agnhost-localhost-nodeports", nil, nil, httpPort, "netexec")
+		pod.Labels = label
+		e2epod.NewPodClient(fr).CreateSync(ctx, pod)
+
+		// create nodeport service
+		ginkgo.By("creating test nodeport service")
+		svc := &v1.Service{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "agnhost-localhost-nodeports",
+			},
+			Spec: v1.ServiceSpec{
+				Type:     v1.ServiceTypeNodePort,
+				Selector: label,
+				Ports: []v1.ServicePort{
+					{
+						Protocol: v1.ProtocolTCP,
+						Port:     9000,
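+						// Type 0 is intstr.Int: target container port 8080 by number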
+						TargetPort: intstr.IntOrString{Type: 0, IntVal: 8080},
+					},
+				},
+			},
+		}
+		svc, err = fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(ctx, svc, metav1.CreateOptions{})
+		framework.ExpectNoError(err)
+
+		// wait for endpoints update
+		ginkgo.By("waiting for endpoints to be updated")
+		err = framework.WaitForServiceEndpointsNum(ctx, fr.ClientSet, ns, svc.Name, 1, time.Second, wait.ForeverTestTimeout)
+		framework.ExpectNoError(err)
+
+		ginkgo.By("accessing endpoint via localhost nodeports 10 times")
+		for i := 0; i < 10; i++ {
+			if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, true, func(_ context.Context) (bool, error) {
+				_, err = e2epodoutput.RunHostCmd(fr.Namespace.Name, hostExecPodName, fmt.Sprintf("curl --silent http://localhost:%d/hostname", svc.Spec.Ports[0].NodePort))
+				if err != nil {
+					return false, nil
+				}
+				return true, nil
+			}); err != nil {
+				e2eskipper.Skipf("skipping test as localhost nodeports are not accessible in this environment")
+			}
+		}
+
+		// our target metric should be updated by now
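+		// (the counter may lag behind the requests we just made, hence the polling)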
+		if err := wait.PollUntilContextTimeout(ctx, 10*time.Second, 2*time.Minute, true, func(_ context.Context) (bool, error) {
+			metrics, err := metricsGrabber.GrabFromKubeProxy(ctx, nodeName)
+			framework.ExpectNoError(err)
+			targetMetricAfter := metrics.GetCounterMetricValue(metricName)
+			return targetMetricAfter > targetMetricBefore, nil
+		}); err != nil {
+			framework.Failf("expected %s metric to be updated after accessing endpoints via localhost nodeports", metricName)
+		}
+	})
 })