@@ -55,7 +55,7 @@ import (
"k8s.io/kubernetes/test/e2e/network/common"
admissionapi "k8s.io/pod-security-admission/api"
netutils "k8s.io/utils/net"
- utilpointer "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
@@ -248,7 +248,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
framework.ExpectNoError(err)

ginkgo.By("hitting the TCP service's LoadBalancer with no backends, no answer expected")
- testNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
+ testNotReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout)

ginkgo.By("Scaling the pods to 1")
err = tcpJig.Scale(ctx, 1)
@@ -272,7 +272,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
framework.ExpectNoError(err)

ginkgo.By("checking the TCP LoadBalancer is closed")
- testNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
+ testNotReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout)
})

f.It("should be able to change the type and ports of a UDP service", f.WithSlow(), func(ctx context.Context) {
@@ -340,7 +340,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
framework.ExpectNoError(err)

ginkgo.By("hitting the UDP service's LoadBalancer")
- testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+ testReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerLagTimeout)

// Change the services' node ports.
@@ -361,7 +361,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
framework.ExpectNoError(err)

ginkgo.By("hitting the UDP service's LoadBalancer")
- testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+ testReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerLagTimeout)

// Change the services' main ports.
@@ -389,14 +389,14 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
framework.ExpectNoError(err)

ginkgo.By("hitting the UDP service's LoadBalancer")
- testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
+ testReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerCreateTimeout)

ginkgo.By("Scaling the pods to 0")
err = udpJig.Scale(ctx, 0)
framework.ExpectNoError(err)

ginkgo.By("looking for ICMP REJECT on the UDP service's LoadBalancer")
- testRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
+ testRejectedUDP(ctx, udpIngressIP, svcPort, loadBalancerCreateTimeout)

ginkgo.By("Scaling the pods to 1")
err = udpJig.Scale(ctx, 1)
@@ -407,7 +407,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
framework.ExpectNoError(err)

ginkgo.By("hitting the UDP service's LoadBalancer")
- testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
+ testReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerCreateTimeout)

// Change the services back to ClusterIP.
@@ -424,7 +424,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
framework.ExpectNoError(err)

ginkgo.By("checking the UDP LoadBalancer is closed")
- testNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+ testNotReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerLagTimeout)
})

f.It("should only allow access from service loadbalancer source ranges", f.WithSlow(), func(ctx context.Context) {
@@ -475,8 +475,8 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
// as this may take significant amount of time, especially in large clusters.
// However, the information whether it was already programmed isn't achievable.
// So we're resolving it by using loadBalancerCreateTimeout that takes cluster size into account.
- checkReachabilityFromPod(true, loadBalancerCreateTimeout, namespace, acceptPod.Name, svcIP)
- checkReachabilityFromPod(false, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP)
+ checkReachabilityFromPod(ctx, true, loadBalancerCreateTimeout, namespace, acceptPod.Name, svcIP)
+ checkReachabilityFromPod(ctx, false, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP)

// Make sure dropPod is running. There are certain chances that the pod might be terminated due to unexpected reasons.
dropPod, err = cs.CoreV1().Pods(namespace).Get(ctx, dropPod.Name, metav1.GetOptions{})
@@ -495,8 +495,8 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
// significant amount of time, especially in large clusters.
// However, the information whether it was already programmed isn't achievable.
// So we're resolving it by using loadBalancerCreateTimeout that takes cluster size into account.
- checkReachabilityFromPod(false, loadBalancerCreateTimeout, namespace, acceptPod.Name, svcIP)
- checkReachabilityFromPod(true, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP)
+ checkReachabilityFromPod(ctx, false, loadBalancerCreateTimeout, namespace, acceptPod.Name, svcIP)
+ checkReachabilityFromPod(ctx, true, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP)

ginkgo.By("Delete LoadBalancerSourceRange field and check reachability")
_, err = jig.UpdateService(ctx, func(svc *v1.Service) {
@@ -507,8 +507,8 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
// significant amount of time, especially in large clusters.
// However, the information whether it was already programmed isn't achievable.
// So we're resolving it by using loadBalancerCreateTimeout that takes cluster size into account.
- checkReachabilityFromPod(true, loadBalancerCreateTimeout, namespace, acceptPod.Name, svcIP)
- checkReachabilityFromPod(true, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP)
+ checkReachabilityFromPod(ctx, true, loadBalancerCreateTimeout, namespace, acceptPod.Name, svcIP)
+ checkReachabilityFromPod(ctx, true, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP)
})

// [LinuxOnly]: Windows does not support session affinity.
@@ -626,7 +626,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
ginkgo.By("changing the TCP service to type=LoadBalancer")
_, err = tcpJig.UpdateService(ctx, func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeLoadBalancer
- s.Spec.AllocateLoadBalancerNodePorts = utilpointer.BoolPtr(false)
+ s.Spec.AllocateLoadBalancerNodePorts = ptr.To(false)
})
framework.ExpectNoError(err)
@@ -647,7 +647,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {

ginkgo.By("adding a TCP service's NodePort")
tcpService, err = tcpJig.UpdateService(ctx, func(s *v1.Service) {
- s.Spec.AllocateLoadBalancerNodePorts = utilpointer.BoolPtr(true)
+ s.Spec.AllocateLoadBalancerNodePorts = ptr.To(true)
})
framework.ExpectNoError(err)
tcpNodePort := int(tcpService.Spec.Ports[0].NodePort)
@@ -728,9 +728,9 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
framework.Logf("Failed to connect to: %s %d", udpIngressIP, port)
continue
}
- conn.SetDeadline(time.Now().Add(3 * time.Second))
+ _ = conn.SetDeadline(time.Now().Add(3 * time.Second))
framework.Logf("Connected successfully to: %s", raddr.String())
- conn.Write([]byte("hostname\n"))
+ _, _ = conn.Write([]byte("hostname\n"))
buff := make([]byte, 1024)
n, _, err := conn.ReadFrom(buff)
if err == nil {
@@ -739,7 +739,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
mu.Unlock()
framework.Logf("Connected successfully to hostname: %s", string(buff[:n]))
}
- conn.Close()
+ _ = conn.Close()
}
}()
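For context, the `_ =` assignments make the deliberately ignored return values explicit, so errcheck-style linters stay quiet on best-effort calls inside the probing goroutine. A minimal self-contained sketch of the same idiom (hypothetical address, not the test's own helper):

```go
package main

import (
	"net"
	"time"
)

func probeUDP(addr string) {
	conn, err := net.Dial("udp", addr)
	if err != nil {
		return
	}
	// Best-effort calls: errors are intentionally discarded, but the
	// blank assignments make that decision visible to linters.
	_ = conn.SetDeadline(time.Now().Add(3 * time.Second))
	_, _ = conn.Write([]byte("hostname\n"))
	_ = conn.Close()
}

func main() {
	probeUDP("127.0.0.1:9999") // hypothetical address
}
```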
@@ -760,7 +760,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
// 30 seconds by default.
// Based on the above check if the pod receives the traffic.
ginkgo.By("checking client pod connected to the backend 1 on Node " + nodes.Items[0].Name)
- if err := wait.PollImmediate(1*time.Second, loadBalancerLagTimeout, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, loadBalancerLagTimeout, true, func(ctx context.Context) (bool, error) {
mu.Lock()
defer mu.Unlock()
return hostnames.Has(serverPod1.Spec.Hostname), nil
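The deprecated `wait.PollImmediate(interval, timeout, fn)` is replaced throughout by `wait.PollUntilContextTimeout(ctx, interval, timeout, immediate, fn)`, whose condition function receives a context. A minimal sketch of the call shape, assuming only the `k8s.io/apimachinery/pkg/util/wait` package:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()

	// immediate=true mirrors PollImmediate: the condition runs once before
	// the first interval elapses.
	err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, true,
		func(ctx context.Context) (bool, error) {
			done := time.Now().Second()%2 == 0 // stand-in condition
			return done, nil
		})
	fmt.Println("poll finished:", err)
}
```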
@@ -786,7 +786,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
// Check that the second pod keeps receiving traffic
// UDP conntrack entries timeout is 30 sec by default
ginkgo.By("checking client pod connected to the backend 2 on Node " + nodes.Items[1].Name)
- if err := wait.PollImmediate(1*time.Second, loadBalancerLagTimeout, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, loadBalancerLagTimeout, true, func(ctx context.Context) (bool, error) {
mu.Lock()
defer mu.Unlock()
return hostnames.Has(serverPod2.Spec.Hostname), nil
@@ -860,9 +860,9 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
framework.Logf("Failed to connect to: %s %d", udpIngressIP, port)
continue
}
- conn.SetDeadline(time.Now().Add(3 * time.Second))
+ _ = conn.SetDeadline(time.Now().Add(3 * time.Second))
framework.Logf("Connected successfully to: %s", raddr.String())
- conn.Write([]byte("hostname\n"))
+ _, _ = conn.Write([]byte("hostname\n"))
buff := make([]byte, 1024)
n, _, err := conn.ReadFrom(buff)
if err == nil {
@@ -871,7 +871,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
mu.Unlock()
framework.Logf("Connected successfully to hostname: %s", string(buff[:n]))
}
- conn.Close()
+ _ = conn.Close()
}
}()
@@ -892,7 +892,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
// 30 seconds by default.
// Based on the above check if the pod receives the traffic.
ginkgo.By("checking client pod connected to the backend 1 on Node " + nodes.Items[0].Name)
- if err := wait.PollImmediate(1*time.Second, loadBalancerLagTimeout, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, loadBalancerLagTimeout, true, func(ctx context.Context) (bool, error) {
mu.Lock()
defer mu.Unlock()
return hostnames.Has(serverPod1.Spec.Hostname), nil
@@ -918,7 +918,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
// Check that the second pod keeps receiving traffic
// UDP conntrack entries timeout is 30 sec by default
ginkgo.By("checking client pod connected to the backend 2 on Node " + nodes.Items[0].Name)
- if err := wait.PollImmediate(1*time.Second, loadBalancerLagTimeout, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, loadBalancerLagTimeout, true, func(ctx context.Context) (bool, error) {
mu.Lock()
defer mu.Unlock()
return hostnames.Has(serverPod2.Spec.Hostname), nil
@@ -1181,7 +1181,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP", framework.WithSlow(), func() {
var srcIP string
loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, cs)
ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, pausePod.Name, pausePod.Spec.NodeName))
- if pollErr := wait.PollImmediate(framework.Poll, loadBalancerPropagationTimeout, func() (bool, error) {
+ if pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, loadBalancerPropagationTimeout, true, func(ctx context.Context) (bool, error) {
stdout, err := e2eoutput.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd)
if err != nil {
framework.Logf("got err: %v, retry until timeout", err)
@@ -1270,7 +1270,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP", framework.WithSlow(), func() {
for nodeName, nodeIP := range endpointNodeMap {
ginkgo.By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIP))
var body string
- pollFn := func() (bool, error) {
+ pollFn := func(ctx context.Context) (bool, error) {
// we expect connection failure here, but not other errors
resp, err := config.GetResponseFromTestContainer(ctx,
"http",
@@ -1288,7 +1288,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP", framework.WithSlow(), func() {
}
return false, nil
}
- if pollErr := wait.PollImmediate(framework.Poll, e2eservice.TestTimeout, pollFn); pollErr != nil {
+ if pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, e2eservice.TestTimeout, true, pollFn); pollErr != nil {
framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s",
nodeName, healthCheckNodePort, body)
}
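Because `pollFn` now has the signature `func(ctx context.Context) (bool, error)`, it satisfies `wait.ConditionWithContextFunc` and can be passed to `wait.PollUntilContextTimeout` without a wrapper. A tiny sketch of that compatibility, assuming only the `wait` package:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// A poll function that takes a context is assignable to
	// wait.ConditionWithContextFunc, so it can be handed over directly.
	pollFn := func(ctx context.Context) (bool, error) { return true, nil }

	err := wait.PollUntilContextTimeout(context.Background(),
		100*time.Millisecond, time.Second, true, pollFn)
	fmt.Println("err:", err)
}
```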
@@ -1297,7 +1297,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP", framework.WithSlow(), func() {
// Poll till kube-proxy re-adds the MASQUERADE rule on the node.
ginkgo.By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP))
var clientIP string
- pollErr := wait.PollImmediate(framework.Poll, 3*e2eservice.KubeProxyLagTimeout, func() (bool, error) {
+ pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, 3*e2eservice.KubeProxyLagTimeout, true, func(ctx context.Context) (bool, error) {
clientIPPort, err := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, path)
if err != nil {
return false, nil
@@ -1336,7 +1336,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP", framework.WithSlow(), func() {
})
framework.ExpectNoError(err)
loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, cs)
- pollErr = wait.PollImmediate(framework.PollShortTimeout, loadBalancerPropagationTimeout, func() (bool, error) {
+ pollErr = wait.PollUntilContextTimeout(ctx, framework.PollShortTimeout, loadBalancerPropagationTimeout, true, func(ctx context.Context) (bool, error) {
clientIPPort, err := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, path)
if err != nil {
return false, nil
@@ -1384,7 +1384,7 @@ func testRollingUpdateLBConnectivityDisruption(ctx context.Context, f *framework
},
}
ds.Spec.Template.Labels = labels
- ds.Spec.Template.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(gracePeriod)
+ ds.Spec.Template.Spec.TerminationGracePeriodSeconds = ptr.To(gracePeriod)

nodeNames := e2edaemonset.SchedulableNodes(ctx, cs, ds)
e2eskipper.SkipUnlessAtLeast(len(nodeNames), 2, "load-balancer rolling update test requires at least 2 schedulable nodes for the DaemonSet")
@@ -1447,7 +1447,7 @@ func testRollingUpdateLBConnectivityDisruption(ctx context.Context, f *framework
atomic.AddUint64(&networkErrors, 1)
return
}
- defer resp.Body.Close()
+ defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != http.StatusOK {
framework.Logf("Got bad status code: %d", resp.StatusCode)
atomic.AddUint64(&httpErrors, 1)
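Wrapping the deferred close in a function literal is the usual way to discard its error explicitly, since a bare `defer resp.Body.Close()` drops it silently and trips errcheck. A minimal sketch of the pattern against a hypothetical URL:

```go
package main

import (
	"fmt"
	"net/http"
)

func fetchStatus(url string) (int, error) {
	resp, err := http.Get(url)
	if err != nil {
		return 0, err
	}
	// Explicitly discard the Close error instead of hiding it behind a
	// bare `defer resp.Body.Close()`.
	defer func() { _ = resp.Body.Close() }()
	return resp.StatusCode, nil
}

func main() {
	code, err := fetchStatus("http://example.com/") // hypothetical endpoint
	fmt.Println(code, err)
}
```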
@@ -1470,16 +1470,16 @@ func testRollingUpdateLBConnectivityDisruption(ctx context.Context, f *framework
ginkgo.By("Triggering DaemonSet rolling update several times")
var previousTotalRequests uint64 = 0
var previousNetworkErrors uint64 = 0
- var previousHttpErrors uint64 = 0
+ var previousHTTPErrors uint64 = 0
for i := 1; i <= 5; i++ {
framework.Logf("Update daemon pods environment: [{\"name\":\"VERSION\",\"value\":\"%d\"}]", i)
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","env":[{"name":"VERSION","value":"%d"}]}]}}}}`, ds.Spec.Template.Spec.Containers[0].Name, i)
ds, err = cs.AppsV1().DaemonSets(ns).Patch(context.TODO(), name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
framework.ExpectNoError(err)

framework.Logf("Check that daemon pods are available on every node of the cluster with the updated environment.")
- err = wait.PollImmediate(framework.Poll, creationTimeout, func() (bool, error) {
- podList, err := cs.CoreV1().Pods(ds.Namespace).List(context.TODO(), metav1.ListOptions{})
+ err = wait.PollUntilContextTimeout(ctx, framework.Poll, creationTimeout, true, func(ctx context.Context) (bool, error) {
+ podList, err := cs.CoreV1().Pods(ds.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
}
@@ -1517,24 +1517,24 @@ func testRollingUpdateLBConnectivityDisruption(ctx context.Context, f *framework
// assert that the HTTP requests success rate is above the acceptable threshold after this rolling update
currentTotalRequests := atomic.LoadUint64(&totalRequests)
currentNetworkErrors := atomic.LoadUint64(&networkErrors)
- currentHttpErrors := atomic.LoadUint64(&httpErrors)
+ currentHTTPErrors := atomic.LoadUint64(&httpErrors)

partialTotalRequests := currentTotalRequests - previousTotalRequests
partialNetworkErrors := currentNetworkErrors - previousNetworkErrors
- partialHttpErrors := currentHttpErrors - previousHttpErrors
- partialSuccessRate := (float64(partialTotalRequests) - float64(partialNetworkErrors+partialHttpErrors)) / float64(partialTotalRequests)
+ partialHTTPErrors := currentHTTPErrors - previousHTTPErrors
+ partialSuccessRate := (float64(partialTotalRequests) - float64(partialNetworkErrors+partialHTTPErrors)) / float64(partialTotalRequests)

framework.Logf("Load Balancer total HTTP requests: %d", partialTotalRequests)
framework.Logf("Network errors: %d", partialNetworkErrors)
- framework.Logf("HTTP errors: %d", partialHttpErrors)
+ framework.Logf("HTTP errors: %d", partialHTTPErrors)
framework.Logf("Success rate: %.2f%%", partialSuccessRate*100)
if partialSuccessRate < minSuccessRate {
framework.Failf("Encountered too many errors when doing HTTP requests to the load balancer address. Success rate is %.2f%%, and the minimum allowed threshold is %.2f%%.", partialSuccessRate*100, minSuccessRate*100)
}

previousTotalRequests = currentTotalRequests
previousNetworkErrors = currentNetworkErrors
- previousHttpErrors = currentHttpErrors
+ previousHTTPErrors = currentHTTPErrors
}

// assert that the load balancer address is still reachable after the rolling updates are finished
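For reference, the renamed counters feed the same per-iteration success-rate formula: successRate = (total - (networkErrors + httpErrors)) / total, computed over the requests made since the previous iteration. A small standalone check of the arithmetic with made-up counts:

```go
package main

import "fmt"

func main() {
	// Hypothetical per-iteration deltas.
	var partialTotalRequests uint64 = 1000
	var partialNetworkErrors uint64 = 5
	var partialHTTPErrors uint64 = 3

	successRate := (float64(partialTotalRequests) - float64(partialNetworkErrors+partialHTTPErrors)) / float64(partialTotalRequests)
	fmt.Printf("Success rate: %.2f%%\n", successRate*100) // 99.20%
}
```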