
Commit 582a495

Update the "new" code to pass current lint
1 parent b421bde commit 582a495

File tree

2 files changed: +66 -66 lines changed


test/e2e/network/loadbalancer.go

Lines changed: 42 additions & 42 deletions
@@ -55,7 +55,7 @@ import (
 "k8s.io/kubernetes/test/e2e/network/common"
 admissionapi "k8s.io/pod-security-admission/api"
 netutils "k8s.io/utils/net"
-utilpointer "k8s.io/utils/pointer"
+"k8s.io/utils/ptr"

 "github.com/onsi/ginkgo/v2"
 "github.com/onsi/gomega"
@@ -248,7 +248,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 framework.ExpectNoError(err)

 ginkgo.By("hitting the TCP service's LoadBalancer with no backends, no answer expected")
-testNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
+testNotReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout)

 ginkgo.By("Scaling the pods to 1")
 err = tcpJig.Scale(ctx, 1)
@@ -272,7 +272,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 framework.ExpectNoError(err)

 ginkgo.By("checking the TCP LoadBalancer is closed")
-testNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
+testNotReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout)
 })

 f.It("should be able to change the type and ports of a UDP service", f.WithSlow(), func(ctx context.Context) {
@@ -340,7 +340,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 framework.ExpectNoError(err)

 ginkgo.By("hitting the UDP service's LoadBalancer")
-testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+testReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerLagTimeout)

 // Change the services' node ports.

@@ -361,7 +361,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 framework.ExpectNoError(err)

 ginkgo.By("hitting the UDP service's LoadBalancer")
-testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+testReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerLagTimeout)

 // Change the services' main ports.

@@ -389,14 +389,14 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 framework.ExpectNoError(err)

 ginkgo.By("hitting the UDP service's LoadBalancer")
-testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
+testReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerCreateTimeout)

 ginkgo.By("Scaling the pods to 0")
 err = udpJig.Scale(ctx, 0)
 framework.ExpectNoError(err)

 ginkgo.By("looking for ICMP REJECT on the UDP service's LoadBalancer")
-testRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
+testRejectedUDP(ctx, udpIngressIP, svcPort, loadBalancerCreateTimeout)

 ginkgo.By("Scaling the pods to 1")
 err = udpJig.Scale(ctx, 1)
@@ -407,7 +407,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 framework.ExpectNoError(err)

 ginkgo.By("hitting the UDP service's LoadBalancer")
-testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
+testReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerCreateTimeout)

 // Change the services back to ClusterIP.

@@ -424,7 +424,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 framework.ExpectNoError(err)

 ginkgo.By("checking the UDP LoadBalancer is closed")
-testNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+testNotReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerLagTimeout)
 })

 f.It("should only allow access from service loadbalancer source ranges", f.WithSlow(), func(ctx context.Context) {
@@ -475,8 +475,8 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 // as this may take significant amount of time, especially in large clusters.
 // However, the information whether it was already programmed isn't achievable.
 // So we're resolving it by using loadBalancerCreateTimeout that takes cluster size into account.
-checkReachabilityFromPod(true, loadBalancerCreateTimeout, namespace, acceptPod.Name, svcIP)
-checkReachabilityFromPod(false, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP)
+checkReachabilityFromPod(ctx, true, loadBalancerCreateTimeout, namespace, acceptPod.Name, svcIP)
+checkReachabilityFromPod(ctx, false, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP)

 // Make sure dropPod is running. There are certain chances that the pod might be terminated due to unexpected reasons.
 dropPod, err = cs.CoreV1().Pods(namespace).Get(ctx, dropPod.Name, metav1.GetOptions{})
@@ -495,8 +495,8 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 // significant amount of time, especially in large clusters.
 // However, the information whether it was already programmed isn't achievable.
 // So we're resolving it by using loadBalancerCreateTimeout that takes cluster size into account.
-checkReachabilityFromPod(false, loadBalancerCreateTimeout, namespace, acceptPod.Name, svcIP)
-checkReachabilityFromPod(true, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP)
+checkReachabilityFromPod(ctx, false, loadBalancerCreateTimeout, namespace, acceptPod.Name, svcIP)
+checkReachabilityFromPod(ctx, true, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP)

 ginkgo.By("Delete LoadBalancerSourceRange field and check reachability")
 _, err = jig.UpdateService(ctx, func(svc *v1.Service) {
@@ -507,8 +507,8 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 // significant amount of time, especially in large clusters.
 // However, the information whether it was already programmed isn't achievable.
 // So we're resolving it by using loadBalancerCreateTimeout that takes cluster size into account.
-checkReachabilityFromPod(true, loadBalancerCreateTimeout, namespace, acceptPod.Name, svcIP)
-checkReachabilityFromPod(true, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP)
+checkReachabilityFromPod(ctx, true, loadBalancerCreateTimeout, namespace, acceptPod.Name, svcIP)
+checkReachabilityFromPod(ctx, true, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP)
 })

 // [LinuxOnly]: Windows does not support session affinity.
@@ -626,7 +626,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 ginkgo.By("changing the TCP service to type=LoadBalancer")
 _, err = tcpJig.UpdateService(ctx, func(s *v1.Service) {
 s.Spec.Type = v1.ServiceTypeLoadBalancer
-s.Spec.AllocateLoadBalancerNodePorts = utilpointer.BoolPtr(false)
+s.Spec.AllocateLoadBalancerNodePorts = ptr.To(false)
 })
 framework.ExpectNoError(err)

@@ -647,7 +647,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {

 ginkgo.By("adding a TCP service's NodePort")
 tcpService, err = tcpJig.UpdateService(ctx, func(s *v1.Service) {
-s.Spec.AllocateLoadBalancerNodePorts = utilpointer.BoolPtr(true)
+s.Spec.AllocateLoadBalancerNodePorts = ptr.To(true)
 })
 framework.ExpectNoError(err)
 tcpNodePort := int(tcpService.Spec.Ports[0].NodePort)
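The two hunks above (and the TerminationGracePeriodSeconds hunk further down) swap the deprecated per-type helpers from k8s.io/utils/pointer for the generic ptr.To from k8s.io/utils/ptr, matching the import change at the top of the file. A minimal standalone sketch of the replacement pattern; the variable names here are illustrative, not taken from the test:

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// Before: utilpointer.BoolPtr(false) / utilpointer.Int64(30)
	// After: one generic helper covers every pointer type.
	allocateNodePorts := ptr.To(false)      // *bool, type inferred from the argument
	gracePeriodSeconds := ptr.To[int64](30) // *int64, type parameter given explicitly

	fmt.Println(*allocateNodePorts, *gracePeriodSeconds)
}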
@@ -728,9 +728,9 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 framework.Logf("Failed to connect to: %s %d", udpIngressIP, port)
 continue
 }
-conn.SetDeadline(time.Now().Add(3 * time.Second))
+_ = conn.SetDeadline(time.Now().Add(3 * time.Second))
 framework.Logf("Connected successfully to: %s", raddr.String())
-conn.Write([]byte("hostname\n"))
+_, _ = conn.Write([]byte("hostname\n"))
 buff := make([]byte, 1024)
 n, _, err := conn.ReadFrom(buff)
 if err == nil {
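The hunk above silences the linter's unchecked-error findings by discarding return values explicitly with the blank identifier. A minimal sketch of that pattern, using a placeholder address and a simplified UDP client rather than the test's real loop:

package main

import (
	"net"
	"time"
)

func probeUDP(addr string) {
	conn, err := net.Dial("udp", addr)
	if err != nil {
		return
	}
	// Errors from these calls are deliberately ignored; the blank identifier
	// makes that explicit so errcheck-style linters stay quiet.
	_ = conn.SetDeadline(time.Now().Add(3 * time.Second))
	_, _ = conn.Write([]byte("hostname\n"))
	_ = conn.Close()
}

func main() {
	probeUDP("127.0.0.1:9999")
}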
@@ -739,7 +739,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 mu.Unlock()
 framework.Logf("Connected successfully to hostname: %s", string(buff[:n]))
 }
-conn.Close()
+_ = conn.Close()
 }
 }()

@@ -760,7 +760,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 // 30 seconds by default.
 // Based on the above check if the pod receives the traffic.
 ginkgo.By("checking client pod connected to the backend 1 on Node " + nodes.Items[0].Name)
-if err := wait.PollImmediate(1*time.Second, loadBalancerLagTimeout, func() (bool, error) {
+if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, loadBalancerLagTimeout, true, func(ctx context.Context) (bool, error) {
 mu.Lock()
 defer mu.Unlock()
 return hostnames.Has(serverPod1.Spec.Hostname), nil
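This hunk, like the similar polling hunks below, migrates from the deprecated wait.PollImmediate to the context-aware wait.PollUntilContextTimeout: the context comes first, the fourth argument (true here) keeps the immediate first tick, and the condition function now receives a context. A small self-contained sketch of the migration, with a placeholder condition standing in for the test's hostname check:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()
	start := time.Now()

	// Old: wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { ... })
	// New: context first, an "immediate" flag, and a context-aware condition.
	err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, true,
		func(ctx context.Context) (bool, error) {
			// Placeholder condition; the real tests check hostname sets,
			// pod listings, or HTTP responses here.
			return time.Since(start) > 3*time.Second, nil
		})
	fmt.Println("poll finished, err:", err)
}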
@@ -786,7 +786,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 // Check that the second pod keeps receiving traffic
 // UDP conntrack entries timeout is 30 sec by default
 ginkgo.By("checking client pod connected to the backend 2 on Node " + nodes.Items[1].Name)
-if err := wait.PollImmediate(1*time.Second, loadBalancerLagTimeout, func() (bool, error) {
+if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, loadBalancerLagTimeout, true, func(ctx context.Context) (bool, error) {
 mu.Lock()
 defer mu.Unlock()
 return hostnames.Has(serverPod2.Spec.Hostname), nil
@@ -860,9 +860,9 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 framework.Logf("Failed to connect to: %s %d", udpIngressIP, port)
 continue
 }
-conn.SetDeadline(time.Now().Add(3 * time.Second))
+_ = conn.SetDeadline(time.Now().Add(3 * time.Second))
 framework.Logf("Connected successfully to: %s", raddr.String())
-conn.Write([]byte("hostname\n"))
+_, _ = conn.Write([]byte("hostname\n"))
 buff := make([]byte, 1024)
 n, _, err := conn.ReadFrom(buff)
 if err == nil {
@@ -871,7 +871,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 mu.Unlock()
 framework.Logf("Connected successfully to hostname: %s", string(buff[:n]))
 }
-conn.Close()
+_ = conn.Close()
 }
 }()

@@ -892,7 +892,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 // 30 seconds by default.
 // Based on the above check if the pod receives the traffic.
 ginkgo.By("checking client pod connected to the backend 1 on Node " + nodes.Items[0].Name)
-if err := wait.PollImmediate(1*time.Second, loadBalancerLagTimeout, func() (bool, error) {
+if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, loadBalancerLagTimeout, true, func(ctx context.Context) (bool, error) {
 mu.Lock()
 defer mu.Unlock()
 return hostnames.Has(serverPod1.Spec.Hostname), nil
@@ -918,7 +918,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 // Check that the second pod keeps receiving traffic
 // UDP conntrack entries timeout is 30 sec by default
 ginkgo.By("checking client pod connected to the backend 2 on Node " + nodes.Items[0].Name)
-if err := wait.PollImmediate(1*time.Second, loadBalancerLagTimeout, func() (bool, error) {
+if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, loadBalancerLagTimeout, true, func(ctx context.Context) (bool, error) {
 mu.Lock()
 defer mu.Unlock()
 return hostnames.Has(serverPod2.Spec.Hostname), nil
@@ -1181,7 +1181,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP", framework.WithSlow(), func() {
 var srcIP string
 loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, cs)
 ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, pausePod.Name, pausePod.Spec.NodeName))
-if pollErr := wait.PollImmediate(framework.Poll, loadBalancerPropagationTimeout, func() (bool, error) {
+if pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, loadBalancerPropagationTimeout, true, func(ctx context.Context) (bool, error) {
 stdout, err := e2eoutput.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd)
 if err != nil {
 framework.Logf("got err: %v, retry until timeout", err)
@@ -1270,7 +1270,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP", framework.WithSlow(), func() {
 for nodeName, nodeIP := range endpointNodeMap {
 ginkgo.By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIP))
 var body string
-pollFn := func() (bool, error) {
+pollFn := func(ctx context.Context) (bool, error) {
 // we expect connection failure here, but not other errors
 resp, err := config.GetResponseFromTestContainer(ctx,
 "http",
@@ -1288,7 +1288,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP", framework.WithSlow(), func() {
 }
 return false, nil
 }
-if pollErr := wait.PollImmediate(framework.Poll, e2eservice.TestTimeout, pollFn); pollErr != nil {
+if pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, e2eservice.TestTimeout, true, pollFn); pollErr != nil {
 framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s",
 nodeName, healthCheckNodePort, body)
 }
@@ -1297,7 +1297,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP", framework.WithSlow(), func() {
 // Poll till kube-proxy re-adds the MASQUERADE rule on the node.
 ginkgo.By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP))
 var clientIP string
-pollErr := wait.PollImmediate(framework.Poll, 3*e2eservice.KubeProxyLagTimeout, func() (bool, error) {
+pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, 3*e2eservice.KubeProxyLagTimeout, true, func(ctx context.Context) (bool, error) {
 clientIPPort, err := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, path)
 if err != nil {
 return false, nil
@@ -1336,7 +1336,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP", framework.WithSlow(), func() {
 })
 framework.ExpectNoError(err)
 loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, cs)
-pollErr = wait.PollImmediate(framework.PollShortTimeout, loadBalancerPropagationTimeout, func() (bool, error) {
+pollErr = wait.PollUntilContextTimeout(ctx, framework.PollShortTimeout, loadBalancerPropagationTimeout, true, func(ctx context.Context) (bool, error) {
 clientIPPort, err := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, path)
 if err != nil {
 return false, nil
@@ -1384,7 +1384,7 @@ func testRollingUpdateLBConnectivityDisruption(ctx context.Context, f *framework
 },
 }
 ds.Spec.Template.Labels = labels
-ds.Spec.Template.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(gracePeriod)
+ds.Spec.Template.Spec.TerminationGracePeriodSeconds = ptr.To(gracePeriod)

 nodeNames := e2edaemonset.SchedulableNodes(ctx, cs, ds)
 e2eskipper.SkipUnlessAtLeast(len(nodeNames), 2, "load-balancer rolling update test requires at least 2 schedulable nodes for the DaemonSet")
@@ -1447,7 +1447,7 @@ func testRollingUpdateLBConnectivityDisruption(ctx context.Context, f *framework
 atomic.AddUint64(&networkErrors, 1)
 return
 }
-defer resp.Body.Close()
+defer func() { _ = resp.Body.Close() }()
 if resp.StatusCode != http.StatusOK {
 framework.Logf("Got bad status code: %d", resp.StatusCode)
 atomic.AddUint64(&httpErrors, 1)
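Wrapping the deferred Body.Close() in a function literal, as in the hunk above, lets its returned error be discarded explicitly rather than silently. A short illustrative sketch; the URL handling is a placeholder, not the test's request loop:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func fetch(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	// Discard Close's error explicitly inside the deferred func literal.
	defer func() { _ = resp.Body.Close() }()
	return io.ReadAll(resp.Body)
}

func main() {
	body, err := fetch("http://example.com")
	fmt.Println(len(body), err)
}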
@@ -1470,16 +1470,16 @@ func testRollingUpdateLBConnectivityDisruption(ctx context.Context, f *framework
 ginkgo.By("Triggering DaemonSet rolling update several times")
 var previousTotalRequests uint64 = 0
 var previousNetworkErrors uint64 = 0
-var previousHttpErrors uint64 = 0
+var previousHTTPErrors uint64 = 0
 for i := 1; i <= 5; i++ {
 framework.Logf("Update daemon pods environment: [{\"name\":\"VERSION\",\"value\":\"%d\"}]", i)
 patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","env":[{"name":"VERSION","value":"%d"}]}]}}}}`, ds.Spec.Template.Spec.Containers[0].Name, i)
 ds, err = cs.AppsV1().DaemonSets(ns).Patch(context.TODO(), name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
 framework.ExpectNoError(err)

 framework.Logf("Check that daemon pods are available on every node of the cluster with the updated environment.")
-err = wait.PollImmediate(framework.Poll, creationTimeout, func() (bool, error) {
-podList, err := cs.CoreV1().Pods(ds.Namespace).List(context.TODO(), metav1.ListOptions{})
+err = wait.PollUntilContextTimeout(ctx, framework.Poll, creationTimeout, true, func(ctx context.Context) (bool, error) {
+podList, err := cs.CoreV1().Pods(ds.Namespace).List(ctx, metav1.ListOptions{})
 if err != nil {
 return false, err
 }
@@ -1517,24 +1517,24 @@ func testRollingUpdateLBConnectivityDisruption(ctx context.Context, f *framework
 // assert that the HTTP requests success rate is above the acceptable threshold after this rolling update
 currentTotalRequests := atomic.LoadUint64(&totalRequests)
 currentNetworkErrors := atomic.LoadUint64(&networkErrors)
-currentHttpErrors := atomic.LoadUint64(&httpErrors)
+currentHTTPErrors := atomic.LoadUint64(&httpErrors)

 partialTotalRequests := currentTotalRequests - previousTotalRequests
 partialNetworkErrors := currentNetworkErrors - previousNetworkErrors
-partialHttpErrors := currentHttpErrors - previousHttpErrors
-partialSuccessRate := (float64(partialTotalRequests) - float64(partialNetworkErrors+partialHttpErrors)) / float64(partialTotalRequests)
+partialHTTPErrors := currentHTTPErrors - previousHTTPErrors
+partialSuccessRate := (float64(partialTotalRequests) - float64(partialNetworkErrors+partialHTTPErrors)) / float64(partialTotalRequests)

 framework.Logf("Load Balancer total HTTP requests: %d", partialTotalRequests)
 framework.Logf("Network errors: %d", partialNetworkErrors)
-framework.Logf("HTTP errors: %d", partialHttpErrors)
+framework.Logf("HTTP errors: %d", partialHTTPErrors)
 framework.Logf("Success rate: %.2f%%", partialSuccessRate*100)
 if partialSuccessRate < minSuccessRate {
 framework.Failf("Encountered too many errors when doing HTTP requests to the load balancer address. Success rate is %.2f%%, and the minimum allowed threshold is %.2f%%.", partialSuccessRate*100, minSuccessRate*100)
 }

 previousTotalRequests = currentTotalRequests
 previousNetworkErrors = currentNetworkErrors
-previousHttpErrors = currentHttpErrors
+previousHTTPErrors = currentHTTPErrors
 }

 // assert that the load balancer address is still reachable after the rolling updates are finished
