Skip to content

Commit b681345

Browse files
committed
K8s rebase 1.33.3
> test/e2e
>   go get k8s.io/[email protected]
>   go get k8s.io/[email protected]
>   go get k8s.io/[email protected]
>   go get k8s.io/[email protected]
>   go get k8s.io/[email protected]
>   go get sigs.k8s.io/[email protected]
>   go get k8s.io/[email protected]
>   go get k8s.io/[email protected]
>   go get k8s.io/[email protected]
>   go get k8s.io/[email protected]
>   go get k8s.io/[email protected]
>   go get k8s.io/[email protected]
>   go get k8s.io/[email protected]
>   go get sigs.k8s.io/apiserver-network-proxy/[email protected]
>   go mod tidy

- Fix test failure "non-constant format string in call to k8s.io/kubernetes/test/e2e/framework.Logf" in e2e.go, egress_services.go, egressip.go, reporter.go and service.go
- Remove fmt.Sprintf() used in YAML for objects
- Fix "non-constant format string in call to k8s.io/kubernetes/test/e2e/framework.Failf" in multicast.go
- Fix service tests failing on dualstack by using endpointslices in test/e2e/e2e.go and test/e2e/service.go
- Expected endpoint number passed to WaitForServiceEndpointsNum() is based on the IP stack of the cluster
- MetalLB test to use endpoint slices instead of subsets, and increase the time in buildAndRunCommand() to 10s
- Skip conformance tests in test/scripts/e2e-kind.sh — service cidr (not implemented)

Signed-off-by: Arti Sood <[email protected]>
1 parent afad0c8 commit b681345

File tree

10 files changed

+322
-324
lines changed

10 files changed

+322
-324
lines changed

test/e2e/e2e.go

Lines changed: 32 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -671,7 +671,7 @@ func findOvnKubeControlPlaneNode(namespace, controlPlanePodName, leaseName strin
671671
framework.ExpectNoError(err, fmt.Sprintf("Unable to retrieve leases (%s)"+
672672
"from %s %v", leaseName, namespace, err))
673673

674-
framework.Logf(fmt.Sprintf("master instance of %s is running on node %s", controlPlanePodName, ovnkubeControlPlaneNode))
674+
framework.Logf("master instance of %s is running on node %s", controlPlanePodName, ovnkubeControlPlaneNode)
675675
// Strip leading and trailing quotes if present
676676
if ovnkubeControlPlaneNode[0] == '\'' || ovnkubeControlPlaneNode[0] == '"' {
677677
ovnkubeControlPlaneNode = ovnkubeControlPlaneNode[1 : len(ovnkubeControlPlaneNode)-1]
@@ -897,7 +897,7 @@ var _ = ginkgo.Describe("e2e control plane", func() {
897897
}
898898
}
899899

900-
framework.Logf(fmt.Sprintf("Killed all pods running on node %s", ovnKubeControlPlaneNode))
900+
framework.Logf("Killed all pods running on node %s", ovnKubeControlPlaneNode)
901901

902902
framework.ExpectNoError(<-errChan)
903903
})
@@ -1298,7 +1298,11 @@ var _ = ginkgo.Describe("e2e ingress traffic validation", func() {
12981298
framework.ExpectNoError(err)
12991299

13001300
ginkgo.By("Waiting for the endpoints to pop up")
1301-
err = framework.WaitForServiceEndpointsNum(context.TODO(), f.ClientSet, f.Namespace.Name, serviceName, len(endPoints), time.Second, wait.ForeverTestTimeout)
1301+
expectedEndpointsNum := len(endPoints)
1302+
if isDualStack {
1303+
expectedEndpointsNum = expectedEndpointsNum * 2
1304+
}
1305+
err = framework.WaitForServiceEndpointsNum(context.TODO(), f.ClientSet, f.Namespace.Name, serviceName, expectedEndpointsNum, time.Second, wait.ForeverTestTimeout)
13021306
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, f.Namespace.Name)
13031307

13041308
for _, protocol := range []string{"http", "udp"} {
@@ -1494,7 +1498,11 @@ var _ = ginkgo.Describe("e2e ingress traffic validation", func() {
14941498
framework.ExpectNoError(err)
14951499

14961500
ginkgo.By("Waiting for the endpoints to pop up")
1497-
err = framework.WaitForServiceEndpointsNum(context.TODO(), f.ClientSet, f.Namespace.Name, serviceName, len(endPoints), time.Second, wait.ForeverTestTimeout)
1501+
expectedEndpointsNum := len(endPoints)
1502+
if isDualStack {
1503+
expectedEndpointsNum = expectedEndpointsNum * 2
1504+
}
1505+
err = framework.WaitForServiceEndpointsNum(context.TODO(), f.ClientSet, f.Namespace.Name, serviceName, expectedEndpointsNum, time.Second, wait.ForeverTestTimeout)
14981506
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, f.Namespace.Name)
14991507

15001508
for _, protocol := range []string{"http", "udp"} {
@@ -1578,7 +1586,11 @@ var _ = ginkgo.Describe("e2e ingress traffic validation", func() {
15781586
framework.ExpectNoError(err)
15791587

15801588
ginkgo.By("Waiting for the endpoints to pop up")
1581-
err = framework.WaitForServiceEndpointsNum(context.TODO(), f.ClientSet, f.Namespace.Name, serviceName, len(endPoints), time.Second, wait.ForeverTestTimeout)
1589+
expectedEndpointsNum := len(endPoints)
1590+
if isDualStack {
1591+
expectedEndpointsNum = expectedEndpointsNum * 2
1592+
}
1593+
err = framework.WaitForServiceEndpointsNum(context.TODO(), f.ClientSet, f.Namespace.Name, serviceName, expectedEndpointsNum, time.Second, wait.ForeverTestTimeout)
15821594
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, f.Namespace.Name)
15831595

15841596
for _, externalAddress := range addresses {
@@ -1725,7 +1737,11 @@ var _ = ginkgo.Describe("e2e ingress traffic validation", func() {
17251737
framework.ExpectNoError(err)
17261738

17271739
ginkgo.By("Waiting for the endpoints to pop up")
1728-
err = framework.WaitForServiceEndpointsNum(context.TODO(), f.ClientSet, f.Namespace.Name, serviceName, len(endPoints), time.Second, wait.ForeverTestTimeout)
1740+
expectedEndpointsNum := len(endPoints)
1741+
if isDualStack {
1742+
expectedEndpointsNum = expectedEndpointsNum * 2
1743+
}
1744+
err = framework.WaitForServiceEndpointsNum(context.TODO(), f.ClientSet, f.Namespace.Name, serviceName, expectedEndpointsNum, time.Second, wait.ForeverTestTimeout)
17291745
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, f.Namespace.Name)
17301746

17311747
for _, protocol := range []string{"http", "udp"} {
@@ -1773,6 +1789,7 @@ var _ = ginkgo.Describe("e2e ingress to host-networked pods traffic validation",
17731789
maxTries := 0
17741790
var nodes *v1.NodeList
17751791
var providerCtx infraapi.Context
1792+
var isDualStack bool
17761793

17771794
ginkgo.BeforeEach(func() {
17781795
providerCtx = infraprovider.Get().NewTestContext()
@@ -1803,6 +1820,8 @@ var _ = ginkgo.Describe("e2e ingress to host-networked pods traffic validation",
18031820
len(nodes.Items))
18041821
}
18051822

1823+
isDualStack = isDualStackCluster(nodes)
1824+
18061825
ginkgo.By("Creating the endpoints pod, one for each worker")
18071826
for _, node := range nodes.Items {
18081827
// this create a udp / http netexec listener which is able to receive the "hostname"
@@ -1855,7 +1874,11 @@ var _ = ginkgo.Describe("e2e ingress to host-networked pods traffic validation",
18551874
nodeTCPPort, nodeUDPPort := nodePortsFromService(np)
18561875

18571876
ginkgo.By("Waiting for the endpoints to pop up")
1858-
err = framework.WaitForServiceEndpointsNum(context.TODO(), f.ClientSet, f.Namespace.Name, serviceName, len(endPoints), time.Second, wait.ForeverTestTimeout)
1877+
expectedEndpointsNum := len(endPoints)
1878+
if isDualStack {
1879+
expectedEndpointsNum = expectedEndpointsNum * 2
1880+
}
1881+
err = framework.WaitForServiceEndpointsNum(context.TODO(), f.ClientSet, f.Namespace.Name, serviceName, expectedEndpointsNum, time.Second, wait.ForeverTestTimeout)
18591882
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, f.Namespace.Name)
18601883

18611884
for _, protocol := range []string{"http", "udp"} {
@@ -2229,7 +2252,7 @@ var _ = ginkgo.Describe("e2e delete databases", func() {
22292252
for {
22302253
select {
22312254
case msg := <-syncChan:
2232-
framework.Logf(msg + ": finish connectivity test.")
2255+
framework.Logf("%s: finish connectivity test.", msg)
22332256
break L
22342257
default:
22352258
stdout, err := e2ekubectl.RunKubectl(f.Namespace.Name, "exec", pod1Name, "--", "curl", fmt.Sprintf("%s/hostname",
@@ -2279,7 +2302,7 @@ var _ = ginkgo.Describe("e2e delete databases", func() {
22792302
select {
22802303
case msg := <-syncChan:
22812304
// wait for the connectivity test pods to be ready
2282-
framework.Logf(msg + ": delete and restart db pods.")
2305+
framework.Logf("%s: delete and restart db pods.", msg)
22832306
case err := <-errChan:
22842307
// fail if error is returned before test pods are ready
22852308
framework.Fail(err.Error())

test/e2e/egress_services.go

Lines changed: 22 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -115,15 +115,15 @@ var _ = ginkgo.Describe("EgressService", feature.EgressService, func() {
115115
framework.ExpectNoError(err, "failed to create backend pods")
116116

117117
ginkgo.By("Creating an egress service without node selectors")
118-
egressServiceConfig := fmt.Sprintf(`
118+
egressServiceConfig := `
119119
apiVersion: k8s.ovn.org/v1
120120
kind: EgressService
121121
metadata:
122122
name: ` + serviceName + `
123123
namespace: ` + f.Namespace.Name + `
124124
spec:
125125
sourceIPBy: "LoadBalancerIP"
126-
`)
126+
`
127127

128128
if err := os.WriteFile(egressServiceYAML, []byte(egressServiceConfig), 0644); err != nil {
129129
framework.Failf("Unable to write CRD config to disk: %v", err)
@@ -167,7 +167,7 @@ spec:
167167
setBlackholeRoutingTableOnNodes(providerCtx, nodes, externalContainer, blackholeRoutingTable, protocol == v1.IPv4Protocol)
168168

169169
ginkgo.By("Updating the resource to contain a Network")
170-
egressServiceConfig = fmt.Sprintf(`
170+
egressServiceConfig = `
171171
apiVersion: k8s.ovn.org/v1
172172
kind: EgressService
173173
metadata:
@@ -176,7 +176,7 @@ metadata:
176176
spec:
177177
sourceIPBy: "LoadBalancerIP"
178178
network: "100"
179-
`)
179+
`
180180
if err := os.WriteFile(egressServiceYAML, []byte(egressServiceConfig), 0644); err != nil {
181181
framework.Failf("Unable to write CRD config to disk: %v", err)
182182
}
@@ -257,7 +257,7 @@ spec:
257257
framework.ExpectNoError(err, "failed to create backend pods")
258258

259259
ginkgo.By("Creating an egress service with custom network without SNAT")
260-
egressServiceConfig := fmt.Sprintf(`
260+
egressServiceConfig := `
261261
apiVersion: k8s.ovn.org/v1
262262
kind: EgressService
263263
metadata:
@@ -266,7 +266,7 @@ metadata:
266266
spec:
267267
sourceIPBy: "Network"
268268
network: "100"
269-
`)
269+
`
270270

271271
if err := os.WriteFile(egressServiceYAML, []byte(egressServiceConfig), 0644); err != nil {
272272
framework.Failf("Unable to write CRD config to disk: %v", err)
@@ -381,15 +381,15 @@ spec:
381381
framework.ExpectNoError(err, "failed to create backend pods")
382382

383383
ginkgo.By("Creating an egress service without node selectors")
384-
egressServiceConfig := fmt.Sprintf(`
384+
egressServiceConfig := `
385385
apiVersion: k8s.ovn.org/v1
386386
kind: EgressService
387387
metadata:
388388
name: ` + serviceName + `
389389
namespace: ` + f.Namespace.Name + `
390390
spec:
391391
sourceIPBy: "LoadBalancerIP"
392-
`)
392+
`
393393

394394
if err := os.WriteFile(egressServiceYAML, []byte(egressServiceConfig), 0644); err != nil {
395395
framework.Failf("Unable to write CRD config to disk: %v", err)
@@ -494,7 +494,7 @@ spec:
494494

495495
ginkgo.By("Creating an egress service selecting the first node")
496496
firstNode := nodes[0].Name
497-
egressServiceConfig := fmt.Sprintf(`
497+
egressServiceConfig := `
498498
apiVersion: k8s.ovn.org/v1
499499
kind: EgressService
500500
metadata:
@@ -505,7 +505,7 @@ spec:
505505
nodeSelector:
506506
matchLabels:
507507
kubernetes.io/hostname: ` + firstNode + `
508-
`)
508+
`
509509

510510
if err := os.WriteFile(egressServiceYAML, []byte(egressServiceConfig), 0644); err != nil {
511511
framework.Failf("Unable to write CRD config to disk: %v", err)
@@ -547,7 +547,7 @@ spec:
547547

548548
ginkgo.By("Updating the egress service to select the second node")
549549
secondNode := nodes[1].Name
550-
egressServiceConfig = fmt.Sprintf(`
550+
egressServiceConfig = `
551551
apiVersion: k8s.ovn.org/v1
552552
kind: EgressService
553553
metadata:
@@ -558,7 +558,7 @@ spec:
558558
nodeSelector:
559559
matchLabels:
560560
kubernetes.io/hostname: ` + secondNode + `
561-
`)
561+
`
562562
if err := os.WriteFile(egressServiceYAML, []byte(egressServiceConfig), 0644); err != nil {
563563
framework.Failf("Unable to write CRD config to disk: %v", err)
564564
}
@@ -594,7 +594,7 @@ spec:
594594
reachAllServiceBackendsFromExternalContainer(externalContainer, svcIP, podHTTPPort, pods)
595595

596596
ginkgo.By("Updating the egress service selector to match no node")
597-
egressServiceConfig = fmt.Sprintf(`
597+
egressServiceConfig = `
598598
apiVersion: k8s.ovn.org/v1
599599
kind: EgressService
600600
metadata:
@@ -605,7 +605,7 @@ spec:
605605
nodeSelector:
606606
matchLabels:
607607
perfect: match
608-
`)
608+
`
609609
if err := os.WriteFile(egressServiceYAML, []byte(egressServiceConfig), 0644); err != nil {
610610
framework.Failf("Unable to write CRD config to disk: %v", err)
611611
}
@@ -717,7 +717,7 @@ spec:
717717
framework.ExpectNoError(err, "failed to create backend pods")
718718

719719
ginkgo.By("Creating an egress service with node selector")
720-
egressServiceConfig := fmt.Sprintf(`
720+
egressServiceConfig := `
721721
apiVersion: k8s.ovn.org/v1
722722
kind: EgressService
723723
metadata:
@@ -727,7 +727,7 @@ spec:
727727
sourceIPBy: "LoadBalancerIP"
728728
nodeSelector:
729729
matchLabels:
730-
kubernetes.io/hostname: ` + nodes[1].Name)
730+
kubernetes.io/hostname: ` + nodes[1].Name
731731

732732
if err := os.WriteFile(egressServiceYAML, []byte(egressServiceConfig), 0644); err != nil {
733733
framework.Failf("Unable to write CRD config to disk: %v", err)
@@ -764,7 +764,7 @@ spec:
764764
}
765765
framework.ExpectNoError(err, "must allocate new primary network IP address")
766766
egressIPYaml := "egressip.yaml"
767-
egressIPConfig := fmt.Sprintf(`apiVersion: k8s.ovn.org/v1
767+
egressIPConfig := `apiVersion: k8s.ovn.org/v1
768768
kind: EgressIP
769769
metadata:
770770
name: egress-svc-test-eip
@@ -777,7 +777,7 @@ spec:
777777
namespaceSelector:
778778
matchLabels:
779779
kubernetes.io/metadata.name: ` + f.Namespace.Name + `
780-
`)
780+
`
781781

782782
if err := os.WriteFile(egressIPYaml, []byte(egressIPConfig), 0644); err != nil {
783783
framework.Failf("Unable to write CRD config to disk: %v", err)
@@ -882,7 +882,7 @@ spec:
882882
framework.ExpectNoError(err, "failed to create backend pods")
883883

884884
ginkgo.By("Creating an ETP=Local egress service selecting the first node")
885-
egressServiceConfig := fmt.Sprintf(`
885+
egressServiceConfig := `
886886
apiVersion: k8s.ovn.org/v1
887887
kind: EgressService
888888
metadata:
@@ -893,7 +893,7 @@ spec:
893893
nodeSelector:
894894
matchLabels:
895895
kubernetes.io/hostname: ` + firstNode + `
896-
`)
896+
`
897897

898898
if err := os.WriteFile(egressServiceYAML, []byte(egressServiceConfig), 0644); err != nil {
899899
framework.Failf("Unable to write CRD config to disk: %v", err)
@@ -1304,7 +1304,7 @@ metadata:
13041304

13051305
ginkgo.By("Creating the EgressServices for the networks")
13061306
for _, net := range []*netSettings{net1, net2} {
1307-
egressServiceConfig := fmt.Sprintf(`
1307+
egressServiceConfig := `
13081308
apiVersion: k8s.ovn.org/v1
13091309
kind: EgressService
13101310
metadata:
@@ -1313,7 +1313,7 @@ metadata:
13131313
spec:
13141314
sourceIPBy: "Network"
13151315
network: ` + fmt.Sprintf("\"%s\"", net.routingTable) + `
1316-
`)
1316+
`
13171317

13181318
if err := os.WriteFile(net.egressServiceYAML, []byte(egressServiceConfig), 0644); err != nil {
13191319
framework.Failf("Unable to write CRD config to disk: %v", err)

0 commit comments

Comments
 (0)