
Commit 78f7217

Merge pull request kubernetes#130082 from elizabeth-dev/replace-network-e2e-replicationcontrollers-4

test(network): replace RCs with Deployments in util function jig.Run

2 parents 1d73c5d + 0c68aa6

4 files changed: 66 additions, 104 deletions
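In short, the service test jig now backs each test Service with an apps/v1 Deployment instead of a core/v1 ReplicationController. A minimal caller-side sketch of the updated jig.Run signature (the jig and ctx names and the two-replica tweak are illustrative assumptions, not taken from the diff):

	// The tweak callback now receives an *appsv1.Deployment; jig.Run creates the
	// Deployment and waits for the rollout to complete before returning.
	deployment, err := jig.Run(ctx, func(d *appsv1.Deployment) {
		d.Spec.Replicas = ptr.To[int32](2) // e.g. run two backend pods
	})
	framework.ExpectNoError(err)
	framework.Logf("created Deployment %q with %d replicas", deployment.Name, *deployment.Spec.Replicas)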

test/e2e/framework/service/jig.go
Lines changed: 48 additions & 88 deletions

@@ -28,26 +28,27 @@ import (
 	"time"
 
 	"github.com/onsi/ginkgo/v2"
+	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	discoveryv1 "k8s.io/api/discovery/v1"
 	policyv1 "k8s.io/api/policy/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
-	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	netutils "k8s.io/utils/net"
+	"k8s.io/utils/ptr"
 )

@@ -655,23 +656,25 @@ func (j *TestJig) waitForCondition(ctx context.Context, timeout time.Duration, m
 	return service, nil
 }
 
-// newRCTemplate returns the default v1.ReplicationController object for
-// this j, but does not actually create the RC. The default RC has the same
+// newDeploymentTemplate returns the default appsv1.Deployment object for
+// this j, but does not actually create the Deployment. The default Deployment has the same
 // name as the j and runs the "netexec" container.
-func (j *TestJig) newRCTemplate() *v1.ReplicationController {
+func (j *TestJig) newDeploymentTemplate() *appsv1.Deployment {
 	var replicas int32 = 1
 	var grace int64 = 3 // so we don't race with kube-proxy when scaling up/down
 
-	rc := &v1.ReplicationController{
+	deployment := &appsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: j.Namespace,
 			Name:      j.Name,
 			Labels:    j.Labels,
 		},
-		Spec: v1.ReplicationControllerSpec{
+		Spec: appsv1.DeploymentSpec{
 			Replicas: &replicas,
-			Selector: j.Labels,
-			Template: &v1.PodTemplateSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: j.Labels,
+			},
+			Template: v1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: j.Labels,
 				},

@@ -697,32 +700,32 @@ func (j *TestJig) newRCTemplate() *v1.ReplicationController {
 			},
 		},
 	}
-	return rc
+	return deployment
 }
 
-// AddRCAntiAffinity adds AntiAffinity to the given ReplicationController.
-func (j *TestJig) AddRCAntiAffinity(rc *v1.ReplicationController) {
+// AddDeploymentAntiAffinity adds AntiAffinity to the given Deployment.
+func (j *TestJig) AddDeploymentAntiAffinity(deployment *appsv1.Deployment) {
 	var replicas int32 = 2
 
-	rc.Spec.Replicas = &replicas
-	if rc.Spec.Template.Spec.Affinity == nil {
-		rc.Spec.Template.Spec.Affinity = &v1.Affinity{}
+	deployment.Spec.Replicas = &replicas
+	if deployment.Spec.Template.Spec.Affinity == nil {
+		deployment.Spec.Template.Spec.Affinity = &v1.Affinity{}
 	}
-	if rc.Spec.Template.Spec.Affinity.PodAntiAffinity == nil {
-		rc.Spec.Template.Spec.Affinity.PodAntiAffinity = &v1.PodAntiAffinity{}
+	if deployment.Spec.Template.Spec.Affinity.PodAntiAffinity == nil {
+		deployment.Spec.Template.Spec.Affinity.PodAntiAffinity = &v1.PodAntiAffinity{}
 	}
-	rc.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
-		rc.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
+	deployment.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
+		deployment.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
 		v1.PodAffinityTerm{
 			LabelSelector: &metav1.LabelSelector{MatchLabels: j.Labels},
 			Namespaces:    nil,
 			TopologyKey:   "kubernetes.io/hostname",
 		})
 }
 
-// CreatePDB returns a PodDisruptionBudget for the given ReplicationController, or returns an error if a PodDisruptionBudget isn't ready
-func (j *TestJig) CreatePDB(ctx context.Context, rc *v1.ReplicationController) (*policyv1.PodDisruptionBudget, error) {
-	pdb := j.newPDBTemplate(rc)
+// CreatePDB returns a PodDisruptionBudget for the given Deployment, or returns an error if a PodDisruptionBudget isn't ready
+func (j *TestJig) CreatePDB(ctx context.Context, deployment *appsv1.Deployment) (*policyv1.PodDisruptionBudget, error) {
+	pdb := j.newPDBTemplate(deployment)
 	newPdb, err := j.Client.PolicyV1().PodDisruptionBudgets(j.Namespace).Create(ctx, pdb, metav1.CreateOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("failed to create PDB %q %v", pdb.Name, err)

@@ -736,8 +739,8 @@ func (j *TestJig) CreatePDB(ctx context.Context, rc *v1.ReplicationController) (
 
 // newPDBTemplate returns the default policyv1.PodDisruptionBudget object for
 // this j, but does not actually create the PDB. The default PDB specifies a
-// MinAvailable of N-1 and matches the pods created by the RC.
-func (j *TestJig) newPDBTemplate(rc *v1.ReplicationController) *policyv1.PodDisruptionBudget {
+// MinAvailable of N-1 and matches the pods created by the Deployment.
+func (j *TestJig) newPDBTemplate(rc *appsv1.Deployment) *policyv1.PodDisruptionBudget {
 	minAvailable := intstr.FromInt32(*rc.Spec.Replicas - 1)
 
 	pdb := &policyv1.PodDisruptionBudget{

@@ -755,49 +758,43 @@ func (j *TestJig) newPDBTemplate(rc *v1.ReplicationController) *policyv1.PodDisr
 	return pdb
 }
 
-// Run creates a ReplicationController and Pod(s) and waits for the
-// Pod(s) to be running. Callers can provide a function to tweak the RC object
+// Run creates a Deployment and Pod(s) and waits for the
+// Pod(s) to be running. Callers can provide a function to tweak the Deployment object
 // before it is created.
-func (j *TestJig) Run(ctx context.Context, tweak func(rc *v1.ReplicationController)) (*v1.ReplicationController, error) {
-	rc := j.newRCTemplate()
+func (j *TestJig) Run(ctx context.Context, tweak func(rc *appsv1.Deployment)) (*appsv1.Deployment, error) {
+	deployment := j.newDeploymentTemplate()
 	if tweak != nil {
-		tweak(rc)
+		tweak(deployment)
 	}
-	result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(ctx, rc, metav1.CreateOptions{})
+
+	result, err := j.Client.AppsV1().Deployments(j.Namespace).Create(ctx, deployment, metav1.CreateOptions{})
 	if err != nil {
-		return nil, fmt.Errorf("failed to create RC %q: %w", rc.Name, err)
+		return nil, fmt.Errorf("failed to create Deployment %q: %w", deployment.Name, err)
 	}
-	pods, err := j.waitForPodsCreated(ctx, int(*(rc.Spec.Replicas)))
+
+	err = e2edeployment.WaitForDeploymentComplete(j.Client, result)
 	if err != nil {
-		return nil, fmt.Errorf("failed to create pods: %w", err)
-	}
-	if err := j.waitForPodsReady(ctx, pods); err != nil {
-		return nil, fmt.Errorf("failed waiting for pods to be running: %w", err)
+		return nil, fmt.Errorf("failed waiting for Deployment %q: %w", deployment.Name, err)
 	}
+
 	return result, nil
 }
 
 // Scale scales pods to the given replicas
-func (j *TestJig) Scale(ctx context.Context, replicas int) error {
-	rc := j.Name
-	scale, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).GetScale(ctx, rc, metav1.GetOptions{})
+func (j *TestJig) Scale(replicas int) error {
+	deployment, err := e2edeployment.UpdateDeploymentWithRetries(j.Client, j.Namespace, j.Name, func(deployment *appsv1.Deployment) {
+		deployment.Spec.Replicas = ptr.To(int32(replicas))
+	})
 	if err != nil {
-		return fmt.Errorf("failed to get scale for RC %q: %w", rc, err)
+		return fmt.Errorf("failed to scale Deployment %q: %w", j.Name, err)
 	}
 
-	scale.ResourceVersion = "" // indicate the scale update should be unconditional
-	scale.Spec.Replicas = int32(replicas)
-	_, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(ctx, rc, scale, metav1.UpdateOptions{})
-	if err != nil {
-		return fmt.Errorf("failed to scale RC %q: %w", rc, err)
-	}
-	pods, err := j.waitForPodsCreated(ctx, replicas)
+	err = e2edeployment.WaitForDeploymentComplete(j.Client, deployment)
+
 	if err != nil {
-		return fmt.Errorf("failed waiting for pods: %w", err)
-	}
-	if err := j.waitForPodsReady(ctx, pods); err != nil {
-		return fmt.Errorf("failed waiting for pods to be running: %w", err)
+		return fmt.Errorf("failed waiting for Deployment %q: %w", j.Name, err)
 	}
+
 	return nil
 }

@@ -816,43 +813,6 @@ func (j *TestJig) waitForPdbReady(ctx context.Context) error {
 	return fmt.Errorf("timeout waiting for PDB %q to be ready", j.Name)
 }
 
-func (j *TestJig) waitForPodsCreated(ctx context.Context, replicas int) ([]string, error) {
-	// TODO (pohly): replace with gomega.Eventually
-	timeout := 2 * time.Minute
-	// List the pods, making sure we observe all the replicas.
-	label := labels.SelectorFromSet(labels.Set(j.Labels))
-	framework.Logf("Waiting up to %v for %d pods to be created", timeout, replicas)
-	for start := time.Now(); time.Since(start) < timeout && ctx.Err() == nil; time.Sleep(2 * time.Second) {
-		options := metav1.ListOptions{LabelSelector: label.String()}
-		pods, err := j.Client.CoreV1().Pods(j.Namespace).List(ctx, options)
-		if err != nil {
-			return nil, err
-		}
-
-		found := []string{}
-		for _, pod := range pods.Items {
-			if pod.DeletionTimestamp != nil {
-				continue
-			}
-			found = append(found, pod.Name)
-		}
-		if len(found) == replicas {
-			framework.Logf("Found all %d pods", replicas)
-			return found, nil
-		}
-		framework.Logf("Found %d/%d pods - will retry", len(found), replicas)
-	}
-	return nil, fmt.Errorf("timeout waiting for %d pods to be created", replicas)
-}
-
-func (j *TestJig) waitForPodsReady(ctx context.Context, pods []string) error {
-	timeout := 2 * time.Minute
-	if !e2epod.CheckPodsRunningReady(ctx, j.Client, j.Namespace, pods, timeout) {
-		return fmt.Errorf("timeout waiting for %d pods to be ready", len(pods))
-	}
-	return nil
-}
-
 func testReachabilityOverServiceName(ctx context.Context, serviceName string, sp v1.ServicePort, execPod *v1.Pod) error {
 	return testEndpointReachability(ctx, serviceName, sp.Port, sp.Protocol, execPod)
 }
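The reworked Scale drops its ctx parameter: e2edeployment.UpdateDeploymentWithRetries and WaitForDeploymentComplete manage their own retrying and polling, which is also why the hand-rolled waitForPodsCreated and waitForPodsReady helpers above could be deleted. A hedged caller-side sketch (the jig name is an assumption for illustration):

	// Scale the jig's backing Deployment to three replicas; the call returns
	// only after the Deployment controller reports the rollout as complete.
	if err := jig.Scale(3); err != nil {
		framework.Failf("failed to scale test backends: %v", err)
	}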

test/e2e/network/loadbalancer.go
Lines changed: 13 additions & 12 deletions

@@ -47,7 +47,6 @@ import (
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
-	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/network/common"

@@ -236,14 +235,14 @@ var _ = common.SIGDescribe("LoadBalancers", feature.LoadBalancer, func() {
 		e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout)
 
 		ginkgo.By("Scaling the pods to 0")
-		err = tcpJig.Scale(ctx, 0)
+		err = tcpJig.Scale(0)
 		framework.ExpectNoError(err)
 
 		ginkgo.By("hitting the TCP service's LoadBalancer with no backends, no answer expected")
 		testNotReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout)
 
 		ginkgo.By("Scaling the pods to 1")
-		err = tcpJig.Scale(ctx, 1)
+		err = tcpJig.Scale(1)
 		framework.ExpectNoError(err)
 
 		ginkgo.By("hitting the TCP service's LoadBalancer")

@@ -384,14 +383,14 @@ var _ = common.SIGDescribe("LoadBalancers", feature.LoadBalancer, func() {
 		testReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerCreateTimeout)
 
 		ginkgo.By("Scaling the pods to 0")
-		err = udpJig.Scale(ctx, 0)
+		err = udpJig.Scale(0)
 		framework.ExpectNoError(err)
 
 		ginkgo.By("checking that the UDP service's LoadBalancer is not reachable")
 		testNotReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerCreateTimeout)
 
 		ginkgo.By("Scaling the pods to 1")
-		err = udpJig.Scale(ctx, 1)
+		err = udpJig.Scale(1)
 		framework.ExpectNoError(err)
 
 		ginkgo.By("hitting the UDP service's NodePort")

@@ -1115,10 +1114,10 @@ var _ = common.SIGDescribe("LoadBalancers ExternalTrafficPolicy: Local", feature
 			endpointNodeName := nodes.Items[i].Name
 
 			ginkgo.By("creating a pod to be part of the service " + serviceName + " on node " + endpointNodeName)
-			_, err = jig.Run(ctx, func(rc *v1.ReplicationController) {
-				rc.Name = serviceName
+			_, err = jig.Run(ctx, func(deployment *appsv1.Deployment) {
+				deployment.Name = serviceName
 				if endpointNodeName != "" {
-					rc.Spec.Template.Spec.NodeName = endpointNodeName
+					deployment.Spec.Template.Spec.NodeName = endpointNodeName
 				}
 			})
 			framework.ExpectNoError(err)

@@ -1146,7 +1145,9 @@ var _ = common.SIGDescribe("LoadBalancers ExternalTrafficPolicy: Local", feature
 				threshold)
 			framework.ExpectNoError(err)
 		}
-		framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, namespace, serviceName))
+
+		err = f.ClientSet.AppsV1().Deployments(namespace).Delete(ctx, serviceName, metav1.DeleteOptions{})
+		framework.ExpectNoError(err)
 	}
 })

@@ -1172,9 +1173,9 @@ var _ = common.SIGDescribe("LoadBalancers ExternalTrafficPolicy: Local", feature
 		framework.Logf("ingress is %s:%d", ingress, svcPort)
 
 		ginkgo.By("creating endpoints on multiple nodes")
-		_, err = jig.Run(ctx, func(rc *v1.ReplicationController) {
-			rc.Spec.Replicas = ptr.To[int32](2)
-			rc.Spec.Template.Spec.Affinity = &v1.Affinity{
+		_, err = jig.Run(ctx, func(deployment *appsv1.Deployment) {
+			deployment.Spec.Replicas = ptr.To[int32](2)
+			deployment.Spec.Template.Spec.Affinity = &v1.Affinity{
 				PodAntiAffinity: &v1.PodAntiAffinity{
 					RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
 						{
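One behavioural difference worth noting in the cleanup above: the removed e2erc.DeleteRCAndWaitForGC blocked until the controller's pods had been garbage-collected, whereas the replacement issues a plain Delete on the Deployment and returns as soon as the deletion is accepted. A hedged sketch, not part of this commit, of how a test could ask for stricter cleanup by requesting foreground cascading deletion (reusing the same f, namespace, and serviceName):

	// Assumption/illustration only: foreground propagation keeps the Deployment
	// object in the API until its dependent pods have been deleted, so a test
	// could then poll for the Deployment itself to disappear.
	policy := metav1.DeletePropagationForeground
	err = f.ClientSet.AppsV1().Deployments(namespace).Delete(ctx, serviceName,
		metav1.DeleteOptions{PropagationPolicy: &policy})
	framework.ExpectNoError(err)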

test/e2e/upgrades/network/services.go
Lines changed: 3 additions & 3 deletions

@@ -62,11 +62,11 @@ func (t *ServiceUpgradeTest) Setup(ctx context.Context, f *framework.Framework)
 	svcPort := int(tcpService.Spec.Ports[0].Port)
 
 	ginkgo.By("creating pod to be part of service " + serviceName)
-	rc, err := jig.Run(ctx, jig.AddRCAntiAffinity)
+	deployment, err := jig.Run(ctx, jig.AddDeploymentAntiAffinity)
 	framework.ExpectNoError(err)
 
-	ginkgo.By("creating a PodDisruptionBudget to cover the ReplicationController")
-	_, err = jig.CreatePDB(ctx, rc)
+	ginkgo.By("creating a PodDisruptionBudget to cover the Deployment")
+	_, err = jig.CreatePDB(ctx, deployment)
 	framework.ExpectNoError(err)
 
 	// Hit it once before considering ourselves ready

test/e2e/windows/service.go
Lines changed: 2 additions & 1 deletion

@@ -19,6 +19,7 @@ package windows
 import (
 	"context"
 	"fmt"
+	appsv1 "k8s.io/api/apps/v1"
 	"net"
 	"strconv"

@@ -66,7 +67,7 @@ var _ = sigDescribe("Services", skipUnlessWindows(func() {
 
 	ginkgo.By("creating Pod to be part of service " + serviceName)
 	// tweak the Jig to use windows...
-	windowsNodeSelectorTweak := func(rc *v1.ReplicationController) {
+	windowsNodeSelectorTweak := func(rc *appsv1.Deployment) {
 		rc.Spec.Template.Spec.NodeSelector = map[string]string{
 			"kubernetes.io/os": "windows",
 		}
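For completeness, a hedged sketch of how a node-selector tweak like the one above plugs into the updated helper (the surrounding jig and ctx setup are assumed from the rest of the test, not shown in this hunk):

	// The tweak is just a func(*appsv1.Deployment), so it composes directly
	// with jig.Run; the Deployment's pods land only on Windows nodes.
	_, err := jig.Run(ctx, windowsNodeSelectorTweak)
	framework.ExpectNoError(err)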
