
Commit e62ce1c

Merge pull request kubernetes#129489 from elizabeth-dev/replace-network-e2e-replicationcontrollers
test(network): replace jig.CreateRC with jig.CreateDeployment
2 parents 2642d82 + 4724870 commit e62ce1c

File tree: 14 files changed (+165 −144 lines)
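
The common thread in the diff below is that the e2epod helpers no longer look pods up by a bare name prefix; each call now also passes a labels.Selector built from the pod labels. The following is a minimal sketch of the resulting call pattern, assuming the updated VerifyPodsRunning signature shown in the hunks below; the wrapper name verifySamplePods is invented for illustration and is not part of this commit.

package example

import (
	"context"

	"k8s.io/apimachinery/pkg/labels"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// verifySamplePods waits for `replicas` running pods carrying the
// {"name": podName} label, mirroring the VerifyPodsRunning calls in this commit.
func verifySamplePods(ctx context.Context, c clientset.Interface, ns, podName string, replicas int32) error {
	selector := labels.SelectorFromSet(map[string]string{"name": podName})
	// The boolean argument is passed through unchanged (false), as in the tests below.
	return e2epod.VerifyPodsRunning(ctx, c, ns, podName, selector, false, replicas)
}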

test/e2e/apps/deployment.go

Lines changed: 28 additions & 8 deletions
@@ -737,9 +737,10 @@ func testRollingUpdateDeployment(ctx context.Context, f *framework.Framework) {
 	ns := f.Namespace.Name
 	c := f.ClientSet
 	// Create webserver pods.
-	deploymentPodLabels := map[string]string{"name": "sample-pod"}
+	podName := "sample-pod"
+	deploymentPodLabels := map[string]string{"name": podName}
 	rsPodLabels := map[string]string{
-		"name": "sample-pod",
+		"name": podName,
 		"pod":  WebserverImageName,
 	}
@@ -754,7 +755,13 @@ func testRollingUpdateDeployment(ctx context.Context, f *framework.Framework) {
 	_, err := c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 	// Verify that the required pods have come up.
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
+	err = e2epod.VerifyPodsRunning(ctx,
+		c,
+		ns,
+		podName,
+		labels.SelectorFromSet(map[string]string{"name": podName}),
+		false,
+		replicas)
 	framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)

 	// Create a deployment to delete webserver pods and instead bring up agnhost pods.
@@ -820,9 +827,10 @@ func testDeploymentCleanUpPolicy(ctx context.Context, f *framework.Framework) {
 	ns := f.Namespace.Name
 	c := f.ClientSet
 	// Create webserver pods.
-	deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
+	podName := "cleanup-pod"
+	deploymentPodLabels := map[string]string{"name": podName}
 	rsPodLabels := map[string]string{
-		"name": "cleanup-pod",
+		"name": podName,
 		"pod":  WebserverImageName,
 	}
 	rsName := "test-cleanup-controller"
@@ -832,7 +840,13 @@ func testDeploymentCleanUpPolicy(ctx context.Context, f *framework.Framework) {
 	framework.ExpectNoError(err)

 	// Verify that the required pods have come up.
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, "cleanup-pod", false, replicas)
+	err = e2epod.VerifyPodsRunning(ctx,
+		c,
+		ns,
+		podName,
+		labels.SelectorFromSet(map[string]string{"name": podName}),
+		false,
+		replicas)
 	framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

 	// Create a deployment to delete webserver pods and instead bring up agnhost pods.
@@ -903,7 +917,13 @@ func testRolloverDeployment(ctx context.Context, f *framework.Framework) {
 	_, err := c.AppsV1().ReplicaSets(ns).Create(ctx, newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 	// Verify that the required pods have come up.
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, podName, false, rsReplicas)
+	err = e2epod.VerifyPodsRunning(ctx,
+		c,
+		ns,
+		podName,
+		labels.SelectorFromSet(map[string]string{"name": podName}),
+		false,
+		rsReplicas)
 	framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

 	// Wait for replica set to become ready before adopting it.
@@ -1202,7 +1222,7 @@ func testProportionalScalingDeployment(ctx context.Context, f *framework.Framework) {

 	// Verify that the required pods have come up.
 	framework.Logf("Waiting for all required pods to come up")
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, WebserverImageName, false, *(deployment.Spec.Replicas))
+	err = e2epod.VerifyPodsRunning(ctx, c, ns, WebserverImageName, labels.SelectorFromSet(podLabels), false, *(deployment.Spec.Replicas))
 	framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

 	framework.Logf("Waiting for deployment %q to complete", deployment.Name)

test/e2e/apps/disruption.go

Lines changed: 1 addition & 1 deletion
@@ -494,7 +494,7 @@ var _ = SIGDescribe("DisruptionController", func() {
 			waitForPdbToObserveHealthyPods(ctx, cs, ns, replicas, replicas-1)
 		} else {
 			ginkgo.By("Wait for pods to be running and not ready")
-			err := e2epod.VerifyPodsRunning(ctx, cs, ns, rsName, false, replicas)
+			err := e2epod.VerifyPodsRunning(ctx, cs, ns, rsName, labels.SelectorFromSet(rs.Labels), false, replicas)
 			framework.ExpectNoError(err)
 			waitForPdbToObserveHealthyPods(ctx, cs, ns, 0, replicas-1)
 		}

test/e2e/apps/rc.go

Lines changed: 9 additions & 6 deletions
@@ -28,6 +28,7 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	utilrand "k8s.io/apimachinery/pkg/util/rand"
@@ -485,21 +486,22 @@ func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageName
 // The image serves its hostname which is checked for each replica.
 func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework.Framework, test string, image string) {
 	name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
+	rcLabels := map[string]string{"name": name}
 	replicas := int32(1)

 	// Create a replication controller for a service
 	// that serves its hostname.
 	// The source for the Docker container kubernetes/serve_hostname is
 	// in contrib/for-demos/serve_hostname
 	ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))
-	newRC := newRC(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})
+	newRC := newRC(name, replicas, rcLabels, name, image, []string{"serve-hostname"})
 	newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
 	_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, newRC, metav1.CreateOptions{})
 	framework.ExpectNoError(err)

 	// Check that pods for the new RC were created.
 	// TODO: Maybe switch PodsCreated to just check owner references.
-	pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, name, replicas)
+	pods, err := e2epod.PodsCreatedByLabel(ctx, f.ClientSet, f.Namespace.Name, name, replicas, labels.SelectorFromSet(rcLabels))
 	framework.ExpectNoError(err)

 	// Wait for the pods to enter the running state and are Ready. Waiting loops until the pods
@@ -529,7 +531,7 @@ func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework.Framework, test string, image string) {

 	// Verify that something is listening.
 	framework.Logf("Trying to dial the pod")
-	framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, true, 2*time.Minute, pods))
+	framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, labels.SelectorFromSet(rcLabels), true, 2*time.Minute, pods))
 }

 // 1. Create a quota restricting pods in the current namespace to 2.
@@ -666,15 +668,16 @@ func testRCAdoptMatchingOrphans(ctx context.Context, f *framework.Framework) {

 func testRCReleaseControlledNotMatching(ctx context.Context, f *framework.Framework) {
 	name := "pod-release"
+	rcLabels := map[string]string{"name": name}
 	ginkgo.By("Given a ReplicationController is created")
 	replicas := int32(1)
-	rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
-	rcSt.Spec.Selector = map[string]string{"name": name}
+	rcSt := newRC(name, replicas, rcLabels, name, WebserverImage, nil)
+	rcSt.Spec.Selector = rcLabels
 	rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, rcSt, metav1.CreateOptions{})
 	framework.ExpectNoError(err)

 	ginkgo.By("When the matched label of one of its pods change")
-	pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rc.Name, replicas)
+	pods, err := e2epod.PodsCreatedByLabel(ctx, f.ClientSet, f.Namespace.Name, rc.Name, replicas, labels.SelectorFromSet(rcLabels))
 	framework.ExpectNoError(err)

 	p := pods.Items[0]
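
In test/e2e/apps/rc.go above (and test/e2e/apps/replica_set.go below) pod discovery changes in the same way: e2epod.PodsCreated is replaced by e2epod.PodsCreatedByLabel, and e2epod.WaitForPodsResponding also gains a selector argument. The following is a minimal sketch of the combined flow, assuming the helper signatures shown in these hunks; dialRCPods is an invented wrapper, not part of the commit.

package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// dialRCPods finds the pods created for a controller via its label set and
// checks that each one responds, as TestReplicationControllerServeImageOrFail does above.
func dialRCPods(ctx context.Context, f *framework.Framework, name string, replicas int32) {
	rcLabels := map[string]string{"name": name}
	sel := labels.SelectorFromSet(rcLabels)
	pods, err := e2epod.PodsCreatedByLabel(ctx, f.ClientSet, f.Namespace.Name, name, replicas, sel)
	framework.ExpectNoError(err)
	framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, sel, true, 2*time.Minute, pods))
}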

test/e2e/apps/replica_set.go

Lines changed: 22 additions & 18 deletions
@@ -183,20 +183,21 @@ var _ = SIGDescribe("ReplicaSet", func() {
 // image serves its hostname which is checked for each replica.
 func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework, test string, image string) {
 	name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
+	rsLabels := map[string]string{"name": name}
 	replicas := int32(1)

 	// Create a ReplicaSet for a service that serves its hostname.
 	// The source for the Docker container kubernetes/serve_hostname is
 	// in contrib/for-demos/serve_hostname
 	framework.Logf("Creating ReplicaSet %s", name)
-	newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})
+	newRS := newRS(name, replicas, rsLabels, name, image, []string{"serve-hostname"})
 	newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
 	_, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(ctx, newRS, metav1.CreateOptions{})
 	framework.ExpectNoError(err)

 	// Check that pods for the new RS were created.
 	// TODO: Maybe switch PodsCreated to just check owner references.
-	pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, name, replicas)
+	pods, err := e2epod.PodsCreatedByLabel(ctx, f.ClientSet, f.Namespace.Name, name, replicas, labels.SelectorFromSet(rsLabels))
 	framework.ExpectNoError(err)

 	// Wait for the pods to enter the running state. Waiting loops until the pods
@@ -226,7 +227,7 @@ func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework, test string, image string) {

 	// Verify that something is listening.
 	framework.Logf("Trying to dial the pod")
-	framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, true, 2*time.Minute, pods))
+	framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, labels.SelectorFromSet(rsLabels), true, 2*time.Minute, pods))
 }

 // 1. Create a quota restricting pods in the current namespace to 2.
@@ -317,13 +318,12 @@ func testReplicaSetConditionCheck(ctx context.Context, f *framework.Framework) {

 func testRSAdoptMatchingAndReleaseNotMatching(ctx context.Context, f *framework.Framework) {
 	name := "pod-adoption-release"
+	rsLabels := map[string]string{"name": name}
 	ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
 	p := e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
-			Labels: map[string]string{
-				"name": name,
-			},
+			Name:   name,
+			Labels: rsLabels,
 		},
 		Spec: v1.PodSpec{
 			Containers: []v1.Container{
@@ -337,8 +337,8 @@ func testRSAdoptMatchingAndReleaseNotMatching(ctx context.Context, f *framework.Framework) {

 	ginkgo.By("When a replicaset with a matching selector is created")
 	replicas := int32(1)
-	rsSt := newRS(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
-	rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
+	rsSt := newRS(name, replicas, rsLabels, name, WebserverImage, nil)
+	rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: rsLabels}
 	rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(ctx, rsSt, metav1.CreateOptions{})
 	framework.ExpectNoError(err)

@@ -362,7 +362,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(ctx context.Context, f *framework.Framework) {
 	framework.ExpectNoError(err)

 	ginkgo.By("When the matched label of one of its pods change")
-	pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rs.Name, replicas)
+	pods, err := e2epod.PodsCreatedByLabel(ctx, f.ClientSet, f.Namespace.Name, rs.Name, replicas, labels.SelectorFromSet(rsLabels))
 	framework.ExpectNoError(err)

 	p = &pods.Items[0]
@@ -403,8 +403,9 @@ func testRSScaleSubresources(ctx context.Context, f *framework.Framework) {
 	c := f.ClientSet

 	// Create webserver pods.
+	podName := "sample-pod"
 	rsPodLabels := map[string]string{
-		"name": "sample-pod",
+		"name": podName,
 		"pod":  WebserverImageName,
 	}

@@ -416,7 +417,7 @@ func testRSScaleSubresources(ctx context.Context, f *framework.Framework) {
 	framework.ExpectNoError(err)

 	// Verify that the required pods have come up.
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
+	err = e2epod.VerifyPodsRunning(ctx, c, ns, podName, labels.SelectorFromSet(map[string]string{"name": podName}), false, replicas)
 	framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)

 	ginkgo.By("getting scale subresource")
@@ -468,8 +469,9 @@ func testRSLifeCycle(ctx context.Context, f *framework.Framework) {
 	zero := int64(0)

 	// Create webserver pods.
+	podName := "sample-pod"
 	rsPodLabels := map[string]string{
-		"name": "sample-pod",
+		"name": podName,
 		"pod":  WebserverImageName,
 	}

@@ -494,7 +496,7 @@ func testRSLifeCycle(ctx context.Context, f *framework.Framework) {
 	framework.ExpectNoError(err)

 	// Verify that the required pods have come up.
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
+	err = e2epod.VerifyPodsRunning(ctx, c, ns, podName, labels.SelectorFromSet(map[string]string{"name": podName}), false, replicas)
 	framework.ExpectNoError(err, "Failed to create pods: %s", err)

 	// Scale the ReplicaSet
@@ -564,8 +566,9 @@ func listRSDeleteCollection(ctx context.Context, f *framework.Framework) {
 	e2eValue := rand.String(5)

 	// Define ReplicaSet Labels
+	podName := "sample-pod"
 	rsPodLabels := map[string]string{
-		"name": "sample-pod",
+		"name": podName,
 		"pod":  WebserverImageName,
 		"e2e":  e2eValue,
 	}
@@ -576,7 +579,7 @@ func listRSDeleteCollection(ctx context.Context, f *framework.Framework) {
 	framework.ExpectNoError(err)

 	ginkgo.By("Verify that the required pods have come up")
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
+	err = e2epod.VerifyPodsRunning(ctx, c, ns, podName, labels.SelectorFromSet(map[string]string{"name": podName}), false, replicas)
 	framework.ExpectNoError(err, "Failed to create pods: %s", err)
 	r, err := rsClient.Get(ctx, rsName, metav1.GetOptions{})
 	framework.ExpectNoError(err, "failed to get ReplicaSets")
@@ -603,8 +606,9 @@ func testRSStatus(ctx context.Context, f *framework.Framework) {
 	rsClient := c.AppsV1().ReplicaSets(ns)

 	// Define ReplicaSet Labels
+	podName := "sample-pod"
 	rsPodLabels := map[string]string{
-		"name": "sample-pod",
+		"name": podName,
 		"pod":  WebserverImageName,
 	}
 	labelSelector := labels.SelectorFromSet(rsPodLabels).String()
@@ -627,7 +631,7 @@ func testRSStatus(ctx context.Context, f *framework.Framework) {
 	framework.ExpectNoError(err)

 	ginkgo.By("Verify that the required pods have come up.")
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
+	err = e2epod.VerifyPodsRunning(ctx, c, ns, podName, labels.SelectorFromSet(map[string]string{"name": podName}), false, replicas)
 	framework.ExpectNoError(err, "Failed to create pods: %s", err)

 	ginkgo.By("Getting /status")

test/e2e/cloud/gcp/ha_master.go

Lines changed: 15 additions & 11 deletions
@@ -29,6 +29,7 @@ import (
 	"github.com/onsi/ginkgo/v2"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/feature"
@@ -75,14 +76,14 @@ func removeWorkerNodes(zone string) error {
 	return nil
 }

-func verifyRCs(ctx context.Context, c clientset.Interface, ns string, names []string) {
-	for _, name := range names {
-		framework.ExpectNoError(e2epod.VerifyPods(ctx, c, ns, name, true, 1))
+func verifyRCs(ctx context.Context, c clientset.Interface, ns string, labelSets []map[string]string) {
+	for _, rcLabels := range labelSets {
+		framework.ExpectNoError(e2epod.VerifyPods(ctx, c, ns, labels.FormatLabels(rcLabels), labels.SelectorFromSet(rcLabels), true, 1))
 	}
 }

-func createNewRC(c clientset.Interface, ns string, name string) {
-	_, err := common.NewRCByName(c, ns, name, 1, nil, nil)
+func createNewRC(c clientset.Interface, ns string, name string, rcLabels map[string]string) {
+	_, err := common.NewRCByName(c, ns, name, 1, nil, nil, rcLabels)
 	framework.ExpectNoError(err)
 }

@@ -168,15 +169,15 @@ var _ = SIGDescribe("HA-master", feature.HAMaster, func() {
 	var ns string
 	var additionalReplicaZones []string
 	var additionalNodesZones []string
-	var existingRCs []string
+	var existingRCLabelSets []map[string]string

 	ginkgo.BeforeEach(func(ctx context.Context) {
 		e2eskipper.SkipUnlessProviderIs("gce")
 		c = f.ClientSet
 		ns = f.Namespace.Name
 		framework.ExpectNoError(waitForMasters(ctx, framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
 		additionalReplicaZones = make([]string, 0)
-		existingRCs = make([]string, 0)
+		existingRCLabelSets = make([]map[string]string, 0)
 	})

 	ginkgo.AfterEach(func(ctx context.Context) {
@@ -222,10 +223,13 @@ var _ = SIGDescribe("HA-master", feature.HAMaster, func() {
 		framework.ExpectNoError(e2enode.AllNodesReady(ctx, c, 5*time.Minute))

 		// Verify that API server works correctly with HA master.
-		rcName := "ha-master-" + strconv.Itoa(len(existingRCs))
-		createNewRC(c, ns, rcName)
-		existingRCs = append(existingRCs, rcName)
-		verifyRCs(ctx, c, ns, existingRCs)
+		rcName := "ha-master-" + strconv.Itoa(len(existingRCLabelSets))
+		rcLabels := map[string]string{"name": rcName}
+
+		createNewRC(c, ns, rcName, rcLabels)
+		existingRCLabelSets = append(existingRCLabelSets, rcLabels)
+
+		verifyRCs(ctx, c, ns, existingRCLabelSets)
 	}

 	f.It("survive addition/removal replicas same zone", f.WithSerial(), f.WithDisruptive(), func(ctx context.Context) {
