Commit 0c68aa6

test(network): fix jig deployment scaling

1 parent d728eb4 commit 0c68aa6

2 files changed: +16, -60 lines

test/e2e/framework/service/jig.go

Lines changed: 12 additions & 56 deletions
@@ -20,8 +20,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	appsv1 "k8s.io/api/apps/v1"
-	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 	"math/rand"
 	"net"
 	"strconv"
@@ -30,26 +28,27 @@ import (
 	"time"
 
 	"github.com/onsi/ginkgo/v2"
+	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	discoveryv1 "k8s.io/api/discovery/v1"
 	policyv1 "k8s.io/api/policy/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
-	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	netutils "k8s.io/utils/net"
+	"k8s.io/utils/ptr"
 )
 
 // NodePortRange should match whatever the default/configured range is
@@ -782,26 +781,20 @@ func (j *TestJig) Run(ctx context.Context, tweak func(rc *appsv1.Deployment)) (*
 }
 
 // Scale scales pods to the given replicas
-func (j *TestJig) Scale(ctx context.Context, replicas int) error {
-	rc := j.Name
-	scale, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).GetScale(ctx, rc, metav1.GetOptions{})
+func (j *TestJig) Scale(replicas int) error {
+	deployment, err := e2edeployment.UpdateDeploymentWithRetries(j.Client, j.Namespace, j.Name, func(deployment *appsv1.Deployment) {
+		deployment.Spec.Replicas = ptr.To(int32(replicas))
+	})
 	if err != nil {
-		return fmt.Errorf("failed to get scale for RC %q: %w", rc, err)
+		return fmt.Errorf("failed to scale Deployment %q: %w", j.Name, err)
 	}
 
-	scale.ResourceVersion = "" // indicate the scale update should be unconditional
-	scale.Spec.Replicas = int32(replicas)
-	_, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(ctx, rc, scale, metav1.UpdateOptions{})
-	if err != nil {
-		return fmt.Errorf("failed to scale RC %q: %w", rc, err)
-	}
-	pods, err := j.waitForPodsCreated(ctx, replicas)
+	err = e2edeployment.WaitForDeploymentComplete(j.Client, deployment)
+
 	if err != nil {
-		return fmt.Errorf("failed waiting for pods: %w", err)
-	}
-	if err := j.waitForPodsReady(ctx, pods); err != nil {
-		return fmt.Errorf("failed waiting for pods to be running: %w", err)
+		return fmt.Errorf("failed waiting for Deployment %q: %w", j.Name, err)
 	}
+
 	return nil
 }

@@ -820,43 +813,6 @@ func (j *TestJig) waitForPdbReady(ctx context.Context) error {
 	return fmt.Errorf("timeout waiting for PDB %q to be ready", j.Name)
 }
 
-func (j *TestJig) waitForPodsCreated(ctx context.Context, replicas int) ([]string, error) {
-	// TODO (pohly): replace with gomega.Eventually
-	timeout := 2 * time.Minute
-	// List the pods, making sure we observe all the replicas.
-	label := labels.SelectorFromSet(labels.Set(j.Labels))
-	framework.Logf("Waiting up to %v for %d pods to be created", timeout, replicas)
-	for start := time.Now(); time.Since(start) < timeout && ctx.Err() == nil; time.Sleep(2 * time.Second) {
-		options := metav1.ListOptions{LabelSelector: label.String()}
-		pods, err := j.Client.CoreV1().Pods(j.Namespace).List(ctx, options)
-		if err != nil {
-			return nil, err
-		}
-
-		found := []string{}
-		for _, pod := range pods.Items {
-			if pod.DeletionTimestamp != nil {
-				continue
-			}
-			found = append(found, pod.Name)
-		}
-		if len(found) == replicas {
-			framework.Logf("Found all %d pods", replicas)
-			return found, nil
-		}
-		framework.Logf("Found %d/%d pods - will retry", len(found), replicas)
-	}
-	return nil, fmt.Errorf("timeout waiting for %d pods to be created", replicas)
-}
-
-func (j *TestJig) waitForPodsReady(ctx context.Context, pods []string) error {
-	timeout := 2 * time.Minute
-	if !e2epod.CheckPodsRunningReady(ctx, j.Client, j.Namespace, pods, timeout) {
-		return fmt.Errorf("timeout waiting for %d pods to be ready", len(pods))
-	}
-	return nil
-}
-
 func testReachabilityOverServiceName(ctx context.Context, serviceName string, sp v1.ServicePort, execPod *v1.Pod) error {
 	return testEndpointReachability(ctx, serviceName, sp.Port, sp.Protocol, execPod)
 }
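For readers outside the e2e framework, the same conflict-retrying scale update can be expressed with plain client-go. This is a minimal sketch, not part of this commit: scaleDeployment is a hypothetical helper, and it assumes a kubernetes.Interface client plus the standard retry and ptr utility packages.

package scaling

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
	"k8s.io/utils/ptr"
)

// scaleDeployment sets .spec.replicas on a Deployment, retrying on write
// conflicts -- roughly what e2edeployment.UpdateDeploymentWithRetries does
// inside the framework.
func scaleDeployment(ctx context.Context, c kubernetes.Interface, namespace, name string, replicas int32) (*appsv1.Deployment, error) {
	var deployment *appsv1.Deployment
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Re-read the latest object on every attempt so a stale
		// resourceVersion cannot permanently block the update.
		d, err := c.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		d.Spec.Replicas = ptr.To(replicas)
		deployment, err = c.AppsV1().Deployments(namespace).Update(ctx, d, metav1.UpdateOptions{})
		return err
	})
	if err != nil {
		return nil, fmt.Errorf("failed to scale Deployment %q: %w", name, err)
	}
	return deployment, nil
}

Waiting for the rollout afterwards would then poll the Deployment status (observedGeneration, updated/available replicas), which is what the framework's WaitForDeploymentComplete encapsulates.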

test/e2e/network/loadbalancer.go

Lines changed: 4 additions & 4 deletions
@@ -235,14 +235,14 @@ var _ = common.SIGDescribe("LoadBalancers", feature.LoadBalancer, func() {
 		e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout)
 
 		ginkgo.By("Scaling the pods to 0")
-		err = tcpJig.Scale(ctx, 0)
+		err = tcpJig.Scale(0)
 		framework.ExpectNoError(err)
 
 		ginkgo.By("hitting the TCP service's LoadBalancer with no backends, no answer expected")
 		testNotReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout)
 
 		ginkgo.By("Scaling the pods to 1")
-		err = tcpJig.Scale(ctx, 1)
+		err = tcpJig.Scale(1)
 		framework.ExpectNoError(err)
 
 		ginkgo.By("hitting the TCP service's LoadBalancer")
@@ -383,14 +383,14 @@ var _ = common.SIGDescribe("LoadBalancers", feature.LoadBalancer, func() {
 		testReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerCreateTimeout)
 
 		ginkgo.By("Scaling the pods to 0")
-		err = udpJig.Scale(ctx, 0)
+		err = udpJig.Scale(0)
 		framework.ExpectNoError(err)
 
 		ginkgo.By("checking that the UDP service's LoadBalancer is not reachable")
 		testNotReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerCreateTimeout)
 
 		ginkgo.By("Scaling the pods to 1")
-		err = udpJig.Scale(ctx, 1)
+		err = udpJig.Scale(1)
 		framework.ExpectNoError(err)
 
 		ginkgo.By("hitting the UDP service's NodePort")
