Skip to content

Commit e15fd43

Browse files
committed
test(network): replace calls to e2erc.RunRC with Deployments in service latency tests
See kubernetes#119021
1 parent cd0df97 commit e15fd43

File tree

1 file changed

+10
-15
lines changed

1 file changed

+10
-15
lines changed

test/e2e/network/service_latency.go

Lines changed: 10 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ import (
2323
"strings"
2424
"time"
2525

26+
appsv1 "k8s.io/api/apps/v1"
2627
v1 "k8s.io/api/core/v1"
2728
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2829
"k8s.io/apimachinery/pkg/runtime"
@@ -32,9 +33,8 @@ import (
3233
"k8s.io/client-go/tools/cache"
3334
"k8s.io/client-go/util/flowcontrol"
3435
"k8s.io/kubernetes/test/e2e/framework"
35-
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
36+
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
3637
"k8s.io/kubernetes/test/e2e/network/common"
37-
testutils "k8s.io/kubernetes/test/utils"
3838
imageutils "k8s.io/kubernetes/test/utils/image"
3939
admissionapi "k8s.io/pod-security-admission/api"
4040

@@ -135,18 +135,13 @@ var _ = common.SIGDescribe("Service endpoints latency", func() {
135135
})
136136

137137
func runServiceLatencies(ctx context.Context, f *framework.Framework, inParallel, total int, acceptableFailureRatio float32) (output []time.Duration, err error) {
138-
cfg := testutils.RCConfig{
139-
Client: f.ClientSet,
140-
Image: imageutils.GetPauseImageName(),
141-
Name: "svc-latency-rc",
142-
Namespace: f.Namespace.Name,
143-
Replicas: 1,
144-
PollInterval: time.Second,
145-
}
146-
if err := e2erc.RunRC(ctx, cfg); err != nil {
147-
return nil, err
148-
}
138+
name := "svc-latency-rc"
139+
deploymentConf := e2edeployment.NewDeployment(name, 1, map[string]string{"name": name}, name, imageutils.GetPauseImageName(), appsv1.RecreateDeploymentStrategyType)
140+
deployment, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(ctx, deploymentConf, metav1.CreateOptions{})
141+
framework.ExpectNoError(err)
149142

143+
err = e2edeployment.WaitForDeploymentComplete(f.ClientSet, deployment)
144+
framework.ExpectNoError(err)
150145
// Run a single watcher, to reduce the number of API calls we have to
151146
// make; this is to minimize the timing error. It's how kube-proxy
152147
// consumes the endpoints data, so it seems like the right thing to
@@ -157,7 +152,7 @@ func runServiceLatencies(ctx context.Context, f *framework.Framework, inParallel
157152

158153
// run one test and throw it away-- this is to make sure that the pod's
159154
// ready status has propagated.
160-
_, err = singleServiceLatency(ctx, f, cfg.Name, endpointQueries)
155+
_, err = singleServiceLatency(ctx, f, name, endpointQueries)
161156
framework.ExpectNoError(err)
162157

163158
// These channels are never closed, and each attempt sends on exactly
@@ -172,7 +167,7 @@ func runServiceLatencies(ctx context.Context, f *framework.Framework, inParallel
172167
defer ginkgo.GinkgoRecover()
173168
blocker <- struct{}{}
174169
defer func() { <-blocker }()
175-
if d, err := singleServiceLatency(ctx, f, cfg.Name, endpointQueries); err != nil {
170+
if d, err := singleServiceLatency(ctx, f, name, endpointQueries); err != nil {
176171
errs <- err
177172
} else {
178173
durations <- d

0 commit comments

Comments (0)