@@ -23,6 +23,7 @@ import (
 	"strings"
 	"time"
 
+	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -32,9 +33,8 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/flowcontrol"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
+	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 	"k8s.io/kubernetes/test/e2e/network/common"
-	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	admissionapi "k8s.io/pod-security-admission/api"
 
@@ -135,18 +135,13 @@ var _ = common.SIGDescribe("Service endpoints latency", func() {
 })
 
 func runServiceLatencies(ctx context.Context, f *framework.Framework, inParallel, total int, acceptableFailureRatio float32) (output []time.Duration, err error) {
-	cfg := testutils.RCConfig{
-		Client:       f.ClientSet,
-		Image:        imageutils.GetPauseImageName(),
-		Name:         "svc-latency-rc",
-		Namespace:    f.Namespace.Name,
-		Replicas:     1,
-		PollInterval: time.Second,
-	}
-	if err := e2erc.RunRC(ctx, cfg); err != nil {
-		return nil, err
-	}
+	name := "svc-latency-rc"
+	deploymentConf := e2edeployment.NewDeployment(name, 1, map[string]string{"name": name}, name, imageutils.GetPauseImageName(), appsv1.RecreateDeploymentStrategyType)
+	deployment, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(ctx, deploymentConf, metav1.CreateOptions{})
+	framework.ExpectNoError(err)
 
+	err = e2edeployment.WaitForDeploymentComplete(f.ClientSet, deployment)
+	framework.ExpectNoError(err)
 	// Run a single watcher, to reduce the number of API calls we have to
 	// make; this is to minimize the timing error. It's how kube-proxy
 	// consumes the endpoints data, so it seems like the right thing to
@@ -157,7 +152,7 @@ func runServiceLatencies(ctx context.Context, f *framework.Framework, inParallel
 
 	// run one test and throw it away-- this is to make sure that the pod's
 	// ready status has propagated.
-	_, err = singleServiceLatency(ctx, f, cfg.Name, endpointQueries)
+	_, err = singleServiceLatency(ctx, f, name, endpointQueries)
 	framework.ExpectNoError(err)
 
 	// These channels are never closed, and each attempt sends on exactly
@@ -172,7 +167,7 @@ func runServiceLatencies(ctx context.Context, f *framework.Framework, inParallel
 			defer ginkgo.GinkgoRecover()
 			blocker <- struct{}{}
 			defer func() { <-blocker }()
-			if d, err := singleServiceLatency(ctx, f, cfg.Name, endpointQueries); err != nil {
+			if d, err := singleServiceLatency(ctx, f, name, endpointQueries); err != nil {
 				errs <- err
 			} else {
 				durations <- d