Commit afc66a5

Move scalability, upgrade, and common packages to framework/log
This is part of the transition to using framework/log instead of the Logf function inside the framework package. It helps keep import sizes down and avoids import cycles when importing the framework or its subpackages.
1 parent d5245b9 commit afc66a5

21 files changed (+168, -146 lines)
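
The change is mechanical in every touched file: the "//test/e2e/framework/log" dependency is added to the BUILD rule, the package is imported as e2elog, and each framework.Logf call becomes e2elog.Logf. A minimal sketch of the resulting call style follows; reportReplicas is a hypothetical helper for illustration only (not a function from this commit), and it assumes e2elog.Logf keeps the same printf-style signature as framework.Logf, as the hunks below show.

package common

import (
    // Logging now comes from the narrower framework/log package.
    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// reportReplicas is a hypothetical helper showing the post-commit style:
// call sites change only the import and the package qualifier.
func reportReplicas(desired, current int) {
    e2elog.Logf("waiting for %d replicas (current: %d)", desired, current)
}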

test/e2e/common/BUILD

Lines changed: 1 addition & 0 deletions
@@ -78,6 +78,7 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/deployment:go_default_library",
+        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/replicaset:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
         "//test/utils:go_default_library",

test/e2e/common/autoscaling_utils.go

Lines changed: 25 additions & 24 deletions
@@ -24,7 +24,7 @@ import (
     "time"
 
     autoscalingv1 "k8s.io/api/autoscaling/v1"
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/util/intstr"
@@ -33,6 +33,7 @@ import (
     api "k8s.io/kubernetes/pkg/apis/core"
     "k8s.io/kubernetes/test/e2e/framework"
     e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     "k8s.io/kubernetes/test/e2e/framework/replicaset"
     testutils "k8s.io/kubernetes/test/utils"
 
@@ -170,19 +171,19 @@ func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, repl
 
 // ConsumeCPU consumes given number of CPU
 func (rc *ResourceConsumer) ConsumeCPU(millicores int) {
-    framework.Logf("RC %s: consume %v millicores in total", rc.name, millicores)
+    e2elog.Logf("RC %s: consume %v millicores in total", rc.name, millicores)
     rc.cpu <- millicores
 }
 
 // ConsumeMem consumes given number of Mem
 func (rc *ResourceConsumer) ConsumeMem(megabytes int) {
-    framework.Logf("RC %s: consume %v MB in total", rc.name, megabytes)
+    e2elog.Logf("RC %s: consume %v MB in total", rc.name, megabytes)
     rc.mem <- megabytes
 }
 
 // ConsumeMem consumes given number of custom metric
 func (rc *ResourceConsumer) ConsumeCustomMetric(amount int) {
-    framework.Logf("RC %s: consume custom metric %v in total", rc.name, amount)
+    e2elog.Logf("RC %s: consume custom metric %v in total", rc.name, amount)
     rc.customMetric <- amount
 }
 
@@ -195,13 +196,13 @@ func (rc *ResourceConsumer) makeConsumeCPURequests() {
     for {
         select {
         case millicores = <-rc.cpu:
-            framework.Logf("RC %s: setting consumption to %v millicores in total", rc.name, millicores)
+            e2elog.Logf("RC %s: setting consumption to %v millicores in total", rc.name, millicores)
         case <-time.After(sleepTime):
-            framework.Logf("RC %s: sending request to consume %d millicores", rc.name, millicores)
+            e2elog.Logf("RC %s: sending request to consume %d millicores", rc.name, millicores)
             rc.sendConsumeCPURequest(millicores)
             sleepTime = rc.sleepTime
         case <-rc.stopCPU:
-            framework.Logf("RC %s: stopping CPU consumer", rc.name)
+            e2elog.Logf("RC %s: stopping CPU consumer", rc.name)
             return
         }
     }
@@ -216,13 +217,13 @@ func (rc *ResourceConsumer) makeConsumeMemRequests() {
     for {
         select {
         case megabytes = <-rc.mem:
-            framework.Logf("RC %s: setting consumption to %v MB in total", rc.name, megabytes)
+            e2elog.Logf("RC %s: setting consumption to %v MB in total", rc.name, megabytes)
         case <-time.After(sleepTime):
-            framework.Logf("RC %s: sending request to consume %d MB", rc.name, megabytes)
+            e2elog.Logf("RC %s: sending request to consume %d MB", rc.name, megabytes)
             rc.sendConsumeMemRequest(megabytes)
             sleepTime = rc.sleepTime
         case <-rc.stopMem:
-            framework.Logf("RC %s: stopping mem consumer", rc.name)
+            e2elog.Logf("RC %s: stopping mem consumer", rc.name)
             return
         }
     }
@@ -237,13 +238,13 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() {
     for {
         select {
         case delta = <-rc.customMetric:
-            framework.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta)
+            e2elog.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta)
         case <-time.After(sleepTime):
-            framework.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName)
+            e2elog.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName)
             rc.sendConsumeCustomMetric(delta)
             sleepTime = rc.sleepTime
         case <-rc.stopCustomMetric:
-            framework.Logf("RC %s: stopping metric consumer", rc.name)
+            e2elog.Logf("RC %s: stopping metric consumer", rc.name)
             return
         }
     }
@@ -263,10 +264,10 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
         Param("millicores", strconv.Itoa(millicores)).
         Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
         Param("requestSizeMillicores", strconv.Itoa(rc.requestSizeInMillicores))
-    framework.Logf("ConsumeCPU URL: %v", *req.URL())
+    e2elog.Logf("ConsumeCPU URL: %v", *req.URL())
     _, err = req.DoRaw()
     if err != nil {
-        framework.Logf("ConsumeCPU failure: %v", err)
+        e2elog.Logf("ConsumeCPU failure: %v", err)
         return false, nil
     }
     return true, nil
@@ -290,10 +291,10 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
         Param("megabytes", strconv.Itoa(megabytes)).
         Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
         Param("requestSizeMegabytes", strconv.Itoa(rc.requestSizeInMegabytes))
-    framework.Logf("ConsumeMem URL: %v", *req.URL())
+    e2elog.Logf("ConsumeMem URL: %v", *req.URL())
     _, err = req.DoRaw()
     if err != nil {
-        framework.Logf("ConsumeMem failure: %v", err)
+        e2elog.Logf("ConsumeMem failure: %v", err)
         return false, nil
     }
     return true, nil
@@ -318,10 +319,10 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
         Param("delta", strconv.Itoa(delta)).
         Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
         Param("requestSizeMetrics", strconv.Itoa(rc.requestSizeCustomMetric))
-    framework.Logf("ConsumeCustomMetric URL: %v", *req.URL())
+    e2elog.Logf("ConsumeCustomMetric URL: %v", *req.URL())
     _, err = req.DoRaw()
     if err != nil {
-        framework.Logf("ConsumeCustomMetric failure: %v", err)
+        e2elog.Logf("ConsumeCustomMetric failure: %v", err)
         return false, nil
     }
     return true, nil
@@ -366,7 +367,7 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.D
     interval := 20 * time.Second
     err := wait.PollImmediate(interval, duration, func() (bool, error) {
         replicas := rc.GetReplicas()
-        framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
+        e2elog.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
         return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
     })
     framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)
@@ -380,12 +381,12 @@ func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, max
     interval := 10 * time.Second
     err := wait.PollImmediate(interval, duration, func() (bool, error) {
         replicas := rc.GetReplicas()
-        framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
+        e2elog.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
         as, err := rc.GetHpa(hpaName)
         if err != nil {
-            framework.Logf("Error getting HPA: %s", err)
+            e2elog.Logf("Error getting HPA: %s", err)
         } else {
-            framework.Logf("HPA status: %+v", as.Status)
+            e2elog.Logf("HPA status: %+v", as.Status)
         }
         if replicas < minDesiredReplicas {
             return false, fmt.Errorf("number of replicas below target")
@@ -397,7 +398,7 @@ func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, max
     })
     // The call above always returns an error, but if it is timeout, it's OK (condition satisfied all the time).
     if err == wait.ErrWaitTimeout {
-        framework.Logf("Number of replicas was stable over %v", duration)
+        e2elog.Logf("Number of replicas was stable over %v", duration)
         return
     }
     framework.ExpectNoErrorWithOffset(1, err)

test/e2e/common/container_probe.go

Lines changed: 5 additions & 4 deletions
@@ -29,6 +29,7 @@ import (
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     "k8s.io/kubernetes/pkg/kubelet/events"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     testutils "k8s.io/kubernetes/test/utils"
 
     . "github.com/onsi/ginkgo"
@@ -74,7 +75,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
         startedTime, err := getContainerStartedTime(p, probTestContainerName)
         framework.ExpectNoError(err)
 
-        framework.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
+        e2elog.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
         initialDelay := probTestInitialDelaySeconds * time.Second
         if readyTime.Sub(startedTime) < initialDelay {
             framework.Failf("Pod became ready before it's %v initial delay", initialDelay)
@@ -484,14 +485,14 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
     // 'Terminated' which can cause indefinite blocking.)
     framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name),
         fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
-    framework.Logf("Started pod %s in namespace %s", pod.Name, ns)
+    e2elog.Logf("Started pod %s in namespace %s", pod.Name, ns)
 
     // Check the pod's current state and verify that restartCount is present.
     By("checking the pod's current state and verifying that restartCount is present")
     pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
     framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
     initialRestartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
-    framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
+    e2elog.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
 
     // Wait for the restart state to be as desired.
     deadline := time.Now().Add(timeout)
@@ -502,7 +503,7 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
         framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
         restartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
         if restartCount != lastRestartCount {
-            framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
+            e2elog.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
                 ns, pod.Name, restartCount, time.Since(start))
             if restartCount < lastRestartCount {
                 framework.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",

test/e2e/common/expansion.go

Lines changed: 10 additions & 8 deletions
@@ -18,13 +18,15 @@ package common
 
 import (
     "fmt"
-    "k8s.io/api/core/v1"
+    "time"
+
+    v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     imageutils "k8s.io/kubernetes/test/utils/image"
-    "time"
 
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
@@ -658,7 +660,7 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
     By("Failing liveness probe")
     stdout, stderr, err := f.ExecShellInPodWithFullOutput(pod.Name, fmt.Sprintf("rm %v", volumeMount))
 
-    framework.Logf("Pod exec output: %v / %v", stdout, stderr)
+    e2elog.Logf("Pod exec output: %v / %v", stdout, stderr)
     Expect(err).ToNot(HaveOccurred(), "while failing liveness probe")
 
     // Check that container has restarted
@@ -671,10 +673,10 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
         }
         for _, status := range pod.Status.ContainerStatuses {
             if status.Name == pod.Spec.Containers[0].Name {
-                framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
+                e2elog.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
                 restarts = status.RestartCount
                 if restarts > 0 {
-                    framework.Logf("Container has restart count: %v", restarts)
+                    e2elog.Logf("Container has restart count: %v", restarts)
                     return true, nil
                 }
             }
@@ -686,7 +688,7 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
     // Fix liveness probe
     By("Rewriting the file")
     stdout, _, err = f.ExecShellInPodWithFullOutput(pod.Name, fmt.Sprintf("echo test-after > %v", volumeMount))
-    framework.Logf("Pod exec output: %v", stdout)
+    e2elog.Logf("Pod exec output: %v", stdout)
     Expect(err).ToNot(HaveOccurred(), "while rewriting the probe file")
 
     // Wait for container restarts to stabilize
@@ -703,13 +705,13 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
             if status.RestartCount == restarts {
                 stableCount++
                 if stableCount > stableThreshold {
-                    framework.Logf("Container restart has stabilized")
+                    e2elog.Logf("Container restart has stabilized")
                     return true, nil
                 }
             } else {
                 restarts = status.RestartCount
                 stableCount = 0
-                framework.Logf("Container has restart count: %v", restarts)
+                e2elog.Logf("Container has restart count: %v", restarts)
             }
             break
         }

test/e2e/common/init_container.go

Lines changed: 7 additions & 6 deletions
@@ -22,7 +22,7 @@ import (
     "strconv"
     "time"
 
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/uuid"
@@ -31,6 +31,7 @@ import (
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     "k8s.io/kubernetes/pkg/client/conditions"
    "k8s.io/kubernetes/test/e2e/framework"
+    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     imageutils "k8s.io/kubernetes/test/utils/image"
 
     . "github.com/onsi/ginkgo"
@@ -87,7 +88,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
                 },
             },
         }
-        framework.Logf("PodSpec: initContainers in spec.initContainers")
+        e2elog.Logf("PodSpec: initContainers in spec.initContainers")
         startedPod := podClient.Create(pod)
         w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
         Expect(err).NotTo(HaveOccurred(), "error watching a pod")
@@ -158,7 +159,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
                 },
             },
         }
-        framework.Logf("PodSpec: initContainers in spec.initContainers")
+        e2elog.Logf("PodSpec: initContainers in spec.initContainers")
         startedPod := podClient.Create(pod)
         w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
         Expect(err).NotTo(HaveOccurred(), "error watching a pod")
@@ -230,7 +231,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
                 },
             },
         }
-        framework.Logf("PodSpec: initContainers in spec.initContainers")
+        e2elog.Logf("PodSpec: initContainers in spec.initContainers")
         startedPod := podClient.Create(pod)
         w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
         Expect(err).NotTo(HaveOccurred(), "error watching a pod")
@@ -280,7 +281,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
                     if status.RestartCount < 3 {
                         return false, nil
                     }
-                    framework.Logf("init container has failed twice: %#v", t)
+                    e2elog.Logf("init container has failed twice: %#v", t)
                     // TODO: more conditions
                     return true, nil
                 default:
@@ -347,7 +348,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
                 },
             },
         }
-        framework.Logf("PodSpec: initContainers in spec.initContainers")
+        e2elog.Logf("PodSpec: initContainers in spec.initContainers")
         startedPod := podClient.Create(pod)
 
         w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))

test/e2e/common/node_lease.go

Lines changed: 6 additions & 5 deletions
@@ -28,6 +28,7 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     testutils "k8s.io/kubernetes/test/utils"
 
     . "github.com/onsi/ginkgo"
@@ -115,23 +116,23 @@ var _ = framework.KubeDescribe("NodeLease", func() {
             if currentHeartbeatTime == lastHeartbeatTime {
                 if currentObserved.Sub(lastObserved) > 2*leaseDuration {
                     // heartbeat hasn't changed while watching for at least 2*leaseDuration, success!
-                    framework.Logf("node status heartbeat is unchanged for %s, was waiting for at least %s, success!", currentObserved.Sub(lastObserved), 2*leaseDuration)
+                    e2elog.Logf("node status heartbeat is unchanged for %s, was waiting for at least %s, success!", currentObserved.Sub(lastObserved), 2*leaseDuration)
                     return true, nil
                 }
-                framework.Logf("node status heartbeat is unchanged for %s, waiting for %s", currentObserved.Sub(lastObserved), 2*leaseDuration)
+                e2elog.Logf("node status heartbeat is unchanged for %s, waiting for %s", currentObserved.Sub(lastObserved), 2*leaseDuration)
                 return false, nil
             }
 
             if currentHeartbeatTime.Sub(lastHeartbeatTime) >= leaseDuration {
                 // heartbeat time changed, but the diff was greater than leaseDuration, success!
-                framework.Logf("node status heartbeat changed in %s, was waiting for at least %s, success!", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
+                e2elog.Logf("node status heartbeat changed in %s, was waiting for at least %s, success!", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
                 return true, nil
             }
 
             if !apiequality.Semantic.DeepEqual(lastStatus, currentStatus) {
                 // heartbeat time changed, but there were relevant changes in the status, keep waiting
-                framework.Logf("node status heartbeat changed in %s (with other status changes), waiting for %s", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
-                framework.Logf("%s", diff.ObjectReflectDiff(lastStatus, currentStatus))
+                e2elog.Logf("node status heartbeat changed in %s (with other status changes), waiting for %s", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
+                e2elog.Logf("%s", diff.ObjectReflectDiff(lastStatus, currentStatus))
                 lastHeartbeatTime = currentHeartbeatTime
                 lastObserved = currentObserved
                 lastStatus = currentStatus
