Skip to content

Commit 9ed2f63

Browse files
committed
use log functions of core framework
1 parent 45f7f70 commit 9ed2f63

18 files changed

+142
-160
lines changed

test/e2e/common/BUILD

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,6 @@ go_library(
7979
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
8080
"//test/e2e/framework:go_default_library",
8181
"//test/e2e/framework/kubelet:go_default_library",
82-
"//test/e2e/framework/log:go_default_library",
8382
"//test/e2e/framework/node:go_default_library",
8483
"//test/e2e/framework/pod:go_default_library",
8584
"//test/e2e/framework/replicaset:go_default_library",

test/e2e/common/autoscaling_utils.go

Lines changed: 28 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,6 @@ import (
3232
clientset "k8s.io/client-go/kubernetes"
3333
api "k8s.io/kubernetes/pkg/apis/core"
3434
"k8s.io/kubernetes/test/e2e/framework"
35-
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
3635
"k8s.io/kubernetes/test/e2e/framework/replicaset"
3736
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
3837
testutils "k8s.io/kubernetes/test/utils"
@@ -171,19 +170,19 @@ func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, repl
171170

172171
// ConsumeCPU consumes given number of CPU
173172
func (rc *ResourceConsumer) ConsumeCPU(millicores int) {
174-
e2elog.Logf("RC %s: consume %v millicores in total", rc.name, millicores)
173+
framework.Logf("RC %s: consume %v millicores in total", rc.name, millicores)
175174
rc.cpu <- millicores
176175
}
177176

178177
// ConsumeMem consumes given number of Mem
179178
func (rc *ResourceConsumer) ConsumeMem(megabytes int) {
180-
e2elog.Logf("RC %s: consume %v MB in total", rc.name, megabytes)
179+
framework.Logf("RC %s: consume %v MB in total", rc.name, megabytes)
181180
rc.mem <- megabytes
182181
}
183182

184183
// ConsumeCustomMetric consumes given number of custom metric
185184
func (rc *ResourceConsumer) ConsumeCustomMetric(amount int) {
186-
e2elog.Logf("RC %s: consume custom metric %v in total", rc.name, amount)
185+
framework.Logf("RC %s: consume custom metric %v in total", rc.name, amount)
187186
rc.customMetric <- amount
188187
}
189188

@@ -196,13 +195,13 @@ func (rc *ResourceConsumer) makeConsumeCPURequests() {
196195
for {
197196
select {
198197
case millicores = <-rc.cpu:
199-
e2elog.Logf("RC %s: setting consumption to %v millicores in total", rc.name, millicores)
198+
framework.Logf("RC %s: setting consumption to %v millicores in total", rc.name, millicores)
200199
case <-time.After(sleepTime):
201-
e2elog.Logf("RC %s: sending request to consume %d millicores", rc.name, millicores)
200+
framework.Logf("RC %s: sending request to consume %d millicores", rc.name, millicores)
202201
rc.sendConsumeCPURequest(millicores)
203202
sleepTime = rc.sleepTime
204203
case <-rc.stopCPU:
205-
e2elog.Logf("RC %s: stopping CPU consumer", rc.name)
204+
framework.Logf("RC %s: stopping CPU consumer", rc.name)
206205
return
207206
}
208207
}
@@ -217,13 +216,13 @@ func (rc *ResourceConsumer) makeConsumeMemRequests() {
217216
for {
218217
select {
219218
case megabytes = <-rc.mem:
220-
e2elog.Logf("RC %s: setting consumption to %v MB in total", rc.name, megabytes)
219+
framework.Logf("RC %s: setting consumption to %v MB in total", rc.name, megabytes)
221220
case <-time.After(sleepTime):
222-
e2elog.Logf("RC %s: sending request to consume %d MB", rc.name, megabytes)
221+
framework.Logf("RC %s: sending request to consume %d MB", rc.name, megabytes)
223222
rc.sendConsumeMemRequest(megabytes)
224223
sleepTime = rc.sleepTime
225224
case <-rc.stopMem:
226-
e2elog.Logf("RC %s: stopping mem consumer", rc.name)
225+
framework.Logf("RC %s: stopping mem consumer", rc.name)
227226
return
228227
}
229228
}
@@ -238,13 +237,13 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() {
238237
for {
239238
select {
240239
case delta = <-rc.customMetric:
241-
e2elog.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta)
240+
framework.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta)
242241
case <-time.After(sleepTime):
243-
e2elog.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName)
242+
framework.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName)
244243
rc.sendConsumeCustomMetric(delta)
245244
sleepTime = rc.sleepTime
246245
case <-rc.stopCustomMetric:
247-
e2elog.Logf("RC %s: stopping metric consumer", rc.name)
246+
framework.Logf("RC %s: stopping metric consumer", rc.name)
248247
return
249248
}
250249
}
@@ -264,10 +263,10 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
264263
Param("millicores", strconv.Itoa(millicores)).
265264
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
266265
Param("requestSizeMillicores", strconv.Itoa(rc.requestSizeInMillicores))
267-
e2elog.Logf("ConsumeCPU URL: %v", *req.URL())
266+
framework.Logf("ConsumeCPU URL: %v", *req.URL())
268267
_, err = req.DoRaw()
269268
if err != nil {
270-
e2elog.Logf("ConsumeCPU failure: %v", err)
269+
framework.Logf("ConsumeCPU failure: %v", err)
271270
return false, nil
272271
}
273272
return true, nil
@@ -291,10 +290,10 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
291290
Param("megabytes", strconv.Itoa(megabytes)).
292291
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
293292
Param("requestSizeMegabytes", strconv.Itoa(rc.requestSizeInMegabytes))
294-
e2elog.Logf("ConsumeMem URL: %v", *req.URL())
293+
framework.Logf("ConsumeMem URL: %v", *req.URL())
295294
_, err = req.DoRaw()
296295
if err != nil {
297-
e2elog.Logf("ConsumeMem failure: %v", err)
296+
framework.Logf("ConsumeMem failure: %v", err)
298297
return false, nil
299298
}
300299
return true, nil
@@ -319,10 +318,10 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
319318
Param("delta", strconv.Itoa(delta)).
320319
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
321320
Param("requestSizeMetrics", strconv.Itoa(rc.requestSizeCustomMetric))
322-
e2elog.Logf("ConsumeCustomMetric URL: %v", *req.URL())
321+
framework.Logf("ConsumeCustomMetric URL: %v", *req.URL())
323322
_, err = req.DoRaw()
324323
if err != nil {
325-
e2elog.Logf("ConsumeCustomMetric failure: %v", err)
324+
framework.Logf("ConsumeCustomMetric failure: %v", err)
326325
return false, nil
327326
}
328327
return true, nil
@@ -336,25 +335,25 @@ func (rc *ResourceConsumer) GetReplicas() int {
336335
replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{})
337336
framework.ExpectNoError(err)
338337
if replicationController == nil {
339-
e2elog.Failf(rcIsNil)
338+
framework.Failf(rcIsNil)
340339
}
341340
return int(replicationController.Status.ReadyReplicas)
342341
case KindDeployment:
343342
deployment, err := rc.clientSet.AppsV1().Deployments(rc.nsName).Get(rc.name, metav1.GetOptions{})
344343
framework.ExpectNoError(err)
345344
if deployment == nil {
346-
e2elog.Failf(deploymentIsNil)
345+
framework.Failf(deploymentIsNil)
347346
}
348347
return int(deployment.Status.ReadyReplicas)
349348
case KindReplicaSet:
350349
rs, err := rc.clientSet.AppsV1().ReplicaSets(rc.nsName).Get(rc.name, metav1.GetOptions{})
351350
framework.ExpectNoError(err)
352351
if rs == nil {
353-
e2elog.Failf(rsIsNil)
352+
framework.Failf(rsIsNil)
354353
}
355354
return int(rs.Status.ReadyReplicas)
356355
default:
357-
e2elog.Failf(invalidKind)
356+
framework.Failf(invalidKind)
358357
}
359358
return 0
360359
}
@@ -367,7 +366,7 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.D
367366
interval := 20 * time.Second
368367
err := wait.PollImmediate(interval, duration, func() (bool, error) {
369368
replicas := rc.GetReplicas()
370-
e2elog.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
369+
framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
371370
return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
372371
})
373372
framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)
@@ -381,12 +380,12 @@ func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, max
381380
interval := 10 * time.Second
382381
err := wait.PollImmediate(interval, duration, func() (bool, error) {
383382
replicas := rc.GetReplicas()
384-
e2elog.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
383+
framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
385384
as, err := rc.GetHpa(hpaName)
386385
if err != nil {
387-
e2elog.Logf("Error getting HPA: %s", err)
386+
framework.Logf("Error getting HPA: %s", err)
388387
} else {
389-
e2elog.Logf("HPA status: %+v", as.Status)
388+
framework.Logf("HPA status: %+v", as.Status)
390389
}
391390
if replicas < minDesiredReplicas {
392391
return false, fmt.Errorf("number of replicas below target")
@@ -398,7 +397,7 @@ func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, max
398397
})
399398
// The call above always returns an error, but if it is timeout, it's OK (condition satisfied all the time).
400399
if err == wait.ErrWaitTimeout {
401-
e2elog.Logf("Number of replicas was stable over %v", duration)
400+
framework.Logf("Number of replicas was stable over %v", duration)
402401
return
403402
}
404403
framework.ExpectNoErrorWithOffset(1, err)
@@ -491,7 +490,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
491490
framework.ExpectNoError(replicaset.RunReplicaSet(rsConfig))
492491
break
493492
default:
494-
e2elog.Failf(invalidKind)
493+
framework.Failf(invalidKind)
495494
}
496495

497496
ginkgo.By(fmt.Sprintf("Running controller"))

test/e2e/common/configmap.go

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,6 @@ import (
2323
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2424
"k8s.io/apimachinery/pkg/util/uuid"
2525
"k8s.io/kubernetes/test/e2e/framework"
26-
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
2726
imageutils "k8s.io/kubernetes/test/utils/image"
2827

2928
"github.com/onsi/ginkgo"
@@ -43,7 +42,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() {
4342
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
4443
var err error
4544
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
46-
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
45+
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
4746
}
4847

4948
pod := &v1.Pod{
@@ -91,7 +90,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() {
9190
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
9291
var err error
9392
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
94-
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
93+
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
9594
}
9695

9796
pod := &v1.Pod{

test/e2e/common/configmap_volume.go

Lines changed: 9 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,6 @@ import (
2626
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2727
"k8s.io/apimachinery/pkg/util/uuid"
2828
"k8s.io/kubernetes/test/e2e/framework"
29-
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
3029
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
3130
imageutils "k8s.io/kubernetes/test/utils/image"
3231
)
@@ -140,7 +139,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
140139
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
141140
var err error
142141
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
143-
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
142+
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
144143
}
145144

146145
pod := &v1.Pod{
@@ -227,7 +226,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
227226
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
228227
var err error
229228
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
230-
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
229+
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
231230
}
232231

233232
pod := &v1.Pod{
@@ -345,12 +344,12 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
345344
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
346345
var err error
347346
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
348-
e2elog.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
347+
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
349348
}
350349

351350
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
352351
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
353-
e2elog.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
352+
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
354353
}
355354

356355
pod := &v1.Pod{
@@ -465,7 +464,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
465464

466465
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
467466
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
468-
e2elog.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
467+
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
469468
}
470469

471470
ginkgo.By("waiting to observe update in volume")
@@ -493,7 +492,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
493492
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
494493
var err error
495494
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
496-
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
495+
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
497496
}
498497

499498
pod := &v1.Pod{
@@ -601,7 +600,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
601600
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
602601
var err error
603602
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
604-
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
603+
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
605604
}
606605

607606
one := int64(1)
@@ -678,7 +677,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
678677

679678
var err error
680679
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
681-
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
680+
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
682681
}
683682

684683
one := int64(1)
@@ -813,7 +812,7 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount
813812
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
814813
var err error
815814
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
816-
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
815+
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
817816
}
818817
//creating a pod with configMap object, but with different key which is not present in configMap object.
819818
pod := &v1.Pod{

test/e2e/common/container_probe.go

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,6 @@ import (
2929
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
3030
"k8s.io/kubernetes/pkg/kubelet/events"
3131
"k8s.io/kubernetes/test/e2e/framework"
32-
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
3332
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
3433
testutils "k8s.io/kubernetes/test/utils"
3534

@@ -76,10 +75,10 @@ var _ = framework.KubeDescribe("Probing container", func() {
7675
startedTime, err := GetContainerStartedTime(p, containerName)
7776
framework.ExpectNoError(err)
7877

79-
e2elog.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
78+
framework.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
8079
initialDelay := probeTestInitialDelaySeconds * time.Second
8180
if readyTime.Sub(startedTime) < initialDelay {
82-
e2elog.Failf("Pod became ready before it's %v initial delay", initialDelay)
81+
framework.Failf("Pod became ready before its %v initial delay", initialDelay)
8382
}
8483

8584
restartCount := getRestartCount(p)
@@ -422,14 +421,14 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
422421
// 'Terminated' which can cause indefinite blocking.)
423422
framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, ns, pod.Name),
424423
fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
425-
e2elog.Logf("Started pod %s in namespace %s", pod.Name, ns)
424+
framework.Logf("Started pod %s in namespace %s", pod.Name, ns)
426425

427426
// Check the pod's current state and verify that restartCount is present.
428427
ginkgo.By("checking the pod's current state and verifying that restartCount is present")
429428
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
430429
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
431430
initialRestartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
432-
e2elog.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
431+
framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
433432

434433
// Wait for the restart state to be as desired.
435434
deadline := time.Now().Add(timeout)
@@ -440,10 +439,10 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
440439
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
441440
restartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
442441
if restartCount != lastRestartCount {
443-
e2elog.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
442+
framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
444443
ns, pod.Name, restartCount, time.Since(start))
445444
if restartCount < lastRestartCount {
446-
e2elog.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
445+
framework.Failf("Restart count should increment monotonically: restart count of pod %s/%s changed from %d to %d",
447446
ns, pod.Name, lastRestartCount, restartCount)
448447
}
449448
}
@@ -459,7 +458,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
459458
// If we expected n restarts (n > 0), fail if we observed < n restarts.
460459
if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
461460
int(observedRestarts) < expectNumRestarts) {
462-
e2elog.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
461+
framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
463462
ns, pod.Name, expectNumRestarts, observedRestarts)
464463
}
465464
}

0 commit comments

Comments
 (0)