
Commit d92a250

Merge pull request kubernetes#81685 from oomichi/e2elog-framework-p-r

Use log functions of core framework on [r-u]

2 parents: 354a812 + 6499f93

File tree: 7 files changed, +180 -188 lines
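
The change itself is mechanical: these files belong to the core framework package, so they no longer need to import the logging subpackage through the e2elog alias and can call the package-level helpers directly. A minimal before/after sketch of the pattern (the surrounding function is illustrative, not taken from the commit):

	// Before: logging went through the e2elog alias import.
	import e2elog "k8s.io/kubernetes/test/e2e/framework/log"

	func updateRC(name string) {
		e2elog.Logf("Updating replication controller %q", name)
	}

	// After: inside package framework, the package-level helper is called
	// directly, and the import (plus its BUILD dependency) goes away.
	func updateRC(name string) {
		Logf("Updating replication controller %q", name)
	}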

test/e2e/framework/BUILD

Lines changed: 0 additions & 1 deletion
@@ -76,7 +76,6 @@ go_library(
         "//staging/src/k8s.io/component-base/version:go_default_library",
         "//test/e2e/framework/ginkgowrapper:go_default_library",
         "//test/e2e/framework/kubelet:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",

test/e2e/framework/rc_util.go

Lines changed: 11 additions & 12 deletions
@@ -30,7 +30,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	scaleclient "k8s.io/client-go/scale"
 	api "k8s.io/kubernetes/pkg/apis/core"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	testutils "k8s.io/kubernetes/test/utils"
 )
@@ -101,7 +100,7 @@ func UpdateReplicationControllerWithRetries(c clientset.Interface, namespace, na
 		// Apply the update, then attempt to push it to the apiserver.
 		applyUpdate(rc)
 		if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(rc); err == nil {
-			e2elog.Logf("Updating replication controller %q", name)
+			Logf("Updating replication controller %q", name)
 			return true, nil
 		}
 		updateErr = err
@@ -147,10 +146,10 @@ func WaitForReplicationController(c clientset.Interface, namespace, name string,
 	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
 		_, err := c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
+			Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
 			return !exist, nil
 		}
-		e2elog.Logf("ReplicationController %s in namespace %s found.", name, namespace)
+		Logf("ReplicationController %s in namespace %s found.", name, namespace)
 		return exist, nil
 	})
 	if err != nil {
@@ -167,13 +166,13 @@ func WaitForReplicationControllerwithSelector(c clientset.Interface, namespace s
 		rcs, err := c.CoreV1().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
 		switch {
 		case len(rcs.Items) != 0:
-			e2elog.Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace)
+			Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace)
 			return exist, nil
 		case len(rcs.Items) == 0:
-			e2elog.Logf("ReplicationController with %s in namespace %s disappeared.", selector.String(), namespace)
+			Logf("ReplicationController with %s in namespace %s disappeared.", selector.String(), namespace)
 			return !exist, nil
 		default:
-			e2elog.Logf("List ReplicationController with %s in namespace %s failed: %v", selector.String(), namespace, err)
+			Logf("List ReplicationController with %s in namespace %s failed: %v", selector.String(), namespace, err)
 			return false, nil
 		}
 	})
@@ -230,25 +229,25 @@ waitLoop:
 		for _, podID := range pods {
 			running := RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, fmt.Sprintf("--namespace=%v", ns))
 			if running != "true" {
-				e2elog.Logf("%s is created but not running", podID)
+				Logf("%s is created but not running", podID)
 				continue waitLoop
 			}

 			currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns))
 			currentImage = trimDockerRegistry(currentImage)
 			if currentImage != containerImage {
-				e2elog.Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
+				Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
 				continue waitLoop
 			}

 			// Call the generic validator function here.
 			// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
 			if err := validator(c, podID); err != nil {
-				e2elog.Logf("%s is running right image but validator function failed: %v", podID, err)
+				Logf("%s is running right image but validator function failed: %v", podID, err)
 				continue waitLoop
 			}

-			e2elog.Logf("%s is verified up and running", podID)
+			Logf("%s is verified up and running", podID)
 			runningPods = append(runningPods, podID)
 		}
 		// If we reach here, then all our checks passed.
@@ -257,5 +256,5 @@ waitLoop:
 		}
 	}
 	// Reaching here means that one of more checks failed multiple times. Assuming its not a race condition, something is broken.
-	e2elog.Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname)
+	Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname)
 }
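
For readers new to these helpers: WaitForReplicationController is built on wait.PollImmediate from k8s.io/apimachinery, which runs the condition function once immediately and then at the given interval until it returns true or the timeout expires. A self-contained sketch of the idiom under the client-go API of this era (the condensed helper name waitForRC is hypothetical):

	package framework

	import (
		"fmt"
		"time"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/util/wait"
		clientset "k8s.io/client-go/kubernetes"
	)

	// waitForRC waits for a ReplicationController to exist (exist == true)
	// or to disappear (exist == false).
	func waitForRC(c clientset.Interface, ns, name string, exist bool, interval, timeout time.Duration) error {
		err := wait.PollImmediate(interval, timeout, func() (bool, error) {
			_, err := c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
			if err != nil {
				Logf("Get ReplicationController %s in namespace %s failed (%v).", name, ns, err)
				return !exist, nil // a failed Get satisfies the wait only when waiting for disappearance
			}
			Logf("ReplicationController %s in namespace %s found.", name, ns)
			return exist, nil // returning (false, nil) keeps the poll going
		})
		if err != nil {
			return fmt.Errorf("error waiting for ReplicationController %s/%s: %v", ns, name, err)
		}
		return nil
	}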

test/e2e/framework/resource_usage_gatherer.go

Lines changed: 9 additions & 10 deletions
@@ -32,7 +32,6 @@ import (
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	clientset "k8s.io/client-go/kubernetes"
 	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/system"
 )

@@ -180,13 +179,13 @@ func (w *resourceGatherWorker) singleProbe() {
 	} else {
 		nodeUsage, err := e2ekubelet.GetOneTimeResourceUsageOnNode(w.c, w.nodeName, w.probeDuration, func() []string { return w.containerIDs })
 		if err != nil {
-			e2elog.Logf("Error while reading data from %v: %v", w.nodeName, err)
+			Logf("Error while reading data from %v: %v", w.nodeName, err)
 			return
 		}
 		for k, v := range nodeUsage {
 			data[k] = v
 			if w.printVerboseLogs {
-				e2elog.Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
+				Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
 			}
 		}
 	}
@@ -196,7 +195,7 @@ func (w *resourceGatherWorker) singleProbe() {
 func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
 	defer utilruntime.HandleCrash()
 	defer w.wg.Done()
-	defer e2elog.Logf("Closing worker for %v", w.nodeName)
+	defer Logf("Closing worker for %v", w.nodeName)
 	defer func() { w.finished = true }()
 	select {
 	case <-time.After(initialSleep):
@@ -273,7 +272,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 	if pods == nil {
 		pods, err = c.CoreV1().Pods("kube-system").List(metav1.ListOptions{})
 		if err != nil {
-			e2elog.Logf("Error while listing Pods: %v", err)
+			Logf("Error while listing Pods: %v", err)
 			return nil, err
 		}
 	}
@@ -297,7 +296,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 	}
 	nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
 	if err != nil {
-		e2elog.Logf("Error while listing Nodes: %v", err)
+		Logf("Error while listing Nodes: %v", err)
 		return nil, err
 	}

@@ -345,27 +344,27 @@ func (g *ContainerResourceGatherer) StartGatheringData() {
 // specified resource constraints.
 func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) {
 	close(g.stopCh)
-	e2elog.Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
+	Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
 	finished := make(chan struct{})
 	go func() {
 		g.workerWg.Wait()
 		finished <- struct{}{}
 	}()
 	select {
 	case <-finished:
-		e2elog.Logf("Waitgroup finished.")
+		Logf("Waitgroup finished.")
 	case <-time.After(2 * time.Minute):
 		unfinished := make([]string, 0)
 		for i := range g.workers {
 			if !g.workers[i].finished {
 				unfinished = append(unfinished, g.workers[i].nodeName)
 			}
 		}
-		e2elog.Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
+		Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
 	}

 	if len(percentiles) == 0 {
-		e2elog.Logf("Warning! Empty percentile list for stopAndPrintData.")
+		Logf("Warning! Empty percentile list for stopAndPrintData.")
 		return &ResourceUsageSummary{}, fmt.Errorf("Failed to get any resource usage data")
 	}
 	data := make(map[int]e2ekubelet.ResourceUsagePerContainer)
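
The StopAndSummarize hunk above shows a common Go pattern: a sync.WaitGroup cannot be waited on with a timeout directly, so a goroutine signals completion over a channel and a select races that signal against time.After. A runnable sketch under illustrative names (the original sends a single value into the channel; closing it, as done here, is an equivalent variant that also works with multiple waiters):

	package main

	import (
		"fmt"
		"sync"
		"time"
	)

	// waitWithTimeout waits for wg, but gives up after timeout.
	func waitWithTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
		finished := make(chan struct{})
		go func() {
			wg.Wait()
			close(finished) // close instead of send: any number of receivers may proceed
		}()
		select {
		case <-finished:
			return true
		case <-time.After(timeout):
			return false // the signalling goroutine lingers until wg is done, as in the original
		}
	}

	func main() {
		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()
			time.Sleep(50 * time.Millisecond)
		}()
		fmt.Println("finished in time:", waitWithTimeout(&wg, time.Second))
	}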

test/e2e/framework/size.go

Lines changed: 3 additions & 5 deletions
@@ -19,8 +19,6 @@ package framework
 import (
 	"fmt"
 	"time"
-
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )

 const (
@@ -53,14 +51,14 @@ func WaitForGroupSize(group string, size int32) error {
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
 		currentSize, err := GroupSize(group)
 		if err != nil {
-			e2elog.Logf("Failed to get node instance group size: %v", err)
+			Logf("Failed to get node instance group size: %v", err)
 			continue
 		}
 		if currentSize != int(size) {
-			e2elog.Logf("Waiting for node instance group size %d, current size %d", size, currentSize)
+			Logf("Waiting for node instance group size %d, current size %d", size, currentSize)
 			continue
 		}
-		e2elog.Logf("Node instance group has reached the desired size %d", size)
+		Logf("Node instance group has reached the desired size %d", size)
 		return nil
 	}
 	return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size)
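
Unlike the rc_util.go helpers, WaitForGroupSize uses a plain deadline loop rather than the wait package: the sleep sits in the for statement's post clause, so the condition is checked immediately on entry and then once per interval until the deadline passes. The same idiom in isolation (names are illustrative, not from the commit):

	package main

	import (
		"fmt"
		"time"
	)

	// pollUntil checks cond immediately and then every interval until timeout.
	func pollUntil(timeout, interval time.Duration, cond func() bool) error {
		for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) {
			if cond() {
				return nil
			}
		}
		return fmt.Errorf("timed out after %v", timeout)
	}

	func main() {
		ready := time.Now().Add(30 * time.Millisecond)
		err := pollUntil(200*time.Millisecond, 10*time.Millisecond, func() bool {
			return time.Now().After(ready)
		})
		fmt.Println("err:", err) // expected: err: <nil>
	}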

test/e2e/framework/suites.go

Lines changed: 14 additions & 15 deletions
@@ -27,7 +27,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/component-base/version"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -66,11 +65,11 @@ func SetupSuite() {
 		v1.NamespaceNodeLease,
 	})
 	if err != nil {
-		e2elog.Failf("Error deleting orphaned namespaces: %v", err)
+		Failf("Error deleting orphaned namespaces: %v", err)
 	}
 	klog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
 	if err := WaitForNamespacesDeleted(c, deleted, NamespaceCleanupTimeout); err != nil {
-		e2elog.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
+		Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
 	}
 }

@@ -97,26 +96,26 @@ func SetupSuite() {
 	// number equal to the number of allowed not-ready nodes).
 	if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(TestContext.MinStartupPods), int32(TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
 		DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
-		LogFailedContainers(c, metav1.NamespaceSystem, e2elog.Logf)
+		LogFailedContainers(c, metav1.NamespaceSystem, Logf)
 		runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
-		e2elog.Failf("Error waiting for all pods to be running and ready: %v", err)
+		Failf("Error waiting for all pods to be running and ready: %v", err)
 	}

 	if err := WaitForDaemonSets(c, metav1.NamespaceSystem, int32(TestContext.AllowedNotReadyNodes), TestContext.SystemDaemonsetStartupTimeout); err != nil {
-		e2elog.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
+		Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
 	}

 	// Log the version of the server and this client.
-	e2elog.Logf("e2e test version: %s", version.Get().GitVersion)
+	Logf("e2e test version: %s", version.Get().GitVersion)

 	dc := c.DiscoveryClient

 	serverVersion, serverErr := dc.ServerVersion()
 	if serverErr != nil {
-		e2elog.Logf("Unexpected server error retrieving version: %v", serverErr)
+		Logf("Unexpected server error retrieving version: %v", serverErr)
 	}
 	if serverVersion != nil {
-		e2elog.Logf("kube-apiserver version: %s", serverVersion.GitVersion)
+		Logf("kube-apiserver version: %s", serverVersion.GitVersion)
 	}

 	if TestContext.NodeKiller.Enabled {
@@ -142,7 +141,7 @@ func SetupSuitePerGinkgoNode() {
 		klog.Fatal("Error loading client: ", err)
 	}
 	TestContext.IPFamily = getDefaultClusterIPFamily(c)
-	e2elog.Logf("Cluster IP family: %s", TestContext.IPFamily)
+	Logf("Cluster IP family: %s", TestContext.IPFamily)
 }

 // CleanupSuite is the boilerplate that can be used after tests on ginkgo were run, on the SynchronizedAfterSuite step.
@@ -151,20 +150,20 @@ func SetupSuitePerGinkgoNode() {
 // and then the function that only runs on the first Ginkgo node.
 func CleanupSuite() {
 	// Run on all Ginkgo nodes
-	e2elog.Logf("Running AfterSuite actions on all nodes")
+	Logf("Running AfterSuite actions on all nodes")
 	RunCleanupActions()
 }

 // AfterSuiteActions are actions that are run on ginkgo's SynchronizedAfterSuite
 func AfterSuiteActions() {
 	// Run only Ginkgo on node 1
-	e2elog.Logf("Running AfterSuite actions on node 1")
+	Logf("Running AfterSuite actions on node 1")
 	if TestContext.ReportDir != "" {
 		CoreDump(TestContext.ReportDir)
 	}
 	if TestContext.GatherSuiteMetricsAfterTest {
 		if err := gatherTestSuiteMetrics(); err != nil {
-			e2elog.Logf("Error gathering metrics: %v", err)
+			Logf("Error gathering metrics: %v", err)
 		}
 	}
 	if TestContext.NodeKiller.Enabled {
@@ -173,7 +172,7 @@ func AfterSuiteActions() {
 }

 func gatherTestSuiteMetrics() error {
-	e2elog.Logf("Gathering metrics")
+	Logf("Gathering metrics")
 	c, err := LoadClientset()
 	if err != nil {
 		return fmt.Errorf("error loading client: %v", err)
@@ -198,7 +197,7 @@ func gatherTestSuiteMetrics() error {
 			return fmt.Errorf("error writing to %q: %v", filePath, err)
 		}
 	} else {
-		e2elog.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON)
+		Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON)
 	}

 	return nil
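
The practical difference between the two helpers touched throughout this PR is that Logf only records progress, while Failf aborts the current spec. A rough sketch of what the package-level helpers look like, based on the e2e framework of this era; exact details such as caller-skip offsets and the timestamp format are assumptions and may differ:

	package framework

	import (
		"fmt"
		"time"

		"github.com/onsi/ginkgo"
	)

	// log writes a timestamped, leveled line to the ginkgo test output.
	func log(level string, format string, args ...interface{}) {
		fmt.Fprintf(ginkgo.GinkgoWriter, time.Now().Format(time.StampMilli)+": "+level+": "+format+"\n", args...)
	}

	// Logf records a progress message in the test output.
	func Logf(format string, args ...interface{}) {
		log("INFO", format, args...)
	}

	// Failf logs the message and then fails the current test immediately.
	func Failf(format string, args ...interface{}) {
		msg := fmt.Sprintf(format, args...)
		log("FAIL", "%s", msg)
		ginkgo.Fail(msg, 1)
	}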

test/e2e/framework/test_context.go

Lines changed: 1 addition & 2 deletions
@@ -33,7 +33,6 @@ import (
 	cliflag "k8s.io/component-base/cli/flag"
 	"k8s.io/klog"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )

 const (
@@ -441,7 +440,7 @@ func AfterReadingAllFlags(t *TestContextType) {
 	if TestContext.Provider == "" {
 		// Some users of the e2e.test binary pass --provider=.
 		// We need to support that, changing it would break those usages.
-		e2elog.Logf("The --provider flag is not set. Continuing as if --provider=skeleton had been used.")
+		Logf("The --provider flag is not set. Continuing as if --provider=skeleton had been used.")
 		TestContext.Provider = "skeleton"
 	}
