
Commit f7d92fb

Merge pull request kubernetes#77427 from draveness/feature/use-logf-in-log-pkg
refactor: use e2elog.Logf instead of framework.Logf
2 parents (22b6c69 + da75075), commit f7d92fb

17 files changed (+133, -118 lines)
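The change is mechanical across the touched files: each call site imports the dedicated log package under the e2elog alias and routes its output through e2elog.Logf instead of framework.Logf. A minimal sketch of the pattern, assuming only the import path and printf-style Logf signature visible in the diffs below (the surrounding helper function is hypothetical):

package e2e

import (
    // Dedicated log helper, aliased exactly as in the diffs below.
    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// logServerVersion is a hypothetical example; only the import path and the
// printf-style Logf call come from this commit.
func logServerVersion(gitVersion string) {
    // Before this commit: framework.Logf("kube-apiserver version: %s", gitVersion)
    e2elog.Logf("kube-apiserver version: %s", gitVersion)
}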

test/e2e/BUILD

Lines changed: 1 addition & 0 deletions
@@ -66,6 +66,7 @@ go_library(
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/auth:go_default_library",
 "//test/e2e/framework/ginkgowrapper:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/metrics:go_default_library",
 "//test/e2e/framework/providers/aws:go_default_library",
 "//test/e2e/framework/providers/azure:go_default_library",

test/e2e/e2e.go

Lines changed: 18 additions & 17 deletions
@@ -38,6 +38,7 @@ import (
 commontest "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/metrics"
 "k8s.io/kubernetes/test/e2e/manifest"
 testutils "k8s.io/kubernetes/test/utils"
@@ -119,26 +120,26 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 // number equal to the number of allowed not-ready nodes).
 if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
 framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
-framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
+framework.LogFailedContainers(c, metav1.NamespaceSystem, e2elog.Logf)
 runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
 framework.Failf("Error waiting for all pods to be running and ready: %v", err)
 }

 if err := framework.WaitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
-framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
+e2elog.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
 }

 // Log the version of the server and this client.
-framework.Logf("e2e test version: %s", version.Get().GitVersion)
+e2elog.Logf("e2e test version: %s", version.Get().GitVersion)

 dc := c.DiscoveryClient

 serverVersion, serverErr := dc.ServerVersion()
 if serverErr != nil {
-framework.Logf("Unexpected server error retrieving version: %v", serverErr)
+e2elog.Logf("Unexpected server error retrieving version: %v", serverErr)
 }
 if serverVersion != nil {
-framework.Logf("kube-apiserver version: %s", serverVersion.GitVersion)
+e2elog.Logf("kube-apiserver version: %s", serverVersion.GitVersion)
 }

 // Reference common test to make the import valid.
@@ -160,17 +161,17 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 // and then the function that only runs on the first Ginkgo node.
 var _ = ginkgo.SynchronizedAfterSuite(func() {
 // Run on all Ginkgo nodes
-framework.Logf("Running AfterSuite actions on all nodes")
+e2elog.Logf("Running AfterSuite actions on all nodes")
 framework.RunCleanupActions()
 }, func() {
 // Run only Ginkgo on node 1
-framework.Logf("Running AfterSuite actions on node 1")
+e2elog.Logf("Running AfterSuite actions on node 1")
 if framework.TestContext.ReportDir != "" {
 framework.CoreDump(framework.TestContext.ReportDir)
 }
 if framework.TestContext.GatherSuiteMetricsAfterTest {
 if err := gatherTestSuiteMetrics(); err != nil {
-framework.Logf("Error gathering metrics: %v", err)
+e2elog.Logf("Error gathering metrics: %v", err)
 }
 }
 if framework.TestContext.NodeKiller.Enabled {
@@ -179,7 +180,7 @@ var _ = ginkgo.SynchronizedAfterSuite(func() {
 })

 func gatherTestSuiteMetrics() error {
-framework.Logf("Gathering metrics")
+e2elog.Logf("Gathering metrics")
 c, err := framework.LoadClientset()
 if err != nil {
 return fmt.Errorf("error loading client: %v", err)
@@ -204,7 +205,7 @@ func gatherTestSuiteMetrics() error {
 return fmt.Errorf("error writing to %q: %v", filePath, err)
 }
 } else {
-framework.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON)
+e2elog.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON)
 }

 return nil
@@ -246,31 +247,31 @@ func RunE2ETests(t *testing.T) {
 // to flip to Ready, log its output and delete it.
 func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
 path := "test/images/clusterapi-tester/pod.yaml"
-framework.Logf("Parsing pod from %v", path)
+e2elog.Logf("Parsing pod from %v", path)
 p, err := manifest.PodFromManifest(path)
 if err != nil {
-framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
+e2elog.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
 return
 }
 p.Namespace = ns
 if _, err := c.CoreV1().Pods(ns).Create(p); err != nil {
-framework.Logf("Failed to create %v: %v", p.Name, err)
+e2elog.Logf("Failed to create %v: %v", p.Name, err)
 return
 }
 defer func() {
 if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil {
-framework.Logf("Failed to delete pod %v: %v", p.Name, err)
+e2elog.Logf("Failed to delete pod %v: %v", p.Name, err)
 }
 }()
 timeout := 5 * time.Minute
 if err := framework.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
-framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
+e2elog.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
 return
 }
 logs, err := framework.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
 if err != nil {
-framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
+e2elog.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
 } else {
-framework.Logf("Output of clusterapi-tester:\n%v", logs)
+e2elog.Logf("Output of clusterapi-tester:\n%v", logs)
 }
 }

test/e2e/examples.go

Lines changed: 4 additions & 3 deletions
@@ -31,6 +31,7 @@ import (
 commonutils "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/auth"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/testfiles"

 . "github.com/onsi/ginkgo"
@@ -82,14 +83,14 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
 framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
 stat := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName)
-framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)
+e2elog.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)
 if stat.RestartCount > 0 {
-framework.Logf("Saw %v restart, succeeded...", podName)
+e2elog.Logf("Saw %v restart, succeeded...", podName)
 wg.Done()
 return
 }
 }
-framework.Logf("Failed waiting for %v restart! ", podName)
+e2elog.Logf("Failed waiting for %v restart! ", podName)
 passed = false
 wg.Done()
 }

test/e2e/gke_local_ssd.go

Lines changed: 4 additions & 3 deletions
@@ -24,6 +24,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 )
@@ -37,14 +38,14 @@ var _ = framework.KubeDescribe("GKE local SSD [Feature:GKELocalSSD]", func() {
 })

 It("should write and read from node local SSD [Feature:GKELocalSSD]", func() {
-framework.Logf("Start local SSD test")
+e2elog.Logf("Start local SSD test")
 createNodePoolWithLocalSsds("np-ssd")
 doTestWriteAndReadToLocalSsd(f)
 })
 })

 func createNodePoolWithLocalSsds(nodePoolName string) {
-framework.Logf("Create node pool: %s with local SSDs in cluster: %s ",
+e2elog.Logf("Create node pool: %s with local SSDs in cluster: %s ",
 nodePoolName, framework.TestContext.CloudConfig.Cluster)
 out, err := exec.Command("gcloud", "alpha", "container", "node-pools", "create",
 nodePoolName,
@@ -53,7 +54,7 @@ func createNodePoolWithLocalSsds(nodePoolName string) {
 if err != nil {
 framework.Failf("Failed to create node pool %s: Err: %v\n%v", nodePoolName, err, string(out))
 }
-framework.Logf("Successfully created node pool %s:\n%v", nodePoolName, string(out))
+e2elog.Logf("Successfully created node pool %s:\n%v", nodePoolName, string(out))
 }

 func doTestWriteAndReadToLocalSsd(f *framework.Framework) {

test/e2e/gke_node_pools.go

Lines changed: 14 additions & 13 deletions
@@ -21,6 +21,7 @@ import (
 "os/exec"

 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 )
@@ -34,64 +35,64 @@ var _ = framework.KubeDescribe("GKE node pools [Feature:GKENodePool]", func() {
 })

 It("should create a cluster with multiple node pools [Feature:GKENodePool]", func() {
-framework.Logf("Start create node pool test")
+e2elog.Logf("Start create node pool test")
 testCreateDeleteNodePool(f, "test-pool")
 })
 })

 func testCreateDeleteNodePool(f *framework.Framework, poolName string) {
-framework.Logf("Create node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster)
+e2elog.Logf("Create node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster)

 clusterStr := fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster)

 out, err := exec.Command("gcloud", "container", "node-pools", "create",
 poolName,
 clusterStr,
 "--num-nodes=2").CombinedOutput()
-framework.Logf("\n%s", string(out))
+e2elog.Logf("\n%s", string(out))
 if err != nil {
 framework.Failf("Failed to create node pool %q. Err: %v\n%v", poolName, err, string(out))
 }
-framework.Logf("Successfully created node pool %q.", poolName)
+e2elog.Logf("Successfully created node pool %q.", poolName)

 out, err = exec.Command("gcloud", "container", "node-pools", "list",
 clusterStr).CombinedOutput()
 if err != nil {
 framework.Failf("Failed to list node pools from cluster %q. Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out))
 }
-framework.Logf("Node pools:\n%s", string(out))
+e2elog.Logf("Node pools:\n%s", string(out))

-framework.Logf("Checking that 2 nodes have the correct node pool label.")
+e2elog.Logf("Checking that 2 nodes have the correct node pool label.")
 nodeCount := nodesWithPoolLabel(f, poolName)
 if nodeCount != 2 {
 framework.Failf("Wanted 2 nodes with node pool label, got: %v", nodeCount)
 }
-framework.Logf("Success, found 2 nodes with correct node pool labels.")
+e2elog.Logf("Success, found 2 nodes with correct node pool labels.")

-framework.Logf("Deleting node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster)
+e2elog.Logf("Deleting node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster)
 out, err = exec.Command("gcloud", "container", "node-pools", "delete",
 poolName,
 clusterStr,
 "-q").CombinedOutput()
-framework.Logf("\n%s", string(out))
+e2elog.Logf("\n%s", string(out))
 if err != nil {
 framework.Failf("Failed to delete node pool %q. Err: %v\n%v", poolName, err, string(out))
 }
-framework.Logf("Successfully deleted node pool %q.", poolName)
+e2elog.Logf("Successfully deleted node pool %q.", poolName)

 out, err = exec.Command("gcloud", "container", "node-pools", "list",
 clusterStr).CombinedOutput()
 if err != nil {
 framework.Failf("\nFailed to list node pools from cluster %q. Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out))
 }
-framework.Logf("\nNode pools:\n%s", string(out))
+e2elog.Logf("\nNode pools:\n%s", string(out))

-framework.Logf("Checking that no nodes have the deleted node pool's label.")
+e2elog.Logf("Checking that no nodes have the deleted node pool's label.")
 nodeCount = nodesWithPoolLabel(f, poolName)
 if nodeCount != 0 {
 framework.Failf("Wanted 0 nodes with node pool label, got: %v", nodeCount)
 }
-framework.Logf("Success, found no nodes with the deleted node pool's label.")
+e2elog.Logf("Success, found no nodes with the deleted node pool's label.")
 }

 // nodesWithPoolLabel returns the number of nodes that have the "gke-nodepool"

test/e2e/scheduling/BUILD

Lines changed: 1 addition & 0 deletions
@@ -45,6 +45,7 @@ go_library(
 "//test/e2e/common:go_default_library",
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/gpu:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/providers/gce:go_default_library",
 "//test/e2e/framework/replicaset:go_default_library",
 "//test/utils:go_default_library",

test/e2e/scheduling/equivalence_cache_predicates.go

Lines changed: 2 additions & 1 deletion
@@ -27,6 +27,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"

@@ -72,7 +73,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 Expect(err).NotTo(HaveOccurred())

 for _, node := range nodeList.Items {
-framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
+e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
 framework.PrintAllKubeletPods(cs, node.Name)
 }

test/e2e/scheduling/limit_range.go

Lines changed: 8 additions & 7 deletions
@@ -28,6 +28,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/apimachinery/pkg/watch"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
@@ -100,7 +101,7 @@ var _ = SIGDescribe("LimitRange", func() {
 err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
 if err != nil {
 // Print the pod to help in debugging.
-framework.Logf("Pod %+v does not have the expected requirements", pod)
+e2elog.Logf("Pod %+v does not have the expected requirements", pod)
 Expect(err).NotTo(HaveOccurred())
 }
 }
@@ -121,7 +122,7 @@ var _ = SIGDescribe("LimitRange", func() {
 err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
 if err != nil {
 // Print the pod to help in debugging.
-framework.Logf("Pod %+v does not have the expected requirements", pod)
+e2elog.Logf("Pod %+v does not have the expected requirements", pod)
 Expect(err).NotTo(HaveOccurred())
 }
 }
@@ -170,18 +171,18 @@ var _ = SIGDescribe("LimitRange", func() {
 limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)

 if err != nil {
-framework.Logf("Unable to retrieve LimitRanges: %v", err)
+e2elog.Logf("Unable to retrieve LimitRanges: %v", err)
 return false, nil
 }

 if len(limitRanges.Items) == 0 {
-framework.Logf("limitRange is already deleted")
+e2elog.Logf("limitRange is already deleted")
 return true, nil
 }

 if len(limitRanges.Items) > 0 {
 if limitRanges.Items[0].ObjectMeta.DeletionTimestamp == nil {
-framework.Logf("deletion has not yet been observed")
+e2elog.Logf("deletion has not yet been observed")
 return false, nil
 }
 return true, nil
@@ -200,12 +201,12 @@ var _ = SIGDescribe("LimitRange", func() {
 })

 func equalResourceRequirement(expected v1.ResourceRequirements, actual v1.ResourceRequirements) error {
-framework.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
+e2elog.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
 err := equalResourceList(expected.Requests, actual.Requests)
 if err != nil {
 return err
 }
-framework.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits)
+e2elog.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits)
 err = equalResourceList(expected.Limits, actual.Limits)
 return err
 }
