Commit d1290ff

clean up test code

1 parent c4c6467 · commit d1290ff

41 files changed (+214, -258 lines)
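Every hunk below follows the same mechanical pattern: calls routed through the deprecated e2elog alias for k8s.io/kubernetes/test/e2e/framework/log are redirected to the equivalent helpers exported by the framework package itself, and the now-unused import (plus its BUILD dependency) is dropped. A minimal sketch of the before/after style — the helper below is hypothetical, not part of this commit:

// Before the cleanup, test files imported the log subpackage under an alias:
//
//	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
//	e2elog.Logf("Pod: %s, restart count:%d", name, restarts)
//
// After the cleanup, the same helpers are called on framework directly.
package node

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

// reportRestarts is a hypothetical helper illustrating the post-cleanup style.
func reportRestarts(podName string, restartCount int32) {
	// Logf writes to the e2e test log.
	framework.Logf("Pod: %s, restart count:%d", podName, restartCount)
	if restartCount > 0 {
		// Failf logs the message and fails the current test.
		framework.Failf("pod %s restarted unexpectedly", podName)
	}
}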

test/e2e/examples.go

Lines changed: 4 additions & 5 deletions
@@ -31,7 +31,6 @@ import (
 	commonutils "k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/auth"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"

@@ -83,14 +82,14 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
 			framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
 			stat := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName)
-			e2elog.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)
+			framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)
 			if stat.RestartCount > 0 {
-				e2elog.Logf("Saw %v restart, succeeded...", podName)
+				framework.Logf("Saw %v restart, succeeded...", podName)
 				wg.Done()
 				return
 			}
 		}
-		e2elog.Logf("Failed waiting for %v restart! ", podName)
+		framework.Logf("Failed waiting for %v restart! ", podName)
 		passed = false
 		wg.Done()
 	}
@@ -106,7 +105,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 		}
 		wg.Wait()
 		if !passed {
-			e2elog.Failf("At least one liveness example failed. See the logs above.")
+			framework.Failf("At least one liveness example failed. See the logs above.")
 		}
 	})
 })

test/e2e/gke_local_ssd.go

Lines changed: 4 additions & 5 deletions
@@ -24,7 +24,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 	"github.com/onsi/ginkgo"
 )

@@ -38,23 +37,23 @@ var _ = framework.KubeDescribe("GKE local SSD [Feature:GKELocalSSD]", func() {
 	})

 	ginkgo.It("should write and read from node local SSD [Feature:GKELocalSSD]", func() {
-		e2elog.Logf("Start local SSD test")
+		framework.Logf("Start local SSD test")
 		createNodePoolWithLocalSsds("np-ssd")
 		doTestWriteAndReadToLocalSsd(f)
 	})
 })

 func createNodePoolWithLocalSsds(nodePoolName string) {
-	e2elog.Logf("Create node pool: %s with local SSDs in cluster: %s ",
+	framework.Logf("Create node pool: %s with local SSDs in cluster: %s ",
 		nodePoolName, framework.TestContext.CloudConfig.Cluster)
 	out, err := exec.Command("gcloud", "alpha", "container", "node-pools", "create",
 		nodePoolName,
 		fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster),
 		"--local-ssd-count=1").CombinedOutput()
 	if err != nil {
-		e2elog.Failf("Failed to create node pool %s: Err: %v\n%v", nodePoolName, err, string(out))
+		framework.Failf("Failed to create node pool %s: Err: %v\n%v", nodePoolName, err, string(out))
 	}
-	e2elog.Logf("Successfully created node pool %s:\n%v", nodePoolName, string(out))
+	framework.Logf("Successfully created node pool %s:\n%v", nodePoolName, string(out))
 }

 func doTestWriteAndReadToLocalSsd(f *framework.Framework) {

test/e2e/node/BUILD

Lines changed: 0 additions & 1 deletion
@@ -42,7 +42,6 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/job:go_default_library",
         "//test/e2e/framework/kubelet:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/perf:go_default_library",

test/e2e/node/apparmor.go

Lines changed: 1 addition & 2 deletions
@@ -19,7 +19,6 @@ package node
 import (
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 	"github.com/onsi/ginkgo"
 )

@@ -36,7 +35,7 @@ var _ = SIGDescribe("AppArmor", func() {
 		if !ginkgo.CurrentGinkgoTestDescription().Failed {
 			return
 		}
-		framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
+		framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
 	})

 	ginkgo.It("should enforce an AppArmor profile", func() {

test/e2e/node/crictl.go

Lines changed: 4 additions & 5 deletions
@@ -21,7 +21,6 @@ import (
 	"strings"

 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"

 	"github.com/onsi/ginkgo"

@@ -42,7 +41,7 @@ var _ = SIGDescribe("crictl", func() {
 		ginkgo.By("Getting all nodes' SSH-able IP addresses")
 		hosts, err := e2essh.NodeSSHHosts(f.ClientSet)
 		if err != nil {
-			e2elog.Failf("Error getting node hostnames: %v", err)
+			framework.Failf("Error getting node hostnames: %v", err)
 		}

 		testCases := []struct {

@@ -60,15 +59,15 @@ var _ = SIGDescribe("crictl", func() {
 			result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider)
 			stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr)
 			if err != nil {
-				e2elog.Failf("Ran %q on %q, got error %v", testCase.cmd, host, err)
+				framework.Failf("Ran %q on %q, got error %v", testCase.cmd, host, err)
 			}
 			// Log the stdout/stderr output.
 			// TODO: Verify the output.
 			if len(stdout) > 0 {
-				e2elog.Logf("Got stdout from %q:\n %s\n", host, strings.TrimSpace(stdout))
+				framework.Logf("Got stdout from %q:\n %s\n", host, strings.TrimSpace(stdout))
 			}
 			if len(stderr) > 0 {
-				e2elog.Logf("Got stderr from %q:\n %s\n", host, strings.TrimSpace(stderr))
+				framework.Logf("Got stderr from %q:\n %s\n", host, strings.TrimSpace(stderr))
 			}
 		}
 	})

test/e2e/node/events.go

Lines changed: 5 additions & 6 deletions
@@ -27,7 +27,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 	"github.com/onsi/ginkgo"
 )

@@ -73,7 +72,7 @@ var _ = SIGDescribe("Events", func() {
 			podClient.Delete(pod.Name, nil)
 		}()
 		if _, err := podClient.Create(pod); err != nil {
-			e2elog.Failf("Failed to create pod: %v", err)
+			framework.Failf("Failed to create pod: %v", err)
 		}

 		framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

@@ -87,9 +86,9 @@ var _ = SIGDescribe("Events", func() {
 		ginkgo.By("retrieving the pod")
 		podWithUID, err := podClient.Get(pod.Name, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Failf("Failed to get pod: %v", err)
+			framework.Failf("Failed to get pod: %v", err)
 		}
-		e2elog.Logf("%+v\n", podWithUID)
+		framework.Logf("%+v\n", podWithUID)
 		var events *v1.EventList
 		// Check for scheduler event about the pod.
 		ginkgo.By("checking for scheduler event about the pod")

@@ -106,7 +105,7 @@ var _ = SIGDescribe("Events", func() {
 				return false, err
 			}
 			if len(events.Items) > 0 {
-				e2elog.Logf("Saw scheduler event for our pod.")
+				framework.Logf("Saw scheduler event for our pod.")
 				return true, nil
 			}
 			return false, nil

@@ -126,7 +125,7 @@ var _ = SIGDescribe("Events", func() {
 				return false, err
 			}
 			if len(events.Items) > 0 {
-				e2elog.Logf("Saw kubelet event for our pod.")
+				framework.Logf("Saw kubelet event for our pod.")
 				return true, nil
 			}
 			return false, nil

test/e2e/node/kubelet.go

Lines changed: 7 additions & 8 deletions
@@ -30,7 +30,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	"k8s.io/kubernetes/test/e2e/framework/volume"

@@ -53,10 +52,10 @@ const (
 // podNamePrefix and namespace.
 func getPodMatches(c clientset.Interface, nodeName string, podNamePrefix string, namespace string) sets.String {
 	matches := sets.NewString()
-	e2elog.Logf("Checking pods on node %v via /runningpods endpoint", nodeName)
+	framework.Logf("Checking pods on node %v via /runningpods endpoint", nodeName)
 	runningPods, err := e2ekubelet.GetKubeletPods(c, nodeName)
 	if err != nil {
-		e2elog.Logf("Error checking running pods on %v: %v", nodeName, err)
+		framework.Logf("Error checking running pods on %v: %v", nodeName, err)
 		return matches
 	}
 	for _, pod := range runningPods.Items {

@@ -93,7 +92,7 @@ func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, p
 		if seen.Len() == targetNumPods {
 			return true, nil
 		}
-		e2elog.Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len())
+		framework.Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len())
 		return false, nil
 	})
 }

@@ -214,7 +213,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 	}

 	for _, test := range tests {
-		e2elog.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
+		framework.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
 		err = wait.Poll(poll, timeout, func() (bool, error) {
 			result, err := e2essh.NodeExec(nodeIP, test.cmd, framework.TestContext.Provider)
 			framework.ExpectNoError(err)

@@ -232,9 +231,9 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 	}

 	if expectClean {
-		e2elog.Logf("Pod's host has been cleaned up")
+		framework.Logf("Pod's host has been cleaned up")
 	} else {
-		e2elog.Logf("Pod's host has not been cleaned up (per expectation)")
+		framework.Logf("Pod's host has not been cleaned up (per expectation)")
 	}
 }

@@ -349,7 +348,7 @@ var _ = SIGDescribe("kubelet", func() {
 			start := time.Now()
 			err = waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0, itArg.timeout)
 			framework.ExpectNoError(err)
-			e2elog.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
+			framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
 				time.Since(start))
 			if resourceMonitor != nil {
 				resourceMonitor.LogCPUSummary()

test/e2e/node/kubelet_perf.go

Lines changed: 11 additions & 12 deletions
@@ -27,7 +27,6 @@ import (
 	kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 	e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
 	"k8s.io/kubernetes/test/e2e/perftype"

@@ -58,10 +57,10 @@ func logPodsOnNodes(c clientset.Interface, nodeNames []string) {
 	for _, n := range nodeNames {
 		podList, err := e2ekubelet.GetKubeletRunningPods(c, n)
 		if err != nil {
-			e2elog.Logf("Unable to retrieve kubelet pods for node %v", n)
+			framework.Logf("Unable to retrieve kubelet pods for node %v", n)
 			continue
 		}
-		e2elog.Logf("%d pods are running on node %v", len(podList.Items), n)
+		framework.Logf("%d pods are running on node %v", len(podList.Items), n)
 	}
 }

@@ -95,7 +94,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 	deadline := time.Now().Add(monitoringTime)
 	for time.Now().Before(deadline) {
 		timeLeft := deadline.Sub(time.Now())
-		e2elog.Logf("Still running...%v left", timeLeft)
+		framework.Logf("Still running...%v left", timeLeft)
 		if timeLeft < reportingPeriod {
 			time.Sleep(timeLeft)
 		} else {

@@ -109,13 +108,13 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 	usageSummary, err := rm.GetLatest()
 	framework.ExpectNoError(err)
 	// TODO(random-liu): Remove the original log when we migrate to new perfdash
-	e2elog.Logf("%s", rm.FormatResourceUsage(usageSummary))
+	framework.Logf("%s", rm.FormatResourceUsage(usageSummary))
 	// Log perf result
 	printPerfData(e2eperf.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
 	verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary)

 	cpuSummary := rm.GetCPUSummary()
-	e2elog.Logf("%s", rm.FormatCPUSummary(cpuSummary))
+	framework.Logf("%s", rm.FormatCPUSummary(cpuSummary))
 	// Log perf result
 	printPerfData(e2eperf.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
 	verifyCPULimits(expectedCPU, cpuSummary)

@@ -149,14 +148,14 @@ func verifyMemoryLimits(c clientset.Interface, expected e2ekubelet.ResourceUsage
 			errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", ")))
 			heapStats, err := e2ekubelet.GetKubeletHeapStats(c, nodeName)
 			if err != nil {
-				e2elog.Logf("Unable to get heap stats from %q", nodeName)
+				framework.Logf("Unable to get heap stats from %q", nodeName)
 			} else {
-				e2elog.Logf("Heap stats on %q\n:%v", nodeName, heapStats)
+				framework.Logf("Heap stats on %q\n:%v", nodeName, heapStats)
 			}
 		}
 	}
 	if len(errList) > 0 {
-		e2elog.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n"))
+		framework.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n"))
 	}
 }

@@ -190,7 +189,7 @@ func verifyCPULimits(expected e2ekubelet.ContainersCPUSummary, actual e2ekubelet
 		}
 	}
 	if len(errList) > 0 {
-		e2elog.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n"))
+		framework.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n"))
 	}
 }

@@ -215,7 +214,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
 	ginkgo.AfterEach(func() {
 		rm.Stop()
 		result := om.GetLatestRuntimeOperationErrorRate()
-		e2elog.Logf("runtime operation error metrics:\n%s", e2ekubelet.FormatRuntimeOperationErrorRate(result))
+		framework.Logf("runtime operation error metrics:\n%s", e2ekubelet.FormatRuntimeOperationErrorRate(result))
 	})
 	SIGDescribe("regular resource usage tracking [Feature:RegularResourceUsageTracking]", func() {
 		// We assume that the scheduler will make reasonable scheduling choices

@@ -287,6 +286,6 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
 func printPerfData(p *perftype.PerfData) {
 	// Notice that we must make sure the perftype.PerfResultEnd is in a new line.
 	if str := e2emetrics.PrettyPrintJSON(p); str != "" {
-		e2elog.Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
+		framework.Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
 	}
 }

test/e2e/node/mount_propagation.go

Lines changed: 1 addition & 2 deletions
@@ -23,7 +23,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	imageutils "k8s.io/kubernetes/test/utils/image"

@@ -166,7 +165,7 @@ var _ = SIGDescribe("Mount propagation", func() {
 		for _, mountName := range dirNames {
 			cmd := fmt.Sprintf("cat /mnt/test/%s/file", mountName)
 			stdout, stderr, err := f.ExecShellInPodWithFullOutput(podName, cmd)
-			e2elog.Logf("pod %s mount %s: stdout: %q, stderr: %q error: %v", podName, mountName, stdout, stderr, err)
+			framework.Logf("pod %s mount %s: stdout: %q, stderr: %q error: %v", podName, mountName, stdout, stderr, err)
 			msg := fmt.Sprintf("When checking pod %s and directory %s", podName, mountName)
 			shouldBeVisible := mounts.Has(mountName)
 			if shouldBeVisible {

test/e2e/node/node_problem_detector.go

Lines changed: 1 addition & 2 deletions
@@ -29,7 +29,6 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	testutils "k8s.io/kubernetes/test/utils"

@@ -188,7 +187,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
 			workingSetStatsMsg += fmt.Sprintf(" %s[%.1f|%.1f|%.1f];", nodes.Items[i].Name,
 				workingSetStats[host][0], workingSetStats[host][len(workingSetStats[host])/2], workingSetStats[host][len(workingSetStats[host])-1])
 		}
-		e2elog.Logf("Node-Problem-Detector CPU and Memory Stats:\n\t%s\n\t%s\n\t%s", cpuStatsMsg, rssStatsMsg, workingSetStatsMsg)
+		framework.Logf("Node-Problem-Detector CPU and Memory Stats:\n\t%s\n\t%s\n\t%s", cpuStatsMsg, rssStatsMsg, workingSetStatsMsg)
 	})
 })
