
Commit a9fc975

Merge pull request kubernetes#77413 from johnSchnake/frameworkLogRefactoringnodeWindowsAutoscale
Move node, windows, and autoscaling tests to framework/log
2 parents c9dda34 + 2ede81c commit a9fc975

19 files changed: +84 −65 lines changed
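The change is mechanical in every touched file: the test package gains an aliased import of the new framework/log package, and calls to framework.Logf are rewritten to e2elog.Logf while all other framework helpers stay put. A minimal sketch of the pattern, assuming a hypothetical helper (logExample and its message are illustrative, not part of the diff):

package node

import (
	"k8s.io/kubernetes/test/e2e/framework"            // non-logging helpers (Failf, ExpectNoError, ...) stay here
	e2elog "k8s.io/kubernetes/test/e2e/framework/log" // logging now comes from framework/log, aliased as e2elog
)

// logExample shows the before/after of this refactoring.
func logExample(nodeName string) {
	// Before this commit: framework.Logf("Checking pods on node %v", nodeName)
	// After:
	e2elog.Logf("Checking pods on node %v", nodeName)

	// Everything other than Logf keeps using the framework package.
	framework.ExpectNoError(nil)
}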

test/e2e/autoscaling/BUILD

Lines changed: 1 addition & 0 deletions
@@ -40,6 +40,7 @@ go_library(
 "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 "//test/e2e/common:go_default_library",
 "//test/e2e/framework:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/instrumentation/monitoring:go_default_library",
 "//test/e2e/scheduling:go_default_library",
 "//test/utils:go_default_library",

test/e2e/autoscaling/cluster_size_autoscaling.go

Lines changed: 3 additions & 2 deletions
@@ -43,6 +43,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/scheduling"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -915,10 +916,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
 time.Sleep(scaleUpTimeout)
 currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-framework.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
+e2elog.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
 Expect(len(currentNodes.Items)).Should(Equal(len(nodes.Items) - nodesToBreakCount))
 status, err := getClusterwideStatus(c)
-framework.Logf("Clusterwide status: %v", status)
+e2elog.Logf("Clusterwide status: %v", status)
 framework.ExpectNoError(err)
 Expect(status).Should(Equal("Unhealthy"))
 }

test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go

Lines changed: 3 additions & 2 deletions
@@ -30,6 +30,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/instrumentation/monitoring"

 . "github.com/onsi/ginkgo"
@@ -237,7 +238,7 @@ func (tc *CustomMetricTestCase) Run() {
 // and uncomment following lines:
 /*
 ts, err := google.DefaultTokenSource(oauth2.NoContext)
-framework.Logf("Couldn't get application default credentials, %v", err)
+e2elog.Logf("Couldn't get application default credentials, %v", err)
 if err != nil {
 framework.Failf("Error accessing application default credentials, %v", err)
 }
@@ -442,7 +443,7 @@ func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, t
 framework.Failf("Failed to get replication controller %s: %v", deployment, err)
 }
 replicas := int(deployment.Status.ReadyReplicas)
-framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
+e2elog.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
 return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
 })
 if err != nil {

test/e2e/autoscaling/dns_autoscaling.go

Lines changed: 10 additions & 9 deletions
@@ -22,13 +22,14 @@ import (
 "strings"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
@@ -240,7 +241,7 @@ func getScheduableCores(nodes []v1.Node) int64 {

 scInt64, scOk := sc.AsInt64()
 if !scOk {
-framework.Logf("Unable to compute integer values of schedulable cores in the cluster")
+e2elog.Logf("Unable to compute integer values of schedulable cores in the cluster")
 return 0
 }
 return scInt64
@@ -258,7 +259,7 @@ func deleteDNSScalingConfigMap(c clientset.Interface) error {
 if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
 return err
 }
-framework.Logf("DNS autoscaling ConfigMap deleted.")
+e2elog.Logf("DNS autoscaling ConfigMap deleted.")
 return nil
 }

@@ -285,7 +286,7 @@ func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) e
 if err != nil {
 return err
 }
-framework.Logf("DNS autoscaling ConfigMap updated.")
+e2elog.Logf("DNS autoscaling ConfigMap updated.")
 return nil
 }

@@ -319,22 +320,22 @@ func deleteDNSAutoscalerPod(c clientset.Interface) error {
 if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(podName, nil); err != nil {
 return err
 }
-framework.Logf("DNS autoscaling pod %v deleted.", podName)
+e2elog.Logf("DNS autoscaling pod %v deleted.", podName)
 return nil
 }

 func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectReplicasFunc, timeout time.Duration) (err error) {
 var current int
 var expected int
-framework.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
+e2elog.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
 condition := func() (bool, error) {
 current, err = getDNSReplicas(c)
 if err != nil {
 return false, err
 }
 expected = getExpected(c)
 if current != expected {
-framework.Logf("Replicas not as expected: got %v, expected %v", current, expected)
+e2elog.Logf("Replicas not as expected: got %v, expected %v", current, expected)
 return false, nil
 }
 return true, nil
@@ -343,12 +344,12 @@ func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectRep
 if err = wait.Poll(2*time.Second, timeout, condition); err != nil {
 return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %v", expected, current, err)
 }
-framework.Logf("kube-dns reaches expected replicas: %v", expected)
+e2elog.Logf("kube-dns reaches expected replicas: %v", expected)
 return nil
 }

 func waitForDNSConfigMapCreated(c clientset.Interface, timeout time.Duration) (configMap *v1.ConfigMap, err error) {
-framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
+e2elog.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
 condition := func() (bool, error) {
 configMap, err = fetchDNSScalingConfigMap(c)
 if err != nil {

test/e2e/node/BUILD

Lines changed: 1 addition & 0 deletions
@@ -37,6 +37,7 @@ go_library(
 "//test/e2e/common:go_default_library",
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/job:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/volume:go_default_library",
 "//test/utils:go_default_library",
 "//test/utils/image:go_default_library",

test/e2e/node/apparmor.go

Lines changed: 2 additions & 1 deletion
@@ -19,6 +19,7 @@ package node
 import (
 "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 )
@@ -35,7 +36,7 @@ var _ = SIGDescribe("AppArmor", func() {
 if !CurrentGinkgoTestDescription().Failed {
 return
 }
-framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
+framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
 })

 It("should enforce an AppArmor profile", func() {

test/e2e/node/crictl.go

Lines changed: 3 additions & 2 deletions
@@ -21,6 +21,7 @@ import (
 "strings"

 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 )
@@ -63,10 +64,10 @@ var _ = SIGDescribe("crictl", func() {
 // Log the stdout/stderr output.
 // TODO: Verify the output.
 if len(stdout) > 0 {
-framework.Logf("Got stdout from %q:\n %s\n", host, strings.TrimSpace(stdout))
+e2elog.Logf("Got stdout from %q:\n %s\n", host, strings.TrimSpace(stdout))
 }
 if len(stderr) > 0 {
-framework.Logf("Got stderr from %q:\n %s\n", host, strings.TrimSpace(stderr))
+e2elog.Logf("Got stderr from %q:\n %s\n", host, strings.TrimSpace(stderr))
 }
 }
 })

test/e2e/node/events.go

Lines changed: 5 additions & 4 deletions
@@ -20,13 +20,14 @@ import (
 "strconv"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/fields"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
@@ -88,7 +89,7 @@ var _ = SIGDescribe("Events", func() {
 if err != nil {
 framework.Failf("Failed to get pod: %v", err)
 }
-framework.Logf("%+v\n", podWithUid)
+e2elog.Logf("%+v\n", podWithUid)
 var events *v1.EventList
 // Check for scheduler event about the pod.
 By("checking for scheduler event about the pod")
@@ -105,7 +106,7 @@ var _ = SIGDescribe("Events", func() {
 return false, err
 }
 if len(events.Items) > 0 {
-framework.Logf("Saw scheduler event for our pod.")
+e2elog.Logf("Saw scheduler event for our pod.")
 return true, nil
 }
 return false, nil
@@ -125,7 +126,7 @@ var _ = SIGDescribe("Events", func() {
 return false, err
 }
 if len(events.Items) > 0 {
-framework.Logf("Saw kubelet event for our pod.")
+e2elog.Logf("Saw kubelet event for our pod.")
 return true, nil
 }
 return false, nil

test/e2e/node/kubelet.go

Lines changed: 9 additions & 8 deletions
@@ -22,13 +22,14 @@ import (
 "strings"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/volume"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -50,10 +51,10 @@ const (
 // podNamePrefix and namespace.
 func getPodMatches(c clientset.Interface, nodeName string, podNamePrefix string, namespace string) sets.String {
 matches := sets.NewString()
-framework.Logf("Checking pods on node %v via /runningpods endpoint", nodeName)
+e2elog.Logf("Checking pods on node %v via /runningpods endpoint", nodeName)
 runningPods, err := framework.GetKubeletPods(c, nodeName)
 if err != nil {
-framework.Logf("Error checking running pods on %v: %v", nodeName, err)
+e2elog.Logf("Error checking running pods on %v: %v", nodeName, err)
 return matches
 }
 for _, pod := range runningPods.Items {
@@ -90,7 +91,7 @@ func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, p
 if seen.Len() == targetNumPods {
 return true, nil
 }
-framework.Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len())
+e2elog.Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len())
 return false, nil
 })
 }
@@ -211,7 +212,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 }

 for _, test := range tests {
-framework.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
+e2elog.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
 err = wait.Poll(poll, timeout, func() (bool, error) {
 result, err := framework.NodeExec(nodeIP, test.cmd)
 Expect(err).NotTo(HaveOccurred())
@@ -229,9 +230,9 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 }

 if expectClean {
-framework.Logf("Pod's host has been cleaned up")
+e2elog.Logf("Pod's host has been cleaned up")
 } else {
-framework.Logf("Pod's host has not been cleaned up (per expectation)")
+e2elog.Logf("Pod's host has not been cleaned up (per expectation)")
 }
 }

@@ -345,7 +346,7 @@ var _ = SIGDescribe("kubelet", func() {
 start := time.Now()
 Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0,
 itArg.timeout)).NotTo(HaveOccurred())
-framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
+e2elog.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
 time.Since(start))
 if resourceMonitor != nil {
 resourceMonitor.LogCPUSummary()

test/e2e/node/kubelet_perf.go

Lines changed: 9 additions & 8 deletions
@@ -26,6 +26,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"

@@ -54,10 +55,10 @@ func logPodsOnNodes(c clientset.Interface, nodeNames []string) {
 for _, n := range nodeNames {
 podList, err := framework.GetKubeletRunningPods(c, n)
 if err != nil {
-framework.Logf("Unable to retrieve kubelet pods for node %v", n)
+e2elog.Logf("Unable to retrieve kubelet pods for node %v", n)
 continue
 }
-framework.Logf("%d pods are running on node %v", len(podList.Items), n)
+e2elog.Logf("%d pods are running on node %v", len(podList.Items), n)
 }
 }

@@ -90,7 +91,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 deadline := time.Now().Add(monitoringTime)
 for time.Now().Before(deadline) {
 timeLeft := deadline.Sub(time.Now())
-framework.Logf("Still running...%v left", timeLeft)
+e2elog.Logf("Still running...%v left", timeLeft)
 if timeLeft < reportingPeriod {
 time.Sleep(timeLeft)
 } else {
@@ -104,13 +105,13 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 usageSummary, err := rm.GetLatest()
 Expect(err).NotTo(HaveOccurred())
 // TODO(random-liu): Remove the original log when we migrate to new perfdash
-framework.Logf("%s", rm.FormatResourceUsage(usageSummary))
+e2elog.Logf("%s", rm.FormatResourceUsage(usageSummary))
 // Log perf result
 framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
 verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary)

 cpuSummary := rm.GetCPUSummary()
-framework.Logf("%s", rm.FormatCPUSummary(cpuSummary))
+e2elog.Logf("%s", rm.FormatCPUSummary(cpuSummary))
 // Log perf result
 framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
 verifyCPULimits(expectedCPU, cpuSummary)
@@ -144,9 +145,9 @@ func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsageP
 errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", ")))
 heapStats, err := framework.GetKubeletHeapStats(c, nodeName)
 if err != nil {
-framework.Logf("Unable to get heap stats from %q", nodeName)
+e2elog.Logf("Unable to get heap stats from %q", nodeName)
 } else {
-framework.Logf("Heap stats on %q\n:%v", nodeName, heapStats)
+e2elog.Logf("Heap stats on %q\n:%v", nodeName, heapStats)
 }
 }
 }
@@ -210,7 +211,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
 AfterEach(func() {
 rm.Stop()
 result := om.GetLatestRuntimeOperationErrorRate()
-framework.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
+e2elog.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
 })
 SIGDescribe("regular resource usage tracking", func() {
 // We assume that the scheduler will make reasonable scheduling choices
