
Commit 4e3e4e0

use log funcs of core framework in the test/e2e/scalability and test/e2e/cloud package
1 parent 1afcd7d commit 4e3e4e0
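
The change is mechanical: each e2elog.Logf/e2elog.Failf call site switches to the identical helpers exported by the core framework package, after which the test/e2e/framework/log import and its Bazel dependency can be dropped. A minimal before/after sketch of the pattern (logBefore/logAfter are illustrative names, not part of the diff):

package e2e

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// logBefore shows the old call site, going through the e2elog alias.
func logBefore(err error) {
	e2elog.Logf("Unexpected error occurred: %v", err)
}

// logAfter shows the same call site after this commit; the e2elog
// import (and its Bazel dependency) becomes unnecessary.
func logAfter(err error) {
	framework.Logf("Unexpected error occurred: %v", err)
}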

5 files changed, 43 insertions(+), 48 deletions(-)

test/e2e/cloud/BUILD

Lines changed: 0 additions & 1 deletion
@@ -13,7 +13,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",

test/e2e/cloud/nodes.go

Lines changed: 5 additions & 6 deletions
@@ -23,7 +23,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 
 	"github.com/onsi/ginkgo"
@@ -49,16 +48,16 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
 
 		origNodes, err := e2enode.GetReadyNodesIncludingTainted(c)
 		if err != nil {
-			e2elog.Logf("Unexpected error occurred: %v", err)
+			framework.Logf("Unexpected error occurred: %v", err)
 		}
 		// TODO: write a wrapper for ExpectNoErrorWithOffset()
 		framework.ExpectNoErrorWithOffset(0, err)
 
-		e2elog.Logf("Original number of ready nodes: %d", len(origNodes.Items))
+		framework.Logf("Original number of ready nodes: %d", len(origNodes.Items))
 
 		err = framework.DeleteNodeOnCloudProvider(&nodeToDelete)
 		if err != nil {
-			e2elog.Failf("failed to delete node %q, err: %q", nodeToDelete.Name, err)
+			framework.Failf("failed to delete node %q, err: %q", nodeToDelete.Name, err)
 		}
 
 		newNodes, err := e2enode.CheckReady(c, len(origNodes.Items)-1, 5*time.Minute)
@@ -67,9 +66,9 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
 
 		_, err = c.CoreV1().Nodes().Get(nodeToDelete.Name, metav1.GetOptions{})
 		if err == nil {
-			e2elog.Failf("node %q still exists when it should be deleted", nodeToDelete.Name)
+			framework.Failf("node %q still exists when it should be deleted", nodeToDelete.Name)
 		} else if !apierrs.IsNotFound(err) {
-			e2elog.Failf("failed to get node %q err: %q", nodeToDelete.Name, err)
+			framework.Failf("failed to get node %q err: %q", nodeToDelete.Name, err)
 		}
 
 	})
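
For context on the helpers being swapped in: framework.Logf writes a timestamped line to the ginkgo writer, while framework.Failf logs the message and then fails the current spec. A rough sketch of that behavior, simplified and not the framework's actual implementation:

package sketch

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo"
)

// logf approximates framework.Logf: a timestamped INFO line on the
// ginkgo writer so output interleaves with spec progress.
func logf(format string, args ...interface{}) {
	fmt.Fprintf(ginkgo.GinkgoWriter, time.Now().Format(time.StampMilli)+": INFO: "+format+"\n", args...)
}

// failf approximates framework.Failf: log the message, then fail the
// currently running spec via ginkgo.Fail.
func failf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	logf("FAIL: %s", msg)
	ginkgo.Fail(msg, 1)
}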

test/e2e/scalability/BUILD

Lines changed: 0 additions & 1 deletion
@@ -38,7 +38,6 @@ go_library(
         "//staging/src/k8s.io/client-go/transport:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",

test/e2e/scalability/density.go

Lines changed: 25 additions & 26 deletions
@@ -51,7 +51,6 @@ import (
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -163,7 +162,7 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceC
 	controllerMem = math.MaxUint64
 	schedulerCPU := math.MaxFloat32
 	schedulerMem = math.MaxUint64
-	e2elog.Logf("Setting resource constraints for provider: %s", framework.TestContext.Provider)
+	framework.Logf("Setting resource constraints for provider: %s", framework.TestContext.Provider)
 	if framework.ProviderIs("kubemark") {
 		if numNodes <= 5 {
 			apiserverCPU = 0.35
@@ -301,7 +300,7 @@ func logPodStartupStatus(
 	}
 	// Log status of the pods.
 	startupStatus := testutils.ComputeRCStartupStatus(podStore.List(), expectedPods)
-	e2elog.Logf(startupStatus.String("Density"))
+	framework.Logf(startupStatus.String("Density"))
 	// Compute scheduling throughput for the latest time period.
 	throughput := float64(startupStatus.Scheduled-lastScheduledCount) / float64(period/time.Second)
 	*scheduleThroughputs = append(*scheduleThroughputs, throughput)
@@ -346,8 +345,8 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
 	startupTime := time.Since(startTime)
 	close(logStopCh)
 	close(schedulerProfilingStopCh)
-	e2elog.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
-	e2elog.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
+	framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
+	framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
 	replicationCtrlStartupPhase.End()
 
 	// Grabbing scheduler memory profile after cluster saturation finished.
@@ -376,7 +375,7 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
 	}
 	sort.Strings(nodeNames)
 	for _, node := range nodeNames {
-		e2elog.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
+		framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
 	}
 	defer printPodAllocationPhase.End()
 	return startupTime
@@ -449,7 +448,7 @@ var _ = SIGDescribe("Density", func() {
 			NumberOfPods: totalPods,
 			Throughput:   float32(totalPods) / float32(e2eStartupTime/time.Second),
 		}
-		e2elog.Logf("Cluster saturation time: %s", e2emetrics.PrettyPrintJSON(saturationData))
+		framework.Logf("Cluster saturation time: %s", e2emetrics.PrettyPrintJSON(saturationData))
 
 		summaries := make([]framework.TestDataSummary, 0, 2)
 		// Verify latency metrics.
@@ -525,7 +524,7 @@ var _ = SIGDescribe("Density", func() {
 
 		_, nodes, err = e2enode.GetMasterAndWorkerNodes(c)
 		if err != nil {
-			e2elog.Logf("Unexpected error occurred: %v", err)
+			framework.Logf("Unexpected error occurred: %v", err)
 		}
 		// TODO: write a wrapper for ExpectNoErrorWithOffset()
 		framework.ExpectNoErrorWithOffset(0, err)
@@ -548,7 +547,7 @@ var _ = SIGDescribe("Density", func() {
 		framework.ExpectNoError(e2emetrics.ResetMetrics(c))
 		framework.ExpectNoError(os.Mkdir(fmt.Sprintf(framework.TestContext.OutputDir+"/%s", uuid), 0777))
 
-		e2elog.Logf("Listing nodes for easy debugging:\n")
+		framework.Logf("Listing nodes for easy debugging:\n")
 		for _, node := range nodes.Items {
 			var internalIP, externalIP string
 			for _, address := range node.Status.Addresses {
@@ -559,7 +558,7 @@ var _ = SIGDescribe("Density", func() {
 					externalIP = address.Address
 				}
 			}
-			e2elog.Logf("Name: %v, clusterIP: %v, externalIP: %v", node.ObjectMeta.Name, internalIP, externalIP)
+			framework.Logf("Name: %v, clusterIP: %v, externalIP: %v", node.ObjectMeta.Name, internalIP, externalIP)
 		}
 
 		// Start apiserver CPU profile gatherer with frequency based on cluster size.
@@ -688,7 +687,7 @@ var _ = SIGDescribe("Density", func() {
 					Client:    clients[i],
 					Name:      secretName,
 					Namespace: nsName,
-					LogFunc:   e2elog.Logf,
+					LogFunc:   framework.Logf,
 				})
 				secretNames = append(secretNames, secretName)
 			}
@@ -700,7 +699,7 @@ var _ = SIGDescribe("Density", func() {
 					Client:    clients[i],
 					Name:      configMapName,
 					Namespace: nsName,
-					LogFunc:   e2elog.Logf,
+					LogFunc:   framework.Logf,
 				})
 				configMapNames = append(configMapNames, configMapName)
 			}
@@ -720,7 +719,7 @@ var _ = SIGDescribe("Density", func() {
 				MemRequest:           nodeMemCapacity / 100,
 				MaxContainerFailures: &MaxContainerFailures,
 				Silent:               true,
-				LogFunc:              e2elog.Logf,
+				LogFunc:              framework.Logf,
 				SecretNames:          secretNames,
 				ConfigMapNames:       configMapNames,
 				ServiceAccountTokenProjections: itArg.svcacctTokenProjectionsPerPod,
@@ -748,7 +747,7 @@ var _ = SIGDescribe("Density", func() {
 			case batch.Kind("Job"):
 				configs[i] = &testutils.JobConfig{RCConfig: *baseConfig}
 			default:
-				e2elog.Failf("Unsupported kind: %v", itArg.kind)
+				framework.Failf("Unsupported kind: %v", itArg.kind)
 			}
 		}
 
@@ -772,7 +771,7 @@ var _ = SIGDescribe("Density", func() {
 				Client:    f.ClientSet,
 				Name:      fmt.Sprintf("density-daemon-%v", i),
 				Namespace: f.Namespace.Name,
-				LogFunc:   e2elog.Logf,
+				LogFunc:   framework.Logf,
 			})
 		}
 		e2eStartupTime = runDensityTest(dConfig, testPhaseDurations, &scheduleThroughputs)
@@ -812,7 +811,7 @@ var _ = SIGDescribe("Density", func() {
 				if startTime != metav1.NewTime(time.Time{}) {
 					runTimes[p.Name] = startTime
 				} else {
-					e2elog.Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
+					framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
 				}
 			}
 		}
@@ -842,15 +841,15 @@ var _ = SIGDescribe("Density", func() {
 			AddFunc: func(obj interface{}) {
 				p, ok := obj.(*v1.Pod)
 				if !ok {
-					e2elog.Logf("Failed to cast observed object to *v1.Pod.")
+					framework.Logf("Failed to cast observed object to *v1.Pod.")
 				}
 				framework.ExpectEqual(ok, true)
 				go checkPod(p)
 			},
 			UpdateFunc: func(oldObj, newObj interface{}) {
 				p, ok := newObj.(*v1.Pod)
 				if !ok {
-					e2elog.Logf("Failed to cast observed object to *v1.Pod.")
+					framework.Logf("Failed to cast observed object to *v1.Pod.")
 				}
 				framework.ExpectEqual(ok, true)
 				go checkPod(p)
@@ -863,7 +862,7 @@ var _ = SIGDescribe("Density", func() {
 		}
 		for latencyPodsIteration := 0; latencyPodsIteration < latencyPodsIterations; latencyPodsIteration++ {
 			podIndexOffset := latencyPodsIteration * nodeCount
-			e2elog.Logf("Creating %d latency pods in range [%d, %d]", nodeCount, podIndexOffset+1, podIndexOffset+nodeCount)
+			framework.Logf("Creating %d latency pods in range [%d, %d]", nodeCount, podIndexOffset+1, podIndexOffset+nodeCount)
 
 			watchTimesLen := len(watchTimes)
 
@@ -901,7 +900,7 @@ var _ = SIGDescribe("Density", func() {
 			waitTimeout := 10 * time.Minute
 			for start := time.Now(); len(watchTimes) < watchTimesLen+nodeCount; time.Sleep(10 * time.Second) {
 				if time.Since(start) < waitTimeout {
-					e2elog.Failf("Timeout reached waiting for all Pods being observed by the watch.")
+					framework.Failf("Timeout reached waiting for all Pods being observed by the watch.")
 				}
 			}
 
@@ -913,7 +912,7 @@ var _ = SIGDescribe("Density", func() {
 			}
 			for node, count := range nodeToLatencyPods {
 				if count > 1 {
-					e2elog.Logf("%d latency pods scheduled on %s", count, node)
+					framework.Logf("%d latency pods scheduled on %s", count, node)
 				}
 			}
 		}
@@ -961,22 +960,22 @@ var _ = SIGDescribe("Density", func() {
 		for name, create := range createTimes {
 			sched, ok := scheduleTimes[name]
 			if !ok {
-				e2elog.Logf("Failed to find schedule time for %v", name)
+				framework.Logf("Failed to find schedule time for %v", name)
 				missingMeasurements++
 			}
 			run, ok := runTimes[name]
 			if !ok {
-				e2elog.Logf("Failed to find run time for %v", name)
+				framework.Logf("Failed to find run time for %v", name)
 				missingMeasurements++
 			}
 			watch, ok := watchTimes[name]
 			if !ok {
-				e2elog.Logf("Failed to find watch time for %v", name)
+				framework.Logf("Failed to find watch time for %v", name)
 				missingMeasurements++
 			}
 			node, ok := nodeNames[name]
 			if !ok {
-				e2elog.Logf("Failed to find node for %v", name)
+				framework.Logf("Failed to find node for %v", name)
 				missingMeasurements++
 			}
 
@@ -1062,5 +1061,5 @@ func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns,
 	}
 	framework.ExpectNoError(testutils.CreateRCWithRetries(c, ns, rc))
 	framework.ExpectNoError(e2epod.WaitForControlledPodsRunning(c, ns, name, api.Kind("ReplicationController")))
-	e2elog.Logf("Found pod '%s' running", name)
+	framework.Logf("Found pod '%s' running", name)
 }
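
The AddFunc/UpdateFunc hunk above is wired into client-go's informer machinery; note that the handlers log (rather than panic) on an unexpected object type. A minimal sketch of equivalent wiring, assuming client-go's cache.ResourceEventHandlerFuncs (newPodWatchHandler is an illustrative name):

package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/test/e2e/framework"
)

// newPodWatchHandler mirrors the density test's handlers: cast the
// observed object, log (instead of panicking) on a bad cast, and check
// the pod asynchronously so the watch loop is never blocked.
func newPodWatchHandler(checkPod func(*v1.Pod)) cache.ResourceEventHandlerFuncs {
	handle := func(obj interface{}) {
		p, ok := obj.(*v1.Pod)
		if !ok {
			framework.Logf("Failed to cast observed object to *v1.Pod.")
			return
		}
		go checkPod(p)
	}
	return cache.ResourceEventHandlerFuncs{
		AddFunc:    handle,
		UpdateFunc: func(oldObj, newObj interface{}) { handle(newObj) },
	}
}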

test/e2e/scalability/load.go

Lines changed: 13 additions & 14 deletions
@@ -58,7 +58,6 @@ import (
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 	"k8s.io/kubernetes/test/e2e/framework/timer"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -251,27 +250,27 @@ var _ = SIGDescribe("Load capacity", func() {
 			serviceCreationPhase := testPhaseDurations.StartPhase(120, "services creation")
 			defer serviceCreationPhase.End()
 			if itArg.services {
-				e2elog.Logf("Creating services")
+				framework.Logf("Creating services")
 				services := generateServicesForConfigs(configs)
 				createService := func(i int) {
 					defer ginkgo.GinkgoRecover()
 					framework.ExpectNoError(testutils.CreateServiceWithRetries(clientset, services[i].Namespace, services[i]))
 				}
 				workqueue.ParallelizeUntil(context.TODO(), serviceOperationsParallelism, len(services), createService)
-				e2elog.Logf("%v Services created.", len(services))
+				framework.Logf("%v Services created.", len(services))
 				defer func(services []*v1.Service) {
 					serviceCleanupPhase := testPhaseDurations.StartPhase(800, "services deletion")
 					defer serviceCleanupPhase.End()
-					e2elog.Logf("Starting to delete services...")
+					framework.Logf("Starting to delete services...")
 					deleteService := func(i int) {
 						defer ginkgo.GinkgoRecover()
 						framework.ExpectNoError(testutils.DeleteResourceWithRetries(clientset, api.Kind("Service"), services[i].Namespace, services[i].Name, nil))
 					}
 					workqueue.ParallelizeUntil(context.TODO(), serviceOperationsParallelism, len(services), deleteService)
-					e2elog.Logf("Services deleted")
+					framework.Logf("Services deleted")
 				}(services)
 			} else {
-				e2elog.Logf("Skipping service creation")
+				framework.Logf("Skipping service creation")
 			}
 			serviceCreationPhase.End()
 			// Create all secrets.
@@ -299,7 +298,7 @@ var _ = SIGDescribe("Load capacity", func() {
 				Client:    f.ClientSet,
 				Name:      daemonName,
 				Namespace: f.Namespace.Name,
-				LogFunc:   e2elog.Logf,
+				LogFunc:   framework.Logf,
 			}
 			daemonConfig.Run()
 			defer func(config *testutils.DaemonConfig) {
@@ -328,7 +327,7 @@ var _ = SIGDescribe("Load capacity", func() {
 			// to make it possible to create/schedule them in the meantime.
 			// Currently we assume <throughput> pods/second average throughput.
 			// We may want to revisit it in the future.
-			e2elog.Logf("Starting to create %v objects...", itArg.kind)
+			framework.Logf("Starting to create %v objects...", itArg.kind)
 			creatingTime := time.Duration(totalPods/throughput) * time.Second
 
 			createAllResources(configs, creatingTime, testPhaseDurations.StartPhase(200, "load pods creation"))
@@ -341,15 +340,15 @@ var _ = SIGDescribe("Load capacity", func() {
 			// The expected number of created/deleted pods is totalPods/4 when scaling,
 			// as each RC changes its size from X to a uniform random value in [X/2, 3X/2].
 			scalingTime := time.Duration(totalPods/(4*throughput)) * time.Second
-			e2elog.Logf("Starting to scale %v objects first time...", itArg.kind)
+			framework.Logf("Starting to scale %v objects first time...", itArg.kind)
 			scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(300, "scaling first time"))
 			ginkgo.By("============================================================================")
 
 			// Cleanup all created replication controllers.
 			// Currently we assume <throughput> pods/second average deletion throughput.
 			// We may want to revisit it in the future.
 			deletingTime := time.Duration(totalPods/throughput) * time.Second
-			e2elog.Logf("Starting to delete %v objects...", itArg.kind)
+			framework.Logf("Starting to delete %v objects...", itArg.kind)
 			deleteAllResources(configs, deletingTime, testPhaseDurations.StartPhase(500, "load pods deletion"))
 		})
 	}
@@ -531,7 +530,7 @@ func GenerateConfigsForGroup(
 			Client:    nil, // this will be overwritten later
 			Name:      secretName,
 			Namespace: namespace,
-			LogFunc:   e2elog.Logf,
+			LogFunc:   framework.Logf,
 		})
 		secretNames = append(secretNames, secretName)
 	}
@@ -543,7 +542,7 @@ func GenerateConfigsForGroup(
 			Client:    nil, // this will be overwritten later
 			Name:      configMapName,
 			Namespace: namespace,
-			LogFunc:   e2elog.Logf,
+			LogFunc:   framework.Logf,
 		})
 		configMapNames = append(configMapNames, configMapName)
 	}
@@ -592,7 +591,7 @@ func GenerateConfigsForGroup(
 		case batch.Kind("Job"):
 			config = &testutils.JobConfig{RCConfig: *baseConfig}
 		default:
-			e2elog.Failf("Unsupported kind for config creation: %v", kind)
+			framework.Failf("Unsupported kind for config creation: %v", kind)
 		}
 		configs = append(configs, config)
 	}
@@ -705,7 +704,7 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling
 		if err == nil {
 			return true, nil
 		}
-		e2elog.Logf("Failed to list pods from %v %v due to: %v", config.GetKind(), config.GetName(), err)
+		framework.Logf("Failed to list pods from %v %v due to: %v", config.GetKind(), config.GetName(), err)
 		if testutils.IsRetryableAPIError(err) {
 			return false, nil
 		}
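
The final hunk runs inside a poll loop where returning (true, nil) stops polling, (false, nil) retries, and a non-nil error aborts immediately. A minimal sketch of that idiom using apimachinery's wait package (waitForPodList and its intervals are illustrative; the context-free List signature matches client-go of this vintage):

package sketch

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	testutils "k8s.io/kubernetes/test/utils"
)

// waitForPodList polls until listing pods in ns succeeds, logging each
// failure and retrying only the errors testutils deems retryable.
func waitForPodList(c clientset.Interface, ns string) error {
	return wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
		if _, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}); err != nil {
			framework.Logf("Failed to list pods in %v due to: %v", ns, err)
			if testutils.IsRetryableAPIError(err) {
				return false, nil // transient: poll again
			}
			return false, err // permanent: abort the wait
		}
		return true, nil // done
	})
}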
