Commit e60fd89

Merge pull request kubernetes#81982 from s-ito-ts/81427_e2e_scheduling
Use log functions of core framework on test/e2e/scheduling
2 parents: 3bf2b12 + 8745f02

File tree: 10 files changed, +104 -114 lines
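
The change follows one pattern in every file: the e2elog alias for k8s.io/kubernetes/test/e2e/framework/log is dropped from the imports (together with the matching dependency in the BUILD file), and call sites use the Logf/Failf helpers exposed directly by the core framework package. A minimal sketch of the resulting call sites is below; the logAndCheck helper is hypothetical and only illustrates the pattern, it does not appear in the diff.

    package scheduling

    import (
        // Before this commit, these files also imported
        //   e2elog "k8s.io/kubernetes/test/e2e/framework/log"
        // and logged via e2elog.Logf / e2elog.Failf.
        "k8s.io/kubernetes/test/e2e/framework"
    )

    // logAndCheck is an illustrative helper: the same Logf/Failf entry points
    // are now reached through the core framework package.
    func logAndCheck(err error) {
        if err != nil {
            framework.Failf("Unexpected error occurred: %v", err)
        }
        framework.Logf("observed the expected state")
    }

Call sites that passed e2elog.Logf as a function value (the "}), true, e2elog.Logf))" lines in predicates.go) now pass framework.Logf instead; the function signature is compatible, so no other change is needed.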

test/e2e/scheduling/BUILD

Lines changed: 0 additions & 1 deletion

@@ -46,7 +46,6 @@ go_library(
         "//test/e2e/framework/gpu:go_default_library",
         "//test/e2e/framework/job:go_default_library",
         "//test/e2e/framework/kubelet:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/providers/gce:go_default_library",

test/e2e/scheduling/limit_range.go

Lines changed: 12 additions & 13 deletions

@@ -33,7 +33,6 @@ import (
     "k8s.io/client-go/tools/cache"
     watchtools "k8s.io/client-go/tools/watch"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2eservice "k8s.io/kubernetes/test/e2e/framework/service"

     "github.com/onsi/ginkgo"
@@ -86,10 +85,10 @@ var _ = SIGDescribe("LimitRange", func() {
             if err == nil {
                 select {
                 case listCompleted <- true:
-                    e2elog.Logf("observed the limitRanges list")
+                    framework.Logf("observed the limitRanges list")
                     return limitRanges, err
                 default:
-                    e2elog.Logf("channel blocked")
+                    framework.Logf("channel blocked")
                 }
             }
             return limitRanges, err
@@ -112,13 +111,13 @@ var _ = SIGDescribe("LimitRange", func() {
         select {
         case event, _ := <-w.ResultChan():
             if event.Type != watch.Added {
-                e2elog.Failf("Failed to observe limitRange creation : %v", event)
+                framework.Failf("Failed to observe limitRange creation : %v", event)
             }
         case <-time.After(e2eservice.RespondingTimeout):
-            e2elog.Failf("Timeout while waiting for LimitRange creation")
+            framework.Failf("Timeout while waiting for LimitRange creation")
         }
     case <-time.After(e2eservice.RespondingTimeout):
-        e2elog.Failf("Timeout while waiting for LimitRange list complete")
+        framework.Failf("Timeout while waiting for LimitRange list complete")
     }

     ginkgo.By("Fetching the LimitRange to ensure it has proper values")
@@ -141,7 +140,7 @@ var _ = SIGDescribe("LimitRange", func() {
         err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
         if err != nil {
             // Print the pod to help in debugging.
-            e2elog.Logf("Pod %+v does not have the expected requirements", pod)
+            framework.Logf("Pod %+v does not have the expected requirements", pod)
             framework.ExpectNoError(err)
         }
     }
@@ -162,7 +161,7 @@ var _ = SIGDescribe("LimitRange", func() {
         err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
         if err != nil {
             // Print the pod to help in debugging.
-            e2elog.Logf("Pod %+v does not have the expected requirements", pod)
+            framework.Logf("Pod %+v does not have the expected requirements", pod)
             framework.ExpectNoError(err)
         }
     }
@@ -212,18 +211,18 @@ var _ = SIGDescribe("LimitRange", func() {
         limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)

         if err != nil {
-            e2elog.Logf("Unable to retrieve LimitRanges: %v", err)
+            framework.Logf("Unable to retrieve LimitRanges: %v", err)
             return false, nil
         }

         if len(limitRanges.Items) == 0 {
-            e2elog.Logf("limitRange is already deleted")
+            framework.Logf("limitRange is already deleted")
             return true, nil
         }

         if len(limitRanges.Items) > 0 {
             if limitRanges.Items[0].ObjectMeta.DeletionTimestamp == nil {
-                e2elog.Logf("deletion has not yet been observed")
+                framework.Logf("deletion has not yet been observed")
                 return false, nil
             }
             return true, nil
@@ -244,12 +243,12 @@ var _ = SIGDescribe("LimitRange", func() {
 })

 func equalResourceRequirement(expected v1.ResourceRequirements, actual v1.ResourceRequirements) error {
-    e2elog.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
+    framework.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
     err := equalResourceList(expected.Requests, actual.Requests)
     if err != nil {
         return err
     }
-    e2elog.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits)
+    framework.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits)
     err = equalResourceList(expected.Limits, actual.Limits)
     return err
 }

test/e2e/scheduling/nvidia-gpus.go

Lines changed: 17 additions & 18 deletions

@@ -29,7 +29,6 @@ import (
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/framework/gpu"
     jobutil "k8s.io/kubernetes/test/e2e/framework/job"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     "k8s.io/kubernetes/test/e2e/framework/providers/gce"
@@ -87,25 +86,25 @@ func logOSImages(f *framework.Framework) {
     nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
     framework.ExpectNoError(err, "getting node list")
     for _, node := range nodeList.Items {
-        e2elog.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage)
+        framework.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage)
     }
 }

 func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool {
-    e2elog.Logf("Getting list of Nodes from API server")
+    framework.Logf("Getting list of Nodes from API server")
     nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
     framework.ExpectNoError(err, "getting node list")
     for _, node := range nodeList.Items {
         if node.Spec.Unschedulable {
             continue
         }
-        e2elog.Logf("gpuResourceName %s", gpuResourceName)
+        framework.Logf("gpuResourceName %s", gpuResourceName)
         if val, ok := node.Status.Capacity[gpuResourceName]; !ok || val.Value() == 0 {
-            e2elog.Logf("Nvidia GPUs not available on Node: %q", node.Name)
+            framework.Logf("Nvidia GPUs not available on Node: %q", node.Name)
             return false
         }
     }
-    e2elog.Logf("Nvidia GPUs exist on all schedulable nodes")
+    framework.Logf("Nvidia GPUs exist on all schedulable nodes")
     return true
 }

@@ -133,34 +132,34 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
     }
     gpuResourceName = gpu.NVIDIAGPUResourceName

-    e2elog.Logf("Using %v", dsYamlURL)
+    framework.Logf("Using %v", dsYamlURL)
     // Creates the DaemonSet that installs Nvidia Drivers.
     ds, err := framework.DsFromManifest(dsYamlURL)
     framework.ExpectNoError(err)
     ds.Namespace = f.Namespace.Name
     _, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds)
     framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset")
-    e2elog.Logf("Successfully created daemonset to install Nvidia drivers.")
+    framework.Logf("Successfully created daemonset to install Nvidia drivers.")

     pods, err := e2epod.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet"))
     framework.ExpectNoError(err, "failed to get pods controlled by the nvidia-driver-installer daemonset")

     devicepluginPods, err := e2epod.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet"))
     if err == nil {
-        e2elog.Logf("Adding deviceplugin addon pod.")
+        framework.Logf("Adding deviceplugin addon pod.")
         pods.Items = append(pods.Items, devicepluginPods.Items...)
     }

     var rsgather *framework.ContainerResourceGatherer
     if setupResourceGatherer {
-        e2elog.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
+        framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
         rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, Nodes: framework.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
         framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
         go rsgather.StartGatheringData()
     }

     // Wait for Nvidia GPUs to be available on nodes
-    e2elog.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
+    framework.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
     gomega.Eventually(func() bool {
         return areGPUsAvailableOnAllSchedulableNodes(f)
     }, driverInstallTimeout, time.Second).Should(gomega.BeTrue())
@@ -182,19 +181,19 @@ func getGPUsPerPod() int64 {
 func testNvidiaGPUs(f *framework.Framework) {
     rsgather := SetupNVIDIAGPUNode(f, true)
     gpuPodNum := getGPUsAvailable(f) / getGPUsPerPod()
-    e2elog.Logf("Creating %d pods and have the pods run a CUDA app", gpuPodNum)
+    framework.Logf("Creating %d pods and have the pods run a CUDA app", gpuPodNum)
     podList := []*v1.Pod{}
     for i := int64(0); i < gpuPodNum; i++ {
         podList = append(podList, f.PodClient().Create(makeCudaAdditionDevicePluginTestPod()))
     }
-    e2elog.Logf("Wait for all test pods to succeed")
+    framework.Logf("Wait for all test pods to succeed")
     // Wait for all pods to succeed
     for _, pod := range podList {
         f.PodClient().WaitForSuccess(pod.Name, 5*time.Minute)
         logContainers(f, pod)
     }

-    e2elog.Logf("Stopping ResourceUsageGather")
+    framework.Logf("Stopping ResourceUsageGather")
     constraints := make(map[string]framework.ResourceConstraint)
     // For now, just gets summary. Can pass valid constraints in the future.
     summary, err := rsgather.StopAndSummarize([]int{50, 90, 100}, constraints)
@@ -206,7 +205,7 @@ func logContainers(f *framework.Framework, pod *v1.Pod) {
     for _, container := range pod.Spec.Containers {
         logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, container.Name)
         framework.ExpectNoError(err, "Should be able to get container logs for container: %s", container.Name)
-        e2elog.Logf("Got container logs for %s:\n%v", container.Name, logs)
+        framework.Logf("Got container logs for %s:\n%v", container.Name, logs)
     }
 }

@@ -273,7 +272,7 @@ func StartJob(f *framework.Framework, completions int32) {
     ns := f.Namespace.Name
     _, err := jobutil.CreateJob(f.ClientSet, ns, testJob)
     framework.ExpectNoError(err)
-    e2elog.Logf("Created job %v", testJob)
+    framework.Logf("Created job %v", testJob)
 }

 // VerifyJobNCompletions verifies that the job has completions number of successful pods
@@ -283,7 +282,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) {
     framework.ExpectNoError(err)
     createdPods := pods.Items
     createdPodNames := podNames(createdPods)
-    e2elog.Logf("Got the following pods for job cuda-add: %v", createdPodNames)
+    framework.Logf("Got the following pods for job cuda-add: %v", createdPodNames)

     successes := int32(0)
     for _, podName := range createdPodNames {
@@ -296,7 +295,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) {
         }
     }
     if successes != completions {
-        e2elog.Failf("Only got %v completions. Expected %v completions.", successes, completions)
+        framework.Failf("Only got %v completions. Expected %v completions.", successes, completions)
     }
 }

test/e2e/scheduling/predicates.go

Lines changed: 11 additions & 12 deletions

@@ -31,7 +31,6 @@ import (
     "k8s.io/kubernetes/test/e2e/common"
     "k8s.io/kubernetes/test/e2e/framework"
     e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     testutils "k8s.io/kubernetes/test/utils"
@@ -95,11 +94,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
         // NOTE: Here doesn't get nodeList for supporting a master nodes which can host workload pods.
         masterNodes, _, err = e2enode.GetMasterAndWorkerNodes(cs)
         if err != nil {
-            e2elog.Logf("Unexpected error occurred: %v", err)
+            framework.Logf("Unexpected error occurred: %v", err)
         }
         nodeList, err = e2enode.GetReadySchedulableNodesOrDie(cs)
         if err != nil {
-            e2elog.Logf("Unexpected error occurred: %v", err)
+            framework.Logf("Unexpected error occurred: %v", err)
         }

         // TODO: write a wrapper for ExpectNoErrorWithOffset()
@@ -109,7 +108,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
         framework.ExpectNoError(err)

         for _, node := range nodeList.Items {
-            e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
+            framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
             e2ekubelet.PrintAllKubeletPods(cs, node.Name)
         }

@@ -124,7 +123,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
         totalPodCapacity = 0

         for _, node := range nodeList.Items {
-            e2elog.Logf("Node: %v", node)
+            framework.Logf("Node: %v", node)
             podCapacity, found := node.Status.Capacity[v1.ResourcePods]
             framework.ExpectEqual(found, true)
             totalPodCapacity += podCapacity.Value()
@@ -144,7 +143,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
             *initPausePod(f, pausePodConfig{
                 Name: "",
                 Labels: map[string]string{"name": ""},
-            }), true, e2elog.Logf))
+            }), true, framework.Logf))
         }
         podName := "additional-pod"
         WaitForSchedulerAfterAction(f, createPausePodAction(f, pausePodConfig{
@@ -179,7 +178,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
         for _, pod := range pods.Items {
             _, found := nodeToAllocatableMap[pod.Spec.NodeName]
             if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
-                e2elog.Logf("Pod %v requesting local ephemeral resource =%v on Node %v", pod.Name, getRequestedStorageEphemeralStorage(pod), pod.Spec.NodeName)
+                framework.Logf("Pod %v requesting local ephemeral resource =%v on Node %v", pod.Name, getRequestedStorageEphemeralStorage(pod), pod.Spec.NodeName)
                 nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedStorageEphemeralStorage(pod)
             }
         }
@@ -189,9 +188,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {

         ephemeralStoragePerPod = nodeMaxAllocatable / maxNumberOfPods

-        e2elog.Logf("Using pod capacity: %v", ephemeralStoragePerPod)
+        framework.Logf("Using pod capacity: %v", ephemeralStoragePerPod)
         for name, leftAllocatable := range nodeToAllocatableMap {
-            e2elog.Logf("Node: %v has local ephemeral resource allocatable: %v", name, leftAllocatable)
+            framework.Logf("Node: %v has local ephemeral resource allocatable: %v", name, leftAllocatable)
             podsNeededForSaturation += (int)(leftAllocatable / ephemeralStoragePerPod)
         }

@@ -214,7 +213,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
                     v1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStoragePerPod, "DecimalSI"),
                 },
             },
-            }), true, e2elog.Logf))
+            }), true, framework.Logf))
         }
         podName := "additional-pod"
         conf := pausePodConfig{
@@ -284,7 +283,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
         for _, pod := range pods.Items {
             _, found := nodeToAllocatableMap[pod.Spec.NodeName]
             if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
-                e2elog.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
+                framework.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
                 nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
             }
         }
@@ -294,7 +293,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
         fillerPods := []*v1.Pod{}
         for nodeName, cpu := range nodeToAllocatableMap {
             requestedCPU := cpu * 7 / 10
-            e2elog.Logf("Creating a pod which consumes cpu=%vm on Node %v", requestedCPU, nodeName)
+            framework.Logf("Creating a pod which consumes cpu=%vm on Node %v", requestedCPU, nodeName)
             fillerPods = append(fillerPods, createPausePod(f, pausePodConfig{
                 Name: "filler-pod-" + string(uuid.NewUUID()),
                 Resources: &v1.ResourceRequirements{
