@@ -29,7 +29,6 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/gpu"
 	jobutil "k8s.io/kubernetes/test/e2e/framework/job"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
@@ -87,25 +86,25 @@ func logOSImages(f *framework.Framework) {
 	nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
 	framework.ExpectNoError(err, "getting node list")
 	for _, node := range nodeList.Items {
-		e2elog.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage)
+		framework.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage)
 	}
 }
 
 func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool {
-	e2elog.Logf("Getting list of Nodes from API server")
+	framework.Logf("Getting list of Nodes from API server")
 	nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
 	framework.ExpectNoError(err, "getting node list")
 	for _, node := range nodeList.Items {
 		if node.Spec.Unschedulable {
 			continue
 		}
-		e2elog.Logf("gpuResourceName %s", gpuResourceName)
+		framework.Logf("gpuResourceName %s", gpuResourceName)
 		if val, ok := node.Status.Capacity[gpuResourceName]; !ok || val.Value() == 0 {
-			e2elog.Logf("Nvidia GPUs not available on Node: %q", node.Name)
+			framework.Logf("Nvidia GPUs not available on Node: %q", node.Name)
 			return false
 		}
 	}
-	e2elog.Logf("Nvidia GPUs exist on all schedulable nodes")
+	framework.Logf("Nvidia GPUs exist on all schedulable nodes")
 	return true
 }
 
@@ -133,34 +132,34 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
 	}
 	gpuResourceName = gpu.NVIDIAGPUResourceName
 
-	e2elog.Logf("Using %v", dsYamlURL)
+	framework.Logf("Using %v", dsYamlURL)
 	// Creates the DaemonSet that installs Nvidia Drivers.
 	ds, err := framework.DsFromManifest(dsYamlURL)
 	framework.ExpectNoError(err)
 	ds.Namespace = f.Namespace.Name
 	_, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds)
 	framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset")
-	e2elog.Logf("Successfully created daemonset to install Nvidia drivers.")
+	framework.Logf("Successfully created daemonset to install Nvidia drivers.")
 
 	pods, err := e2epod.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet"))
 	framework.ExpectNoError(err, "failed to get pods controlled by the nvidia-driver-installer daemonset")
 
 	devicepluginPods, err := e2epod.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet"))
 	if err == nil {
-		e2elog.Logf("Adding deviceplugin addon pod.")
+		framework.Logf("Adding deviceplugin addon pod.")
 		pods.Items = append(pods.Items, devicepluginPods.Items...)
 	}
 
 	var rsgather *framework.ContainerResourceGatherer
 	if setupResourceGatherer {
-		e2elog.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
+		framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
 		rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, Nodes: framework.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
 		framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
 		go rsgather.StartGatheringData()
 	}
 
 	// Wait for Nvidia GPUs to be available on nodes
-	e2elog.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
+	framework.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
 	gomega.Eventually(func() bool {
 		return areGPUsAvailableOnAllSchedulableNodes(f)
 	}, driverInstallTimeout, time.Second).Should(gomega.BeTrue())
@@ -182,19 +181,19 @@ func getGPUsPerPod() int64 {
 func testNvidiaGPUs(f *framework.Framework) {
 	rsgather := SetupNVIDIAGPUNode(f, true)
 	gpuPodNum := getGPUsAvailable(f) / getGPUsPerPod()
-	e2elog.Logf("Creating %d pods and have the pods run a CUDA app", gpuPodNum)
+	framework.Logf("Creating %d pods and have the pods run a CUDA app", gpuPodNum)
 	podList := []*v1.Pod{}
 	for i := int64(0); i < gpuPodNum; i++ {
 		podList = append(podList, f.PodClient().Create(makeCudaAdditionDevicePluginTestPod()))
 	}
-	e2elog.Logf("Wait for all test pods to succeed")
+	framework.Logf("Wait for all test pods to succeed")
 	// Wait for all pods to succeed
 	for _, pod := range podList {
 		f.PodClient().WaitForSuccess(pod.Name, 5*time.Minute)
 		logContainers(f, pod)
 	}
 
-	e2elog.Logf("Stopping ResourceUsageGather")
+	framework.Logf("Stopping ResourceUsageGather")
 	constraints := make(map[string]framework.ResourceConstraint)
 	// For now, just gets summary. Can pass valid constraints in the future.
 	summary, err := rsgather.StopAndSummarize([]int{50, 90, 100}, constraints)
@@ -206,7 +205,7 @@ func logContainers(f *framework.Framework, pod *v1.Pod) {
 	for _, container := range pod.Spec.Containers {
 		logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, container.Name)
 		framework.ExpectNoError(err, "Should be able to get container logs for container: %s", container.Name)
-		e2elog.Logf("Got container logs for %s:\n%v", container.Name, logs)
+		framework.Logf("Got container logs for %s:\n%v", container.Name, logs)
 	}
 }
 
@@ -273,7 +272,7 @@ func StartJob(f *framework.Framework, completions int32) {
 	ns := f.Namespace.Name
 	_, err := jobutil.CreateJob(f.ClientSet, ns, testJob)
 	framework.ExpectNoError(err)
-	e2elog.Logf("Created job %v", testJob)
+	framework.Logf("Created job %v", testJob)
 }
 
 // VerifyJobNCompletions verifies that the job has completions number of successful pods
@@ -283,7 +282,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) {
 	framework.ExpectNoError(err)
 	createdPods := pods.Items
 	createdPodNames := podNames(createdPods)
-	e2elog.Logf("Got the following pods for job cuda-add: %v", createdPodNames)
+	framework.Logf("Got the following pods for job cuda-add: %v", createdPodNames)
 
 	successes := int32(0)
 	for _, podName := range createdPodNames {
@@ -296,7 +295,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) {
 		}
 	}
 	if successes != completions {
-		e2elog.Failf("Only got %v completions. Expected %v completions.", successes, completions)
+		framework.Failf("Only got %v completions. Expected %v completions.", successes, completions)
 	}
 }
 
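The commit is a mechanical migration: the e2elog sub-package import is dropped and every e2elog.Logf/e2elog.Failf call in these GPU e2e helpers goes through the top-level framework package instead. A minimal sketch of the resulting style follows; the helper checkSchedulableNodeCount, its threshold parameter, and the package clause are invented for illustration, while the framework.Logf, framework.Failf, and framework.ExpectNoError calls and the node-listing pattern are taken directly from the diff above.

package scheduling // package name assumed; the diff does not show the package clause

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// checkSchedulableNodeCount is a hypothetical helper (not part of this commit)
// showing the post-migration logging style: progress messages go through
// framework.Logf, hard failures through framework.Failf, and error checks
// through framework.ExpectNoError, with no separate e2elog import.
func checkSchedulableNodeCount(f *framework.Framework, want int) {
	nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
	framework.ExpectNoError(err, "getting node list")

	schedulable := 0
	for _, node := range nodeList.Items {
		if node.Spec.Unschedulable {
			continue
		}
		schedulable++
	}
	framework.Logf("Found %d schedulable nodes", schedulable)

	if schedulable < want {
		framework.Failf("Only got %d schedulable nodes. Expected at least %d.", schedulable, want)
	}
}

The practical effect for callers is simply that the e2elog alias is no longer needed; the framework package already exposes the same Logf and Failf helpers, so the call sites change prefix and nothing else.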