@@ -30,7 +30,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@@ -74,7 +73,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 			framework.ExpectNoError(err)
 
 			for _, e := range events.Items {
-				e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
+				framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
 			}
 		}
 		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
@@ -138,7 +137,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 	nodelist := framework.GetReadySchedulableNodesOrDie(c)
 	if hook != nil {
 		defer func() {
-			e2elog.Logf("Executing termination hook on nodes")
+			framework.Logf("Executing termination hook on nodes")
 			hook(framework.TestContext.Provider, nodelist)
 		}()
 	}
@@ -165,10 +164,10 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 		for ix := range nodelist.Items {
 			n := nodelist.Items[ix]
 			if !result[ix] {
-				e2elog.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
+				framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
 			}
 		}
-		e2elog.Failf("Test failed; at least one node failed to reboot in the time given.")
+		framework.Failf("Test failed; at least one node failed to reboot in the time given.")
 	}
 }
 
@@ -179,9 +178,9 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 			prefix = "Retrieving log for the last terminated container"
 		}
 		if err != nil {
-			e2elog.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
+			framework.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
 		} else {
-			e2elog.Logf("%s %s:\n%s\n", prefix, id, log)
+			framework.Logf("%s %s:\n%s\n", prefix, id, log)
 		}
 	}
 	podNameSet := sets.NewString(podNames...)
@@ -195,7 +194,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 		if ok, _ := testutils.PodRunningReady(p); ok {
 			continue
 		}
-		e2elog.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
+		framework.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
 		// Print the log of the containers if pod is not running and ready.
 		for _, container := range p.Status.ContainerStatuses {
 			cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
@@ -224,16 +223,16 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 	ns := metav1.NamespaceSystem
 	ps, err := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
 	if err != nil {
-		e2elog.Logf("Couldn't initialize pod store: %v", err)
+		framework.Logf("Couldn't initialize pod store: %v", err)
 		return false
 	}
 	defer ps.Stop()
 
 	// Get the node initially.
-	e2elog.Logf("Getting %s", name)
+	framework.Logf("Getting %s", name)
 	node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
 	if err != nil {
-		e2elog.Logf("Couldn't get node %s", name)
+		framework.Logf("Couldn't get node %s", name)
 		return false
 	}
 
@@ -258,7 +257,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 			podNames = append(podNames, p.ObjectMeta.Name)
 		}
 	}
-	e2elog.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
+	framework.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
 
 	// For each pod, we do a sanity check to ensure it's running / healthy
 	// or succeeded now, as that's what we'll be checking later.
@@ -269,7 +268,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 
 	// Reboot the node.
 	if err = e2essh.IssueSSHCommand(rebootCmd, provider, node); err != nil {
-		e2elog.Logf("Error while issuing ssh command: %v", err)
+		framework.Logf("Error while issuing ssh command: %v", err)
 		return false
 	}
 
@@ -291,7 +290,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 		return false
 	}
 
-	e2elog.Logf("Reboot successful on node %s", name)
+	framework.Logf("Reboot successful on node %s", name)
 	return true
 }
 
@@ -302,7 +301,7 @@ func catLogHook(logPath string) terminationHook {
 		for _, n := range nodes.Items {
 			cmd := fmt.Sprintf("cat %v && rm %v", logPath, logPath)
 			if _, err := e2essh.IssueSSHCommandWithResult(cmd, provider, &n); err != nil {
-				e2elog.Logf("Error while issuing ssh command: %v", err)
+				framework.Logf("Error while issuing ssh command: %v", err)
 			}
 		}
 
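
Every hunk above makes the same substitution: the test calls the Logf/Failf helpers exported directly by k8s.io/kubernetes/test/e2e/framework instead of going through the e2elog sub-package. A minimal sketch of the resulting call pattern, with a hypothetical helper name (only the framework.Logf and framework.Failf calls mirror the diff):

package reboot

import "k8s.io/kubernetes/test/e2e/framework"

// reportRebootResult is a hypothetical illustration, not part of this commit.
// It logs a node's reboot outcome and fails the test run on failure, using the
// framework-level helpers that this change switches to.
func reportRebootResult(nodeName string, rebooted bool) {
	framework.Logf("Node %s reboot result: %v", nodeName, rebooted)
	if !rebooted {
		framework.Failf("Test failed; node %s failed to reboot in the time given.", nodeName)
	}
}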