@@ -30,7 +30,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@@ -74,7 +74,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 			framework.ExpectNoError(err)
 
 			for _, e := range events.Items {
-				e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
+				framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
 			}
 		}
 		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
@@ -138,7 +138,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 	nodelist := framework.GetReadySchedulableNodesOrDie(c)
 	if hook != nil {
 		defer func() {
-			e2elog.Logf("Executing termination hook on nodes")
+			framework.Logf("Executing termination hook on nodes")
 			hook(framework.TestContext.Provider, nodelist)
 		}()
 	}
@@ -165,10 +165,10 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 		for ix := range nodelist.Items {
 			n := nodelist.Items[ix]
 			if !result[ix] {
-				e2elog.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
+				framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
 			}
 		}
-		e2elog.Failf("Test failed; at least one node failed to reboot in the time given.")
+		framework.Failf("Test failed; at least one node failed to reboot in the time given.")
 	}
 }
 
@@ -179,9 +179,9 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 			prefix = "Retrieving log for the last terminated container"
 		}
 		if err != nil {
-			e2elog.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
+			framework.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
 		} else {
-			e2elog.Logf("%s %s:\n%s\n", prefix, id, log)
+			framework.Logf("%s %s:\n%s\n", prefix, id, log)
 		}
 	}
 	podNameSet := sets.NewString(podNames...)
@@ -195,7 +195,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 		if ok, _ := testutils.PodRunningReady(p); ok {
 			continue
 		}
-		e2elog.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
+		framework.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
 		// Print the log of the containers if pod is not running and ready.
 		for _, container := range p.Status.ContainerStatuses {
 			cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
@@ -224,16 +224,16 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 	ns := metav1.NamespaceSystem
 	ps, err := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
 	if err != nil {
-		e2elog.Logf("Couldn't initialize pod store: %v", err)
+		framework.Logf("Couldn't initialize pod store: %v", err)
 		return false
 	}
 	defer ps.Stop()
 
 	// Get the node initially.
-	e2elog.Logf("Getting %s", name)
+	framework.Logf("Getting %s", name)
 	node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
 	if err != nil {
-		e2elog.Logf("Couldn't get node %s", name)
+		framework.Logf("Couldn't get node %s", name)
 		return false
 	}
 
@@ -258,7 +258,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 			podNames = append(podNames, p.ObjectMeta.Name)
 		}
 	}
-	e2elog.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
+	framework.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
 
 	// For each pod, we do a sanity check to ensure it's running / healthy
 	// or succeeded now, as that's what we'll be checking later.
@@ -269,7 +269,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 
 	// Reboot the node.
 	if err = e2essh.IssueSSHCommand(rebootCmd, provider, node); err != nil {
-		e2elog.Logf("Error while issuing ssh command: %v", err)
+		framework.Logf("Error while issuing ssh command: %v", err)
 		return false
 	}
 
@@ -291,7 +291,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 		return false
 	}
 
-	e2elog.Logf("Reboot successful on node %s", name)
+	framework.Logf("Reboot successful on node %s", name)
 	return true
 }
 
@@ -302,7 +302,7 @@ func catLogHook(logPath string) terminationHook {
 		for _, n := range nodes.Items {
 			cmd := fmt.Sprintf("cat %v && rm %v", logPath, logPath)
 			if _, err := e2essh.IssueSSHCommandWithResult(cmd, provider, &n); err != nil {
-				e2elog.Logf("Error while issuing ssh command: %v", err)
+				framework.Logf("Error while issuing ssh command: %v", err)
 			}
 		}
 
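Every hunk above applies the same mechanical change: the `e2elog` alias for `k8s.io/kubernetes/test/e2e/framework/log` is dropped and the equivalent Printf-style helpers are called on the `framework` package directly. Below is a minimal sketch of the resulting call pattern; the package name and the `logRebootOutcome` helper are illustrative only and do not appear in this diff.

```go
// Illustrative sketch, not part of the diff.
package rebootsketch

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

// logRebootOutcome is a hypothetical helper showing the post-migration style:
// informational messages go through framework.Logf and hard test failures
// through framework.Failf, both of which take Printf-style format strings.
func logRebootOutcome(name string, ok bool) {
	if ok {
		framework.Logf("Reboot successful on node %s", name)
		return
	}
	framework.Failf("Node %s failed to reboot in the time given.", name)
}
```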