@@ -22,14 +22,15 @@ import (
 	"sync"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
 	clientset "k8s.io/client-go/kubernetes"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	testutils "k8s.io/kubernetes/test/utils"
 
 	"github.com/onsi/ginkgo"
@@ -71,7 +72,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 			for _, e := range events.Items {
-				framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
+				e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
 			}
 		}
 		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
@@ -135,7 +136,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 	nodelist := framework.GetReadySchedulableNodesOrDie(c)
 	if hook != nil {
 		defer func() {
-			framework.Logf("Executing termination hook on nodes")
+			e2elog.Logf("Executing termination hook on nodes")
 			hook(framework.TestContext.Provider, nodelist)
 		}()
 	}
@@ -162,7 +163,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 		for ix := range nodelist.Items {
 			n := nodelist.Items[ix]
 			if !result[ix] {
-				framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
+				e2elog.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
 			}
 		}
 		framework.Failf("Test failed; at least one node failed to reboot in the time given.")
@@ -176,9 +177,9 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 			prefix = "Retrieving log for the last terminated container"
 		}
 		if err != nil {
-			framework.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
+			e2elog.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
 		} else {
-			framework.Logf("%s %s:\n%s\n", prefix, id, log)
+			e2elog.Logf("%s %s:\n%s\n", prefix, id, log)
 		}
 	}
 	podNameSet := sets.NewString(podNames...)
@@ -192,7 +193,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 		if ok, _ := testutils.PodRunningReady(p); ok {
 			continue
 		}
-		framework.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
+		e2elog.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
 		// Print the log of the containers if pod is not running and ready.
 		for _, container := range p.Status.ContainerStatuses {
 			cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
@@ -221,16 +222,16 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 	ns := metav1.NamespaceSystem
 	ps, err := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
 	if err != nil {
-		framework.Logf("Couldn't initialize pod store: %v", err)
+		e2elog.Logf("Couldn't initialize pod store: %v", err)
 		return false
 	}
 	defer ps.Stop()
 
 	// Get the node initially.
-	framework.Logf("Getting %s", name)
+	e2elog.Logf("Getting %s", name)
 	node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
 	if err != nil {
-		framework.Logf("Couldn't get node %s", name)
+		e2elog.Logf("Couldn't get node %s", name)
 		return false
 	}
 
@@ -255,7 +256,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 			podNames = append(podNames, p.ObjectMeta.Name)
 		}
 	}
-	framework.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
+	e2elog.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
 
 	// For each pod, we do a sanity check to ensure it's running / healthy
 	// or succeeded now, as that's what we'll be checking later.
@@ -266,7 +267,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 
 	// Reboot the node.
 	if err = framework.IssueSSHCommand(rebootCmd, provider, node); err != nil {
-		framework.Logf("Error while issuing ssh command: %v", err)
+		e2elog.Logf("Error while issuing ssh command: %v", err)
 		return false
 	}
 
@@ -288,7 +289,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 		return false
 	}
 
-	framework.Logf("Reboot successful on node %s", name)
+	e2elog.Logf("Reboot successful on node %s", name)
 	return true
 }
 
@@ -299,7 +300,7 @@ func catLogHook(logPath string) terminationHook {
 		for _, n := range nodes.Items {
 			cmd := fmt.Sprintf("cat %v && rm %v", logPath, logPath)
 			if _, err := framework.IssueSSHCommandWithResult(cmd, provider, &n); err != nil {
-				framework.Logf("Error while issuing ssh command: %v", err)
+				e2elog.Logf("Error while issuing ssh command: %v", err)
 			}
 		}
 