
Commit 873e7a3

Merge pull request kubernetes#77533 from jiatongw/logf/lifecycle
Modify e2e/lifecycle tests to import e2elog.Logf
2 parents: 11a46d2 + b26b6f0

File tree

6 files changed: +30 −24 lines
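Every file below applies the same mechanical substitution: each test keeps importing the framework package for everything else, but log lines go through the dedicated log subpackage under the e2elog alias. A minimal sketch of the before/after call-site pattern, using illustrative helper names that are not part of the commit:

package lifecycle

import (
    "k8s.io/kubernetes/test/e2e/framework"
    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// logNodeReadyOld shows the old call site: logging went through the framework package.
func logNodeReadyOld(name string) {
    framework.Logf("Node %s is ready", name)
}

// logNodeReady shows the new call site: the same format string and arguments,
// routed through the e2elog alias instead.
func logNodeReady(name string) {
    e2elog.Logf("Node %s is ready", name)
}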

test/e2e/lifecycle/BUILD

Lines changed: 1 addition & 0 deletions
@@ -35,6 +35,7 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/ginkgowrapper:go_default_library",
         "//test/e2e/framework/lifecycle:go_default_library",
+        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/upgrades:go_default_library",
         "//test/e2e/upgrades/apps:go_default_library",
         "//test/e2e/upgrades/storage:go_default_library",

test/e2e/lifecycle/addon_update.go

Lines changed: 4 additions & 3 deletions
@@ -29,6 +29,7 @@ import (
     "k8s.io/apimachinery/pkg/labels"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2elog "k8s.io/kubernetes/test/e2e/framework/log"

     "github.com/onsi/ginkgo"
     "github.com/onsi/gomega"
@@ -297,7 +298,7 @@ var _ = SIGDescribe("Addon update", func() {
         sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonEnsureExists, destinationDir, svcAddonEnsureExists))
         // Delete the "ensure exist class" addon at the end.
         defer func() {
-            framework.Logf("Cleaning up ensure exist class addon.")
+            e2elog.Logf("Cleaning up ensure exist class addon.")
             gomega.Expect(f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)).NotTo(gomega.HaveOccurred())
         }()

@@ -389,7 +390,7 @@ func sshExecAndVerify(client *ssh.Client, cmd string) {
 }

 func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
-    framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
+    e2elog.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
     session, err := client.NewSession()
     if err != nil {
         return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err)
@@ -421,7 +422,7 @@ func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
 }

 func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.FileMode) error {
-    framework.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
+    e2elog.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
     session, err := sshClient.NewSession()
     if err != nil {
         return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err)

test/e2e/lifecycle/ha_master.go

Lines changed: 5 additions & 4 deletions
@@ -28,10 +28,11 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/common"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )

 func addMasterReplica(zone string) error {
-    framework.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
+    e2elog.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
     _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "true", "false")
     if err != nil {
         return err
@@ -40,7 +41,7 @@ func addMasterReplica(zone string) error {
 }

 func removeMasterReplica(zone string) error {
-    framework.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
+    e2elog.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
     _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "false", "false")
     if err != nil {
         return err
@@ -49,7 +50,7 @@ func removeMasterReplica(zone string) error {
 }

 func addWorkerNodes(zone string) error {
-    framework.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
+    e2elog.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
     _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "false", "true")
     if err != nil {
         return err
@@ -58,7 +59,7 @@ func addWorkerNodes(zone string) error {
 }

 func removeWorkerNodes(zone string) error {
-    framework.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
+    e2elog.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
     _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "true", "true")
     if err != nil {
         return err

test/e2e/lifecycle/node_lease.go

Lines changed: 2 additions & 1 deletion
@@ -25,6 +25,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2elog "k8s.io/kubernetes/test/e2e/framework/log"

     "github.com/onsi/ginkgo"
     "github.com/onsi/gomega"
@@ -108,7 +109,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
     pass := true
     for _, node := range originalNodes.Items {
         if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
-            framework.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
+            e2elog.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
             pass = false
         }
     }

test/e2e/lifecycle/reboot.go

Lines changed: 15 additions & 14 deletions
@@ -22,14 +22,15 @@ import (
     "sync"
     "time"

-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/sets"
     clientset "k8s.io/client-go/kubernetes"
     api "k8s.io/kubernetes/pkg/apis/core"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     testutils "k8s.io/kubernetes/test/utils"

     "github.com/onsi/ginkgo"
@@ -71,7 +72,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
         gomega.Expect(err).NotTo(gomega.HaveOccurred())

         for _, e := range events.Items {
-            framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
+            e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
         }
     }
     // In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
@@ -135,7 +136,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
     nodelist := framework.GetReadySchedulableNodesOrDie(c)
     if hook != nil {
         defer func() {
-            framework.Logf("Executing termination hook on nodes")
+            e2elog.Logf("Executing termination hook on nodes")
             hook(framework.TestContext.Provider, nodelist)
         }()
     }
@@ -162,7 +163,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
     for ix := range nodelist.Items {
         n := nodelist.Items[ix]
         if !result[ix] {
-            framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
+            e2elog.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
         }
     }
     framework.Failf("Test failed; at least one node failed to reboot in the time given.")
@@ -176,9 +177,9 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
             prefix = "Retrieving log for the last terminated container"
         }
         if err != nil {
-            framework.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
+            e2elog.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
         } else {
-            framework.Logf("%s %s:\n%s\n", prefix, id, log)
+            e2elog.Logf("%s %s:\n%s\n", prefix, id, log)
         }
     }
     podNameSet := sets.NewString(podNames...)
@@ -192,7 +193,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
         if ok, _ := testutils.PodRunningReady(p); ok {
             continue
         }
-        framework.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
+        e2elog.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
         // Print the log of the containers if pod is not running and ready.
         for _, container := range p.Status.ContainerStatuses {
             cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
@@ -221,16 +222,16 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
     ns := metav1.NamespaceSystem
     ps, err := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
     if err != nil {
-        framework.Logf("Couldn't initialize pod store: %v", err)
+        e2elog.Logf("Couldn't initialize pod store: %v", err)
         return false
     }
     defer ps.Stop()

     // Get the node initially.
-    framework.Logf("Getting %s", name)
+    e2elog.Logf("Getting %s", name)
     node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
     if err != nil {
-        framework.Logf("Couldn't get node %s", name)
+        e2elog.Logf("Couldn't get node %s", name)
         return false
     }

@@ -255,7 +256,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
             podNames = append(podNames, p.ObjectMeta.Name)
         }
     }
-    framework.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
+    e2elog.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)

     // For each pod, we do a sanity check to ensure it's running / healthy
     // or succeeded now, as that's what we'll be checking later.
@@ -266,7 +267,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {

     // Reboot the node.
     if err = framework.IssueSSHCommand(rebootCmd, provider, node); err != nil {
-        framework.Logf("Error while issuing ssh command: %v", err)
+        e2elog.Logf("Error while issuing ssh command: %v", err)
         return false
     }

@@ -288,7 +289,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
         return false
     }

-    framework.Logf("Reboot successful on node %s", name)
+    e2elog.Logf("Reboot successful on node %s", name)
     return true
 }

@@ -299,7 +300,7 @@ func catLogHook(logPath string) terminationHook {
     for _, n := range nodes.Items {
         cmd := fmt.Sprintf("cat %v && rm %v", logPath, logPath)
         if _, err := framework.IssueSSHCommandWithResult(cmd, provider, &n); err != nil {
-            framework.Logf("Error while issuing ssh command: %v", err)
+            e2elog.Logf("Error while issuing ssh command: %v", err)
         }
     }

test/e2e/lifecycle/restart.go

Lines changed: 3 additions & 2 deletions
@@ -25,6 +25,7 @@ import (
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/kubernetes/test/e2e/common"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     testutils "k8s.io/kubernetes/test/utils"

     "github.com/onsi/ginkgo"
@@ -61,7 +62,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
         ginkgo.By("ensuring all nodes are ready")
         originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
         gomega.Expect(err).NotTo(gomega.HaveOccurred())
-        framework.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))
+        e2elog.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))

         ginkgo.By("ensuring all pods are running and ready")
         allPods := ps.List()
@@ -91,7 +92,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
         ginkgo.By("ensuring all nodes are ready after the restart")
         nodesAfter, err := framework.CheckNodesReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
         gomega.Expect(err).NotTo(gomega.HaveOccurred())
-        framework.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))
+        e2elog.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))

         // Make sure that we have the same number of nodes. We're not checking
         // that the names match because that's implementation specific.
