Commit 3bf2b12

Merge pull request kubernetes#81980 from carlory/fix-test-03
cleanup test code in lifecycle, servicecatalog and ui package
2 parents 5823cfc + 3c70201 commit 3bf2b12
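
In short, the cleanup swaps the dedicated e2elog helper package for the logging helpers that the framework package already exports, which in turn lets each affected package drop its //test/e2e/framework/log BUILD dependency. A minimal sketch of the before/after pattern, assuming only the Logf/Failf calls visible in the diff below (the surrounding helper function is hypothetical, for illustration only):

// Before the cleanup, tests imported a separate alias just for logging:
//
//	import e2elog "k8s.io/kubernetes/test/e2e/framework/log"
//	e2elog.Logf("Getting %s", name)
//	e2elog.Failf("port %d is not disabled", port)
//
// After the cleanup, the same calls go through the framework package directly.

package lifecycle

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

// logAndCheckPort is a hypothetical helper illustrating the post-cleanup style.
func logAndCheckPort(name string, port int, portClosed bool) {
	framework.Logf("Getting %s", name) // was e2elog.Logf
	if !portClosed {
		framework.Failf("port %d is not disabled", port) // was e2elog.Failf
	}
}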

File tree

14 files changed (+59, -73 lines)

test/e2e/lifecycle/BUILD

Lines changed: 0 additions & 1 deletion
@@ -37,7 +37,6 @@ go_library(
         "//test/e2e/framework/ginkgowrapper:go_default_library",
         "//test/e2e/framework/kubelet:go_default_library",
         "//test/e2e/framework/lifecycle:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",

test/e2e/lifecycle/addon_update.go

Lines changed: 4 additions & 5 deletions
@@ -29,12 +29,11 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
-	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
 // TODO: it would probably be slightly better to build up the objects
@@ -299,7 +298,7 @@ var _ = SIGDescribe("Addon update", func() {
 		sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonEnsureExists, destinationDir, svcAddonEnsureExists))
 		// Delete the "ensure exist class" addon at the end.
 		defer func() {
-			e2elog.Logf("Cleaning up ensure exist class addon.")
+			framework.Logf("Cleaning up ensure exist class addon.")
 			err := f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)
 			framework.ExpectNoError(err)
 		}()
@@ -392,7 +391,7 @@ func sshExecAndVerify(client *ssh.Client, cmd string) {
 }
 
 func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
-	e2elog.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
+	framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
 	session, err := client.NewSession()
 	if err != nil {
 		return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err)
@@ -424,7 +423,7 @@ func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
 }
 
 func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.FileMode) error {
-	e2elog.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
+	framework.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
 	session, err := sshClient.NewSession()
 	if err != nil {
 		return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err)

test/e2e/lifecycle/bootstrap/BUILD

Lines changed: 0 additions & 1 deletion
@@ -21,7 +21,6 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/lifecycle:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
     ],

test/e2e/lifecycle/bootstrap/util.go

Lines changed: 3 additions & 4 deletions
@@ -29,7 +29,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	bootstrapapi "k8s.io/cluster-bootstrap/token/api"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )
 
 func newTokenSecret(tokenID, tokenSecret string) *v1.Secret {
@@ -84,7 +83,7 @@ func WaitforSignedClusterInfoByBootStrapToken(c clientset.Interface, tokenID str
 	return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
 		cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Failf("Failed to get cluster-info configMap: %v", err)
+			framework.Failf("Failed to get cluster-info configMap: %v", err)
 			return false, err
 		}
 		_, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
@@ -100,7 +99,7 @@ func WaitForSignedClusterInfoGetUpdatedByBootstrapToken(c clientset.Interface, t
 	return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
 		cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Failf("Failed to get cluster-info configMap: %v", err)
+			framework.Failf("Failed to get cluster-info configMap: %v", err)
 			return false, err
 		}
 		updated, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
@@ -116,7 +115,7 @@ func WaitForSignedClusterInfoByBootstrapTokenToDisappear(c clientset.Interface,
 	return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
 		cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Failf("Failed to get cluster-info configMap: %v", err)
+			framework.Failf("Failed to get cluster-info configMap: %v", err)
 			return false, err
 		}
 		_, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]

test/e2e/lifecycle/ha_master.go

Lines changed: 5 additions & 6 deletions
@@ -28,12 +28,11 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
 
 func addMasterReplica(zone string) error {
-	e2elog.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
+	framework.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "true", "false")
 	if err != nil {
 		return err
@@ -42,7 +41,7 @@ func addMasterReplica(zone string) error {
 }
 
 func removeMasterReplica(zone string) error {
-	e2elog.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
+	framework.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "false", "false")
 	if err != nil {
 		return err
@@ -51,7 +50,7 @@ func removeMasterReplica(zone string) error {
 }
 
 func addWorkerNodes(zone string) error {
-	e2elog.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
+	framework.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "false", "true")
 	if err != nil {
 		return err
@@ -60,7 +59,7 @@ func addWorkerNodes(zone string) error {
 }
 
 func removeWorkerNodes(zone string) error {
-	e2elog.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
+	framework.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "true", "true")
 	if err != nil {
 		return err
@@ -83,7 +82,7 @@ func findRegionForZone(zone string) string {
 	region, err := exec.Command("gcloud", "compute", "zones", "list", zone, "--quiet", "--format=csv[no-heading](region)").Output()
 	framework.ExpectNoError(err)
 	if string(region) == "" {
-		e2elog.Failf("Region not found; zone: %s", zone)
+		framework.Failf("Region not found; zone: %s", zone)
 	}
 	return string(region)
 }

test/e2e/lifecycle/kubelet_security.go

Lines changed: 4 additions & 5 deletions
@@ -22,15 +22,14 @@ import (
 	"net/http"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/kubernetes/pkg/master/ports"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
-	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
-	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 )
 
 var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
@@ -82,7 +81,7 @@ func portClosedTest(f *framework.Framework, pickNode *v1.Node, port int) {
 		conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", addr, port), 1*time.Minute)
 		if err == nil {
 			conn.Close()
-			e2elog.Failf("port %d is not disabled", port)
+			framework.Failf("port %d is not disabled", port)
 		}
 	}
 }

test/e2e/lifecycle/node_lease.go

Lines changed: 5 additions & 6 deletions
@@ -25,7 +25,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 
@@ -47,7 +46,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 		gomega.Expect(err).To(gomega.BeNil())
 		systemPodsNo = int32(len(systemPods))
 		if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
-			e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
+			framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
 		} else {
 			group = framework.TestContext.CloudConfig.NodeInstanceGroup
 		}
@@ -70,7 +69,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 
 		ginkgo.By("restoring the original node instance group size")
 		if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
-			e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
+			framework.Failf("Couldn't restore the original node instance group size: %v", err)
 		}
 		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
 		// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
@@ -85,11 +84,11 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 			time.Sleep(5 * time.Minute)
 		}
 		if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
-			e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
+			framework.Failf("Couldn't restore the original node instance group size: %v", err)
 		}
 
 		if err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
-			e2elog.Failf("Couldn't restore the original cluster size: %v", err)
+			framework.Failf("Couldn't restore the original cluster size: %v", err)
 		}
 		// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
 		// the cluster is restored to health.
@@ -111,7 +110,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 		pass := true
 		for _, node := range originalNodes.Items {
 			if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
-				e2elog.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
+				framework.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
 				pass = false
 			}
 		}

test/e2e/lifecycle/reboot.go

Lines changed: 14 additions & 15 deletions
@@ -30,7 +30,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@@ -74,7 +73,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 			framework.ExpectNoError(err)
 
 			for _, e := range events.Items {
-				e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
+				framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
 			}
 		}
 		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
@@ -138,7 +137,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 	nodelist := framework.GetReadySchedulableNodesOrDie(c)
 	if hook != nil {
 		defer func() {
-			e2elog.Logf("Executing termination hook on nodes")
+			framework.Logf("Executing termination hook on nodes")
 			hook(framework.TestContext.Provider, nodelist)
 		}()
 	}
@@ -165,10 +164,10 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 		for ix := range nodelist.Items {
 			n := nodelist.Items[ix]
 			if !result[ix] {
-				e2elog.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
+				framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
 			}
 		}
-		e2elog.Failf("Test failed; at least one node failed to reboot in the time given.")
+		framework.Failf("Test failed; at least one node failed to reboot in the time given.")
 	}
 }
 
@@ -179,9 +178,9 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 			prefix = "Retrieving log for the last terminated container"
 		}
 		if err != nil {
-			e2elog.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
+			framework.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
 		} else {
-			e2elog.Logf("%s %s:\n%s\n", prefix, id, log)
+			framework.Logf("%s %s:\n%s\n", prefix, id, log)
 		}
 	}
 	podNameSet := sets.NewString(podNames...)
@@ -195,7 +194,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 		if ok, _ := testutils.PodRunningReady(p); ok {
 			continue
 		}
-		e2elog.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
+		framework.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
 		// Print the log of the containers if pod is not running and ready.
 		for _, container := range p.Status.ContainerStatuses {
 			cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
@@ -224,16 +223,16 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 	ns := metav1.NamespaceSystem
 	ps, err := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
 	if err != nil {
-		e2elog.Logf("Couldn't initialize pod store: %v", err)
+		framework.Logf("Couldn't initialize pod store: %v", err)
 		return false
 	}
 	defer ps.Stop()
 
 	// Get the node initially.
-	e2elog.Logf("Getting %s", name)
+	framework.Logf("Getting %s", name)
 	node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
 	if err != nil {
-		e2elog.Logf("Couldn't get node %s", name)
+		framework.Logf("Couldn't get node %s", name)
 		return false
 	}
 
@@ -258,7 +257,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 			podNames = append(podNames, p.ObjectMeta.Name)
 		}
 	}
-	e2elog.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
+	framework.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
 
 	// For each pod, we do a sanity check to ensure it's running / healthy
 	// or succeeded now, as that's what we'll be checking later.
@@ -269,7 +268,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 
 	// Reboot the node.
 	if err = e2essh.IssueSSHCommand(rebootCmd, provider, node); err != nil {
-		e2elog.Logf("Error while issuing ssh command: %v", err)
+		framework.Logf("Error while issuing ssh command: %v", err)
 		return false
 	}
 
@@ -291,7 +290,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 		return false
 	}
 
-	e2elog.Logf("Reboot successful on node %s", name)
+	framework.Logf("Reboot successful on node %s", name)
 	return true
 }
 
@@ -302,7 +301,7 @@ func catLogHook(logPath string) terminationHook {
 		for _, n := range nodes.Items {
 			cmd := fmt.Sprintf("cat %v && rm %v", logPath, logPath)
 			if _, err := e2essh.IssueSSHCommandWithResult(cmd, provider, &n); err != nil {
-				e2elog.Logf("Error while issuing ssh command: %v", err)
+				framework.Logf("Error while issuing ssh command: %v", err)
 			}
 		}

test/e2e/lifecycle/resize_nodes.go

Lines changed: 4 additions & 5 deletions
@@ -25,7 +25,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 
@@ -56,7 +55,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 		framework.ExpectNoError(err)
 		systemPodsNo = int32(len(systemPods))
 		if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
-			e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
+			framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
 		} else {
 			group = framework.TestContext.CloudConfig.NodeInstanceGroup
 		}
@@ -81,7 +80,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 
 		ginkgo.By("restoring the original node instance group size")
 		if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
-			e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
+			framework.Failf("Couldn't restore the original node instance group size: %v", err)
 		}
 		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
 		// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
@@ -96,11 +95,11 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 			time.Sleep(5 * time.Minute)
 		}
 		if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
-			e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
+			framework.Failf("Couldn't restore the original node instance group size: %v", err)
 		}
 
 		if err := e2enode.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {
-			e2elog.Failf("Couldn't restore the original cluster size: %v", err)
+			framework.Failf("Couldn't restore the original cluster size: %v", err)
 		}
 		// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
 		// the cluster is restored to health.
