Skip to content

Commit c7c0d09

Browse files
authored
Merge pull request kubernetes#85200 from SataQiu/refactor-e2e-kubectl-20191113
e2e: move LogFailedContainers out of e2e test framework util.go
2 parents 0bfe9d5 + 50bc528 commit c7c0d09

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

43 files changed

+248
-115
lines changed

test/e2e/BUILD

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,7 @@ go_library(
6767
"//test/e2e/common:go_default_library",
6868
"//test/e2e/framework:go_default_library",
6969
"//test/e2e/framework/auth:go_default_library",
70+
"//test/e2e/framework/kubectl:go_default_library",
7071
"//test/e2e/framework/log:go_default_library",
7172
"//test/e2e/framework/metrics:go_default_library",
7273
"//test/e2e/framework/node:go_default_library",

test/e2e/apimachinery/BUILD

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,7 @@ go_library(
8787
"//test/e2e/framework/metrics:go_default_library",
8888
"//test/e2e/framework/node:go_default_library",
8989
"//test/e2e/framework/pod:go_default_library",
90+
"//test/e2e/framework/rc:go_default_library",
9091
"//test/e2e/framework/ssh:go_default_library",
9192
"//test/utils:go_default_library",
9293
"//test/utils/crd:go_default_library",

test/e2e/apimachinery/etcd_failure.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ import (
2525
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
2626
"k8s.io/kubernetes/test/e2e/apps"
2727
"k8s.io/kubernetes/test/e2e/framework"
28+
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
2829
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
2930
testutils "k8s.io/kubernetes/test/utils"
3031
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -45,7 +46,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
4546
framework.SkipUnlessProviderIs("gce")
4647
framework.SkipUnlessSSHKeyPresent()
4748

48-
err := framework.RunRC(testutils.RCConfig{
49+
err := e2erc.RunRC(testutils.RCConfig{
4950
Client: f.ClientSet,
5051
Name: "baz",
5152
Namespace: f.Namespace.Name,

test/e2e/apps/BUILD

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,7 @@ go_library(
6868
"//test/e2e/framework/node:go_default_library",
6969
"//test/e2e/framework/pod:go_default_library",
7070
"//test/e2e/framework/pv:go_default_library",
71+
"//test/e2e/framework/rc:go_default_library",
7172
"//test/e2e/framework/replicaset:go_default_library",
7273
"//test/e2e/framework/service:go_default_library",
7374
"//test/e2e/framework/ssh:go_default_library",

test/e2e/apps/daemon_restart.go

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@ import (
3434
"k8s.io/kubernetes/pkg/master/ports"
3535
"k8s.io/kubernetes/test/e2e/framework"
3636
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
37+
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
3738
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
3839
testutils "k8s.io/kubernetes/test/utils"
3940
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -209,7 +210,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
209210
Replicas: numPods,
210211
CreatedPods: &[]*v1.Pod{},
211212
}
212-
framework.ExpectNoError(framework.RunRC(config))
213+
framework.ExpectNoError(e2erc.RunRC(config))
213214
replacePods(*config.CreatedPods, existingPods)
214215

215216
stopCh = make(chan struct{})
@@ -260,7 +261,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
260261
// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
261262
// to the same size achieves this, because the scale operation advances the RC's sequence number
262263
// and awaits it to be observed and reported back in the RC's status.
263-
framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true)
264+
e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true)
264265

265266
// Only check the keys, the pods can be different if the kubelet updated it.
266267
// TODO: Can it really?
@@ -291,9 +292,9 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
291292
restarter.kill()
292293
// This is best effort to try and create pods while the scheduler is down,
293294
// since we don't know exactly when it is restarted after the kill signal.
294-
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false))
295+
framework.ExpectNoError(e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false))
295296
restarter.waitUp()
296-
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true))
297+
framework.ExpectNoError(e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true))
297298
})
298299

299300
ginkgo.It("Kubelet should not restart containers across restart", func() {

test/e2e/autoscaling/BUILD

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,7 @@ go_library(
4444
"//test/e2e/framework/node:go_default_library",
4545
"//test/e2e/framework/pod:go_default_library",
4646
"//test/e2e/framework/pv:go_default_library",
47+
"//test/e2e/framework/rc:go_default_library",
4748
"//test/e2e/instrumentation/monitoring:go_default_library",
4849
"//test/e2e/scheduling:go_default_library",
4950
"//test/utils:go_default_library",

test/e2e/autoscaling/cluster_autoscaler_scalability.go

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ import (
3232
"k8s.io/klog"
3333
"k8s.io/kubernetes/test/e2e/framework"
3434
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
35+
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
3536
testutils "k8s.io/kubernetes/test/utils"
3637
imageutils "k8s.io/kubernetes/test/utils/image"
3738

@@ -345,8 +346,8 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
345346
totalMemReservation := unschedulableMemReservation * unschedulablePodReplicas
346347
timeToWait := 5 * time.Minute
347348
podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait)
348-
framework.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable)
349-
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)
349+
e2erc.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable)
350+
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)
350351

351352
// Ensure that no new nodes have been added so far.
352353
readyNodeCount, _ := e2enode.TotalReady(f.ClientSet)
@@ -379,7 +380,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC
379380
// run rc based on config
380381
ginkgo.By(fmt.Sprintf("Running RC %v from config", config.extraPods.Name))
381382
start := time.Now()
382-
framework.ExpectNoError(framework.RunRC(*config.extraPods))
383+
framework.ExpectNoError(e2erc.RunRC(*config.extraPods))
383384
// check results
384385
if tolerateMissingNodeCount > 0 {
385386
// Tolerate some number of nodes not to be created.
@@ -397,7 +398,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC
397398
}
398399
timeTrack(start, fmt.Sprintf("Scale up to %v", config.expectedResult.nodes))
399400
return func() error {
400-
return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, config.extraPods.Name)
401+
return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, config.extraPods.Name)
401402
}
402403
}
403404

@@ -475,10 +476,10 @@ func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, p
475476
HostPorts: map[string]int{"port1": port},
476477
MemRequest: request,
477478
}
478-
err := framework.RunRC(*config)
479+
err := e2erc.RunRC(*config)
479480
framework.ExpectNoError(err)
480481
return func() error {
481-
return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
482+
return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
482483
}
483484
}
484485

@@ -515,10 +516,10 @@ func distributeLoad(f *framework.Framework, namespace string, id string, podDist
515516
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
516517
// Create the target RC
517518
rcConfig := reserveMemoryRCConfig(f, id, totalPods, totalPods*podMemRequestMegabytes, timeout)
518-
framework.ExpectNoError(framework.RunRC(*rcConfig))
519+
framework.ExpectNoError(e2erc.RunRC(*rcConfig))
519520
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
520521
return func() error {
521-
return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
522+
return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
522523
}
523524
}
524525

0 commit comments

Comments (0)