
Commit aaac13f

remove gke from e2e
1 parent ab54e44 commit aaac13f

29 files changed: +42 -154 lines
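
Nearly all of the per-test edits below are single-line changes that drop "gke" from e2eskipper.SkipUnlessProviderIs calls, so the affected specs now skip when the suite runs with the GKE provider. As a rough illustration of what that gate does (a simplified stand-alone sketch, not the framework's actual implementation; the env-var lookup and helper names are assumptions):

// Illustrative stand-in for the e2eskipper.SkipUnlessProviderIs gate that this
// commit narrows. It is not the framework's source; names and the env-var
// lookup are assumptions made for a self-contained example.
package main

import (
	"fmt"
	"os"
)

// In the real suite the provider comes from framework.TestContext.Provider
// (set via the --provider flag); here an env var stands in for it.
var provider = os.Getenv("E2E_PROVIDER")

// skipUnlessProviderIs reports whether the test body should run: true only if
// the configured provider is in the supported list, otherwise it "skips".
func skipUnlessProviderIs(supported ...string) bool {
	for _, p := range supported {
		if provider == p {
			return true
		}
	}
	fmt.Printf("skipping: only supported for providers %v (not %q)\n", supported, provider)
	return false
}

func main() {
	// After this commit the affected tests list "gce" (and in some cases
	// "aws"/"azure") but no longer "gke", so GKE-provider runs skip them.
	if !skipUnlessProviderIs("aws", "azure", "gce") {
		return
	}
	fmt.Println("running provider-specific test body")
}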

test/e2e/apps/deployment.go

Lines changed: 1 addition & 1 deletion
@@ -161,7 +161,7 @@ var _ = SIGDescribe("Deployment", func() {
 		testProportionalScalingDeployment(ctx, f)
 	})
 	ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func(ctx context.Context) {
-		e2eskipper.SkipUnlessProviderIs("aws", "azure", "gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("aws", "azure", "gce")
 		e2eskipper.SkipIfIPv6("aws")
 		nodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
 		framework.ExpectNoError(err)

test/e2e/apps/rc.go

Lines changed: 1 addition & 1 deletion
@@ -73,7 +73,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 
 	ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) {
 		// requires private images
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate)
 		TestReplicationControllerServeImageOrFail(ctx, f, "private", privateimage.GetE2EImage())
 	})

test/e2e/apps/replica_set.go

Lines changed: 1 addition & 1 deletion
@@ -115,7 +115,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
 
 	ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) {
 		// requires private images
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate)
 		testReplicaSetServeImageOrFail(ctx, f, "private", privateimage.GetE2EImage())
 	})

test/e2e/cloud/gcp/cluster_upgrade.go

Lines changed: 2 additions & 2 deletions
@@ -60,7 +60,7 @@ var _ = SIGDescribe("Upgrade", feature.Upgrade, func() {
 	testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests)
 
 	ginkgo.BeforeEach(func() {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 	})
 
 	// Create the frameworks here because we can only create them
@@ -103,7 +103,7 @@ var _ = SIGDescribe("Downgrade", feature.Downgrade, func() {
 	testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests)
 
 	ginkgo.BeforeEach(func() {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 	})
 
 	ginkgo.Describe("cluster downgrade", func() {

test/e2e/cloud/gcp/common/upgrade_mechanics.go

Lines changed: 0 additions & 57 deletions
@@ -75,8 +75,6 @@ func controlPlaneUpgrade(ctx context.Context, f *framework.Framework, v string,
 	switch framework.TestContext.Provider {
 	case "gce":
 		return controlPlaneUpgradeGCE(v, extraEnvs)
-	case "gke":
-		return e2eproviders.MasterUpgradeGKE(ctx, f.Namespace.Name, v)
 	default:
 		return fmt.Errorf("controlPlaneUpgrade() is not implemented for provider %s", framework.TestContext.Provider)
 	}
@@ -151,8 +149,6 @@ func nodeUpgrade(ctx context.Context, f *framework.Framework, v string, img stri
 	switch framework.TestContext.Provider {
 	case "gce":
 		err = nodeUpgradeGCE(v, img, extraEnvs)
-	case "gke":
-		err = nodeUpgradeGKE(ctx, f.Namespace.Name, v, img)
 	default:
 		err = fmt.Errorf("nodeUpgrade() is not implemented for provider %s", framework.TestContext.Provider)
 	}
@@ -175,59 +171,6 @@ func nodeUpgradeGCE(rawV, img string, extraEnvs []string) error {
 	return err
 }
 
-func nodeUpgradeGKE(ctx context.Context, namespace string, v string, img string) error {
-	framework.Logf("Upgrading nodes to version %q and image %q", v, img)
-	nps, err := nodePoolsGKE()
-	if err != nil {
-		return err
-	}
-	framework.Logf("Found node pools %v", nps)
-	for _, np := range nps {
-		args := []string{
-			"container",
-			"clusters",
-			fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
-			e2eproviders.LocationParamGKE(),
-			"upgrade",
-			framework.TestContext.CloudConfig.Cluster,
-			fmt.Sprintf("--node-pool=%s", np),
-			fmt.Sprintf("--cluster-version=%s", v),
-			"--quiet",
-		}
-		if len(img) > 0 {
-			args = append(args, fmt.Sprintf("--image-type=%s", img))
-		}
-		_, _, err = framework.RunCmd("gcloud", framework.AppendContainerCommandGroupIfNeeded(args)...)
-
-		if err != nil {
-			return err
-		}
-
-		e2enode.WaitForSSHTunnels(ctx, namespace)
-	}
-	return nil
-}
-
-func nodePoolsGKE() ([]string, error) {
-	args := []string{
-		"container",
-		"node-pools",
-		fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
-		e2eproviders.LocationParamGKE(),
-		"list",
-		fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster),
-		"--format=get(name)",
-	}
-	stdout, _, err := framework.RunCmd("gcloud", framework.AppendContainerCommandGroupIfNeeded(args)...)
-	if err != nil {
-		return nil, err
-	}
-	if len(strings.TrimSpace(stdout)) == 0 {
-		return []string{}, nil
-	}
-	return strings.Fields(stdout), nil
-}
-
 func waitForNodesReadyAfterUpgrade(ctx context.Context, f *framework.Framework) error {
 	// Wait for it to complete and validate nodes are healthy.
 	//
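
With the gke cases and the nodeUpgradeGKE/nodePoolsGKE helpers removed, controlPlaneUpgrade and nodeUpgrade keep only the gce path and fall through to an error for every other provider. A simplified sketch of the resulting control flow (stand-in names; the real functions also take the framework, image, and extra env arguments shown above):

// Simplified sketch of the provider switch left behind by this commit; the
// helper below is a stand-in, not the real controlPlaneUpgradeGCE.
package main

import "fmt"

func controlPlaneUpgradeGCE(version string) error {
	// Placeholder for the GCE upgrade path that the commit keeps.
	fmt.Println("upgrading GCE control plane to", version)
	return nil
}

// controlPlaneUpgrade now recognizes only "gce"; "gke" (previously routed to
// e2eproviders.MasterUpgradeGKE) lands in the default error branch.
func controlPlaneUpgrade(provider, version string) error {
	switch provider {
	case "gce":
		return controlPlaneUpgradeGCE(version)
	default:
		return fmt.Errorf("controlPlaneUpgrade() is not implemented for provider %s", provider)
	}
}

func main() {
	// Arbitrary example arguments: after this change a gke run simply
	// surfaces the unimplemented-provider error.
	if err := controlPlaneUpgrade("gke", "v1.31.0"); err != nil {
		fmt.Println(err)
	}
}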

test/e2e/cloud/gcp/gke_node_pools.go

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@ var _ = SIGDescribe("GKE node pools", feature.GKENodePool, func() {
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
 	ginkgo.BeforeEach(func() {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 	})
 
 	f.It("should create a cluster with multiple node pools", feature.GKENodePool, func(ctx context.Context) {

test/e2e/cloud/gcp/kubelet_security.go

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ var _ = SIGDescribe("Ports Security Check", feature.KubeletSecurity, func() {
 	var nodeName string
 
 	ginkgo.BeforeEach(func(ctx context.Context) {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		var err error
 		node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
 		framework.ExpectNoError(err)

test/e2e/cloud/gcp/node_lease.go

Lines changed: 3 additions & 14 deletions
@@ -44,7 +44,7 @@ var _ = SIGDescribe(framework.WithDisruptive(), "NodeLease", func() {
 	var group string
 
 	ginkgo.BeforeEach(func(ctx context.Context) {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		c = f.ClientSet
 		ns = f.Namespace.Name
 		systemPods, err := e2epod.GetPodsInNamespace(ctx, c, ns, map[string]string{})
@@ -62,7 +62,7 @@ var _ = SIGDescribe(framework.WithDisruptive(), "NodeLease", func() {
 
 		ginkgo.BeforeEach(func() {
 			skipped = true
-			e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
+			e2eskipper.SkipUnlessProviderIs("gce", "aws")
 			e2eskipper.SkipUnlessNodeCountIsAtLeast(2)
 			skipped = false
 		})
@@ -76,18 +76,7 @@ var _ = SIGDescribe(framework.WithDisruptive(), "NodeLease", func() {
 			if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 				framework.Failf("Couldn't restore the original node instance group size: %v", err)
 			}
-			// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
-			// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
-			// Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs
-			// right before a test that tries to get logs, for example, we may get unlucky and try to use a
-			// closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies
-			// being closed, so we sleep.
-			//
-			// TODO(cjcullen) reduce this sleep (#19314)
-			if framework.ProviderIs("gke") {
-				ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
-				time.Sleep(5 * time.Minute)
-			}
+
 			if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 				framework.Failf("Couldn't restore the original node instance group size: %v", err)
 			}

test/e2e/cloud/gcp/reboot.go

Lines changed: 0 additions & 11 deletions
@@ -79,17 +79,6 @@ var _ = SIGDescribe("Reboot", framework.WithDisruptive(), feature.Reboot, func()
 				framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
 			}
 		}
-		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
-		// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated. Most tests
-		// make use of some proxy feature to verify functionality. So, if a reboot test runs right before a test
-		// that tries to get logs, for example, we may get unlucky and try to use a closed tunnel to a node that
-		// was recently rebooted. There's no good way to framework.Poll for proxies being closed, so we sleep.
-		//
-		// TODO(cjcullen) reduce this sleep (#19314)
-		if framework.ProviderIs("gke") {
-			ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
-			time.Sleep(5 * time.Minute)
-		}
 	})
 
 	f = framework.NewDefaultFramework("reboot")

test/e2e/cloud/gcp/resize_nodes.go

Lines changed: 2 additions & 13 deletions
@@ -70,25 +70,14 @@ var _ = SIGDescribe("Nodes", framework.WithDisruptive(), func() {
 	var originalNodeCount int32
 
 	ginkgo.BeforeEach(func() {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		e2eskipper.SkipUnlessNodeCountIsAtLeast(2)
 		ginkgo.DeferCleanup(func(ctx context.Context) {
 			ginkgo.By("restoring the original node instance group size")
 			if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 				framework.Failf("Couldn't restore the original node instance group size: %v", err)
 			}
-			// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
-			// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
-			// Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs
-			// right before a test that tries to get logs, for example, we may get unlucky and try to use a
-			// closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies
-			// being closed, so we sleep.
-			//
-			// TODO(cjcullen) reduce this sleep (#19314)
-			if framework.ProviderIs("gke") {
-				ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
-				time.Sleep(5 * time.Minute)
-			}
+
 			if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 				framework.Failf("Couldn't restore the original node instance group size: %v", err)
 			}
