Commit 64d75b3

Merge pull request kubernetes#89787 from oomichi/NodeUpgrade
Move NodeUpgrade() into GCP e2e tests
2 parents b984f59 + c0d09c9
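
In effect, the node-upgrade helpers leave the shared test/e2e/framework package and become package-local to the GCP e2e suite. For call sites inside the suite the change is a one-token rename; a minimal before/after sketch (the full test bodies appear in the diffs below):

	// Before: exported helper in test/e2e/framework
	framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))

	// After: package-local helper in test/e2e/cloud/gcp
	framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage))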

2 files changed: 131 additions, 128 deletions


test/e2e/cloud/gcp/cluster_upgrade.go

Lines changed: 118 additions & 8 deletions

@@ -131,7 +131,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
 				start := time.Now()
 				defer finalizeUpgradeTest(start, nodeUpgradeTest)
 				target := upgCtx.Versions[1].Version.String()
-				framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
+				framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage))
 				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
 			}
 			runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgrades.NodeUpgrade, upgradeFunc)
@@ -152,7 +152,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
 				target := upgCtx.Versions[1].Version.String()
 				framework.ExpectNoError(framework.MasterUpgrade(f, target))
 				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
-				framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
+				framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage))
 				framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
 			}
 			runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
@@ -181,7 +181,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
 				defer finalizeUpgradeTest(start, clusterDowngradeTest)
 				// Yes this really is a downgrade. And nodes must downgrade first.
 				target := upgCtx.Versions[1].Version.String()
-				framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
+				framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage))
 				framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
 				framework.ExpectNoError(framework.MasterUpgrade(f, target))
 				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
@@ -251,7 +251,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 				target := upgCtx.Versions[1].Version.String()
 				framework.ExpectNoError(framework.MasterUpgrade(f, target))
 				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
-				framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
+				framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage))
 				framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
 			}
 			runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
@@ -269,7 +269,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 				start := time.Now()
 				defer finalizeUpgradeTest(start, gpuDowngradeTest)
 				target := upgCtx.Versions[1].Version.String()
-				framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
+				framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage))
 				framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
 				framework.ExpectNoError(framework.MasterUpgrade(f, target))
 				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
@@ -299,7 +299,7 @@ var _ = ginkgo.Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]",
 				target := upgCtx.Versions[1].Version.String()
 				framework.ExpectNoError(framework.MasterUpgrade(f, target))
 				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
-				framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
+				framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage))
 				framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
 			}
 			runUpgradeSuite(f, statefulsetUpgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
@@ -334,7 +334,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
 				target := upgCtx.Versions[1].Version.String()
 				framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, true))
 				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
-				framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, true))
+				framework.ExpectNoError(nodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, true))
 				framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
 			}
 			runUpgradeSuite(f, kubeProxyUpgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
@@ -360,7 +360,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
 				defer finalizeUpgradeTest(start, kubeProxyDowngradeTest)
 				// Yes this really is a downgrade. And nodes must downgrade first.
 				target := upgCtx.Versions[1].Version.String()
-				framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, false))
+				framework.ExpectNoError(nodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, false))
 				framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
 				framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, false))
 				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
@@ -605,3 +605,113 @@ func checkNodesVersions(cs clientset.Interface, want string) error {
 	}
 	return nil
 }
+
+// nodeUpgrade upgrades nodes on GCE/GKE.
+func nodeUpgrade(f *framework.Framework, v string, img string) error {
+	// Perform the upgrade.
+	var err error
+	switch framework.TestContext.Provider {
+	case "gce":
+		err = nodeUpgradeGCE(v, img, false)
+	case "gke":
+		err = nodeUpgradeGKE(f.Namespace.Name, v, img)
+	default:
+		err = fmt.Errorf("nodeUpgrade() is not implemented for provider %s", framework.TestContext.Provider)
+	}
+	if err != nil {
+		return err
+	}
+	return waitForNodesReadyAfterUpgrade(f)
+}
+
+// nodeUpgradeGCEWithKubeProxyDaemonSet upgrades nodes on GCE with enabling/disabling the daemon set of kube-proxy.
+// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
+func nodeUpgradeGCEWithKubeProxyDaemonSet(f *framework.Framework, v string, img string, enableKubeProxyDaemonSet bool) error {
+	// Perform the upgrade.
+	if err := nodeUpgradeGCE(v, img, enableKubeProxyDaemonSet); err != nil {
+		return err
+	}
+	return waitForNodesReadyAfterUpgrade(f)
+}
+
+// TODO(mrhohn): Remove 'enableKubeProxyDaemonSet' when kube-proxy is run as a DaemonSet by default.
+func nodeUpgradeGCE(rawV, img string, enableKubeProxyDaemonSet bool) error {
+	v := "v" + rawV
+	env := append(os.Environ(), fmt.Sprintf("KUBE_PROXY_DAEMONSET=%v", enableKubeProxyDaemonSet))
+	if img != "" {
+		env = append(env, "KUBE_NODE_OS_DISTRIBUTION="+img)
+		_, _, err := framework.RunCmdEnv(env, framework.GCEUpgradeScript(), "-N", "-o", v)
+		return err
+	}
+	_, _, err := framework.RunCmdEnv(env, framework.GCEUpgradeScript(), "-N", v)
+	return err
+}
+
+func nodeUpgradeGKE(namespace string, v string, img string) error {
+	framework.Logf("Upgrading nodes to version %q and image %q", v, img)
+	nps, err := nodePoolsGKE()
+	if err != nil {
+		return err
+	}
+	framework.Logf("Found node pools %v", nps)
+	for _, np := range nps {
+		args := []string{
+			"container",
+			"clusters",
+			fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
+			framework.LocationParamGKE(),
+			"upgrade",
+			framework.TestContext.CloudConfig.Cluster,
+			fmt.Sprintf("--node-pool=%s", np),
+			fmt.Sprintf("--cluster-version=%s", v),
+			"--quiet",
+		}
+		if len(img) > 0 {
+			args = append(args, fmt.Sprintf("--image-type=%s", img))
+		}
+		_, _, err = framework.RunCmd("gcloud", framework.AppendContainerCommandGroupIfNeeded(args)...)
+
+		if err != nil {
+			return err
+		}
+
+		framework.WaitForSSHTunnels(namespace)
+	}
+	return nil
+}
+
+func nodePoolsGKE() ([]string, error) {
+	args := []string{
+		"container",
+		"node-pools",
+		fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
+		framework.LocationParamGKE(),
+		"list",
+		fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster),
+		"--format=get(name)",
+	}
+	stdout, _, err := framework.RunCmd("gcloud", framework.AppendContainerCommandGroupIfNeeded(args)...)
+	if err != nil {
+		return nil, err
+	}
+	if len(strings.TrimSpace(stdout)) == 0 {
+		return []string{}, nil
+	}
+	return strings.Fields(stdout), nil
+}
+
+func waitForNodesReadyAfterUpgrade(f *framework.Framework) error {
+	// Wait for it to complete and validate nodes are healthy.
+	//
+	// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
+	// GKE; the operation shouldn't return until they all are.
+	numNodes, err := e2enode.TotalRegistered(f.ClientSet)
+	if err != nil {
+		return fmt.Errorf("couldn't detect number of nodes")
+	}
+	framework.Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", framework.RestartNodeReadyAgainTimeout, numNodes)
+	if _, err := e2enode.CheckReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout); err != nil {
+		return err
+	}
+	return nil
+}
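
For reference, the nodeUpgradeGKE loop above shells out to gcloud once per node pool, and nodePoolsGKE discovers the pool names the same way. Assuming a regional test cluster (so AppendContainerCommandGroupIfNeeded prepends the beta command group) and illustrative placeholder values for project, region, cluster, pool, and version, the two invocations come out roughly as:

	gcloud beta container clusters --project=my-project --region=us-central1 \
	    upgrade my-e2e-cluster --node-pool=default-pool --cluster-version=1.19.0 --quiet

	gcloud beta container node-pools --project=my-project --region=us-central1 \
	    list --cluster=my-e2e-cluster --format=get(name)

(gcloud accepts flags before or after the subcommand, so the flag placement here simply mirrors the args slices in the code; --image-type is appended to the upgrade call only when an image is requested.)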

test/e2e/framework/nodes_util.go

Lines changed: 13 additions & 120 deletions

@@ -21,7 +21,6 @@ import (
 	"os"
 	"path"
 	"path/filepath"
-	"strings"
 	"sync"
 	"time"
 
@@ -67,7 +66,7 @@ func etcdUpgradeGCE(targetStorage, targetVersion string) error {
 		"STORAGE_BACKEND="+targetStorage,
 		"TEST_ETCD_IMAGE="+etcdImage)
 
-	_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M")
+	_, _, err := RunCmdEnv(env, GCEUpgradeScript(), "-l", "-M")
 	return err
 }
 
@@ -93,19 +92,21 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
 	}
 
 	v := "v" + rawV
-	_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-M", v)
+	_, _, err := RunCmdEnv(env, GCEUpgradeScript(), "-M", v)
 	return err
 }
 
-func locationParamGKE() string {
+// LocationParamGKE returns parameter related to location for gcloud command.
+func LocationParamGKE() string {
 	if TestContext.CloudConfig.MultiMaster {
 		// GKE Regional Clusters are being tested.
 		return fmt.Sprintf("--region=%s", TestContext.CloudConfig.Region)
 	}
 	return fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone)
 }
 
-func appendContainerCommandGroupIfNeeded(args []string) []string {
+// AppendContainerCommandGroupIfNeeded returns container command group parameter if necessary.
+func AppendContainerCommandGroupIfNeeded(args []string) []string {
 	if TestContext.CloudConfig.Region != "" {
 		// TODO(wojtek-t): Get rid of it once Regional Clusters go to GA.
 		return append([]string{"beta"}, args...)
@@ -120,19 +121,19 @@ func MasterUpgradeGKE(namespace string, v string) error {
 		"container",
 		"clusters",
 		fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
-		locationParamGKE(),
+		LocationParamGKE(),
 		"upgrade",
 		TestContext.CloudConfig.Cluster,
 		"--master",
 		fmt.Sprintf("--cluster-version=%s", v),
 		"--quiet",
 	}
-	_, _, err := RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
+	_, _, err := RunCmd("gcloud", AppendContainerCommandGroupIfNeeded(args)...)
 	if err != nil {
 		return err
 	}
 
-	waitForSSHTunnels(namespace)
+	WaitForSSHTunnels(namespace)
 
 	return nil
 }
@@ -174,124 +175,16 @@ func masterUpgradeKubernetesAnywhere(v string) error {
 	return nil
 }
 
-// NodeUpgrade upgrades nodes on GCE/GKE.
-func NodeUpgrade(f *Framework, v string, img string) error {
-	// Perform the upgrade.
-	var err error
-	switch TestContext.Provider {
-	case "gce":
-		err = nodeUpgradeGCE(v, img, false)
-	case "gke":
-		err = nodeUpgradeGKE(f.Namespace.Name, v, img)
-	default:
-		err = fmt.Errorf("NodeUpgrade() is not implemented for provider %s", TestContext.Provider)
-	}
-	if err != nil {
-		return err
-	}
-	return waitForNodesReadyAfterUpgrade(f)
-}
-
-// NodeUpgradeGCEWithKubeProxyDaemonSet upgrades nodes on GCE with enabling/disabling the daemon set of kube-proxy.
-// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
-func NodeUpgradeGCEWithKubeProxyDaemonSet(f *Framework, v string, img string, enableKubeProxyDaemonSet bool) error {
-	// Perform the upgrade.
-	if err := nodeUpgradeGCE(v, img, enableKubeProxyDaemonSet); err != nil {
-		return err
-	}
-	return waitForNodesReadyAfterUpgrade(f)
-}
-
-func waitForNodesReadyAfterUpgrade(f *Framework) error {
-	// Wait for it to complete and validate nodes are healthy.
-	//
-	// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
-	// GKE; the operation shouldn't return until they all are.
-	numNodes, err := e2enode.TotalRegistered(f.ClientSet)
-	if err != nil {
-		return fmt.Errorf("couldn't detect number of nodes")
-	}
-	Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes)
-	if _, err := e2enode.CheckReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil {
-		return err
-	}
-	return nil
-}
-
-// TODO(mrhohn): Remove 'enableKubeProxyDaemonSet' when kube-proxy is run as a DaemonSet by default.
-func nodeUpgradeGCE(rawV, img string, enableKubeProxyDaemonSet bool) error {
-	v := "v" + rawV
-	env := append(os.Environ(), fmt.Sprintf("KUBE_PROXY_DAEMONSET=%v", enableKubeProxyDaemonSet))
-	if img != "" {
-		env = append(env, "KUBE_NODE_OS_DISTRIBUTION="+img)
-		_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-N", "-o", v)
-		return err
-	}
-	_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-N", v)
-	return err
-}
-
-func nodeUpgradeGKE(namespace string, v string, img string) error {
-	Logf("Upgrading nodes to version %q and image %q", v, img)
-	nps, err := nodePoolsGKE()
-	if err != nil {
-		return err
-	}
-	Logf("Found node pools %v", nps)
-	for _, np := range nps {
-		args := []string{
-			"container",
-			"clusters",
-			fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
-			locationParamGKE(),
-			"upgrade",
-			TestContext.CloudConfig.Cluster,
-			fmt.Sprintf("--node-pool=%s", np),
-			fmt.Sprintf("--cluster-version=%s", v),
-			"--quiet",
-		}
-		if len(img) > 0 {
-			args = append(args, fmt.Sprintf("--image-type=%s", img))
-		}
-		_, _, err = RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
-
-		if err != nil {
-			return err
-		}
-
-		waitForSSHTunnels(namespace)
-	}
-	return nil
-}
-
-func nodePoolsGKE() ([]string, error) {
-	args := []string{
-		"container",
-		"node-pools",
-		fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
-		locationParamGKE(),
-		"list",
-		fmt.Sprintf("--cluster=%s", TestContext.CloudConfig.Cluster),
-		"--format=get(name)",
-	}
-	stdout, _, err := RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
-	if err != nil {
-		return nil, err
-	}
-	if len(strings.TrimSpace(stdout)) == 0 {
-		return []string{}, nil
-	}
-	return strings.Fields(stdout), nil
-}
-
-func gceUpgradeScript() string {
+// GCEUpgradeScript returns path of script for upgrading on GCE.
+func GCEUpgradeScript() string {
 	if len(TestContext.GCEUpgradeScript) == 0 {
 		return path.Join(TestContext.RepoRoot, "cluster/gce/upgrade.sh")
 	}
 	return TestContext.GCEUpgradeScript
 }
 
-func waitForSSHTunnels(namespace string) {
+// WaitForSSHTunnels waits for establishing SSH tunnel to busybox pod.
+func WaitForSSHTunnels(namespace string) {
 	Logf("Waiting for SSH tunnels to establish")
 	RunKubectl(namespace, "run", "ssh-tunnel-test",
 		"--image=busybox",