
Commit cb6dad2

Merge pull request #6817 from killianmuldoon/runtimeSDK/flaky-e2e-fix
🌱 Runtime sdk/flaky e2e fix
2 parents fc780de + 02b346e commit cb6dad2


5 files changed: +98 -112 lines changed

test/e2e/cluster_upgrade_runtimesdk.go

Lines changed: 63 additions & 92 deletions

@@ -35,7 +35,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
 	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
 	"sigs.k8s.io/cluster-api/test/e2e/internal/log"
 	"sigs.k8s.io/cluster-api/test/framework"
@@ -44,8 +43,6 @@ import (
 	"sigs.k8s.io/cluster-api/util/conditions"
 )
 
-var hookFailedMessage = "hook failed"
-
 // clusterUpgradeWithRuntimeSDKSpecInput is the input for clusterUpgradeWithRuntimeSDKSpec.
 type clusterUpgradeWithRuntimeSDKSpecInput struct {
 	E2EConfig *clusterctl.E2EConfig
@@ -83,14 +80,14 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 	var (
 		input         clusterUpgradeWithRuntimeSDKSpecInput
 		namespace     *corev1.Namespace
-		ext           *runtimev1.ExtensionConfig
 		cancelWatches context.CancelFunc
 
 		controlPlaneMachineCount int64
 		workerMachineCount       int64
 
 		clusterResources  *clusterctl.ApplyClusterTemplateAndWaitResult
 		testExtensionPath string
+		clusterName       string
 	)
 
 	BeforeEach(func() {
@@ -123,11 +120,12 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 
 		// Set up a Namespace where to host objects for this spec and create a watcher for the Namespace events.
 		namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
+		clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
+
 		clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 	})
 
 	It("Should create, upgrade and delete a workload cluster", func() {
-		clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
 		By("Deploy Test Extension")
 		testExtensionDeploymentTemplate, err := os.ReadFile(testExtensionPath) //nolint:gosec
 		Expect(err).ToNot(HaveOccurred(), "Failed to read the extension deployment manifest file")
@@ -141,12 +139,14 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 		Expect(input.BootstrapClusterProxy.Apply(ctx, []byte(testExtensionDeployment), "--namespace", namespace.Name)).To(Succeed())
 
 		By("Deploy Test Extension ExtensionConfig and ConfigMap")
-		ext = extensionConfig(specName, namespace)
-		err = input.BootstrapClusterProxy.GetClient().Create(ctx, ext)
-		Expect(err).ToNot(HaveOccurred(), "Failed to create the extension config")
-		responses := responsesConfigMap(clusterName, namespace)
-		err = input.BootstrapClusterProxy.GetClient().Create(ctx, responses)
-		Expect(err).ToNot(HaveOccurred(), "Failed to create the responses configmap")
+
+		Expect(input.BootstrapClusterProxy.GetClient().Create(ctx,
+			extensionConfig(specName, namespace))).
+			To(Succeed(), "Failed to create the extension config")
+
+		Expect(input.BootstrapClusterProxy.GetClient().Create(ctx,
+			responsesConfigMap(clusterName, namespace))).
+			To(Succeed(), "Failed to create the responses configMap")
 
 		By("Creating a workload cluster")
 
@@ -182,8 +182,6 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 			ClusterProxy:             input.BootstrapClusterProxy,
 			Cluster:                  clusterResources.Cluster,
 			ControlPlane:             clusterResources.ControlPlane,
-			EtcdImageTag:             input.E2EConfig.GetVariable(EtcdVersionUpgradeTo),
-			DNSImageTag:              input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo),
 			MachineDeployments:       clusterResources.MachineDeployments,
 			KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
 			WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
@@ -195,6 +193,7 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 					input.BootstrapClusterProxy.GetClient(),
 					namespace.Name,
 					clusterName,
+					input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
 					input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"))
 			},
 			PreWaitForMachineDeploymentToBeUpgraded: func() {
@@ -236,26 +235,26 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 
 		By("Checking all lifecycle hooks have been called")
 		// Assert that each hook has been called and returned "Success" during the test.
-		err = checkLifecycleHookResponses(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, map[string]string{
-			"BeforeClusterCreate":          "Success",
-			"BeforeClusterUpgrade":         "Success",
-			"BeforeClusterDelete":          "Success",
+		Expect(checkLifecycleHookResponses(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, map[string]string{
+			"BeforeClusterCreate":          "Status: Success, RetryAfterSeconds: 0",
+			"BeforeClusterUpgrade":         "Status: Success, RetryAfterSeconds: 0",
+			"BeforeClusterDelete":          "Status: Success, RetryAfterSeconds: 0",
+			"AfterControlPlaneUpgrade":     "Status: Success, RetryAfterSeconds: 0",
 			"AfterControlPlaneInitialized": "Success",
-			"AfterControlPlaneUpgrade":     "Success",
 			"AfterClusterUpgrade":          "Success",
-		})
-		Expect(err).ToNot(HaveOccurred(), "Lifecycle hook calls were not as expected")
+		})).To(Succeed(), "Lifecycle hook calls were not as expected")
 
 		By("PASSED!")
 	})
 
 	AfterEach(func() {
+		// Delete the extensionConfig first to ensure the BeforeDeleteCluster hook doesn't block deletion.
+		Eventually(func() error {
+			return input.BootstrapClusterProxy.GetClient().Delete(ctx, extensionConfig(specName, namespace))
+		}, 10*time.Second, 1*time.Second).Should(Succeed(), "delete extensionConfig failed")
+
 		// Dumps all the resources in the spec Namespace, then cleanups the cluster object and the spec Namespace itself.
 		dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
-
-		Eventually(func() error {
-			return input.BootstrapClusterProxy.GetClient().Delete(ctx, ext)
-		}, 10*time.Second, 1*time.Second).Should(Succeed())
 	})
 }
 
@@ -303,11 +302,11 @@ func responsesConfigMap(name string, namespace *corev1.Namespace) *corev1.Config
 		},
 		// Set the initial preloadedResponses for each of the tested hooks.
 		Data: map[string]string{
-			// Blocking hooks are set to Status:Failure initially. These will be changed during the test.
-			"BeforeClusterCreate-preloadedResponse":      fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),
-			"BeforeClusterUpgrade-preloadedResponse":     fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),
-			"AfterControlPlaneUpgrade-preloadedResponse": fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),
-			"BeforeClusterDelete-preloadedResponse":      fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),
+			// Blocking hooks are set to return RetryAfterSeconds initially. These will be changed during the test.
+			"BeforeClusterCreate-preloadedResponse":      `{"Status": "Success", "RetryAfterSeconds": 5}`,
+			"BeforeClusterUpgrade-preloadedResponse":     `{"Status": "Success", "RetryAfterSeconds": 5}`,
+			"AfterControlPlaneUpgrade-preloadedResponse": `{"Status": "Success", "RetryAfterSeconds": 5}`,
+			"BeforeClusterDelete-preloadedResponse":      `{"Status": "Success", "RetryAfterSeconds": 5}`,
 
 			// Non-blocking hooks are set to Status:Success.
 			"AfterControlPlaneInitialized-preloadedResponse": `{"Status": "Success"}`,
@@ -359,42 +358,29 @@ func beforeClusterCreateTestHandler(ctx context.Context, c client.Client, namesp
 	runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, true, func() bool {
 		blocked := true
 		// This hook should block the Cluster from entering the "Provisioned" state.
-		cluster := &clusterv1.Cluster{}
-		Eventually(func() error {
-			return c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)
-		}).Should(Succeed())
+		cluster := framework.GetClusterByName(ctx,
+			framework.GetClusterByNameInput{Name: clusterName, Namespace: namespace, Getter: c})
 
-		// Check if the TopologyReconciled condition message contains both the hook name and hookFailedMessage.
-		if !clusterConditionShowsHookFailed(cluster, hookName) {
-			blocked = false
-		}
 		if cluster.Status.Phase == string(clusterv1.ClusterPhaseProvisioned) {
 			blocked = false
 		}
 		return blocked
 	}, intervals)
 }
 
-// beforeClusterUpgradeTestHandler calls runtimeHookTestHandler with a blocking function which returns false if the
-// Cluster has controlplanev1.RollingUpdateInProgressReason in its ReadyCondition.
-func beforeClusterUpgradeTestHandler(ctx context.Context, c client.Client, namespace, clusterName string, intervals []interface{}) {
+// beforeClusterUpgradeTestHandler calls runtimeHookTestHandler with a blocking function which returns false if
+// any of the machines in the control plane has been updated to the target Kubernetes version.
+func beforeClusterUpgradeTestHandler(ctx context.Context, c client.Client, namespace, clusterName, toVersion string, intervals []interface{}) {
 	hookName := "BeforeClusterUpgrade"
 	runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, true, func() bool {
 		var blocked = true
 
-		cluster := &clusterv1.Cluster{}
-		Eventually(func() error {
-			return c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)
-		}).Should(Succeed())
-
-		// Check if the TopologyReconciled condition message contains both the hook name and hookFailedMessage.
-		if !clusterConditionShowsHookFailed(cluster, hookName) {
-			blocked = false
-		}
-		// Check if the Cluster is showing the RollingUpdateInProgress condition reason. If it has the update process is unblocked.
-		if conditions.IsFalse(cluster, clusterv1.ReadyCondition) &&
-			conditions.GetReason(cluster, clusterv1.ReadyCondition) == controlplanev1.RollingUpdateInProgressReason {
-			blocked = false
+		controlPlaneMachines := framework.GetControlPlaneMachinesByCluster(ctx,
+			framework.GetControlPlaneMachinesByClusterInput{Lister: c, ClusterName: clusterName, Namespace: namespace})
+		for _, machine := range controlPlaneMachines {
+			if *machine.Spec.Version == toVersion {
+				blocked = false
+			}
 		}
 		return blocked
 	}, intervals)
@@ -406,26 +392,11 @@ func afterControlPlaneUpgradeTestHandler(ctx context.Context, c client.Client, n
 	hookName := "AfterControlPlaneUpgrade"
 	runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, true, func() bool {
 		var blocked = true
-		cluster := &clusterv1.Cluster{}
-		Eventually(func() error {
-			return c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)
-		}).Should(Succeed())
-
-		// Check if the TopologyReconciled condition message contains both the hook name and hookFailedMessage.
-		if !clusterConditionShowsHookFailed(cluster, hookName) {
-			blocked = false
-		}
-
-		mds := &clusterv1.MachineDeploymentList{}
-		Eventually(func() error {
-			return c.List(ctx, mds, client.MatchingLabels{
-				clusterv1.ClusterLabelName:          clusterName,
-				clusterv1.ClusterTopologyOwnedLabel: "",
-			})
-		}).Should(Succeed())
 
+		mds := framework.GetMachineDeploymentsByCluster(ctx,
+			framework.GetMachineDeploymentsByClusterInput{ClusterName: clusterName, Namespace: namespace, Lister: c})
 		// If any of the MachineDeployments have the target Kubernetes Version, the hook is unblocked.
-		for _, md := range mds.Items {
+		for _, md := range mds {
 			if *md.Spec.Template.Spec.Version == version {
 				blocked = false
 			}
@@ -464,23 +435,23 @@ func runtimeHookTestHandler(ctx context.Context, c client.Client, namespace, clu
 		if err := checkLifecycleHooksCalledAtLeastOnce(ctx, c, namespace, clusterName, []string{hookName}); err != nil {
 			return err
 		}
-		cluster := &clusterv1.Cluster{}
-		if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster); err != nil {
-			return err
-		}
 
 		// Check for the existence of the condition if withTopologyReconciledCondition is true.
-		if withTopologyReconciledCondition &&
-			(conditions.GetReason(cluster, clusterv1.TopologyReconciledCondition) != clusterv1.TopologyReconcileFailedReason) {
-			return errors.New("Condition not found on Cluster object")
+		if withTopologyReconciledCondition {
+			cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{
+				Name: clusterName, Namespace: namespace, Getter: c})
+
+			if !clusterConditionShowsHookBlocking(cluster, hookName) {
+				return errors.Errorf("Blocking condition for %s not found on Cluster object", hookName)
+			}
 		}
 		return nil
-	}, 60*time.Second).Should(Succeed(), "%s has not been called", hookName)
+	}, 30*time.Second).Should(Succeed(), "%s has not been called", hookName)
 
 	// blockingCondition should consistently be true as the Runtime hook is returning "Failure".
 	Consistently(func() bool {
 		return blockingCondition()
-	}, 30*time.Second).Should(BeTrue(),
+	}, 60*time.Second).Should(BeTrue(),
 		fmt.Sprintf("Cluster Topology reconciliation continued unexpectedly: hook %s not blocking", hookName))
 
 	// Patch the ConfigMap to set the hook response to "Success".
@@ -500,32 +471,32 @@ func runtimeHookTestHandler(ctx context.Context, c client.Client, namespace, clu
 	Eventually(func() bool {
 		return blockingCondition()
 	}, intervals...).Should(BeFalse(),
-		fmt.Sprintf("ClusterTopology reconcile did not unblock after updating hook response: hook %s still blocking", hookName))
+		fmt.Sprintf("ClusterTopology reconcile did proceed as expected when calling %s", hookName))
 }
 
-// clusterConditionShowsHookFailed checks if the TopologyReconciled condition message contains both the hook name and hookFailedMessage.
-func clusterConditionShowsHookFailed(cluster *clusterv1.Cluster, hookName string) bool {
-	return conditions.GetReason(cluster, clusterv1.TopologyReconciledCondition) == clusterv1.TopologyReconcileFailedReason &&
-		strings.Contains(conditions.GetMessage(cluster, clusterv1.TopologyReconciledCondition), hookFailedMessage) &&
+// clusterConditionShowsHookBlocking checks if the TopologyReconciled condition message contains both the hook name and hookFailedMessage.
+func clusterConditionShowsHookBlocking(cluster *clusterv1.Cluster, hookName string) bool {
+	return conditions.GetReason(cluster, clusterv1.TopologyReconciledCondition) == clusterv1.TopologyReconciledHookBlockingReason &&
 		strings.Contains(conditions.GetMessage(cluster, clusterv1.TopologyReconciledCondition), hookName)
 }
 
 func dumpAndDeleteCluster(ctx context.Context, proxy framework.ClusterProxy, namespace, clusterName, artifactFolder string) {
 	By("Deleting the workload cluster")
-	cluster := &clusterv1.Cluster{}
-	Eventually(func() error {
-		return proxy.GetClient().Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)
-	}).Should(Succeed())
+
+	cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{
+		Name: clusterName, Namespace: namespace, Getter: proxy.GetClient()})
 
 	// Dump all the logs from the workload cluster before deleting them.
-	proxy.CollectWorkloadClusterLogs(ctx, cluster.Namespace, cluster.Name, filepath.Join(artifactFolder, "clusters-beforeClusterDelete", cluster.Name))
+	proxy.CollectWorkloadClusterLogs(ctx,
+		cluster.Namespace,
+		cluster.Name,
+		filepath.Join(artifactFolder, "clusters-beforeClusterDelete", cluster.Name))
 
 	// Dump all Cluster API related resources to artifacts before deleting them.
 	framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
 		Lister:    proxy.GetClient(),
 		Namespace: namespace,
-		LogPath:   filepath.Join(artifactFolder, "clusters-beforeClusterDelete", proxy.GetName(), "resources"),
-	})
+		LogPath:   filepath.Join(artifactFolder, "clusters-beforeClusterDelete", proxy.GetName(), "resources")})
 
 	By("Deleting the workload cluster")
 	framework.DeleteCluster(ctx, framework.DeleteClusterInput{
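
Note on the new blocking mechanism: the blocking hooks now start from `{"Status": "Success", "RetryAfterSeconds": 5}` and the test unblocks them later by patching the responses ConfigMap (see the "Patch the ConfigMap to set the hook response to "Success"" comment above). Below is a minimal sketch of that unblocking step; the ConfigMap name ("<clusterName>-hookresponses") and the helper name are assumptions for illustration, while the "<hook>-preloadedResponse" key format comes from responsesConfigMap in this diff.

package e2e

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// unblockHook switches a hook's preloaded response from the blocking form
// (RetryAfterSeconds: 5) to a non-blocking one (RetryAfterSeconds: 0).
// The ConfigMap name used here is an assumption for illustration only.
func unblockHook(ctx context.Context, c client.Client, namespace, clusterName, hookName string) error {
	configMap := &corev1.ConfigMap{}
	if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName + "-hookresponses"}, configMap); err != nil {
		return err
	}
	// Take the patch base before mutating the object, then overwrite the preloaded response.
	patchBase := client.MergeFrom(configMap.DeepCopy())
	configMap.Data[hookName+"-preloadedResponse"] = `{"Status": "Success", "RetryAfterSeconds": 0}`
	return c.Patch(ctx, configMap, patchBase)
}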

test/extension/handlers/lifecycle/handlers.go

Lines changed: 13 additions & 4 deletions

@@ -153,6 +153,10 @@ func (h *Handler) readResponseFromConfigMap(ctx context.Context, name, namespace
 	if err := yaml.Unmarshal([]byte(configMap.Data[hookName+"-preloadedResponse"]), response); err != nil {
 		return errors.Wrapf(err, "failed to read %q response information from ConfigMap", hook)
 	}
+	if r, ok := response.(runtimehooksv1.RetryResponseObject); ok {
+		log := ctrl.LoggerFrom(ctx)
+		log.Info(fmt.Sprintf("%s response is %s. retry: %v", hookName, r.GetStatus(), r.GetRetryAfterSeconds()))
+	}
 	return nil
 }
 
@@ -163,10 +167,15 @@
 	if err := h.Client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap); err != nil {
 		return errors.Wrapf(err, "failed to read the ConfigMap %s/%s", namespace, configMapName)
 	}
-
-	// Patch the actualResponseStatus with the returned value
-	patch := client.RawPatch(types.MergePatchType,
-		[]byte(fmt.Sprintf(`{"data":{"%s-actualResponseStatus":"%s"}}`, hookName, response.GetStatus()))) //nolint:gocritic
+	var patch client.Patch
+	if r, ok := response.(runtimehooksv1.RetryResponseObject); ok {
+		patch = client.RawPatch(types.MergePatchType,
+			[]byte(fmt.Sprintf(`{"data":{"%s-actualResponseStatus": "Status: %s, RetryAfterSeconds: %v"}}`, hookName, r.GetStatus(), r.GetRetryAfterSeconds())))
+	} else {
+		// Patch the actualResponseStatus with the returned value
+		patch = client.RawPatch(types.MergePatchType,
+			[]byte(fmt.Sprintf(`{"data":{"%s-actualResponseStatus":"%s"}}`, hookName, response.GetStatus()))) //nolint:gocritic
+	}
 	if err := h.Client.Patch(ctx, configMap, patch); err != nil {
 		return errors.Wrapf(err, "failed to update the ConfigMap %s/%s", namespace, configMapName)
 	}
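
The handler now records both the status and the retry interval for retryable responses, which is what the e2e spec's new "Status: Success, RetryAfterSeconds: 0" expectations compare against. The sketch below shows how a preloaded response round-trips through the runtime hooks types; it assumes the hooks API lives at sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1 and that BeforeClusterCreateResponse satisfies runtimehooksv1.RetryResponseObject, the same interface recordCallInConfigMap asserts on above.

package main

import (
	"fmt"

	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
	"sigs.k8s.io/yaml"
)

func main() {
	// A blocking hook's initial preloaded response, as seeded by responsesConfigMap in this commit.
	preloaded := []byte(`{"Status": "Success", "RetryAfterSeconds": 5}`)

	resp := &runtimehooksv1.BeforeClusterCreateResponse{}
	if err := yaml.Unmarshal(preloaded, resp); err != nil {
		panic(err)
	}

	// Mirrors the type assertion in recordCallInConfigMap: retryable responses expose
	// GetRetryAfterSeconds, and a value > 0 keeps the hook blocking.
	if r, ok := interface{}(resp).(runtimehooksv1.RetryResponseObject); ok {
		fmt.Printf("Status: %s, RetryAfterSeconds: %v\n", r.GetStatus(), r.GetRetryAfterSeconds())
	}
}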

test/framework/cluster_helpers.go

Lines changed: 1 addition & 1 deletion

@@ -150,7 +150,7 @@ type DeleteClusterInput struct {
 	Cluster *clusterv1.Cluster
 }
 
-// DeleteCluster deletes the cluster and waits for everything the cluster owned to actually be gone.
+// DeleteCluster deletes the cluster.
 func DeleteCluster(ctx context.Context, input DeleteClusterInput) {
 	Byf("Deleting cluster %s", input.Cluster.GetName())
 	Expect(input.Deleter.Delete(ctx, input.Cluster)).To(Succeed())

test/framework/cluster_topology_helpers.go

Lines changed: 20 additions & 14 deletions

@@ -128,20 +128,26 @@ func UpgradeClusterTopologyAndWaitForUpgrade(ctx context.Context, input UpgradeC
 		KubernetesVersion: input.KubernetesUpgradeVersion,
 	}, input.WaitForKubeProxyUpgrade...)
 
-	log.Logf("Waiting for CoreDNS to have the upgraded image tag")
-	WaitForDNSUpgrade(ctx, WaitForDNSUpgradeInput{
-		Getter:     workloadClient,
-		DNSVersion: input.DNSImageTag,
-	}, input.WaitForDNSUpgrade...)
-
-	log.Logf("Waiting for etcd to have the upgraded image tag")
-	lblSelector, err := labels.Parse("component=etcd")
-	Expect(err).ToNot(HaveOccurred())
-	WaitForPodListCondition(ctx, WaitForPodListConditionInput{
-		Lister:      workloadClient,
-		ListOptions: &client.ListOptions{LabelSelector: lblSelector},
-		Condition:   EtcdImageTagCondition(input.EtcdImageTag, int(*input.ControlPlane.Spec.Replicas)),
-	}, input.WaitForEtcdUpgrade...)
+	// Wait for the CoreDNS upgrade if the DNSImageTag is set.
+	if input.DNSImageTag != "" {
+		log.Logf("Waiting for CoreDNS to have the upgraded image tag")
+		WaitForDNSUpgrade(ctx, WaitForDNSUpgradeInput{
+			Getter:     workloadClient,
+			DNSVersion: input.DNSImageTag,
+		}, input.WaitForDNSUpgrade...)
+	}
+
+	// Wait for the etcd upgrade if the EtcdImageTag is set.
+	if input.EtcdImageTag != "" {
+		log.Logf("Waiting for etcd to have the upgraded image tag")
+		lblSelector, err := labels.Parse("component=etcd")
+		Expect(err).ToNot(HaveOccurred())
+		WaitForPodListCondition(ctx, WaitForPodListConditionInput{
+			Lister:      workloadClient,
+			ListOptions: &client.ListOptions{LabelSelector: lblSelector},
+			Condition:   EtcdImageTagCondition(input.EtcdImageTag, int(*input.ControlPlane.Spec.Replicas)),
+		}, input.WaitForEtcdUpgrade...)
+	}
 
 	// Once the ControlPlane is upgraded we can run PreWaitForMachineDeploymentToBeUpgraded.
 	// Note: This can e.g. be used to verify the AfterControlPlaneUpgrade lifecycle hook is executed
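
With the CoreDNS and etcd waits now conditional, the runtime SDK spec can drop the EtcdImageTag and DNSImageTag fields from its upgrade call (see the first file in this commit). A minimal sketch of such a call is below; it assumes the spec invokes framework.UpgradeClusterTopologyAndWaitForUpgrade with an input struct named UpgradeClusterTopologyAndWaitForUpgradeInput, and only uses fields visible in the spec's diff above.

package e2e

import (
	"context"

	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
)

// upgradeWithoutEtcdAndDNSWaits sketches the upgrade call without EtcdImageTag/DNSImageTag:
// leaving them empty makes the helper skip the CoreDNS and etcd image-tag waits entirely.
func upgradeWithoutEtcdAndDNSWaits(ctx context.Context, proxy framework.ClusterProxy, res *clusterctl.ApplyClusterTemplateAndWaitResult, toVersion string, machineUpgradeIntervals []interface{}) {
	framework.UpgradeClusterTopologyAndWaitForUpgrade(ctx, framework.UpgradeClusterTopologyAndWaitForUpgradeInput{
		ClusterProxy:                proxy,
		Cluster:                     res.Cluster,
		ControlPlane:                res.ControlPlane,
		MachineDeployments:          res.MachineDeployments,
		KubernetesUpgradeVersion:    toVersion,
		WaitForMachinesToBeUpgraded: machineUpgradeIntervals,
		// EtcdImageTag and DNSImageTag are intentionally left unset.
	})
}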
