@@ -35,7 +35,6 @@ import (
3535 "sigs.k8s.io/controller-runtime/pkg/client"
3636
3737 clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
38- controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
3938 runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
4039 "sigs.k8s.io/cluster-api/test/e2e/internal/log"
4140 "sigs.k8s.io/cluster-api/test/framework"
@@ -44,8 +43,6 @@ import (
4443 "sigs.k8s.io/cluster-api/util/conditions"
4544)
4645
47- var hookFailedMessage = "hook failed"
48-
4946// clusterUpgradeWithRuntimeSDKSpecInput is the input for clusterUpgradeWithRuntimeSDKSpec.
5047type clusterUpgradeWithRuntimeSDKSpecInput struct {
5148 E2EConfig * clusterctl.E2EConfig
@@ -83,14 +80,14 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 	var (
 		input clusterUpgradeWithRuntimeSDKSpecInput
 		namespace *corev1.Namespace
-		ext *runtimev1.ExtensionConfig
 		cancelWatches context.CancelFunc
 
 		controlPlaneMachineCount int64
 		workerMachineCount int64
 
 		clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
 		testExtensionPath string
+		clusterName string
 	)
 
 	BeforeEach(func() {
@@ -123,11 +120,12 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 
 		// Set up a Namespace where to host objects for this spec and create a watcher for the Namespace events.
 		namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
+		clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
+
 		clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 	})
 
 	It("Should create, upgrade and delete a workload cluster", func() {
-		clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
 		By("Deploy Test Extension")
 		testExtensionDeploymentTemplate, err := os.ReadFile(testExtensionPath) //nolint:gosec
 		Expect(err).ToNot(HaveOccurred(), "Failed to read the extension deployment manifest file")
@@ -141,12 +139,14 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 		Expect(input.BootstrapClusterProxy.Apply(ctx, []byte(testExtensionDeployment), "--namespace", namespace.Name)).To(Succeed())
 
 		By("Deploy Test Extension ExtensionConfig and ConfigMap")
-		ext = extensionConfig(specName, namespace)
-		err = input.BootstrapClusterProxy.GetClient().Create(ctx, ext)
-		Expect(err).ToNot(HaveOccurred(), "Failed to create the extension config")
-		responses := responsesConfigMap(clusterName, namespace)
-		err = input.BootstrapClusterProxy.GetClient().Create(ctx, responses)
-		Expect(err).ToNot(HaveOccurred(), "Failed to create the responses configmap")
+
+		Expect(input.BootstrapClusterProxy.GetClient().Create(ctx,
+			extensionConfig(specName, namespace))).
+			To(Succeed(), "Failed to create the extension config")
+
+		Expect(input.BootstrapClusterProxy.GetClient().Create(ctx,
+			responsesConfigMap(clusterName, namespace))).
+			To(Succeed(), "Failed to create the responses configMap")
 
 		By("Creating a workload cluster")
 
@@ -182,8 +182,6 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 				ClusterProxy: input.BootstrapClusterProxy,
 				Cluster: clusterResources.Cluster,
 				ControlPlane: clusterResources.ControlPlane,
-				EtcdImageTag: input.E2EConfig.GetVariable(EtcdVersionUpgradeTo),
-				DNSImageTag: input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo),
 				MachineDeployments: clusterResources.MachineDeployments,
 				KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
 				WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
@@ -195,6 +193,7 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 					input.BootstrapClusterProxy.GetClient(),
 					namespace.Name,
 					clusterName,
+					input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
 					input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"))
 			},
 			PreWaitForMachineDeploymentToBeUpgraded: func() {
@@ -236,26 +235,26 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 
 		By("Checking all lifecycle hooks have been called")
 		// Assert that each hook has been called and returned "Success" during the test.
-		err = checkLifecycleHookResponses(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, map[string]string{
-			"BeforeClusterCreate": "Success",
-			"BeforeClusterUpgrade": "Success",
-			"BeforeClusterDelete": "Success",
+		Expect(checkLifecycleHookResponses(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, map[string]string{
+			"BeforeClusterCreate": "Status: Success, RetryAfterSeconds: 0",
+			"BeforeClusterUpgrade": "Status: Success, RetryAfterSeconds: 0",
+			"BeforeClusterDelete": "Status: Success, RetryAfterSeconds: 0",
+			"AfterControlPlaneUpgrade": "Status: Success, RetryAfterSeconds: 0",
 			"AfterControlPlaneInitialized": "Success",
-			"AfterControlPlaneUpgrade": "Success",
 			"AfterClusterUpgrade": "Success",
-		})
-		Expect(err).ToNot(HaveOccurred(), "Lifecycle hook calls were not as expected")
+		})).To(Succeed(), "Lifecycle hook calls were not as expected")
 
 		By("PASSED!")
 	})
 
 	AfterEach(func() {
+		// Delete the extensionConfig first to ensure the BeforeClusterDelete hook doesn't block deletion.
+		Eventually(func() error {
+			return input.BootstrapClusterProxy.GetClient().Delete(ctx, extensionConfig(specName, namespace))
+		}, 10*time.Second, 1*time.Second).Should(Succeed(), "delete extensionConfig failed")
+
 		// Dumps all the resources in the spec Namespace, then cleanups the cluster object and the spec Namespace itself.
 		dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
-
-		Eventually(func() error {
-			return input.BootstrapClusterProxy.GetClient().Delete(ctx, ext)
-		}, 10*time.Second, 1*time.Second).Should(Succeed())
 	})
 }
 
@@ -303,11 +302,11 @@ func responsesConfigMap(name string, namespace *corev1.Namespace) *corev1.Config
 		},
 		// Set the initial preloadedResponses for each of the tested hooks.
 		Data: map[string]string{
-			// Blocking hooks are set to Status:Failure initially. These will be changed during the test.
-			"BeforeClusterCreate-preloadedResponse": fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),
-			"BeforeClusterUpgrade-preloadedResponse": fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),
-			"AfterControlPlaneUpgrade-preloadedResponse": fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),
-			"BeforeClusterDelete-preloadedResponse": fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),
+			// Blocking hooks are set to return RetryAfterSeconds initially. These will be changed during the test.
+			"BeforeClusterCreate-preloadedResponse": `{"Status": "Success", "RetryAfterSeconds": 5}`,
+			"BeforeClusterUpgrade-preloadedResponse": `{"Status": "Success", "RetryAfterSeconds": 5}`,
+			"AfterControlPlaneUpgrade-preloadedResponse": `{"Status": "Success", "RetryAfterSeconds": 5}`,
+			"BeforeClusterDelete-preloadedResponse": `{"Status": "Success", "RetryAfterSeconds": 5}`,
 
 			// Non-blocking hooks are set to Status:Success.
 			"AfterControlPlaneInitialized-preloadedResponse": `{"Status": "Success"}`,
@@ -359,42 +358,29 @@ func beforeClusterCreateTestHandler(ctx context.Context, c client.Client, namesp
 	runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, true, func() bool {
 		blocked := true
 		// This hook should block the Cluster from entering the "Provisioned" state.
-		cluster := &clusterv1.Cluster{}
-		Eventually(func() error {
-			return c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)
-		}).Should(Succeed())
+		cluster := framework.GetClusterByName(ctx,
+			framework.GetClusterByNameInput{Name: clusterName, Namespace: namespace, Getter: c})
 
-		// Check if the TopologyReconciled condition message contains both the hook name and hookFailedMessage.
-		if !clusterConditionShowsHookFailed(cluster, hookName) {
-			blocked = false
-		}
 		if cluster.Status.Phase == string(clusterv1.ClusterPhaseProvisioned) {
 			blocked = false
 		}
 		return blocked
 	}, intervals)
 }
 
-// beforeClusterUpgradeTestHandler calls runtimeHookTestHandler with a blocking function which returns false if the
-// Cluster has controlplanev1.RollingUpdateInProgressReason in its ReadyCondition.
-func beforeClusterUpgradeTestHandler(ctx context.Context, c client.Client, namespace, clusterName string, intervals []interface{}) {
+// beforeClusterUpgradeTestHandler calls runtimeHookTestHandler with a blocking function which returns false if
+// any of the machines in the control plane has been updated to the target Kubernetes version.
+func beforeClusterUpgradeTestHandler(ctx context.Context, c client.Client, namespace, clusterName, toVersion string, intervals []interface{}) {
 	hookName := "BeforeClusterUpgrade"
 	runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, true, func() bool {
 		var blocked = true
 
-		cluster := &clusterv1.Cluster{}
-		Eventually(func() error {
-			return c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)
-		}).Should(Succeed())
-
-		// Check if the TopologyReconciled condition message contains both the hook name and hookFailedMessage.
-		if !clusterConditionShowsHookFailed(cluster, hookName) {
-			blocked = false
-		}
-		// Check if the Cluster is showing the RollingUpdateInProgress condition reason. If it has the update process is unblocked.
-		if conditions.IsFalse(cluster, clusterv1.ReadyCondition) &&
-			conditions.GetReason(cluster, clusterv1.ReadyCondition) == controlplanev1.RollingUpdateInProgressReason {
-			blocked = false
+		controlPlaneMachines := framework.GetControlPlaneMachinesByCluster(ctx,
+			framework.GetControlPlaneMachinesByClusterInput{Lister: c, ClusterName: clusterName, Namespace: namespace})
+		for _, machine := range controlPlaneMachines {
+			if *machine.Spec.Version == toVersion {
+				blocked = false
+			}
 		}
 		return blocked
 	}, intervals)
@@ -406,26 +392,11 @@ func afterControlPlaneUpgradeTestHandler(ctx context.Context, c client.Client, n
 	hookName := "AfterControlPlaneUpgrade"
 	runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, true, func() bool {
 		var blocked = true
-		cluster := &clusterv1.Cluster{}
-		Eventually(func() error {
-			return c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)
-		}).Should(Succeed())
-
-		// Check if the TopologyReconciled condition message contains both the hook name and hookFailedMessage.
-		if !clusterConditionShowsHookFailed(cluster, hookName) {
-			blocked = false
-		}
-
-		mds := &clusterv1.MachineDeploymentList{}
-		Eventually(func() error {
-			return c.List(ctx, mds, client.MatchingLabels{
-				clusterv1.ClusterLabelName: clusterName,
-				clusterv1.ClusterTopologyOwnedLabel: "",
-			})
-		}).Should(Succeed())
 
+		mds := framework.GetMachineDeploymentsByCluster(ctx,
+			framework.GetMachineDeploymentsByClusterInput{ClusterName: clusterName, Namespace: namespace, Lister: c})
 		// If any of the MachineDeployments have the target Kubernetes Version, the hook is unblocked.
-		for _, md := range mds.Items {
+		for _, md := range mds {
 			if *md.Spec.Template.Spec.Version == version {
 				blocked = false
 			}
@@ -464,23 +435,23 @@ func runtimeHookTestHandler(ctx context.Context, c client.Client, namespace, clu
 		if err := checkLifecycleHooksCalledAtLeastOnce(ctx, c, namespace, clusterName, []string{hookName}); err != nil {
 			return err
 		}
-		cluster := &clusterv1.Cluster{}
-		if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster); err != nil {
-			return err
-		}
 
 		// Check for the existence of the condition if withTopologyReconciledCondition is true.
-		if withTopologyReconciledCondition &&
-			(conditions.GetReason(cluster, clusterv1.TopologyReconciledCondition) != clusterv1.TopologyReconcileFailedReason) {
-			return errors.New("Condition not found on Cluster object")
+		if withTopologyReconciledCondition {
+			cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{
+				Name: clusterName, Namespace: namespace, Getter: c})
+
+			if !clusterConditionShowsHookBlocking(cluster, hookName) {
+				return errors.Errorf("Blocking condition for %s not found on Cluster object", hookName)
+			}
 		}
 		return nil
-	}, 60*time.Second).Should(Succeed(), "%s has not been called", hookName)
+	}, 30*time.Second).Should(Succeed(), "%s has not been called", hookName)
 
 	// blockingCondition should consistently be true as the Runtime hook is returning "Failure".
 	Consistently(func() bool {
 		return blockingCondition()
-	}, 30*time.Second).Should(BeTrue(),
+	}, 60*time.Second).Should(BeTrue(),
 		fmt.Sprintf("Cluster Topology reconciliation continued unexpectedly: hook %s not blocking", hookName))
 
 	// Patch the ConfigMap to set the hook response to "Success".
@@ -500,32 +471,32 @@ func runtimeHookTestHandler(ctx context.Context, c client.Client, namespace, clu
 	Eventually(func() bool {
 		return blockingCondition()
 	}, intervals...).Should(BeFalse(),
-		fmt.Sprintf("ClusterTopology reconcile did not unblock after updating hook response: hook %s still blocking", hookName))
+		fmt.Sprintf("ClusterTopology reconcile did proceed as expected when calling %s", hookName))
 }
 
-// clusterConditionShowsHookFailed checks if the TopologyReconciled condition message contains both the hook name and hookFailedMessage.
-func clusterConditionShowsHookFailed(cluster *clusterv1.Cluster, hookName string) bool {
-	return conditions.GetReason(cluster, clusterv1.TopologyReconciledCondition) == clusterv1.TopologyReconcileFailedReason &&
-		strings.Contains(conditions.GetMessage(cluster, clusterv1.TopologyReconciledCondition), hookFailedMessage) &&
+// clusterConditionShowsHookBlocking checks if the TopologyReconciled condition reason is TopologyReconciledHookBlockingReason and its message contains the hook name.
+func clusterConditionShowsHookBlocking(cluster *clusterv1.Cluster, hookName string) bool {
+	return conditions.GetReason(cluster, clusterv1.TopologyReconciledCondition) == clusterv1.TopologyReconciledHookBlockingReason &&
 		strings.Contains(conditions.GetMessage(cluster, clusterv1.TopologyReconciledCondition), hookName)
 }
 
 func dumpAndDeleteCluster(ctx context.Context, proxy framework.ClusterProxy, namespace, clusterName, artifactFolder string) {
 	By("Deleting the workload cluster")
-	cluster := &clusterv1.Cluster{}
-	Eventually(func() error {
-		return proxy.GetClient().Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)
-	}).Should(Succeed())
+
+	cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{
+		Name: clusterName, Namespace: namespace, Getter: proxy.GetClient()})
 
 	// Dump all the logs from the workload cluster before deleting them.
-	proxy.CollectWorkloadClusterLogs(ctx, cluster.Namespace, cluster.Name, filepath.Join(artifactFolder, "clusters-beforeClusterDelete", cluster.Name))
+	proxy.CollectWorkloadClusterLogs(ctx,
+		cluster.Namespace,
+		cluster.Name,
+		filepath.Join(artifactFolder, "clusters-beforeClusterDelete", cluster.Name))
 
 	// Dump all Cluster API related resources to artifacts before deleting them.
 	framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
 		Lister: proxy.GetClient(),
 		Namespace: namespace,
-		LogPath: filepath.Join(artifactFolder, "clusters-beforeClusterDelete", proxy.GetName(), "resources"),
-	})
+		LogPath: filepath.Join(artifactFolder, "clusters-beforeClusterDelete", proxy.GetName(), "resources")})
 
 	By("Deleting the workload cluster")
 	framework.DeleteCluster(ctx, framework.DeleteClusterInput{