diff --git a/.golangci.yml b/.golangci.yml
index bc313cbadf91..21291a1f04d6 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -265,10 +265,6 @@ linters:
       - linters:
           - staticcheck
         text: 'SA1019: .*\.Deprecated\.V1Beta1.* is deprecated'
-      # CR v0.21 deprecated Result.Requeue, will be fixed incrementally and tracked via https://github.com/kubernetes-sigs/cluster-api/issues/12272
-      - linters:
-          - staticcheck
-        text: 'SA1019: .*(res|result|i|j)\.Requeue is deprecated: Use `RequeueAfter` instead'
       # TODO: var-naming: avoid meaningless package names by revive
       # * test/infrastructure/docker/internal/docker/types/
       # * bootstrap/kubeadm/types/
diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go
index 8a39706461e8..d95517ab0260 100644
--- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go
+++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go
@@ -18,6 +18,7 @@ package controllers
 
 import (
 	"testing"
+	"time"
 
 	. "github.com/onsi/gomega"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -61,7 +62,7 @@ func TestKubeadmConfigReconciler(t *testing.T) {
 				},
 			})
 			g.Expect(err).To(Succeed())
-			g.Expect(result.Requeue).To(BeFalse())
+			g.Expect(result.RequeueAfter).To(Equal(time.Duration(0)))
 		})
 	})
 }
diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go
index 50c708c77b26..374aaa304c36 100644
--- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go
+++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go
@@ -124,7 +124,6 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfKubeadmConfigIsReady(t *
 	}
 	result, err := k.Reconcile(ctx, request)
 	g.Expect(err).ToNot(HaveOccurred())
-	g.Expect(result.Requeue).To(BeFalse())
 	g.Expect(result.RequeueAfter).To(Equal(time.Duration(0)))
 }
 
@@ -297,7 +296,6 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasDataSecretName
 	actual := &bootstrapv1.KubeadmConfig{}
 	g.Expect(myclient.Get(ctx, client.ObjectKey{Namespace: config.Namespace, Name: config.Name}, actual)).To(Succeed())
 	g.Expect(err).ToNot(HaveOccurred())
-	g.Expect(result.Requeue).To(BeFalse())
 	g.Expect(result.RequeueAfter).To(Equal(time.Duration(0)))
 	assertHasTrueCondition(g, myclient, request, bootstrapv1.KubeadmConfigDataSecretAvailableCondition)
 }
@@ -476,7 +474,6 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueJoiningNodesIfControlPlaneNotI
 
 			result, err := k.Reconcile(ctx, tc.request)
 			g.Expect(err).ToNot(HaveOccurred())
-			g.Expect(result.Requeue).To(BeFalse())
 			g.Expect(result.RequeueAfter).To(Equal(30 * time.Second))
 			assertHasFalseCondition(g, myclient, tc.request, bootstrapv1.KubeadmConfigDataSecretAvailableCondition, bootstrapv1.KubeadmConfigDataSecretNotAvailableReason)
 		})
@@ -528,7 +525,6 @@ func TestKubeadmConfigReconciler_Reconcile_GenerateCloudConfigData(t *testing.T)
 
 	result, err := k.Reconcile(ctx, request)
 	g.Expect(err).ToNot(HaveOccurred())
-	g.Expect(result.Requeue).To(BeFalse())
 	g.Expect(result.RequeueAfter).To(Equal(time.Duration(0)))
 
 	cfg, err := getKubeadmConfig(myclient, "control-plane-init-cfg", metav1.NamespaceDefault)
@@ -631,7 +627,6 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueIfControlPlaneIsMissingAPIEndp
 	}
 	result, err := k.Reconcile(ctx, request)
 	g.Expect(err).ToNot(HaveOccurred())
-	g.Expect(result.Requeue).To(BeFalse())
 	g.Expect(result.RequeueAfter).To(Equal(10 * time.Second))
 
 	actualConfig := &bootstrapv1.KubeadmConfig{}
@@ -709,7 +704,6 @@ func TestReconcileIfJoinCertificatesAvailableConditioninNodesAndControlPlaneIsRe
 			}
 			result, err := k.Reconcile(ctx, request)
 			g.Expect(err).ToNot(HaveOccurred())
-			g.Expect(result.Requeue).To(BeFalse())
 			g.Expect(result.RequeueAfter).To(Equal(time.Duration(0)))
 
 			cfg, err := getKubeadmConfig(myclient, rt.configName, metav1.NamespaceDefault)
@@ -786,7 +780,6 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) {
 			}
 			result, err := k.Reconcile(ctx, request)
 			g.Expect(err).ToNot(HaveOccurred())
-			g.Expect(result.Requeue).To(BeFalse())
 			g.Expect(result.RequeueAfter).To(Equal(time.Duration(0)))
 
 			cfg, err := getKubeadmConfig(myclient, rt.configName, metav1.NamespaceDefault)
@@ -993,7 +986,6 @@ func TestKubeadmConfigSecretCreatedStatusNotPatched(t *testing.T) {
 	g.Expect(err).ToNot(HaveOccurred())
 	result, err := k.Reconcile(ctx, request)
 	g.Expect(err).ToNot(HaveOccurred())
-	g.Expect(result.Requeue).To(BeFalse())
 	g.Expect(result.RequeueAfter).To(Equal(time.Duration(0)))
 
 	cfg, err := getKubeadmConfig(myclient, "worker-join-cfg", metav1.NamespaceDefault)
@@ -1236,7 +1228,6 @@ func TestBootstrapTokenTTLExtension(t *testing.T) {
 	} {
 		result, err := k.Reconcile(ctx, req)
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(result.Requeue).To(BeFalse())
 		g.Expect(result.RequeueAfter).To(Equal(time.Duration(0)))
 	}
 
@@ -2085,7 +2076,6 @@ func TestKubeadmConfigReconciler_Reconcile_ExactlyOneControlPlaneMachineInitiali
 	}
 	result, err := k.Reconcile(ctx, request)
 	g.Expect(err).ToNot(HaveOccurred())
-	g.Expect(result.Requeue).To(BeFalse())
 	g.Expect(result.RequeueAfter).To(Equal(time.Duration(0)))
 
 	request = ctrl.Request{
@@ -2096,7 +2086,6 @@ func TestKubeadmConfigReconciler_Reconcile_ExactlyOneControlPlaneMachineInitiali
 	}
 	result, err = k.Reconcile(ctx, request)
 	g.Expect(err).ToNot(HaveOccurred())
-	g.Expect(result.Requeue).To(BeFalse())
 	g.Expect(result.RequeueAfter).To(Equal(30 * time.Second))
 	confList := &bootstrapv1.KubeadmConfigList{}
 	g.Expect(myclient.List(ctx, confList)).To(Succeed())
@@ -2151,7 +2140,6 @@ func TestKubeadmConfigReconciler_Reconcile_PatchWhenErrorOccurred(t *testing.T)
 
 	result, err := k.Reconcile(ctx, request)
 	g.Expect(err).To(HaveOccurred())
-	g.Expect(result.Requeue).To(BeFalse())
 	g.Expect(result.RequeueAfter).To(Equal(time.Duration(0)))
 
 	cfg, err := getKubeadmConfig(myclient, "control-plane-init-cfg", metav1.NamespaceDefault)
diff --git a/controlplane/kubeadm/internal/controllers/consts.go b/controlplane/kubeadm/internal/controllers/consts.go
index 8b173df49ca3..154b42088fb5 100644
--- a/controlplane/kubeadm/internal/controllers/consts.go
+++ b/controlplane/kubeadm/internal/controllers/consts.go
@@ -30,4 +30,10 @@ const (
 	// dependentCertRequeueAfter is how long to wait before checking again to see if
 	// dependent certificates have been created.
 	dependentCertRequeueAfter = 30 * time.Second
+
+	// scaleRequeueAfter is how long to wait before scaling up/down again after a scale operation has been requested.
+	scaleRequeueAfter = 15 * time.Second
+
+	// initializationRequeueAfter is how long to wait before checking again to see if the initialization has been completed.
+	initializationRequeueAfter = 15 * time.Second
 )
diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go
index f3539ae83527..aaf964fe4268 100644
--- a/controlplane/kubeadm/internal/controllers/controller.go
+++ b/controlplane/kubeadm/internal/controllers/controller.go
@@ -194,8 +194,7 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
 	// Initialize the patch helper.
 	patchHelper, err := patch.NewHelper(kcp, r.Client)
 	if err != nil {
-		log.Error(err, "Failed to configure the patch helper")
-		return ctrl.Result{Requeue: true}, nil
+		return ctrl.Result{}, errors.Wrap(err, "failed to configure the patch helper")
 	}
 
 	if isPaused, requeue, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, kcp); err != nil || isPaused || requeue {
@@ -506,8 +505,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl
 	// Get the workload cluster client.
 	workloadCluster, err := controlPlane.GetWorkloadCluster(ctx)
 	if err != nil {
-		log.V(2).Info("cannot get remote client to workload cluster, will requeue", "cause", err)
-		return ctrl.Result{Requeue: true}, nil
+		return ctrl.Result{}, errors.Wrapf(err, "cannot get remote client to workload cluster for KubeadmControlPlane %s/%s", controlPlane.KCP.Namespace, controlPlane.KCP.Name)
 	}
 
 	// Update kube-proxy daemonset.
diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go
index 4f2f82002283..38954e6d9605 100644
--- a/controlplane/kubeadm/internal/controllers/controller_test.go
+++ b/controlplane/kubeadm/internal/controllers/controller_test.go
@@ -482,7 +482,7 @@ func TestReconcileClusterNoEndpoints(t *testing.T) {
 	result, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
 	g.Expect(err).ToNot(HaveOccurred())
 	// TODO: this should stop to re-queue as soon as we have a proper remote cluster cache in place.
-	g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: false, RequeueAfter: 20 * time.Second}))
+	g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: 20 * time.Second}))
 	g.Expect(r.Client.Get(ctx, util.ObjectKey(kcp), kcp)).To(Succeed())
 
 	// Always expect that the Finalizer is set on the passed in resource
diff --git a/controlplane/kubeadm/internal/controllers/remediation.go b/controlplane/kubeadm/internal/controllers/remediation.go
index f8295868dffc..c4ba252641dd 100644
--- a/controlplane/kubeadm/internal/controllers/remediation.go
+++ b/controlplane/kubeadm/internal/controllers/remediation.go
@@ -368,7 +368,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C
 			controlplanev1.RemediationInProgressAnnotation: remediationInProgressValue,
 		})
 
-		return ctrl.Result{Requeue: true}, nil
+		return ctrl.Result{RequeueAfter: time.Second * 20}, nil
 	}
 
 	// Gets the machine to be remediated, which is the "most broken" among the unhealthy machines, determined as the machine
diff --git a/controlplane/kubeadm/internal/controllers/scale.go b/controlplane/kubeadm/internal/controllers/scale.go
index b58b1f1a5ffb..b393a73c12ef 100644
--- a/controlplane/kubeadm/internal/controllers/scale.go
+++ b/controlplane/kubeadm/internal/controllers/scale.go
@@ -68,7 +68,7 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte
 		newMachine.Spec.Bootstrap.ConfigRef.Kind, klog.KRef(newMachine.Namespace, newMachine.Spec.Bootstrap.ConfigRef.Name))
 
 	// Requeue the control plane, in case there are additional operations to perform
-	return ctrl.Result{Requeue: true}, nil
+	return ctrl.Result{RequeueAfter: initializationRequeueAfter}, nil
 }
 
 func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) {
@@ -107,7 +107,7 @@ func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context,
 		newMachine.Spec.Bootstrap.ConfigRef.Kind, klog.KRef(newMachine.Namespace, newMachine.Spec.Bootstrap.ConfigRef.Name))
 
 	// Requeue the control plane, in case there are other operations to perform
-	return ctrl.Result{Requeue: true}, nil
+	return ctrl.Result{RequeueAfter: scaleRequeueAfter}, nil
 }
 
 func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
@@ -163,7 +163,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
 		Info("Deleting Machine (scale down)", "Machine", klog.KObj(machineToDelete))
 
 	// Requeue the control plane, in case there are additional operations to perform
-	return ctrl.Result{Requeue: true}, nil
+	return ctrl.Result{RequeueAfter: scaleRequeueAfter}, nil
 }
 
 // preflightChecks checks if the control plane is stable before proceeding with a scale up/scale down operation,
diff --git a/controlplane/kubeadm/internal/controllers/scale_test.go b/controlplane/kubeadm/internal/controllers/scale_test.go
index b125a7abfdc9..8fdec49f8664 100644
--- a/controlplane/kubeadm/internal/controllers/scale_test.go
+++ b/controlplane/kubeadm/internal/controllers/scale_test.go
@@ -81,7 +81,7 @@ func TestKubeadmControlPlaneReconciler_initializeControlPlane(t *testing.T) {
 	}
 
 	result, err := r.initializeControlPlane(ctx, controlPlane)
-	g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true}))
+	g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: initializationRequeueAfter}))
 	g.Expect(err).ToNot(HaveOccurred())
 
 	machineList := &clusterv1.MachineList{}
@@ -161,7 +161,7 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) {
 	}
 
 	result, err := r.scaleUpControlPlane(ctx, controlPlane)
-	g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true}))
+	g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: scaleRequeueAfter}))
 	g.Expect(err).ToNot(HaveOccurred())
 
 	controlPlaneMachines := clusterv1.MachineList{}
@@ -285,7 +285,7 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing.
 
 		result, err := r.scaleDownControlPlane(context.Background(), controlPlane, controlPlane.Machines)
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true}))
+		g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: scaleRequeueAfter}))
 
 		controlPlaneMachines := clusterv1.MachineList{}
 		g.Expect(fakeClient.List(context.Background(), &controlPlaneMachines)).To(Succeed())
@@ -327,7 +327,7 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing.
 
 		result, err := r.scaleDownControlPlane(context.Background(), controlPlane, controlPlane.Machines)
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true}))
+		g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: scaleRequeueAfter}))
 
 		controlPlaneMachines := clusterv1.MachineList{}
 		g.Expect(fakeClient.List(context.Background(), &controlPlaneMachines)).To(Succeed())
diff --git a/controlplane/kubeadm/internal/controllers/upgrade_test.go b/controlplane/kubeadm/internal/controllers/upgrade_test.go
index c841da58c0c2..031416049cde 100644
--- a/controlplane/kubeadm/internal/controllers/upgrade_test.go
+++ b/controlplane/kubeadm/internal/controllers/upgrade_test.go
@@ -105,7 +105,7 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) {
 	controlPlane.InjectTestManagementCluster(r.managementCluster)
 
 	result, err := r.initializeControlPlane(ctx, controlPlane)
-	g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true}))
+	g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: initializationRequeueAfter}))
 	g.Expect(err).ToNot(HaveOccurred())
 
 	// initial setup
@@ -127,7 +127,7 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) {
 	needingUpgrade := collections.FromMachineList(initialMachine)
 	controlPlane.Machines = needingUpgrade
 	result, err = r.upgradeControlPlane(ctx, controlPlane, needingUpgrade)
-	g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true}))
+	g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: scaleRequeueAfter}))
 	g.Expect(err).ToNot(HaveOccurred())
 	bothMachines := &clusterv1.MachineList{}
 	g.Eventually(func(g Gomega) {
@@ -170,7 +170,7 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) {
 	// run upgrade the second time, expect we scale down
 	result, err = r.upgradeControlPlane(ctx, controlPlane, machinesRequireUpgrade)
 	g.Expect(err).ToNot(HaveOccurred())
-	g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true}))
+	g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: scaleRequeueAfter}))
 	finalMachine := &clusterv1.MachineList{}
 	g.Eventually(func(g Gomega) {
 		g.Expect(env.List(ctx, finalMachine, client.InNamespace(cluster.Namespace))).To(Succeed())
@@ -261,7 +261,7 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleDown(t *testing.T) {
 	controlPlane.Machines = needingUpgrade
 
 	result, err = r.upgradeControlPlane(ctx, controlPlane, needingUpgrade)
-	g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true}))
+	g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: scaleRequeueAfter}))
 	g.Expect(err).ToNot(HaveOccurred())
 	remainingMachines := &clusterv1.MachineList{}
 	g.Expect(fakeClient.List(ctx, remainingMachines, client.InNamespace(cluster.Namespace))).To(Succeed())
diff --git a/exp/internal/controllers/machinepool_controller_phases_test.go b/exp/internal/controllers/machinepool_controller_phases_test.go
index 8b89cd313611..2bf9c81e6762 100644
--- a/exp/internal/controllers/machinepool_controller_phases_test.go
+++ b/exp/internal/controllers/machinepool_controller_phases_test.go
@@ -147,7 +147,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		r.reconcilePhase(machinepool)
 
@@ -190,7 +190,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		r.reconcilePhase(machinepool)
 		g.Expect(machinepool.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePoolPhasePending))
@@ -230,7 +230,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		r.reconcilePhase(machinepool)
 		g.Expect(machinepool.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePoolPhaseProvisioning))
@@ -286,7 +286,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		// Set ReadyReplicas
 		machinepool.Status.Deprecated = &clusterv1.MachinePoolDeprecatedStatus{
@@ -358,7 +358,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		// Set ReadyReplicas
 		machinepool.Status.Deprecated = &clusterv1.MachinePoolDeprecatedStatus{
@@ -408,7 +408,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		r.reconcilePhase(machinepool)
 		g.Expect(machinepool.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePoolPhaseProvisioned))
@@ -461,7 +461,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		// Set ReadyReplicas
 		machinepool.Status.Deprecated = &clusterv1.MachinePoolDeprecatedStatus{
@@ -531,7 +531,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		// Set ReadyReplicas
 		machinepool.Status.Deprecated = &clusterv1.MachinePoolDeprecatedStatus{
@@ -607,7 +607,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		r.reconcilePhase(machinepool)
 		g.Expect(machinepool.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePoolPhaseDeleting))
@@ -681,7 +681,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		r.reconcilePhase(machinePool)
 		g.Expect(machinePool.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePoolPhaseRunning))
@@ -701,7 +701,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) {
 		// Reconcile again. The new bootstrap config should be used.
 		res, err = doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		r.reconcilePhase(machinePool)
 		g.Expect(*machinePool.Spec.Template.Spec.Bootstrap.DataSecretName).To(Equal("secret-data-new"))
@@ -777,7 +777,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		r.reconcilePhase(machinePool)
 		g.Expect(machinePool.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePoolPhaseRunning))
@@ -801,7 +801,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) {
 		g.Expect(err).ToNot(HaveOccurred())
 
 		// Controller should wait until bootstrap provider reports ready bootstrap config
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		r.reconcilePhase(machinePool)
 
@@ -1858,7 +1858,7 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		r.reconcilePhase(machinepool)
 
@@ -1926,7 +1926,7 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		r.reconcilePhase(machinepool)
 		g.Expect(machinepool.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePoolPhaseRunning))
@@ -1977,7 +1977,7 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		r.reconcilePhase(machinepool)
 
@@ -2024,7 +2024,7 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		r.reconcilePhase(machinepool)
 
@@ -2093,7 +2093,7 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) {
 
 		res, err := doReconcile(ctx, scope, reconcileNormalFuncsForTest(r))
 		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(res.Requeue).To(BeFalse())
+		g.Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
 
 		r.reconcilePhase(machinepool)
 
diff --git a/exp/runtime/internal/controllers/extensionconfig_controller.go b/exp/runtime/internal/controllers/extensionconfig_controller.go
index d85730c41c7a..277d7953a00b 100644
--- a/exp/runtime/internal/controllers/extensionconfig_controller.go
+++ b/exp/runtime/internal/controllers/extensionconfig_controller.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"
 	"strings"
+	"time"
 
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
@@ -112,10 +113,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
 	var errs []error
 	log := ctrl.LoggerFrom(ctx)
 
-	// Requeue events when the registry is not ready.
+	// Requeue after a delay when the registry is not ready.
 	// The registry will become ready after it is 'warmed up' by warmupRunnable.
 	if !r.RuntimeClient.IsReady() {
-		return ctrl.Result{Requeue: true}, nil
+		return ctrl.Result{RequeueAfter: time.Second * 5}, nil
 	}
 
 	extensionConfig := &runtimev1.ExtensionConfig{}
diff --git a/exp/runtime/internal/controllers/extensionconfig_controller_test.go b/exp/runtime/internal/controllers/extensionconfig_controller_test.go
index 49bfba2b8a3d..7b111b32d63d 100644
--- a/exp/runtime/internal/controllers/extensionconfig_controller_test.go
+++ b/exp/runtime/internal/controllers/extensionconfig_controller_test.go
@@ -94,8 +94,8 @@ func TestExtensionReconciler_Reconcile(t *testing.T) {
 		// Attempt to reconcile. This will be an error as the registry has not been warmed up at this point.
 		res, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(extensionConfig)})
 		g.Expect(err).ToNot(HaveOccurred())
-		// If the registry isn't warm the reconcile loop will return Requeue: True
-		g.Expect(res.Requeue).To(BeTrue())
+		// If the registry isn't warm the reconcile loop will return a RequeueAfter of 5 seconds.
+		g.Expect(res.RequeueAfter).To(Equal(time.Second * 5))
 	})
 
 	t.Run("successful reconcile and discovery on ExtensionConfig create", func(*testing.T) {
diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go
index b92ce679409a..0e999f58696a 100644
--- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go
+++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go
@@ -326,7 +326,8 @@ func (r *Reconciler) reconcile(ctx context.Context, logger logr.Logger, cluster
 		if len(errList) > 0 {
 			return ctrl.Result{}, kerrors.NewAggregate(errList)
 		}
-		return reconcile.Result{Requeue: true}, nil
+		minNextCheck := minDuration(nextCheckTimes)
+		return reconcile.Result{RequeueAfter: minNextCheck}, nil
 	}
 
 	if m.Spec.Remediation.TriggerIf.UnhealthyInRange == "" {
diff --git a/util/util.go b/util/util.go
index 684ef3d07b8c..7b5879830355 100644
--- a/util/util.go
+++ b/util/util.go
@@ -696,9 +696,10 @@ func LowestNonZeroResult(i, j ctrl.Result) ctrl.Result {
 		return j
 	case j.IsZero():
 		return i
-	case i.Requeue:
+	// The Requeue field is already deprecated, but we keep it for backward compatibility.
+	case i.Requeue: //nolint:staticcheck
 		return i
-	case j.Requeue:
+	case j.Requeue: //nolint:staticcheck
 		return j
 	case i.RequeueAfter < j.RequeueAfter:
 		return i
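
Note: the pattern applied throughout this diff is to stop relying on the deprecated ctrl.Result{Requeue: true} (deprecated in controller-runtime v0.21, per the removed golangci exclusion above) and instead either return a wrapped error, which controller-runtime retries with backoff, or set an explicit RequeueAfter duration. The sketch below is only an illustration of that pattern in a generic reconciler; the exampleReconciler type, the doSomething/ready helpers, and the 15-second interval are made up for this note and are not part of the change.

package example

import (
	"context"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

// exampleReconciler is illustrative only; it is not a type from this repository.
type exampleReconciler struct{}

func (r *exampleReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
	// Before: return ctrl.Result{Requeue: true}, nil (deprecated).

	// Option 1: return the error and let controller-runtime retry with backoff.
	if err := doSomething(ctx); err != nil {
		return ctrl.Result{}, err
	}

	// Option 2: requeue after an explicit interval instead of the Requeue flag.
	if !ready(ctx) {
		return ctrl.Result{RequeueAfter: 15 * time.Second}, nil
	}

	return ctrl.Result{}, nil
}

// doSomething and ready are placeholders standing in for real reconcile steps.
func doSomething(context.Context) error { return nil }
func ready(context.Context) bool        { return true }

Named constants such as scaleRequeueAfter and initializationRequeueAfter in this diff serve the same purpose as the literal interval in the sketch: they make the requeue behaviour explicit and assertable in tests.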