
Commit 6d18bea

Integrate UpdateStatus function
Signed-off-by: whitewindmills <[email protected]>
1 parent 2220124 commit 6d18bea

11 files changed, +223 -265 lines changed

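The commit replaces a hand-rolled status-update loop, repeated across the controllers below, with a call to helper.UpdateStatus from github.com/karmada-io/karmada/pkg/util/helper. The helper itself is not among the hunks shown on this page; only its call sites are. From those call sites its signature can be inferred: it takes a context, a controller-runtime client, the object to update, and a mutation callback, and it returns a controllerutil.OperationResult plus an error. The following is a minimal sketch of how such a helper could look, assuming it mirrors the contract of controller-runtime's controllerutil.CreateOrUpdate applied to the status subresource; it is an illustration to help read the diffs, not the actual karmada implementation.

package helper

import (
    "context"

    "k8s.io/apimachinery/pkg/api/equality"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// UpdateStatus (sketch): fetch the latest revision of obj, apply the mutation,
// and only call Status().Update when the mutation actually changed something.
func UpdateStatus(ctx context.Context, c client.Client, obj client.Object, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {
    // Work on the latest revision so the mutation applies on top of current state.
    if err := c.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
        return controllerutil.OperationResultNone, err
    }

    // Remember the pre-mutation state to detect no-op updates.
    existing := obj.DeepCopyObject()
    if err := f(); err != nil {
        return controllerutil.OperationResultNone, err
    }
    if equality.Semantic.DeepEqual(existing, obj) {
        // Nothing changed; skip the API call entirely.
        return controllerutil.OperationResultNone, nil
    }

    if err := c.Status().Update(ctx, obj); err != nil {
        return controllerutil.OperationResultNone, err
    }
    return controllerutil.OperationResultUpdatedStatusOnly, nil
}

Wrapped in retry.RetryOnConflict at each call site, a helper shaped like this removes the per-controller boilerplate of re-fetching the object after a conflict and issuing Status().Update by hand, which is exactly the repetition the diffs below delete.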

pkg/controllers/cronfederatedhpa/cronfederatedhpa_job.go

Lines changed: 76 additions & 79 deletions
@@ -32,6 +32,7 @@ import (
     "k8s.io/client-go/util/retry"
     "k8s.io/klog/v2"
     "sigs.k8s.io/controller-runtime/pkg/client"
+    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

     autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
     "github.com/karmada-io/karmada/pkg/metrics"
@@ -226,105 +227,101 @@ func (c *ScalingJob) addFailedExecutionHistory(
     cronFHPA *autoscalingv1alpha1.CronFederatedHPA, errMsg string) error {
     _, nextExecutionTime := c.scheduler.NextRun()

-    // Add success history record, return false if there is no such rule
-    addFailedHistoryFunc := func() bool {
-        exists := false
-        for index, rule := range cronFHPA.Status.ExecutionHistories {
-            if rule.RuleName != c.rule.Name {
-                continue
-            }
-            failedExecution := autoscalingv1alpha1.FailedExecution{
-                ScheduleTime:  rule.NextExecutionTime,
-                ExecutionTime: &metav1.Time{Time: time.Now()},
-                Message:       errMsg,
-            }
-            historyLimits := helper.GetCronFederatedHPAFailedHistoryLimits(c.rule)
-            if len(rule.FailedExecutions) > historyLimits-1 {
-                rule.FailedExecutions = rule.FailedExecutions[:historyLimits-1]
-            }
-            cronFHPA.Status.ExecutionHistories[index].FailedExecutions =
-                append([]autoscalingv1alpha1.FailedExecution{failedExecution}, rule.FailedExecutions...)
-            cronFHPA.Status.ExecutionHistories[index].NextExecutionTime = &metav1.Time{Time: nextExecutionTime}
-            exists = true
-            break
+    // Add failed history record
+    addFailedHistoryFunc := func(index int) {
+        failedExecution := autoscalingv1alpha1.FailedExecution{
+            ScheduleTime:  cronFHPA.Status.ExecutionHistories[index].NextExecutionTime,
+            ExecutionTime: &metav1.Time{Time: time.Now()},
+            Message:       errMsg,
         }
+        historyLimits := helper.GetCronFederatedHPAFailedHistoryLimits(c.rule)
+        if len(cronFHPA.Status.ExecutionHistories[index].FailedExecutions) > historyLimits-1 {
+            cronFHPA.Status.ExecutionHistories[index].FailedExecutions = cronFHPA.Status.ExecutionHistories[index].FailedExecutions[:historyLimits-1]
+        }
+        cronFHPA.Status.ExecutionHistories[index].FailedExecutions =
+            append([]autoscalingv1alpha1.FailedExecution{failedExecution}, cronFHPA.Status.ExecutionHistories[index].FailedExecutions...)
+        cronFHPA.Status.ExecutionHistories[index].NextExecutionTime = &metav1.Time{Time: nextExecutionTime}
+    }

-        return exists
+    index := c.findExecutionHistory(cronFHPA.Status.ExecutionHistories)
+    if index < 0 {
+        // The failed history does not exist, it means the rule deleted, so just ignore it.
+        return nil
     }

-    return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
-        // If this history not exist, it means the rule is suspended or deleted, so just ignore it.
-        if exists := addFailedHistoryFunc(); !exists {
+    var operationResult controllerutil.OperationResult
+    if err := retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
+        operationResult, err = helper.UpdateStatus(context.Background(), c.client, cronFHPA, func() error {
+            addFailedHistoryFunc(index)
             return nil
-        }
+        })
+        return err
+    }); err != nil {
+        klog.Errorf("Failed to add failed history record to CronFederatedHPA(%s/%s): %v", cronFHPA.Namespace, cronFHPA.Name, err)
+        return err
+    }

-        updateErr := c.client.Status().Update(context.Background(), cronFHPA)
-        if updateErr == nil {
-            klog.V(4).Infof("CronFederatedHPA(%s/%s) status has been updated successfully", cronFHPA.Namespace, cronFHPA.Name)
-            return nil
-        }
+    if operationResult == controllerutil.OperationResultUpdatedStatusOnly {
+        klog.V(4).Infof("CronFederatedHPA(%s/%s) status has been updated successfully", cronFHPA.Namespace, cronFHPA.Name)
+    }

-        updated := &autoscalingv1alpha1.CronFederatedHPA{}
-        if err = c.client.Get(context.Background(), client.ObjectKey{Namespace: cronFHPA.Namespace, Name: cronFHPA.Name}, updated); err == nil {
-            cronFHPA = updated
-        } else {
-            klog.Errorf("Get CronFederatedHPA(%s/%s) failed: %v", cronFHPA.Namespace, cronFHPA.Name, err)
+    return nil
+}
+
+// findExecutionHistory finds the history record, returns -1 if there is no such rule.
+func (c *ScalingJob) findExecutionHistory(histories []autoscalingv1alpha1.ExecutionHistory) int {
+    for index, rule := range histories {
+        if rule.RuleName == c.rule.Name {
+            return index
         }
-        return updateErr
-    })
+    }
+    return -1
 }

 func (c *ScalingJob) addSuccessExecutionHistory(
     cronFHPA *autoscalingv1alpha1.CronFederatedHPA,
     appliedReplicas, appliedMinReplicas, appliedMaxReplicas *int32) error {
     _, nextExecutionTime := c.scheduler.NextRun()

-    // Add success history record, return false if there is no such rule
-    addSuccessHistoryFunc := func() bool {
-        exists := false
-        for index, rule := range cronFHPA.Status.ExecutionHistories {
-            if rule.RuleName != c.rule.Name {
-                continue
-            }
-            successExecution := autoscalingv1alpha1.SuccessfulExecution{
-                ScheduleTime:       rule.NextExecutionTime,
-                ExecutionTime:      &metav1.Time{Time: time.Now()},
-                AppliedReplicas:    appliedReplicas,
-                AppliedMaxReplicas: appliedMaxReplicas,
-                AppliedMinReplicas: appliedMinReplicas,
-            }
-            historyLimits := helper.GetCronFederatedHPASuccessHistoryLimits(c.rule)
-            if len(rule.SuccessfulExecutions) > historyLimits-1 {
-                rule.SuccessfulExecutions = rule.SuccessfulExecutions[:historyLimits-1]
-            }
-            cronFHPA.Status.ExecutionHistories[index].SuccessfulExecutions =
-                append([]autoscalingv1alpha1.SuccessfulExecution{successExecution}, rule.SuccessfulExecutions...)
-            cronFHPA.Status.ExecutionHistories[index].NextExecutionTime = &metav1.Time{Time: nextExecutionTime}
-            exists = true
-            break
+    // Add success history record
+    addSuccessHistoryFunc := func(index int) {
+        successExecution := autoscalingv1alpha1.SuccessfulExecution{
+            ScheduleTime:       cronFHPA.Status.ExecutionHistories[index].NextExecutionTime,
+            ExecutionTime:      &metav1.Time{Time: time.Now()},
+            AppliedReplicas:    appliedReplicas,
+            AppliedMaxReplicas: appliedMaxReplicas,
+            AppliedMinReplicas: appliedMinReplicas,
+        }
+        historyLimits := helper.GetCronFederatedHPASuccessHistoryLimits(c.rule)
+        if len(cronFHPA.Status.ExecutionHistories[index].SuccessfulExecutions) > historyLimits-1 {
+            cronFHPA.Status.ExecutionHistories[index].SuccessfulExecutions = cronFHPA.Status.ExecutionHistories[index].SuccessfulExecutions[:historyLimits-1]
         }
+        cronFHPA.Status.ExecutionHistories[index].SuccessfulExecutions =
+            append([]autoscalingv1alpha1.SuccessfulExecution{successExecution}, cronFHPA.Status.ExecutionHistories[index].SuccessfulExecutions...)
+        cronFHPA.Status.ExecutionHistories[index].NextExecutionTime = &metav1.Time{Time: nextExecutionTime}
+    }

-        return exists
+    index := c.findExecutionHistory(cronFHPA.Status.ExecutionHistories)
+    if index < 0 {
+        // The success history does not exist, it means the rule deleted, so just ignore it.
+        return nil
     }

-    return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
-        // If this history not exist, it means the rule deleted, so just ignore it.
-        if exists := addSuccessHistoryFunc(); !exists {
+    var operationResult controllerutil.OperationResult
+    if err := retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
+        operationResult, err = helper.UpdateStatus(context.Background(), c.client, cronFHPA, func() error {
+            addSuccessHistoryFunc(index)
             return nil
-        }
+        })
+        return err
+    }); err != nil {
+        klog.Errorf("Failed to add success history record to CronFederatedHPA(%s/%s): %v", cronFHPA.Namespace, cronFHPA.Name, err)
+        return err
+    }

-        updateErr := c.client.Status().Update(context.Background(), cronFHPA)
-        if updateErr == nil {
-            klog.V(4).Infof("CronFederatedHPA(%s/%s) status has been updated successfully", cronFHPA.Namespace, cronFHPA.Name)
-            return err
-        }
+    if operationResult == controllerutil.OperationResultUpdatedStatusOnly {
+        klog.V(4).Infof("CronFederatedHPA(%s/%s) status has been updated successfully", cronFHPA.Namespace, cronFHPA.Name)
+    }

-        updated := &autoscalingv1alpha1.CronFederatedHPA{}
-        if err = c.client.Get(context.Background(), client.ObjectKey{Namespace: cronFHPA.Namespace, Name: cronFHPA.Name}, updated); err == nil {
-            cronFHPA = updated
-        } else {
-            klog.Errorf("Get CronFederatedHPA(%s/%s) failed: %v", cronFHPA.Namespace, cronFHPA.Name, err)
-        }
-        return updateErr
-    })
+    return nil
 }

pkg/controllers/execution/execution_controller.go

Lines changed: 4 additions & 11 deletions
@@ -258,18 +258,11 @@ func (c *Controller) updateAppliedCondition(work *workv1alpha1.Work, status meta
     }

     return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
-        meta.SetStatusCondition(&work.Status.Conditions, newWorkAppliedCondition)
-        updateErr := c.Status().Update(context.TODO(), work)
-        if updateErr == nil {
+        _, err = helper.UpdateStatus(context.Background(), c.Client, work, func() error {
+            meta.SetStatusCondition(&work.Status.Conditions, newWorkAppliedCondition)
             return nil
-        }
-        updated := &workv1alpha1.Work{}
-        if err = c.Get(context.TODO(), client.ObjectKey{Namespace: work.Namespace, Name: work.Name}, updated); err == nil {
-            work = updated
-        } else {
-            klog.Errorf("Failed to get the updated work(%s/%s), err: %v", work.Namespace, work.Name, err)
-        }
-        return updateErr
+        })
+        return err
     })
 }
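Every file below repeats the same shape as this hunk. Distilled into a standalone form (the function name and parameters here are illustrative, not code from the repository), the call pattern each controller now follows is:

package example

import (
    "context"

    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/util/retry"
    "sigs.k8s.io/controller-runtime/pkg/client"

    workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
    "github.com/karmada-io/karmada/pkg/util/helper"
)

// setWorkCondition mirrors the refactored updateAppliedCondition above: the
// conflict-retry loop stays in the controller, while re-fetching the object
// and issuing the status update move behind helper.UpdateStatus.
func setWorkCondition(ctx context.Context, c client.Client, work *workv1alpha1.Work, cond metav1.Condition) error {
    return retry.RetryOnConflict(retry.DefaultRetry, func() error {
        _, err := helper.UpdateStatus(ctx, c, work, func() error {
            // The mutation closure only touches the status; the helper decides
            // whether an API request is actually needed.
            meta.SetStatusCondition(&work.Status.Conditions, cond)
            return nil
        })
        return err
    })
}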

pkg/controllers/federatedresourcequota/federated_resource_quota_status_controller.go

Lines changed: 4 additions & 13 deletions
@@ -160,20 +160,11 @@ func (c *StatusController) collectQuotaStatus(quota *policyv1alpha1.FederatedRes
     }

     return retry.RetryOnConflict(retry.DefaultRetry, func() error {
-        quota.Status = *quotaStatus
-        updateErr := c.Status().Update(context.TODO(), quota)
-        if updateErr == nil {
+        _, err = helper.UpdateStatus(context.Background(), c.Client, quota, func() error {
+            quota.Status = *quotaStatus
             return nil
-        }
-
-        updated := &policyv1alpha1.FederatedResourceQuota{}
-        if err = c.Get(context.TODO(), client.ObjectKey{Namespace: quota.Namespace, Name: quota.Name}, updated); err == nil {
-            quota = updated
-        } else {
-            klog.Errorf("Failed to get updated federatedResourceQuota(%s): %v", klog.KObj(quota).String(), err)
-        }
-
-        return updateErr
+        })
+        return err
     })
 }

pkg/controllers/mcs/service_import_controller.go

Lines changed: 9 additions & 17 deletions
@@ -32,6 +32,7 @@ import (

     "github.com/karmada-io/karmada/pkg/events"
     "github.com/karmada-io/karmada/pkg/util"
+    "github.com/karmada-io/karmada/pkg/util/helper"
     "github.com/karmada-io/karmada/pkg/util/names"
 )

@@ -153,24 +154,15 @@ func (c *ServiceImportController) updateServiceStatus(svcImport *mcsv1alpha1.Ser
     }

     err := retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
-        derivedService.Status = corev1.ServiceStatus{
-            LoadBalancer: corev1.LoadBalancerStatus{
-                Ingress: ingress,
-            },
-        }
-        updateErr := c.Status().Update(context.TODO(), derivedService)
-        if updateErr == nil {
+        _, err = helper.UpdateStatus(context.Background(), c.Client, derivedService, func() error {
+            derivedService.Status = corev1.ServiceStatus{
+                LoadBalancer: corev1.LoadBalancerStatus{
+                    Ingress: ingress,
+                },
+            }
             return nil
-        }
-
-        updated := &corev1.Service{}
-        if err = c.Get(context.TODO(), client.ObjectKey{Namespace: derivedService.Namespace, Name: derivedService.Name}, updated); err == nil {
-            derivedService = updated
-        } else {
-            klog.Errorf("Failed to get updated service %s/%s: %v", derivedService.Namespace, derivedService.Name, err)
-        }
-
-        return updateErr
+        })
+        return err
     })

     if err != nil {

pkg/controllers/multiclusterservice/endpointslice_dispatch_controller.go

Lines changed: 4 additions & 11 deletions
@@ -127,18 +127,11 @@ func (c *EndpointsliceDispatchController) updateEndpointSliceDispatched(mcs *net
     }

     return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
-        meta.SetStatusCondition(&mcs.Status.Conditions, EndpointSliceCollected)
-        updateErr := c.Status().Update(context.TODO(), mcs)
-        if updateErr == nil {
+        _, err = helper.UpdateStatus(context.Background(), c.Client, mcs, func() error {
+            meta.SetStatusCondition(&mcs.Status.Conditions, EndpointSliceCollected)
             return nil
-        }
-        updated := &networkingv1alpha1.MultiClusterService{}
-        if err = c.Get(context.TODO(), client.ObjectKey{Namespace: mcs.Namespace, Name: mcs.Name}, updated); err == nil {
-            mcs = updated
-        } else {
-            klog.Errorf("Failed to get updated MultiClusterService %s/%s: %v", mcs.Namespace, mcs.Name, err)
-        }
-        return updateErr
+        })
+        return err
     })
 }

pkg/controllers/multiclusterservice/mcs_controller.go

Lines changed: 4 additions & 11 deletions
@@ -525,18 +525,11 @@ func (c *MCSController) updateMultiClusterServiceStatus(mcs *networkingv1alpha1.
     }

     return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
-        meta.SetStatusCondition(&mcs.Status.Conditions, serviceAppliedCondition)
-        updateErr := c.Status().Update(context.TODO(), mcs)
-        if updateErr == nil {
+        _, err = helper.UpdateStatus(context.Background(), c.Client, mcs, func() error {
+            meta.SetStatusCondition(&mcs.Status.Conditions, serviceAppliedCondition)
             return nil
-        }
-        updated := &networkingv1alpha1.MultiClusterService{}
-        if err = c.Get(context.TODO(), client.ObjectKey{Namespace: mcs.Namespace, Name: mcs.Name}, updated); err == nil {
-            mcs = updated
-        } else {
-            klog.Errorf("Failed to get updated MultiClusterService %s/%s: %v", mcs.Namespace, mcs.Name, err)
-        }
-        return updateErr
+        })
+        return err
     })
 }

pkg/controllers/remediation/remedy_controller.go

Lines changed: 7 additions & 21 deletions
@@ -18,10 +18,8 @@ package remediation

 import (
     "context"
-    "reflect"

     apierrors "k8s.io/apimachinery/pkg/api/errors"
-    "k8s.io/apimachinery/pkg/types"
     "k8s.io/client-go/util/retry"
     "k8s.io/klog/v2"
     controllerruntime "sigs.k8s.io/controller-runtime"
@@ -33,6 +31,7 @@ import (
     clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
     remedyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/remedy/v1alpha1"
     "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
+    "github.com/karmada-io/karmada/pkg/util/helper"
 )

 // ControllerName is the controller name that will be used when reporting events.
@@ -70,26 +69,13 @@ func (c *RemedyController) Reconcile(ctx context.Context, req controllerruntime.
     }

     actions := calculateActions(clusterRelatedRemedies, cluster)
-    err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
-        if reflect.DeepEqual(actions, cluster.Status.RemedyActions) {
+    if err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
+        _, err = helper.UpdateStatus(ctx, c.Client, cluster, func() error {
+            cluster.Status.RemedyActions = actions
             return nil
-        }
-        cluster.Status.RemedyActions = actions
-        updateErr := c.Client.Status().Update(ctx, cluster)
-        if updateErr == nil {
-            return nil
-        }
-
-        updatedCluster := &clusterv1alpha1.Cluster{}
-        err = c.Client.Get(ctx, types.NamespacedName{Name: cluster.Name}, updatedCluster)
-        if err == nil {
-            cluster = updatedCluster
-        } else {
-            klog.Errorf("Failed to get updated cluster(%s): %v", cluster.Name, err)
-        }
-        return updateErr
-    })
-    if err != nil {
+        })
+        return err
+    }); err != nil {
         klog.Errorf("Failed to sync cluster(%s) remedy actions: %v", cluster.Name, err)
         return controllerruntime.Result{}, err
     }

pkg/controllers/status/cluster_status_controller.go

Lines changed: 4 additions & 12 deletions
@@ -277,19 +277,11 @@ func (c *ClusterStatusController) updateStatusIfNeeded(cluster *clusterv1alpha1.
     if !equality.Semantic.DeepEqual(cluster.Status, currentClusterStatus) {
         klog.V(4).Infof("Start to update cluster status: %s", cluster.Name)
         err := retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
-            cluster.Status = currentClusterStatus
-            updateErr := c.Status().Update(context.TODO(), cluster)
-            if updateErr == nil {
+            _, err = helper.UpdateStatus(context.Background(), c.Client, cluster, func() error {
+                cluster.Status = currentClusterStatus
                 return nil
-            }
-
-            updated := &clusterv1alpha1.Cluster{}
-            if err = c.Get(context.TODO(), client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, updated); err == nil {
-                cluster = updated
-            } else {
-                klog.Errorf("Failed to get updated cluster %s: %v", cluster.Name, err)
-            }
-            return updateErr
+            })
+            return err
         })
         if err != nil {
             klog.Errorf("Failed to update health status of the member cluster: %v, err is : %v", cluster.Name, err)

0 commit comments
