Skip to content

Commit 92db2d1

Browse files
authored
Merge pull request #6910 from RainbowMango/pr_add_comments_to_descheduler
Add more comments and some readability improvements to karmada-descheduler
2 parents 600c534 + 96937f7 commit 92db2d1

File tree

3 files changed

+47
-33
lines changed

3 files changed

+47
-33
lines changed

pkg/descheduler/core/helper.go

Lines changed: 21 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ import (
3434
// SchedulingResultHelper is a helper to wrap the ResourceBinding and its target cluster result.
3535
type SchedulingResultHelper struct {
3636
*workv1alpha2.ResourceBinding
37-
TargetClusters []*TargetClusterWrapper
37+
TargetClusterReplicaStatus []*ClusterReplicaStatus
3838
}
3939

4040
// NewSchedulingResultHelper returns a new SchedulingResultHelper based on ResourceBinding.
@@ -43,22 +43,22 @@ func NewSchedulingResultHelper(binding *workv1alpha2.ResourceBinding) *Schedulin
4343
readyReplicas := getReadyReplicas(binding)
4444
for i := range binding.Spec.Clusters {
4545
targetCluster := &binding.Spec.Clusters[i]
46-
targetClusterHelper := &TargetClusterWrapper{
46+
targetClusterReplicaStatus := &ClusterReplicaStatus{
4747
ClusterName: targetCluster.Name,
4848
Spec: targetCluster.Replicas,
4949
}
5050
if ready, exist := readyReplicas[targetCluster.Name]; exist {
51-
targetClusterHelper.Ready = ready
51+
targetClusterReplicaStatus.Ready = ready
5252
} else {
53-
targetClusterHelper.Ready = estimatorclient.UnauthenticReplica
53+
targetClusterReplicaStatus.Ready = estimatorclient.UnauthenticReplica
5454
}
55-
h.TargetClusters = append(h.TargetClusters, targetClusterHelper)
55+
h.TargetClusterReplicaStatus = append(h.TargetClusterReplicaStatus, targetClusterReplicaStatus)
5656
}
5757
return h
5858
}
5959

6060
// FillUnschedulableReplicas will detect the unschedulable replicas of member cluster by calling
61-
// unschedulable replica estimators and fill the unschedulable field of TargetClusterWrapper.
61+
// unschedulable replica estimators and fill the unschedulable field of ClusterReplicaStatus.
6262
func (h *SchedulingResultHelper) FillUnschedulableReplicas(unschedulableThreshold time.Duration) {
6363
reference := &h.Spec.Resource
6464
undesiredClusters, undesiredClusterNames := h.GetUndesiredClusters()
@@ -96,10 +96,10 @@ func (h *SchedulingResultHelper) FillUnschedulableReplicas(unschedulableThreshol
9696
}
9797

9898
// GetUndesiredClusters returns the clusters whose ready replicas have not reached the desired (spec) replicas, along with their names.
99-
func (h *SchedulingResultHelper) GetUndesiredClusters() ([]*TargetClusterWrapper, []string) {
100-
var clusters []*TargetClusterWrapper
99+
func (h *SchedulingResultHelper) GetUndesiredClusters() ([]*ClusterReplicaStatus, []string) {
100+
var clusters []*ClusterReplicaStatus
101101
var names []string
102-
for _, cluster := range h.TargetClusters {
102+
for _, cluster := range h.TargetClusterReplicaStatus {
103103
if cluster.Ready < cluster.Spec {
104104
clusters = append(clusters, cluster)
105105
names = append(names, cluster.ClusterName)
@@ -108,12 +108,18 @@ func (h *SchedulingResultHelper) GetUndesiredClusters() ([]*TargetClusterWrapper
108108
return clusters, names
109109
}
110110

111-
// TargetClusterWrapper is a wrapper to wrap the target cluster name, spec replicas,
112-
// ready replicas and unschedulable replicas.
113-
type TargetClusterWrapper struct {
114-
ClusterName string
115-
Spec int32
116-
Ready int32
111+
// ClusterReplicaStatus represents the replica status of a workload on a particular cluster.
112+
type ClusterReplicaStatus struct {
113+
// ClusterName is the name of the member cluster targeted by ResourceBinding.
114+
ClusterName string
115+
116+
// Spec is the desired replicas assigned to this cluster (from ResourceBinding.spec.clusters).
117+
Spec int32
118+
119+
// Ready is the number of replicas currently reported as ready in this cluster.
120+
Ready int32
121+
122+
// Unschedulable is the estimated count of replicas that cannot be scheduled in this cluster.
117123
Unschedulable int32
118124
}
119125

pkg/descheduler/core/helper_test.go

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ func TestNewSchedulingResultHelper(t *testing.T) {
5959
},
6060
},
6161
expected: &SchedulingResultHelper{
62-
TargetClusters: []*TargetClusterWrapper{
62+
TargetClusterReplicaStatus: []*ClusterReplicaStatus{
6363
{ClusterName: "cluster1", Spec: 3, Ready: 2},
6464
{ClusterName: "cluster2", Spec: 2, Ready: 1},
6565
},
@@ -88,7 +88,7 @@ func TestNewSchedulingResultHelper(t *testing.T) {
8888
},
8989
},
9090
expected: &SchedulingResultHelper{
91-
TargetClusters: []*TargetClusterWrapper{
91+
TargetClusterReplicaStatus: []*ClusterReplicaStatus{
9292
{ClusterName: "cluster1", Spec: 3, Ready: 2},
9393
{ClusterName: "cluster2", Spec: 2, Ready: client.UnauthenticReplica},
9494
},
@@ -112,7 +112,7 @@ func TestSchedulingResultHelper_FillUnschedulableReplicas(t *testing.T) {
112112
name string
113113
helper *SchedulingResultHelper
114114
mockEstimator *mockUnschedulableReplicaEstimator
115-
expected []*TargetClusterWrapper
115+
expected []*ClusterReplicaStatus
116116
expectedErrLog string
117117
}{
118118
{
@@ -128,13 +128,13 @@ func TestSchedulingResultHelper_FillUnschedulableReplicas(t *testing.T) {
128128
},
129129
},
130130
},
131-
TargetClusters: []*TargetClusterWrapper{
131+
TargetClusterReplicaStatus: []*ClusterReplicaStatus{
132132
{ClusterName: "cluster1", Spec: 3, Ready: 2},
133133
{ClusterName: "cluster2", Spec: 2, Ready: 1},
134134
},
135135
},
136136
mockEstimator: &mockUnschedulableReplicaEstimator{},
137-
expected: []*TargetClusterWrapper{
137+
expected: []*ClusterReplicaStatus{
138138
{ClusterName: "cluster1", Spec: 3, Ready: 2, Unschedulable: 1},
139139
{ClusterName: "cluster2", Spec: 2, Ready: 1, Unschedulable: 1},
140140
},
@@ -152,12 +152,12 @@ func TestSchedulingResultHelper_FillUnschedulableReplicas(t *testing.T) {
152152
},
153153
},
154154
},
155-
TargetClusters: []*TargetClusterWrapper{
155+
TargetClusterReplicaStatus: []*ClusterReplicaStatus{
156156
{ClusterName: "cluster1", Spec: 3, Ready: 2},
157157
},
158158
},
159159
mockEstimator: &mockUnschedulableReplicaEstimator{shouldError: true},
160-
expected: []*TargetClusterWrapper{
160+
expected: []*ClusterReplicaStatus{
161161
{ClusterName: "cluster1", Spec: 3, Ready: 2, Unschedulable: 0},
162162
},
163163
expectedErrLog: "Max cluster unschedulable replicas error: mock error",
@@ -175,13 +175,13 @@ func TestSchedulingResultHelper_FillUnschedulableReplicas(t *testing.T) {
175175
},
176176
},
177177
},
178-
TargetClusters: []*TargetClusterWrapper{
178+
TargetClusterReplicaStatus: []*ClusterReplicaStatus{
179179
{ClusterName: "cluster1", Spec: 3, Ready: 2},
180180
{ClusterName: "cluster2", Spec: 2, Ready: 1},
181181
},
182182
},
183183
mockEstimator: &mockUnschedulableReplicaEstimator{unauthenticCluster: "cluster2"},
184-
expected: []*TargetClusterWrapper{
184+
expected: []*ClusterReplicaStatus{
185185
{ClusterName: "cluster1", Spec: 3, Ready: 2, Unschedulable: 1},
186186
{ClusterName: "cluster2", Spec: 2, Ready: 1, Unschedulable: 0},
187187
},
@@ -195,8 +195,8 @@ func TestSchedulingResultHelper_FillUnschedulableReplicas(t *testing.T) {
195195

196196
tt.helper.FillUnschedulableReplicas(time.Minute)
197197

198-
if !reflect.DeepEqual(tt.helper.TargetClusters, tt.expected) {
199-
t.Errorf("FillUnschedulableReplicas() = %v, want %v", tt.helper.TargetClusters, tt.expected)
198+
if !reflect.DeepEqual(tt.helper.TargetClusterReplicaStatus, tt.expected) {
199+
t.Errorf("FillUnschedulableReplicas() = %v, want %v", tt.helper.TargetClusterReplicaStatus, tt.expected)
200200
}
201201
})
202202
}
@@ -206,19 +206,19 @@ func TestSchedulingResultHelper_GetUndesiredClusters(t *testing.T) {
206206
tests := []struct {
207207
name string
208208
helper *SchedulingResultHelper
209-
expectedClusters []*TargetClusterWrapper
209+
expectedClusters []*ClusterReplicaStatus
210210
expectedNames []string
211211
}{
212212
{
213213
name: "Mixed desired and undesired clusters",
214214
helper: &SchedulingResultHelper{
215-
TargetClusters: []*TargetClusterWrapper{
215+
TargetClusterReplicaStatus: []*ClusterReplicaStatus{
216216
{ClusterName: "cluster1", Spec: 3, Ready: 2},
217217
{ClusterName: "cluster2", Spec: 2, Ready: 2},
218218
{ClusterName: "cluster3", Spec: 4, Ready: 1},
219219
},
220220
},
221-
expectedClusters: []*TargetClusterWrapper{
221+
expectedClusters: []*ClusterReplicaStatus{
222222
{ClusterName: "cluster1", Spec: 3, Ready: 2},
223223
{ClusterName: "cluster3", Spec: 4, Ready: 1},
224224
},
@@ -227,7 +227,7 @@ func TestSchedulingResultHelper_GetUndesiredClusters(t *testing.T) {
227227
{
228228
name: "All clusters desired",
229229
helper: &SchedulingResultHelper{
230-
TargetClusters: []*TargetClusterWrapper{
230+
TargetClusterReplicaStatus: []*ClusterReplicaStatus{
231231
{ClusterName: "cluster1", Spec: 2, Ready: 2},
232232
{ClusterName: "cluster2", Spec: 3, Ready: 3},
233233
},

pkg/descheduler/descheduler.go

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -199,19 +199,25 @@ func (d *Descheduler) worker(key util.QueueKey) error {
199199
}
200200

201201
h.FillUnschedulableReplicas(d.unschedulableThreshold)
202-
klog.V(3).Infof("Unschedulable result of resource(%s): %v", namespacedName, h.TargetClusters)
202+
klog.V(3).Infof("Unschedulable result of resource(%s): %v", namespacedName, h.TargetClusterReplicaStatus)
203203

204204
return d.updateScheduleResult(h)
205205
}
206206

207+
// updateScheduleResult adjusts per-cluster replica targets based on unschedulable estimates
208+
// and updates the corresponding ResourceBinding. It also records an event summarizing
209+
// how many replicas were descheduled per cluster and in total.
207210
func (d *Descheduler) updateScheduleResult(h *core.SchedulingResultHelper) error {
208211
unschedulableSum := int32(0)
209212
message := descheduleSuccessMessage
210213
binding := h.ResourceBinding.DeepCopy()
211-
for i, cluster := range h.TargetClusters {
214+
for i, cluster := range h.TargetClusterReplicaStatus {
215+
// Only act when estimator reports some unschedulable replicas and the
216+
// desired replicas (Spec) can cover that reduction.
212217
if cluster.Unschedulable > 0 && cluster.Spec >= cluster.Unschedulable {
213218
target := cluster.Spec - cluster.Unschedulable
214-
// The target cluster replicas must not be less than ready replicas.
219+
// Ensure we never reduce below the currently ready replicas (safety cap),
220+
// but only if ready <= desired Spec to avoid increasing above Spec.
215221
if target < cluster.Ready && cluster.Ready <= cluster.Spec {
216222
target = cluster.Ready
217223
}
@@ -222,12 +228,14 @@ func (d *Descheduler) updateScheduleResult(h *core.SchedulingResultHelper) error
222228
}
223229
}
224230
if unschedulableSum == 0 {
231+
// Nothing changed; skip API update and event recording logic.
225232
return nil
226233
}
227234
message += fmt.Sprintf(", %d total descheduled replica(s)", unschedulableSum)
228235

229236
var err error
230237
defer func() {
238+
// Always record an event for observability, including failure reasons if any.
231239
d.recordDescheduleResultEventForResourceBinding(binding, message, err)
232240
}()
233241

0 commit comments

Comments
 (0)