@@ -73,7 +73,7 @@ var _ = framework.SerialDescribe("workload rebalancer testing", func() {
 		// sort member clusters in increasing order
 		targetClusters = framework.ClusterNames()[0:2]
 		sort.Strings(targetClusters)
-		taint = corev1.Taint{Key: "workload-rebalancer-test", Effect: corev1.TaintEffectNoExecute}
+		taint = corev1.Taint{Key: "workload-rebalancer-test-" + randomStr, Effect: corev1.TaintEffectNoExecute}
 
 		deploy = helper.NewDeployment(namespace, deployName)
 		notExistDeploy = helper.NewDeployment(namespace, notExistDeployName)
@@ -176,7 +176,116 @@ var _ = framework.SerialDescribe("workload rebalancer testing", func() {
 				checkWorkloadRebalancerResult(expectedWorkloads)
 			})
 
-			ginkgo.By("step4: udpate WorkloadRebalancer spec workloads", func() {
+			ginkgo.By("step4: update WorkloadRebalancer spec workloads", func() {
+				// update workload list from {deploy, clusterrole, notExistDeployObjRef} to {clusterroleObjRef, newAddedDeployObjRef}
+				updatedWorkloads := []appsv1alpha1.ObjectReference{clusterroleObjRef, newAddedDeployObjRef}
+				framework.UpdateWorkloadRebalancer(karmadaClient, rebalancerName, updatedWorkloads)
+
+				expectedWorkloads := []appsv1alpha1.ObservedWorkload{
+					{Workload: deployObjRef, Result: appsv1alpha1.RebalanceSuccessful},
+					{Workload: newAddedDeployObjRef, Result: appsv1alpha1.RebalanceSuccessful},
+					{Workload: clusterroleObjRef, Result: appsv1alpha1.RebalanceSuccessful},
+				}
+				framework.WaitRebalancerObservedWorkloads(karmadaClient, rebalancerName, expectedWorkloads)
+			})
+		})
+	})
+
+	// 2. static weight scheduling
+	ginkgo.Context("static weight schedule type", func() {
+		ginkgo.BeforeEach(func() {
+			policy.Spec.Placement.ReplicaScheduling = helper.NewStaticWeightPolicyStrategy(targetClusters, []int64{2, 1})
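+			// tolerate the test taint for 0 seconds so that replicas are evicted as soon as the cluster is tainted in step2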
+			policy.Spec.Placement.ClusterTolerations = []corev1.Toleration{{
+				Key:               taint.Key,
+				Effect:            taint.Effect,
+				Operator:          corev1.TolerationOpExists,
+				TolerationSeconds: pointer.Int64(0),
+			}}
+		})
+
+		ginkgo.It("reschedule when policy is static weight schedule type", func() {
+			ginkgo.By("step1: check first schedule result", func() {
+				// after first schedule, deployment is assigned as 2:1 in target clusters and clusterrole propagated to each cluster.
+				framework.AssertBindingScheduledClusters(karmadaClient, namespace, deployBindingName, [][]string{targetClusters})
+				framework.WaitClusterRolePresentOnClustersFitWith(targetClusters, clusterroleName, func(_ *rbacv1.ClusterRole) bool { return true })
+			})
+
+			ginkgo.By("step2: add taints to cluster to mock cluster failure", func() {
+				err := taintCluster(controlPlaneClient, targetClusters[0], taint)
+				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+
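+				// the deployment should now be scheduled only to the remaining, untainted target cluster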
+				framework.AssertBindingScheduledClusters(karmadaClient, namespace, deployBindingName, [][]string{targetClusters[1:]})
+				framework.WaitGracefulEvictionTasksDone(karmadaClient, namespace, deployBindingName)
+
+				err = recoverTaintedCluster(controlPlaneClient, targetClusters[0], taint)
+				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+			})
+
+			ginkgo.By("step3: trigger a reschedule by WorkloadRebalancer", func() {
+				framework.CreateWorkloadRebalancer(karmadaClient, rebalancer)
+				ginkgo.DeferCleanup(func() {
+					framework.RemoveWorkloadRebalancer(karmadaClient, rebalancerName)
+				})
+
+				// actual replicas propagation of deployment should reschedule back to `targetClusters`,
+				// which represents rebalancer changed deployment replicas propagation.
+				framework.AssertBindingScheduledClusters(karmadaClient, namespace, deployBindingName, [][]string{targetClusters})
+
+				expectedWorkloads := []appsv1alpha1.ObservedWorkload{
+					{Workload: deployObjRef, Result: appsv1alpha1.RebalanceSuccessful},
+					{Workload: notExistDeployObjRef, Result: appsv1alpha1.RebalanceFailed, Reason: appsv1alpha1.RebalanceObjectNotFound},
+					{Workload: clusterroleObjRef, Result: appsv1alpha1.RebalanceSuccessful},
+				}
+				checkWorkloadRebalancerResult(expectedWorkloads)
+			})
+
+			ginkgo.By("step4: update WorkloadRebalancer spec workloads", func() {
+				// update workload list from {deploy, clusterrole, notExistDeployObjRef} to {clusterroleObjRef, newAddedDeployObjRef}
+				updatedWorkloads := []appsv1alpha1.ObjectReference{clusterroleObjRef, newAddedDeployObjRef}
+				framework.UpdateWorkloadRebalancer(karmadaClient, rebalancerName, updatedWorkloads)
+
+				expectedWorkloads := []appsv1alpha1.ObservedWorkload{
+					{Workload: deployObjRef, Result: appsv1alpha1.RebalanceSuccessful},
+					{Workload: newAddedDeployObjRef, Result: appsv1alpha1.RebalanceSuccessful},
+					{Workload: clusterroleObjRef, Result: appsv1alpha1.RebalanceSuccessful},
+				}
+				framework.WaitRebalancerObservedWorkloads(karmadaClient, rebalancerName, expectedWorkloads)
+			})
+		})
+	})
+
+	// 3. aggregated scheduling
+	ginkgo.Context("aggregated schedule type", func() {
+		ginkgo.BeforeEach(func() {
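+			// aggregated division schedules replicas into as few clusters as possible, so the deployment is expected to land on a single target cluster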
+			policy.Spec.Placement.ReplicaScheduling = &policyv1alpha1.ReplicaSchedulingStrategy{
+				ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
+				ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
+			}
+		})
+
+		ginkgo.It("reschedule when policy is aggregated schedule type", func() {
+			ginkgo.By("step1: check first schedule result", func() {
+				// after first schedule, deployment is assigned to exactly one of the target clusters while clusterrole propagated to each cluster.
+				possibleScheduledClusters := getPossibleClustersInAggregatedScheduling(targetClusters)
+				framework.AssertBindingScheduledClusters(karmadaClient, namespace, deployBindingName, possibleScheduledClusters)
+				framework.WaitClusterRolePresentOnClustersFitWith(targetClusters, clusterroleName, func(_ *rbacv1.ClusterRole) bool { return true })
+			})
+
+			ginkgo.By("step2: trigger a reschedule by WorkloadRebalancer", func() {
+				framework.CreateWorkloadRebalancer(karmadaClient, rebalancer)
+				ginkgo.DeferCleanup(func() {
+					framework.RemoveWorkloadRebalancer(karmadaClient, rebalancerName)
+				})
+
+				expectedWorkloads := []appsv1alpha1.ObservedWorkload{
+					{Workload: deployObjRef, Result: appsv1alpha1.RebalanceSuccessful},
+					{Workload: notExistDeployObjRef, Result: appsv1alpha1.RebalanceFailed, Reason: appsv1alpha1.RebalanceObjectNotFound},
+					{Workload: clusterroleObjRef, Result: appsv1alpha1.RebalanceSuccessful},
+				}
+				checkWorkloadRebalancerResult(expectedWorkloads)
+			})
+
+			ginkgo.By("step3: update WorkloadRebalancer spec workloads", func() {
 				// update workload list from {deploy, clusterrole, notExistDeployObjRef} to {clusterroleObjRef, newAddedDeployObjRef}
 				updatedWorkloads := []appsv1alpha1.ObjectReference{clusterroleObjRef, newAddedDeployObjRef}
 				framework.UpdateWorkloadRebalancer(karmadaClient, rebalancerName, updatedWorkloads)
@@ -200,3 +309,11 @@ func bindingHasRescheduled(spec workv1alpha2.ResourceBindingSpec, status workv1a
 	}
 	return true
 }
+
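+// getPossibleClustersInAggregatedScheduling returns each target cluster as a standalone candidate result,
+// since aggregated scheduling is expected to place all replicas on exactly one of the target clusters.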
+func getPossibleClustersInAggregatedScheduling(targetClusters []string) [][]string {
+	possibleScheduledClusters := make([][]string, 0)
+	for _, cluster := range targetClusters {
+		possibleScheduledClusters = append(possibleScheduledClusters, []string{cluster})
+	}
+	return possibleScheduledClusters
+}