Commit 8e5a594

Author: Ryan Zhang
Commit message: address comments
Signed-off-by: Ryan Zhang <[email protected]>
1 parent: ef3501e

6 files changed: +89 -87 lines changed

hack/Azure/property-based-scheduling.md (2 additions & 2 deletions)

@@ -424,6 +424,6 @@ All the AKS clusters in the resource group will be removed.
 Congrats! We hope that property-based scheduling (preview) has improved your overall Fleet experience. If you have any questions, feedback, or concerns, please raise [a GitHub issue](https://github.com/Azure/fleet/issues).
 
 Aside from property-based scheduling, Fleet offers many other scheduling features that are useful in a
-multi-cluster environment; check out the [How-to Guide: Using the Fleet `ClusterResourcePlacement` API](https://github.com/Azure/fleet/tree/main/docs/howtos/crp.md) for more information.
+multi-cluster environment; check out the [How-to Guide: Using the Fleet `ClusterResourcePlacement` API](https://kubefleet.dev/docs/how-tos/crp/) for more information.
 
-You can also review Fleet's [source code](https://github.com/Azure/fleet) or review its [documentation](https://github.com/Azure/fleet/tree/main/docs) on GitHub.
+You can also review Fleet's [source code](https://github.com/kubefleet-dev/kubefleet/) or review its [documentation](https://kubefleet.dev/docs/) on GitHub.

pkg/controllers/clusterresourceplacement/controller_test.go (6 additions & 6 deletions)

@@ -749,20 +749,20 @@ func TestGetOrCreateClusterSchedulingPolicySnapshot(t *testing.T) {
 			}
 			got, err := r.getOrCreateSchedulingPolicySnapshot(ctx, crp, int(limit))
 			if err != nil {
-				t.Fatalf("failed to getOrCreateClusterSchedulingPolicySnapshot: %v", err)
+				t.Fatalf("failed to getOrCreateSchedulingPolicySnapshot: %v", err)
 			}
 
 			// Convert interface to concrete type for comparison
 			gotSnapshot, ok := got.(*fleetv1beta1.ClusterSchedulingPolicySnapshot)
 			if !ok {
-				t.Fatalf("expected *ClusterSchedulingPolicySnapshot, got %T", got)
+				t.Fatalf("getOrCreateSchedulingPolicySnapshot() got %T, want *ClusterSchedulingPolicySnapshot", got)
 			}
 
 			options := []cmp.Option{
 				cmpopts.IgnoreFields(metav1.ObjectMeta{}, "ResourceVersion"),
 			}
 			if diff := cmp.Diff(tc.wantPolicySnapshots[tc.wantLatestSnapshotIndex], *gotSnapshot, options...); diff != "" {
-				t.Errorf("getOrCreateClusterSchedulingPolicySnapshot() mismatch (-want, +got):\n%s", diff)
+				t.Errorf("getOrCreateSchedulingPolicySnapshot() mismatch (-want, +got):\n%s", diff)
 			}
 			clusterPolicySnapshotList := &fleetv1beta1.ClusterSchedulingPolicySnapshotList{}
 			if err := fakeClient.List(ctx, clusterPolicySnapshotList); err != nil {
@@ -2712,10 +2712,10 @@ func TestGetOrCreateClusterResourceSnapshot(t *testing.T) {
 			resourceSnapshotResourceSizeLimit = tc.selectedResourcesSizeLimit
 			res, got, err := r.getOrCreateResourceSnapshot(ctx, crp, tc.envelopeObjCount, tc.resourceSnapshotSpec, int(limit))
 			if err != nil {
-				t.Fatalf("failed to handle getOrCreateClusterResourceSnapshot: %v", err)
+				t.Fatalf("failed to handle getOrCreateResourceSnapshot: %v", err)
 			}
 			if res.Requeue != tc.wantRequeue {
-				t.Fatalf("getOrCreateClusterResourceSnapshot() got Requeue %v, want %v", res.Requeue, tc.wantRequeue)
+				t.Fatalf("getOrCreateResourceSnapshot() got Requeue %v, want %v", res.Requeue, tc.wantRequeue)
 			}
 
 			options := []cmp.Option{
@@ -2726,7 +2726,7 @@
 			}
 			if tc.wantRequeue {
 				if res.RequeueAfter <= 0 {
-					t.Fatalf("getOrCreateClusterResourceSnapshot() got RequeueAfter %v, want greater than zero value", res.RequeueAfter)
+					t.Fatalf("getOrCreateResourceSnapshot() got RequeueAfter %v, want greater than zero value", res.RequeueAfter)
 				}
 			}
 			annotationOption := cmp.Transformer("NormalizeAnnotations", func(m map[string]string) map[string]string {
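These edits align the test failure messages with the renamed helpers (getOrCreateSchedulingPolicySnapshot, getOrCreateResourceSnapshot) and adopt the standard Go got/want phrasing. A minimal, self-contained sketch of that convention and of the go-cmp options the test relies on; the snapshot type and getOrCreateSnapshot helper below are placeholders for illustration, not Fleet's actual API:

```go
package example

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// snapshot is a placeholder for a Fleet snapshot type; only ObjectMeta matters here.
type snapshot struct {
	metav1.ObjectMeta
}

// getOrCreateSnapshot is a hypothetical helper that returns an interface value,
// mirroring the shape of getOrCreateSchedulingPolicySnapshot in this commit.
func getOrCreateSnapshot() (any, error) {
	return &snapshot{ObjectMeta: metav1.ObjectMeta{Name: "snap-1", ResourceVersion: "42"}}, nil
}

func TestGetOrCreateSnapshot(t *testing.T) {
	got, err := getOrCreateSnapshot()
	if err != nil {
		// Name the helper under test so the failure output is self-describing.
		t.Fatalf("failed to getOrCreateSnapshot: %v", err)
	}
	// Type-assert the interface value; "got %T, want X" matches the phrasing
	// this commit adopts for the same check.
	gotSnapshot, ok := got.(*snapshot)
	if !ok {
		t.Fatalf("getOrCreateSnapshot() got %T, want *snapshot", got)
	}
	want := snapshot{ObjectMeta: metav1.ObjectMeta{Name: "snap-1"}}
	// Ignore server-managed fields such as ResourceVersion when diffing.
	options := []cmp.Option{cmpopts.IgnoreFields(metav1.ObjectMeta{}, "ResourceVersion")}
	if diff := cmp.Diff(want, *gotSnapshot, options...); diff != "" {
		t.Errorf("getOrCreateSnapshot() mismatch (-want, +got):\n%s", diff)
	}
}
```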

pkg/controllers/clusterresourceplacement/placement_status.go (11 additions & 9 deletions)

@@ -191,7 +191,7 @@ func (r *Reconciler) appendScheduledResourcePlacementStatuses(
 		// The allRPS slice has been pre-allocated, so the append call will never produce a new
 		// slice; here, however, Fleet will still return the old slice just in case.
 		allRPS = append(allRPS, *rps)
-		klog.V(2).InfoS("Populated the resource placement status for the scheduled cluster", "clusterResourcePlacement", klog.KObj(placementObj), "cluster", clusterDecision.ClusterName, "resourcePlacementStatus", rps)
+		klog.V(2).InfoS("Populated the resource placement status for the scheduled cluster", "placement", klog.KObj(placementObj), "cluster", clusterDecision.ClusterName, "resourcePlacementStatus", rps)
 	}
 
 	return allRPS, rpsSetCondTypeCounter, nil
@@ -254,9 +254,10 @@ func setPlacementConditions(
 		}
 	}
 
-	klog.V(2).InfoS("Populated the placement conditions", "clusterResourcePlacement", klog.KObj(placementObj))
+	klog.V(2).InfoS("Populated the placement conditions", "placement", klog.KObj(placementObj))
 }
 
+// TODO: make this work with RP
 func (r *Reconciler) buildClusterResourceBindings(ctx context.Context, placementObj fleetv1beta1.PlacementObj, latestSchedulingPolicySnapshot fleetv1beta1.PolicySnapshotObj) (map[string]fleetv1beta1.BindingObj, error) {
 	// List all bindings derived from the CRP.
 	bindingList := &fleetv1beta1.ClusterResourceBindingList{}
@@ -280,7 +281,7 @@ func (r *Reconciler) buildClusterResourceBindings(ctx context.Context, placement
 
 		if len(bindings[i].Spec.TargetCluster) == 0 {
 			err := fmt.Errorf("targetCluster is empty on clusterResourceBinding %s", bindings[i].Name)
-			klog.ErrorS(controller.NewUnexpectedBehaviorError(err), "Found an invalid clusterResourceBinding and skipping it when building placement status", "clusterResourceBinding", klog.KObj(&bindings[i]), "clusterResourcePlacement", crpKObj)
+			klog.ErrorS(controller.NewUnexpectedBehaviorError(err), "Found an invalid clusterResourceBinding and skipping it when building placement status", "clusterResourceBinding", klog.KObj(&bindings[i]), "placement", crpKObj)
 			continue
 		}
 
@@ -294,6 +295,7 @@ func (r *Reconciler) buildClusterResourceBindings(ctx context.Context, placement
 	return res, nil
 }
 
+// TODO: make this work with RP
 // findClusterResourceSnapshotIndexForBindings finds the resource snapshot index for each binding.
 // It returns a map which maps the target cluster name to the resource snapshot index string.
 func (r *Reconciler) findClusterResourceSnapshotIndexForBindings(
@@ -306,19 +308,19 @@ func (r *Reconciler) findClusterResourceSnapshotIndexForBindings(
 	for targetCluster, binding := range bindingMap {
 		resourceSnapshotName := binding.GetBindingSpec().ResourceSnapshotName
 		if resourceSnapshotName == "" {
-			klog.InfoS("Empty resource snapshot name found in binding, controller might observe in-between state", "binding", klog.KObj(binding), "clusterResourcePlacement", crpKObj)
+			klog.InfoS("Empty resource snapshot name found in binding, controller might observe in-between state", "binding", klog.KObj(binding), "placement", crpKObj)
 			res[targetCluster] = ""
 			continue
 		}
 		resourceSnapshot := &fleetv1beta1.ClusterResourceSnapshot{}
 		if err := r.Client.Get(ctx, types.NamespacedName{Name: resourceSnapshotName, Namespace: ""}, resourceSnapshot); err != nil {
 			if apierrors.IsNotFound(err) {
 				klog.InfoS("The resource snapshot specified in binding is not found, probably deleted due to revision history limit",
-					"resourceSnapshotName", resourceSnapshotName, "binding", klog.KObj(binding), "clusterResourcePlacement", crpKObj)
+					"resourceSnapshotName", resourceSnapshotName, "binding", klog.KObj(binding), "placement", crpKObj)
 				res[targetCluster] = ""
 				continue
 			}
-			klog.ErrorS(err, "Failed to get the cluster resource snapshot specified in binding", "resourceSnapshotName", resourceSnapshotName, "binding", klog.KObj(binding), "clusterResourcePlacement", crpKObj)
+			klog.ErrorS(err, "Failed to get the cluster resource snapshot specified in binding", "resourceSnapshotName", resourceSnapshotName, "binding", klog.KObj(binding), "placement", crpKObj)
 			return res, controller.NewAPIServerError(true, err)
 		}
 		res[targetCluster] = resourceSnapshot.GetLabels()[fleetv1beta1.ResourceIndexLabel]
@@ -374,7 +376,7 @@ func (r *Reconciler) setResourcePlacementStatusPerCluster(
 		// The binding uses an out of date resource snapshot, and the RolloutStarted condition is
 		// set to True, Unknown, or has become stale. Fleet might be observing an in-between state.
 		meta.SetStatusCondition(&status.Conditions, condition.RolloutStartedCondition.UnknownResourceConditionPerCluster(placementObj.GetGeneration()))
-		klog.V(5).InfoS("The cluster resource binding has a stale RolloutStarted condition, or it links to an out of date resource snapshot yet has the RolloutStarted condition set to True or Unknown status", "clusterResourceBinding", klog.KObj(binding), "clusterResourcePlacement", klog.KObj(placementObj))
+		klog.V(5).InfoS("The cluster resource binding has a stale RolloutStarted condition, or it links to an out of date resource snapshot yet has the RolloutStarted condition set to True or Unknown status", "clusterResourceBinding", klog.KObj(binding), "placement", klog.KObj(placementObj))
 		res[condition.RolloutStartedCondition] = metav1.ConditionUnknown
 		return res
 	default:
@@ -384,7 +386,7 @@ func (r *Reconciler) setResourcePlacementStatusPerCluster(
 	}
 }
 
-// setResourcePlacementStatusBasedOnBinding sets the cluster's resource placement status based on its corresponding binding status.
+// setResourcePlacementStatusBasedOnBinding sets the placement status based on its corresponding binding status.
 // It updates the status object in place and tracks the set status for each relevant condition type in setStatusByCondType map provided.
 func setResourcePlacementStatusBasedOnBinding(
 	placementObj fleetv1beta1.PlacementObj,
@@ -398,7 +400,7 @@ setResourcePlacementStatusBasedOnBinding(
 		if !condition.IsConditionStatusTrue(bindingCond, binding.GetGeneration()) &&
 			!condition.IsConditionStatusFalse(bindingCond, binding.GetGeneration()) {
 			meta.SetStatusCondition(&status.Conditions, i.UnknownResourceConditionPerCluster(placementObj.GetGeneration()))
-			klog.V(5).InfoS("Find an unknown condition", "bindingCond", bindingCond, "clusterResourceBinding", klog.KObj(binding), "clusterResourcePlacement", klog.KObj(placementObj))
+			klog.V(5).InfoS("Find an unknown condition", "bindingCond", bindingCond, "clusterResourceBinding", klog.KObj(binding), "placement", klog.KObj(placementObj))
 			setStatusByCondType[i] = metav1.ConditionUnknown
 			break
 		}
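The bulk of this file's changes swap the structured-log key clusterResourcePlacement for the scope-neutral placement, so the same status-building code paths can serve both ClusterResourcePlacement and the namespaced ResourcePlacement work flagged by the new TODOs. A minimal sketch of the klog key/value convention involved; the ConfigMap and the names below are placeholders, not Fleet code:

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	defer klog.Flush()

	// Any object that exposes a name and namespace works with klog.KObj;
	// this ConfigMap merely stands in for a placement object.
	obj := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: "team-a", Name: "demo-placement"},
	}

	// Structured logging: a constant message followed by alternating key/value
	// pairs. klog.KObj renders the object as namespace/name, so a scope-neutral
	// key like "placement" stays accurate whether the object is cluster-scoped
	// (empty namespace) or namespaced. Emitted only at verbosity -v=2 or higher.
	klog.V(2).InfoS("Populated the placement conditions", "placement", klog.KObj(obj))
}
```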
