Commit 5ce4192

Author: Ryan Zhang (committed)
adjust the code
Signed-off-by: Ryan Zhang <[email protected]>
1 parent e9e49a7 commit 5ce4192

File tree

11 files changed: +1076 −325 lines


apis/placement/v1beta1/commons.go

Lines changed: 1 addition & 0 deletions
@@ -109,6 +109,7 @@ const (
 	ParentNamespaceLabel = fleetPrefix + "parent-placement-namespace"
 
 	// CRPGenerationAnnotation indicates the generation of the CRP from which an object is derived or last updated.
+	// TODO: rename this variable
 	CRPGenerationAnnotation = fleetPrefix + "CRP-generation"
 
 	// EnvelopeConfigMapAnnotation indicates the configmap is an envelope configmap containing resources we need to apply to the member cluster instead of the configMap itself.

pkg/controllers/clusterresourceplacement/controller.go

Lines changed: 189 additions & 175 deletions
Large diffs are not rendered by default.

pkg/controllers/clusterresourceplacement/placement_status.go

Lines changed: 6 additions & 4 deletions
@@ -97,9 +97,9 @@ func appendFailedToScheduleResourcePlacementStatuses(
 	return allRPS
 }
 
-// determineExpectedCRPAndResourcePlacementStatusCondType determines the expected condition types for the CRP and resource placement statuses
+// determineExpectedPlacementAndResourcePlacementStatusCondType determines the expected condition types for the CRP and resource placement statuses
 // given the currently in-use apply strategy.
-func determineExpectedCRPAndResourcePlacementStatusCondType(placementObj fleetv1beta1.PlacementObj) []condition.ResourceCondition {
+func determineExpectedPlacementAndResourcePlacementStatusCondType(placementObj fleetv1beta1.PlacementObj) []condition.ResourceCondition {
 	placementSpec := placementObj.GetPlacementSpec()
 	switch {
 	case placementSpec.Strategy.ApplyStrategy == nil:
@@ -113,6 +113,7 @@ func determineExpectedCRPAndResourcePlacementStatusCondType(placementObj fleetv1
 
 // appendScheduledResourcePlacementStatuses appends the resource placement statuses for the
 // scheduled clusters to the list of all resource placement statuses.
+// it returns the updated list of resource placement statuses.
 func (r *Reconciler) appendScheduledResourcePlacementStatuses(
 	ctx context.Context,
 	allRPS []fleetv1beta1.ResourcePlacementStatus,
@@ -196,8 +197,9 @@ func (r *Reconciler) appendScheduledResourcePlacementStatuses(
 	return allRPS, rpsSetCondTypeCounter, nil
 }
 
-// setCRPConditions sets the CRP conditions based on the resource placement statuses.
-func setCRPConditions(
+// TODO: make this work with RP
+// setPlacementConditions currently sets the CRP conditions based on the resource placement statuses.
+func setPlacementConditions(
 	placementObj fleetv1beta1.PlacementObj,
 	allRPS []fleetv1beta1.ResourcePlacementStatus,
 	rpsSetCondTypeCounter [condition.TotalCondition][condition.TotalConditionStatus]int,

pkg/controllers/clusterresourceplacement/placement_status_test.go

Lines changed: 119 additions & 2 deletions
@@ -962,7 +962,6 @@ func TestSetPlacementStatus(t *testing.T) {
 				},
 			},
 		},
-		// TODO special handling when selected cluster is 0
 		{
 			name: "the placement scheduling succeeded when numberOfClusters is 0",
 			policy: &fleetv1beta1.PlacementPolicy{
@@ -1039,7 +1038,7 @@
 					},
 				},
 			},
-			want: false,
+			want: true,
 			wantStatus: &fleetv1beta1.PlacementStatus{
 				SelectedResources: selectedResources,
 				ObservedResourceIndex: "0",
@@ -1055,6 +1054,124 @@
 				PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{},
 			},
 		},
+
+		{
+			name: "the placement scheduling succeeded when numberOfClusters is 0 but user actually wants to select some",
+			policy: &fleetv1beta1.PlacementPolicy{
+				PlacementType: fleetv1beta1.PickNPlacementType,
+				NumberOfClusters: ptr.To(int32(3)),
+			},
+			latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0),
+					Labels: map[string]string{
+						fleetv1beta1.PolicyIndexLabel: "0",
+						fleetv1beta1.IsLatestSnapshotLabel: "true",
+						fleetv1beta1.PlacementTrackingLabel: testCRPName,
+					},
+					OwnerReferences: []metav1.OwnerReference{
+						{
+							Name: testCRPName,
+							BlockOwnerDeletion: ptr.To(true),
+							Controller: ptr.To(true),
+							APIVersion: fleetAPIVersion,
+							Kind: "ClusterResourcePlacement",
+						},
+					},
+					Annotations: map[string]string{
+						fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(0),
+					},
+					Generation: 1,
+				},
+				Status: fleetv1beta1.SchedulingPolicySnapshotStatus{
+					ObservedCRPGeneration: crpGeneration,
+					Conditions: []metav1.Condition{
+						{
+							Status: metav1.ConditionFalse,
+							Type: string(fleetv1beta1.PolicySnapshotScheduled),
+							Reason: "Scheduled not met",
+							Message: "message",
+							ObservedGeneration: 1,
+						},
+					},
+					ClusterDecisions: []fleetv1beta1.ClusterDecision{
+						{
+							ClusterName: "member-1",
+							Selected: false,
+							Reason: "filtered",
+						},
+						{
+							ClusterName: "member-2",
+							Selected: false,
+							Reason: "filtered",
+						},
+					},
+				},
+			},
+			latestResourceSnapshot: &fleetv1beta1.ClusterResourceSnapshot{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0),
+					Labels: map[string]string{
+						fleetv1beta1.ResourceIndexLabel: "0",
+						fleetv1beta1.PlacementTrackingLabel: testCRPName,
+						fleetv1beta1.IsLatestSnapshotLabel: "true",
+					},
+					OwnerReferences: []metav1.OwnerReference{
+						{
+							Name: testCRPName,
+							BlockOwnerDeletion: ptr.To(true),
+							Controller: ptr.To(true),
+							APIVersion: fleetAPIVersion,
+							Kind: "ClusterResourcePlacement",
+						},
+					},
+					Annotations: map[string]string{
+						fleetv1beta1.ResourceGroupHashAnnotation: "hash",
+						fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "0",
+					},
+				},
+			},
+			want: false,
+			wantStatus: &fleetv1beta1.PlacementStatus{
+				SelectedResources: selectedResources,
+				ObservedResourceIndex: "0",
+				Conditions: []metav1.Condition{
+					{
+						Status: metav1.ConditionFalse,
+						Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType),
+						Reason: "Scheduled not met",
+						ObservedGeneration: crpGeneration,
+						LastTransitionTime: metav1.NewTime(currentTime),
+					},
+				},
+				PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{
+					{
+						Conditions: []metav1.Condition{
+							{
+								Status: metav1.ConditionFalse,
+								Type: string(fleetv1beta1.ResourceScheduledConditionType),
+								Reason: condition.ResourceScheduleFailedReason,
+								ObservedGeneration: crpGeneration,
+								LastTransitionTime: metav1.NewTime(currentTime),
+								Message: "filtered",
+							},
+						},
+					},
+					{
+						Conditions: []metav1.Condition{
+							{
+								Status: metav1.ConditionFalse,
+								Type: string(fleetv1beta1.ResourceScheduledConditionType),
+								Reason: condition.ResourceScheduleFailedReason,
+								ObservedGeneration: crpGeneration,
+								LastTransitionTime: metav1.NewTime(currentTime),
+								Message: "filtered",
+							},
						},
+					},
+				},
+			},
+		},
 		{
 			name: "the placement is completed with clusterResourceBindings and works",
 			policy: &fleetv1beta1.PlacementPolicy{

pkg/controllers/rollout/controller.go

Lines changed: 1 addition & 1 deletion
@@ -386,7 +386,6 @@ func (r *Reconciler) pickBindingsToRoll(
 		bindingSpec := binding.GetBindingSpec()
 		switch bindingSpec.State {
 		case placementv1beta1.BindingStateUnscheduled:
-			// Use updated interface-based bindingutils functions
 			if bindingutils.HasBindingFailed(binding) {
 				klog.V(2).InfoS("Found a failed to be ready unscheduled binding", "placement", placementKObj, "binding", bindingKObj)
 			} else if !bindingutils.IsBindingDiffReported(binding) {
@@ -501,6 +500,7 @@ func determineBindingsToUpdate(
 	readyBindings, canBeReadyBindings, canBeUnavailableBindings []placementv1beta1.BindingObj,
 ) ([]toBeUpdatedBinding, []toBeUpdatedBinding) {
 	toBeUpdatedBindingList := make([]toBeUpdatedBinding, 0)
+	// TODO: Fix the bug that we don't shrink to zero when there are bindings that are not ready yet.
 	// calculate the max number of bindings that can be unavailable according to user specified maxUnavailable
 	maxNumberToRemove := calculateMaxToRemove(placementObj, targetNumber, readyBindings, canBeUnavailableBindings)
 	// we can still update the bindings that are failed to apply already regardless of the maxNumberToRemove
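For intuition on the maxUnavailable arithmetic referenced in the comment above, here is a standalone, assumption-laden sketch; it is not the repository's actual calculateMaxToRemove implementation, and every name in it is hypothetical:

package example

// maxToRemoveSketch illustrates one common way a rollout controller bounds how
// many bindings may be taken offline at once: keep at least
// targetNumber - maxUnavailable bindings ready, and only remove the surplus of
// currently-ready bindings above that floor. Hypothetical sketch only.
func maxToRemoveSketch(targetNumber, readyCount, maxUnavailable int) int {
	minAvailable := targetNumber - maxUnavailable
	if minAvailable < 0 {
		minAvailable = 0
	}
	maxRemove := readyCount - minAvailable
	if maxRemove < 0 {
		return 0
	}
	return maxRemove
}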

pkg/scheduler/framework/framework.go

Lines changed: 3 additions & 3 deletions
@@ -384,7 +384,7 @@ var removeFinalizerAndUpdate = func(ctx context.Context, hubClient client.Client
 	controllerutil.RemoveFinalizer(binding, placementv1beta1.SchedulerBindingCleanupFinalizer)
 	err := hubClient.Update(ctx, binding, &client.UpdateOptions{})
 	if err == nil {
-		klog.V(2).InfoS("Removed scheduler CRB cleanup finalizer", "binding", klog.KObj(binding))
+		klog.V(2).InfoS("Removed scheduler binding cleanup finalizer", "binding", klog.KObj(binding))
 	}
 	return err
 }
@@ -763,7 +763,7 @@ func (f *framework) updatePolicySnapshotStatusFromBindings(
 	policyRef := klog.KObj(policy)
 
 	// Retrieve the corresponding CRP generation.
-	observedCRPGeneration, err := annotations.ExtractObservedCRPGenerationFromPolicySnapshot(policy)
+	observedCRPGeneration, err := annotations.ExtractObservedPlacementGenerationFromPolicySnapshot(policy)
 	if err != nil {
 		klog.ErrorS(err, "Failed to retrieve CRP generation from annotation", "clusterSchedulingPolicySnapshot", policyRef)
 		return controller.NewUnexpectedBehaviorError(err)
@@ -1344,7 +1344,7 @@ func (f *framework) updatePolicySnapshotStatusForPickFixedPlacementType(
 	policyRef := klog.KObj(policy)
 
 	// Retrieve the corresponding CRP generation.
-	observedCRPGeneration, err := annotations.ExtractObservedCRPGenerationFromPolicySnapshot(policy)
+	observedCRPGeneration, err := annotations.ExtractObservedPlacementGenerationFromPolicySnapshot(policy)
 	if err != nil {
 		klog.ErrorS(err, "Failed to retrieve CRP generation from annotation", "clusterSchedulingPolicySnapshot", policyRef)
 		return controller.NewUnexpectedBehaviorError(err)

pkg/utils/annotations/annotations.go

Lines changed: 28 additions & 42 deletions
@@ -42,34 +42,44 @@ func ExtractNumOfClustersFromPolicySnapshot(policy fleetv1beta1.PolicySnapshotOb
 	return numOfClusters, nil
 }
 
-// ExtractObservedCRPGenerationFromPolicySnapshot extracts the observed CRP generation from policySnapshot.
-func ExtractObservedCRPGenerationFromPolicySnapshot(policy fleetv1beta1.PolicySnapshotObj) (int64, error) {
-	crpGenerationStr, ok := policy.GetAnnotations()[fleetv1beta1.CRPGenerationAnnotation]
-	if !ok {
-		return 0, fmt.Errorf("cannot find annotation %s", fleetv1beta1.CRPGenerationAnnotation)
+// ExtractSubindexFromResourceSnapshot is a helper function to extract subindex from ResourceSnapshot objects.
+func ExtractSubindexFromResourceSnapshot(snapshot fleetv1beta1.ResourceSnapshotObj) (doesExist bool, subindex int, err error) {
+	annotations := snapshot.GetAnnotations()
+	if annotations == nil {
+		return false, 0, nil
 	}
 
-	// Cast the annotation to an integer; throw an error if the cast cannot be completed or the value is negative.
-	observedCRPGeneration, err := strconv.Atoi(crpGenerationStr)
-	if err != nil || observedCRPGeneration < 0 {
-		return 0, fmt.Errorf("invalid annotation %s: %s is not a valid value: %w", fleetv1beta1.CRPGenerationAnnotation, crpGenerationStr, err)
+	subindexStr, exists := annotations[fleetv1beta1.SubindexOfResourceSnapshotAnnotation]
+	if !exists || subindexStr == "" {
+		return false, 0, nil
 	}
 
-	return int64(observedCRPGeneration), nil
+	subindex, err = strconv.Atoi(subindexStr)
+	if err != nil {
+		return false, 0, fmt.Errorf("invalid subindex annotation value %q: %w", subindexStr, err)
+	}
+
+	if subindex < 0 {
+		return false, 0, fmt.Errorf("subindex cannot be negative: %d", subindex)
+	}
+
+	return true, subindex, nil
 }
 
-// ExtractSubindexFromClusterResourceSnapshot extracts the subindex value from the annotations of a clusterResourceSnapshot.
-func ExtractSubindexFromClusterResourceSnapshot(snapshot fleetv1beta1.ResourceSnapshotObj) (doesExist bool, subindex int, err error) {
-	subindexStr, ok := snapshot.GetAnnotations()[fleetv1beta1.SubindexOfResourceSnapshotAnnotation]
+// ExtractObservedPlacementGenerationFromPolicySnapshot extracts the observed placement generation from policySnapshot.
+func ExtractObservedPlacementGenerationFromPolicySnapshot(policy fleetv1beta1.PolicySnapshotObj) (int64, error) {
+	placementGenerationStr, ok := policy.GetAnnotations()[fleetv1beta1.CRPGenerationAnnotation]
 	if !ok {
-		return false, -1, nil
+		return 0, fmt.Errorf("cannot find annotation %s", fleetv1beta1.CRPGenerationAnnotation)
 	}
-	subindex, err = strconv.Atoi(subindexStr)
-	if err != nil || subindex < 0 {
-		return true, -1, fmt.Errorf("invalid annotation %s: %s is invalid: %w", fleetv1beta1.SubindexOfResourceSnapshotAnnotation, subindexStr, err)
+
+	// Cast the annotation to an integer; throw an error if the cast cannot be completed or the value is negative.
+	observedplacementGeneration, err := strconv.Atoi(placementGenerationStr)
+	if err != nil || observedplacementGeneration < 0 {
+		return 0, fmt.Errorf("invalid annotation %s: %s is not a valid value: %w", fleetv1beta1.CRPGenerationAnnotation, placementGenerationStr, err)
 	}
 
-	return true, subindex, nil
+	return int64(observedplacementGeneration), nil
 }
 
 // ExtractNumberOfResourceSnapshotsFromResourceSnapshot extracts the number of clusterResourceSnapshots in a group from the master clusterResourceSnapshot.
@@ -125,27 +135,3 @@ func ParseResourceGroupHashFromAnnotation(resourceSnapshot fleetv1beta1.Resource
 	}
 	return v, nil
 }
-
-// ExtractSubindexFromResourceSnapshot is a helper function to extract subindex from ResourceSnapshot objects.
-func ExtractSubindexFromResourceSnapshot(snapshot fleetv1beta1.ResourceSnapshotObj) (doesExist bool, subindex int, err error) {
-	annotations := snapshot.GetAnnotations()
-	if annotations == nil {
-		return false, 0, nil
-	}
-
-	subindexStr, exists := annotations[fleetv1beta1.SubindexOfResourceSnapshotAnnotation]
-	if !exists || subindexStr == "" {
-		return false, 0, nil
-	}
-
-	subindex, err = strconv.Atoi(subindexStr)
-	if err != nil {
-		return false, 0, fmt.Errorf("invalid subindex annotation value %q: %w", subindexStr, err)
-	}
-
-	if subindex < 0 {
-		return false, 0, fmt.Errorf("subindex cannot be negative: %d", subindex)
-	}
-
-	return true, subindex, nil
-}
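As a usage sketch of the refactored helpers above (the import paths, package alias, and the caller itself are assumptions; only the two exported function names and the fleetv1beta1 interface types come from the diff):

package example

import (
	"fmt"

	// Assumed import paths; adjust to the repository's actual module path.
	fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
	"go.goms.io/fleet/pkg/utils/annotations"
)

// inspectSnapshots is a hypothetical caller showing how the renamed and
// consolidated extractors are consumed after this change.
func inspectSnapshots(policy fleetv1beta1.PolicySnapshotObj, snapshot fleetv1beta1.ResourceSnapshotObj) error {
	// Renamed from ExtractObservedCRPGenerationFromPolicySnapshot; it still
	// reads the CRPGenerationAnnotation under the hood.
	observedGeneration, err := annotations.ExtractObservedPlacementGenerationFromPolicySnapshot(policy)
	if err != nil {
		return fmt.Errorf("reading observed placement generation: %w", err)
	}

	// The consolidated subindex helper now reports "not found" as (false, 0, nil)
	// rather than (false, -1, nil).
	exists, subindex, err := annotations.ExtractSubindexFromResourceSnapshot(snapshot)
	if err != nil {
		return fmt.Errorf("reading resource snapshot subindex: %w", err)
	}

	fmt.Println("observed generation:", observedGeneration, "subindex found:", exists, "subindex:", subindex)
	return nil
}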
