
Commit 25ce9e3

Sync'd changes (#7)
Signed-off-by: michaelawyu <[email protected]>
Co-authored-by: Ryan Zhang <[email protected]>
Parent: 1e06b40

9 files changed (+119, -29 lines)


go.mod

Lines changed: 2 additions & 2 deletions
@@ -80,8 +80,8 @@ require (
 	github.com/go-openapi/swag v0.23.0 // indirect
 	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang-jwt/jwt/v4 v4.5.1 // indirect
-	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
+	github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
+	github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect

go.sum

Lines changed: 4 additions & 4 deletions
@@ -137,10 +137,10 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
-github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
-github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
+github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
+github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
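
For context, each go.sum entry pins two hashes per module version: the h1: hash of the module's file tree and a separate hash of its go.mod file, which is why every version bump touches two lines. To confirm which versions a built binary actually links, the standard library can enumerate its module dependencies; a small generic sketch, not part of this commit:

package main

import (
	"fmt"
	"runtime/debug"
)

// Print the module dependencies compiled into this binary, e.g. to verify
// that a bump such as golang-jwt/jwt v4.5.1 -> v4.5.2 actually took effect.
func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info available (binary not built with module support)")
		return
	}
	for _, dep := range info.Deps {
		fmt.Println(dep.Path, dep.Version)
	}
}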

pkg/controllers/workgenerator/controller.go

Lines changed: 22 additions & 7 deletions
@@ -1541,16 +1541,31 @@ func (r *Reconciler) SetupWithManager(mgr controllerruntime.Manager) error {
 				newResourceSnapshot := newWork.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel]
 				if oldResourceSnapshot == "" || newResourceSnapshot == "" {
 					klog.ErrorS(controller.NewUnexpectedBehaviorError(errors.New("found an invalid work without parent-resource-snapshot-index")),
-						"Could not find the parent resource snapshot index label", "oldWork", klog.KObj(oldWork), "oldResourceSnapshotLabelValue", oldResourceSnapshot,
-						"newWork", klog.KObj(newWork), "newResourceSnapshotLabelValue", newResourceSnapshot)
+						"Could not find the parent resource snapshot index label", "oldWork", klog.KObj(oldWork), "oldWorkLabels", oldWork.Labels,
+						"newWork", klog.KObj(newWork), "newWorkLabels", newWork.Labels)
 					return
 				}
-				// There is an edge case that, the work spec is the same but from different resourceSnapshots.
-				// WorkGenerator will update the work because of the label changes, but the generation is the same.
-				// When the normal update happens, the controller will set the applied condition as false and wait
-				// until the work condition has been changed.
+				oldClusterResourceOverrideSnapshotHash := oldWork.Annotations[fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation]
+				newClusterResourceOverrideSnapshotHash := newWork.Annotations[fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation]
+				oldResourceOverrideSnapshotHash := oldWork.Annotations[fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation]
+				newResourceOverrideSnapshotHash := newWork.Annotations[fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation]
+				if oldClusterResourceOverrideSnapshotHash == "" || newClusterResourceOverrideSnapshotHash == "" ||
+					oldResourceOverrideSnapshotHash == "" || newResourceOverrideSnapshotHash == "" {
+					klog.ErrorS(controller.NewUnexpectedBehaviorError(errors.New("found an invalid work without override-snapshot-hash")),
+						"Could not find the override-snapshot-hash annotation", "oldWork", klog.KObj(oldWork), "oldWorkAnnotations", oldWork.Annotations,
+						"newWork", klog.KObj(newWork), "newWorkAnnotations", newWork.Annotations)
+					return
+				}
+
+				// There is an edge case where the work spec is the same but comes from different resourceSnapshots or resourceOverrideSnapshots.
+				// WorkGenerator will update the work because of the label/annotation changes, but the generation stays the same.
+				// When the override update happens, the rollout controller will set the applied condition to false
+				// and wait for the workGenerator to update it. The workGenerator waits for the work status to change,
+				// but here the status does not change, as the work's spec did not change.
 				// In this edge case, we need to requeue the binding to update the binding status.
-				if oldResourceSnapshot == newResourceSnapshot {
+				if oldResourceSnapshot == newResourceSnapshot &&
+					oldClusterResourceOverrideSnapshotHash == newClusterResourceOverrideSnapshotHash &&
+					oldResourceOverrideSnapshotHash == newResourceOverrideSnapshotHash {
 					klog.V(2).InfoS("The work applied or available condition stayed as true, no need to reconcile", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork))
 					return
 				}
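
Distilled, the new check means: a Work update whose spec (and therefore generation) is unchanged still triggers a requeue whenever the work has been re-pointed at a different resource snapshot or override snapshot. A minimal sketch of that decision follows; needsRequeue is a hypothetical helper written for illustration (the real logic lives inline in the update handler above), and the label/annotation keys are taken as parameters because the fleetv1beta1 constants are defined elsewhere in the repo:

package sketch

// needsRequeue reports whether a Work update with an unchanged spec still
// warrants reconciling the owning binding: true when the parent resource
// snapshot label or either override snapshot hash annotation differs
// between the old and new object.
func needsRequeue(oldLabels, newLabels, oldAnnotations, newAnnotations map[string]string,
	snapshotIndexKey, croHashKey, roHashKey string) bool {
	return oldLabels[snapshotIndexKey] != newLabels[snapshotIndexKey] ||
		oldAnnotations[croHashKey] != newAnnotations[croHashKey] ||
		oldAnnotations[roHashKey] != newAnnotations[roHashKey]
}

Note the inversion: the handler above skips the requeue only when all three values are equal, which is exactly !needsRequeue.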

test/e2e/enveloped_object_placement_test.go

Lines changed: 4 additions & 7 deletions
@@ -353,7 +353,7 @@ var _ = Describe("Process objects with generate name", Ordered, func() {
 		// Create the namespace with both name and generate name set.
 		ns := appNamespace()
 		ns.GenerateName = nsGenerateName
-		Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Namespace)
+		Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name)

 		// Create an envelope config map.
 		cm := &corev1.ConfigMap{
@@ -492,11 +492,8 @@ var _ = Describe("Process objects with generate name", Ordered, func() {
 	})

 	AfterAll(func() {
-		// Remove the CRP.
-		cleanupCRP(crpName)
-
-		// Clean the placed resources.
-		cleanWorkResourcesOnCluster(allMemberClusters[0])
+		By(fmt.Sprintf("deleting placement %s and related resources", crpName))
+		ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters)
 	})
 })

@@ -649,7 +646,7 @@ func readEnvelopTestManifests() {
 // createWrappedResourcesForEnvelopTest creates some enveloped resources on the hub cluster for testing purposes.
 func createWrappedResourcesForEnvelopTest() {
 	ns := appNamespace()
-	Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Namespace)
+	Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name)
 	// modify the configMap according to the namespace
 	testConfigMap.Namespace = ns.Name
 	Expect(hubClient.Create(ctx, &testConfigMap)).To(Succeed(), "Failed to create config map %s", testConfigMap.Name)

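A note on the recurring ns.Namespace to ns.Name change in this and the following test files: a Namespace is a cluster-scoped object, so its metadata.namespace field is always empty, and the old failure messages interpolated an empty string; the namespace's actual name lives in metadata.name. A quick self-contained illustration:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Namespaces are cluster-scoped: the name lives in ObjectMeta.Name,
	// while ObjectMeta.Namespace stays empty.
	ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "app-ns"}}
	fmt.Printf("ns.Namespace=%q ns.Name=%q\n", ns.Namespace, ns.Name)
	// Prints: ns.Namespace="" ns.Name="app-ns"
}
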
test/e2e/placement_cro_test.go

Lines changed: 39 additions & 1 deletion
@@ -141,7 +141,7 @@ var _ = Context("creating clusterResourceOverride (selecting all clusters) to ov
 		}, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cro as expected", crpName)
 	})

-	It("should update CRP status on demand as expected", func() {
+	It("should refresh the CRP status even as there is no change on the resources", func() {
 		wantCRONames := []string{fmt.Sprintf(placementv1alpha1.OverrideSnapshotNameFmt, croName, 1)}
 		crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", wantCRONames, nil)
 		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName)
@@ -155,6 +155,44 @@ var _ = Context("creating clusterResourceOverride (selecting all clusters) to ov
 		checkIfOverrideAnnotationsOnAllMemberClusters(true, want)
 	})

+	It("update cro attached to this CRP only and no updates on the namespace", func() {
+		Eventually(func() error {
+			cro := &placementv1alpha1.ClusterResourceOverride{}
+			if err := hubClient.Get(ctx, types.NamespacedName{Name: croName}, cro); err != nil {
+				return err
+			}
+			cro.Spec.Policy.OverrideRules = append(cro.Spec.Policy.OverrideRules, placementv1alpha1.OverrideRule{
+				ClusterSelector: &placementv1beta1.ClusterSelector{
+					ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{
+						{
+							LabelSelector: &metav1.LabelSelector{
+								MatchLabels: map[string]string{
+									"invalid-key": "invalid-value",
+								},
+							},
+						},
+					},
+				},
+				OverrideType: placementv1alpha1.DeleteOverrideType,
+			})
+			return hubClient.Update(ctx, cro)
+		}, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cro as expected", crpName)
+	})
+
+	It("should update CRP status on demand as expected", func() {
+		wantCRONames := []string{fmt.Sprintf(placementv1alpha1.OverrideSnapshotNameFmt, croName, 2)}
+		crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", wantCRONames, nil)
+		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName)
+	})
+
+	// This check will ignore the annotation of resources.
+	It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters)
+
+	It("should have new override annotation value on the placed resources", func() {
+		want := map[string]string{croTestAnnotationKey: croTestAnnotationValue1}
+		checkIfOverrideAnnotationsOnAllMemberClusters(true, want)
+	})
+
 	It("delete the cro attached to this CRP", func() {
 		cleanupClusterResourceOverride(croName)
 	})
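
The new steps use a get-mutate-update loop wrapped in Eventually so that write conflicts from the hub apiserver are retried, and they append an override rule whose cluster selector can never match; that bumps the override snapshot (the expected snapshot index moves from 1 to 2) without changing anything placed on the member clusters, which is exactly the spec-unchanged edge case handled in the workgenerator diff above. The resourceOverride test below follows the same pattern. For reference, client-go ships a retry helper that expresses the same loop more directly, though it only retries conflict errors while Eventually also absorbs transient failures. A sketch under assumed import paths, with appendOverrideRule as a hypothetical helper:

package sketch

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"

	placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1"
)

// appendOverrideRule retries the get-mutate-update loop on write conflicts,
// mirroring what the Eventually blocks in the test above accomplish.
// It assumes cro.Spec.Policy is already set, as it is in these tests.
func appendOverrideRule(ctx context.Context, c client.Client, name string, rule placementv1alpha1.OverrideRule) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		cro := &placementv1alpha1.ClusterResourceOverride{}
		if err := c.Get(ctx, types.NamespacedName{Name: name}, cro); err != nil {
			return err
		}
		cro.Spec.Policy.OverrideRules = append(cro.Spec.Policy.OverrideRules, rule)
		return c.Update(ctx, cro)
	})
}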

test/e2e/placement_ro_test.go

Lines changed: 40 additions & 0 deletions
@@ -154,6 +154,46 @@ var _ = Context("creating resourceOverride (selecting all clusters) to override
 		want := map[string]string{roTestAnnotationKey: roTestAnnotationValue1}
 		checkIfOverrideAnnotationsOnAllMemberClusters(false, want)
 	})
+
+	It("update ro attached to this CRP only and no update on the configmap itself", func() {
+		Eventually(func() error {
+			ro := &placementv1alpha1.ResourceOverride{}
+			if err := hubClient.Get(ctx, types.NamespacedName{Name: roName, Namespace: roNamespace}, ro); err != nil {
+				return err
+			}
+			ro.Spec.Policy.OverrideRules = append(ro.Spec.Policy.OverrideRules, placementv1alpha1.OverrideRule{
+				ClusterSelector: &placementv1beta1.ClusterSelector{
+					ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{
+						{
+							LabelSelector: &metav1.LabelSelector{
+								MatchLabels: map[string]string{
+									"invalid-key": "invalid-value",
+								},
+							},
+						},
+					},
+				},
+				OverrideType: placementv1alpha1.DeleteOverrideType,
+			})
+			return hubClient.Update(ctx, ro)
+		}, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", crpName)
+	})
+
+	It("should refresh the CRP status even as there is no change on the resources", func() {
+		wantRONames := []placementv1beta1.NamespacedName{
+			{Namespace: roNamespace, Name: fmt.Sprintf(placementv1alpha1.OverrideSnapshotNameFmt, roName, 2)},
+		}
+		crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", nil, wantRONames)
+		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName)
+	})
+
+	// This check will ignore the annotation of resources.
+	It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters)
+
+	It("should have override annotations on the configmap", func() {
+		want := map[string]string{roTestAnnotationKey: roTestAnnotationValue1}
+		checkIfOverrideAnnotationsOnAllMemberClusters(false, want)
+	})
 })

 var _ = Context("creating resourceOverride with multiple jsonPatchOverrides to override configMap", Ordered, func() {

test/e2e/utils_test.go

Lines changed: 2 additions & 2 deletions
@@ -679,7 +679,7 @@ func createWorkResource(name, namespace string) {
 // createWorkResources creates some resources on the hub cluster for testing purposes.
 func createWorkResources() {
 	ns := appNamespace()
-	Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Namespace)
+	Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name)

 	configMap := appConfigMap()
 	Expect(hubClient.Create(ctx, &configMap)).To(Succeed(), "Failed to create config map %s", configMap.Name)
@@ -692,7 +692,7 @@ func cleanupWorkResources() {

 func cleanWorkResourcesOnCluster(cluster *framework.Cluster) {
 	ns := appNamespace()
-	Expect(client.IgnoreNotFound(cluster.KubeClient.Delete(ctx, &ns))).To(Succeed(), "Failed to delete namespace %s", ns.Namespace)
+	Expect(client.IgnoreNotFound(cluster.KubeClient.Delete(ctx, &ns))).To(Succeed(), "Failed to delete namespace %s", ns.Name)

 	workResourcesRemovedActual := workNamespaceRemovedFromClusterActual(cluster)
 	Eventually(workResourcesRemovedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove work resources from %s cluster", cluster.ClusterName)

test/upgrade/before/scenarios_test.go

Lines changed: 5 additions & 5 deletions
@@ -83,7 +83,7 @@ var _ = Describe("CRP with non-trackable resources, all available (before upgrad
 	BeforeAll(func() {
 		// Create the resources.
 		ns := appNamespace(workNamespaceName, crpName)
-		Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Namespace)
+		Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name)

 		// Job is currently untrackable in Fleet.
 		job := batchv1.Job{
@@ -170,7 +170,7 @@ var _ = Describe("CRP with availability failure (before upgrade)", Ordered, func
 	BeforeAll(func() {
 		// Create the resources.
 		ns := appNamespace(workNamespaceName, crpName)
-		Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Namespace)
+		Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name)

 		// Use a Service of the LoadBalancer type as by default KinD environment does not support
 		// this service type and such services will always be unavailable.
@@ -277,7 +277,7 @@ var _ = Describe("CRP with apply op failure (before upgrade)", Ordered, func() {

 		// Create the resources on the member cluster with a custom manager
 		ns := appNamespace(workNamespaceName, crpName)
-		Expect(memberCluster1EastProdClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Namespace)
+		Expect(memberCluster1EastProdClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name)

 		configMap := appConfigMap(workNamespaceName, appConfigMapName)
 		configMap.Data = map[string]string{
@@ -379,7 +379,7 @@ var _ = Describe("CRP stuck in the rollout process (blocked by availability fail
 	BeforeAll(func() {
 		// Create the resources.
 		ns := appNamespace(workNamespaceName, crpName)
-		Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Namespace)
+		Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name)

 		// Use a Service of the ClusterIP type. KinD supports it and it will become available
 		// once an IP has been assigned.
@@ -708,7 +708,7 @@ var _ = Describe("CRP stuck in the rollout process (long wait time)", Ordered, f
 	BeforeAll(func() {
 		// Create the resources.
 		ns := appNamespace(workNamespaceName, crpName)
-		Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Namespace)
+		Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name)

 		// Job is currently untrackable in Fleet.
 		originalJob = &batchv1.Job{

test/upgrade/before/utils_test.go

Lines changed: 1 addition & 1 deletion
@@ -115,7 +115,7 @@ func checkIfAllMemberClustersHaveJoined() {
 // createWorkResources creates some resources on the hub cluster for testing purposes.
 func createWorkResources(workNamespaceName, appConfigMapName, crpName string) {
 	ns := appNamespace(workNamespaceName, crpName)
-	Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Namespace)
+	Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name)

 	configMap := appConfigMap(workNamespaceName, appConfigMapName)
 	Expect(hubClient.Create(ctx, &configMap)).To(Succeed(), "Failed to create config map %s", configMap.Name)
