diff --git a/apis/placement/v1alpha1/common.go b/apis/placement/v1alpha1/common.go index 44366996c..88023cf63 100644 --- a/apis/placement/v1alpha1/common.go +++ b/apis/placement/v1alpha1/common.go @@ -33,4 +33,7 @@ const ( // ApprovalTaskNameFmt is the format of the approval task name. ApprovalTaskNameFmt = "%s-%s" + + // OverrideClusterNameVariable is the reserved variable in the override value that will be replaced by the actual cluster name. + OverrideClusterNameVariable = "${MEMBER-CLUSTER-NAME}" ) diff --git a/apis/placement/v1alpha1/override_types.go b/apis/placement/v1alpha1/override_types.go index 9459d10cc..aeff12682 100644 --- a/apis/placement/v1alpha1/override_types.go +++ b/apis/placement/v1alpha1/override_types.go @@ -191,6 +191,10 @@ type JSONPatchOverride struct { Path string `json:"path"` // Value defines the content to be applied on the target location. // Value should be empty when operator is `remove`. + // We have reserved a few variables in this field that will be replaced by the actual values. + // Those variables all start with `$` and are case sensitive. + // Here is the list of currently supported variables: + // `${MEMBER-CLUSTER-NAME}`: this will be replaced by the name of the memberCluster CR that represents this cluster. // +optional Value apiextensionsv1.JSON `json:"value,omitempty"` } diff --git a/apis/placement/v1alpha1/zz_generated.deepcopy.go b/apis/placement/v1alpha1/zz_generated.deepcopy.go index 755b15f16..109fb7823 100644 --- a/apis/placement/v1alpha1/zz_generated.deepcopy.go +++ b/apis/placement/v1alpha1/zz_generated.deepcopy.go @@ -10,10 +10,11 @@ Licensed under the MIT license. 
package v1alpha1 import ( - "go.goms.io/fleet/apis/placement/v1beta1" - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" + + "go.goms.io/fleet/apis/placement/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go index 81e2def8e..6f8c4b30e 100644 --- a/pkg/controllers/workgenerator/controller.go +++ b/pkg/controllers/workgenerator/controller.go @@ -18,7 +18,7 @@ import ( "go.uber.org/atomic" "golang.org/x/sync/errgroup" corev1 "k8s.io/api/core/v1" - equality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -145,7 +145,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req controllerruntime.Reques works, syncErr := r.listAllWorksAssociated(ctx, &resourceBinding) if syncErr == nil { // generate and apply the workUpdated works if we have all the works - overrideSucceeded, workUpdated, syncErr = r.syncAllWork(ctx, &resourceBinding, works, cluster) + overrideSucceeded, workUpdated, syncErr = r.syncAllWork(ctx, &resourceBinding, works, &cluster) } // Reset the conditions and failed placements. 
for i := condition.OverriddenCondition; i < condition.TotalCondition; i++ { @@ -379,7 +379,7 @@ func (r *Reconciler) listAllWorksAssociated(ctx context.Context, resourceBinding // it returns // 1: if we apply the overrides successfully // 2: if we actually made any changes on the hub cluster -func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1beta1.ClusterResourceBinding, existingWorks map[string]*fleetv1beta1.Work, cluster clusterv1beta1.MemberCluster) (bool, bool, error) { +func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1beta1.ClusterResourceBinding, existingWorks map[string]*fleetv1beta1.Work, cluster *clusterv1beta1.MemberCluster) (bool, bool, error) { updateAny := atomic.NewBool(false) resourceBindingRef := klog.KObj(resourceBinding) // the hash256 function can can handle empty list https://go.dev/play/p/_4HW17fooXM diff --git a/pkg/controllers/workgenerator/override.go b/pkg/controllers/workgenerator/override.go index 2fcc93b4e..2f312b513 100644 --- a/pkg/controllers/workgenerator/override.go +++ b/pkg/controllers/workgenerator/override.go @@ -9,6 +9,7 @@ import ( "context" "encoding/json" "fmt" + "strings" jsonpatch "github.com/evanphx/json-patch/v5" "k8s.io/apimachinery/pkg/api/errors" @@ -97,7 +98,7 @@ func (r *Reconciler) fetchResourceOverrideSnapshots(ctx context.Context, resourc // It returns // - true if the resource is deleted by the overrides. // - an error if the override rules are invalid. 
-func (r *Reconciler) applyOverrides(resource *placementv1beta1.ResourceContent, cluster clusterv1beta1.MemberCluster, +func (r *Reconciler) applyOverrides(resource *placementv1beta1.ResourceContent, cluster *clusterv1beta1.MemberCluster, croMap map[placementv1beta1.ResourceIdentifier][]*placementv1alpha1.ClusterResourceOverrideSnapshot, roMap map[placementv1beta1.ResourceIdentifier][]*placementv1alpha1.ResourceOverrideSnapshot) (bool, error) { if len(croMap) == 0 && len(roMap) == 0 { return false, nil @@ -168,7 +169,7 @@ func (r *Reconciler) applyOverrides(resource *placementv1beta1.ResourceContent, return resource.Raw == nil, nil } -func applyOverrideRules(resource *placementv1beta1.ResourceContent, cluster clusterv1beta1.MemberCluster, rules []placementv1alpha1.OverrideRule) error { +func applyOverrideRules(resource *placementv1beta1.ResourceContent, cluster *clusterv1beta1.MemberCluster, rules []placementv1alpha1.OverrideRule) error { for _, rule := range rules { matched, err := overrider.IsClusterMatched(cluster, rule) if err != nil { @@ -184,7 +185,7 @@ func applyOverrideRules(resource *placementv1beta1.ResourceContent, cluster clus return nil } // Apply JSONPatchOverrides by default - if err := applyJSONPatchOverride(resource, rule.JSONPatchOverrides); err != nil { + if err := applyJSONPatchOverride(resource, cluster, rule.JSONPatchOverrides); err != nil { klog.ErrorS(err, "Failed to apply JSON patch override") return controller.NewUserError(err) } @@ -193,10 +194,18 @@ func applyOverrideRules(resource *placementv1beta1.ResourceContent, cluster clus } // applyJSONPatchOverride applies a JSON patch on the selected resources following [RFC 6902](https://datatracker.ietf.org/doc/html/rfc6902). 
-func applyJSONPatchOverride(resourceContent *placementv1beta1.ResourceContent, overrides []placementv1alpha1.JSONPatchOverride) error { +func applyJSONPatchOverride(resourceContent *placementv1beta1.ResourceContent, cluster *clusterv1beta1.MemberCluster, overrides []placementv1alpha1.JSONPatchOverride) error { if len(overrides) == 0 { // do nothing return nil } + // go through the JSON patch overrides to replace the built-in variables before json Marshal + // as they may contain built-in variables that cannot be marshaled directly + for i := range overrides { + // find and replace a few special built-in variables + // replace the built-in variable with the actual cluster name + processedJSONStr := []byte(strings.ReplaceAll(string(overrides[i].Value.Raw), placementv1alpha1.OverrideClusterNameVariable, cluster.Name)) + overrides[i].Value.Raw = processedJSONStr + } jsonPatchBytes, err := json.Marshal(overrides) if err != nil { diff --git a/pkg/controllers/workgenerator/override_test.go b/pkg/controllers/workgenerator/override_test.go index e12adb977..59a7c98f7 100644 --- a/pkg/controllers/workgenerator/override_test.go +++ b/pkg/controllers/workgenerator/override_test.go @@ -8,6 +8,7 @@ package workgenerator import ( "context" "errors" + "fmt" "testing" "github.com/google/go-cmp/cmp" @@ -1102,7 +1103,7 @@ func TestApplyOverrides_clusterScopedResource(t *testing.T) { InformerManager: &fakeInformer, } rc := resource.CreateResourceContentForTest(t, tc.clusterRole) - gotDeleted, err := r.applyOverrides(rc, tc.cluster, tc.croMap, nil) + gotDeleted, err := r.applyOverrides(rc, &tc.cluster, tc.croMap, nil) if gotErr, wantErr := err != nil, tc.wantErr != nil; gotErr != wantErr || !errors.Is(err, tc.wantErr) { t.Fatalf("applyOverrides() got error %v, want error %v", err, tc.wantErr) } @@ -1964,6 +1965,79 @@ func TestApplyOverrides_namespacedScopeResource(t *testing.T) { }, wantDelete: true, }, + { + name: "cluster name as value in json patch of resourceOverride", + 
deployment: appsv1.Deployment{ + TypeMeta: deploymentType, + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-name", + Namespace: "deployment-namespace", + Labels: map[string]string{ + "app": "app1", + "key2": "value2", + }, + }, + }, + cluster: clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-1", + Labels: map[string]string{ + "app": "value1", + }, + }, + }, + roMap: map[placementv1beta1.ResourceIdentifier][]*placementv1alpha1.ResourceOverrideSnapshot{ + { + Group: utils.DeploymentGVK.Group, + Version: utils.DeploymentGVK.Version, + Kind: utils.DeploymentGVK.Kind, + Name: "deployment-name", + Namespace: "deployment-namespace", + }: { + { + Spec: placementv1alpha1.ResourceOverrideSnapshotSpec{ + OverrideSpec: placementv1alpha1.ResourceOverrideSpec{ + Policy: &placementv1alpha1.OverridePolicy{ + OverrideRules: []placementv1alpha1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{}, // matching all the clusters + OverrideType: placementv1alpha1.JSONPatchOverrideType, + JSONPatchOverrides: []placementv1alpha1.JSONPatchOverride{ + { + Operator: placementv1alpha1.JSONPatchOverrideOpReplace, + Path: "/metadata/labels/app", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s"`, placementv1alpha1.OverrideClusterNameVariable))}, + }, + { + Operator: placementv1alpha1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf("{\"app\": \"%s\", \"test\": \"nginx\"}", placementv1alpha1.OverrideClusterNameVariable))}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + wantDeployment: appsv1.Deployment{ + TypeMeta: deploymentType, + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-name", + Namespace: "deployment-namespace", + Labels: map[string]string{ + "app": "cluster-1", + "key2": "value2", + }, + Annotations: map[string]string{ + "app": "cluster-1", + "test": "nginx", + }, + }, + }, + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ 
-1971,7 +2045,7 @@ func TestApplyOverrides_namespacedScopeResource(t *testing.T) { InformerManager: &fakeInformer, } rc := resource.CreateResourceContentForTest(t, tc.deployment) - gotDeleted, err := r.applyOverrides(rc, tc.cluster, tc.croMap, tc.roMap) + gotDeleted, err := r.applyOverrides(rc, &tc.cluster, tc.croMap, tc.roMap) if gotErr, wantErr := err != nil, tc.wantErr != nil; gotErr != wantErr || !errors.Is(err, tc.wantErr) { t.Fatalf("applyOverrides() got error %v, want error %v", err, tc.wantErr) } @@ -2012,6 +2086,7 @@ func TestApplyJSONPatchOverride(t *testing.T) { name string deployment appsv1.Deployment overrides []placementv1alpha1.JSONPatchOverride + cluster *clusterv1beta1.MemberCluster wantDeployment appsv1.Deployment wantErr bool }{ @@ -2070,6 +2145,7 @@ func TestApplyJSONPatchOverride(t *testing.T) { }, }, }, + { name: "remove a label", deployment: appsv1.Deployment{ @@ -2208,12 +2284,105 @@ func TestApplyJSONPatchOverride(t *testing.T) { }, wantErr: true, }, + { + name: "typo in template variable should just be rendered as is", + deployment: appsv1.Deployment{ + TypeMeta: deploymentType, + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-name", + Namespace: "deployment-namespace", + Labels: map[string]string{ + "app": "nginx", + }, + }, + }, + overrides: []placementv1alpha1.JSONPatchOverride{ + { + Operator: placementv1alpha1.JSONPatchOverrideOpReplace, + Path: "/metadata/labels/app", + Value: apiextensionsv1.JSON{Raw: []byte(`"$CLUSTER_NAME"`)}, + }, + { + Operator: placementv1alpha1.JSONPatchOverrideOpAdd, + Path: "/metadata/labels/${Member-Cluster-Name}", + Value: apiextensionsv1.JSON{Raw: []byte(`"${CLUSTER-NAME}"`)}, + }, + }, + cluster: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-1", + }, + }, + wantDeployment: appsv1.Deployment{ + TypeMeta: deploymentType, + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-name", + Namespace: "deployment-namespace", + Labels: map[string]string{ + "app": 
"$CLUSTER_NAME", + "${Member-Cluster-Name}": "${CLUSTER-NAME}", + }, + }, + }, + }, + { + name: "multiple rules with cluster name template", + deployment: appsv1.Deployment{ + TypeMeta: deploymentType, + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-name", + Namespace: "deployment-namespace", + Labels: map[string]string{ + "app": "nginx", + }, + }, + }, + overrides: []placementv1alpha1.JSONPatchOverride{ + { + Operator: placementv1alpha1.JSONPatchOverrideOpReplace, + Path: "/metadata/labels/app", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s"`, placementv1alpha1.OverrideClusterNameVariable))}, + }, + { + Operator: placementv1alpha1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf("{\"app\": \"workload-%s\", \"test\": \"nginx\"}", placementv1alpha1.OverrideClusterNameVariable))}, + }, + }, + cluster: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-1", + }, + }, + wantDeployment: appsv1.Deployment{ + TypeMeta: deploymentType, + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-name", + Namespace: "deployment-namespace", + Labels: map[string]string{ + "app": "cluster-1", + }, + Annotations: map[string]string{ + "app": "workload-cluster-1", + "test": "nginx", + }, + }, + }, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { rc := resource.CreateResourceContentForTest(t, tc.deployment) - err := applyJSONPatchOverride(rc, tc.overrides) + cluster := tc.cluster + if cluster == nil { + cluster = &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-1", + }, + } + } + err := applyJSONPatchOverride(rc, cluster, tc.overrides) if gotErr := err != nil; gotErr != tc.wantErr { t.Fatalf("applyJSONPatchOverride() = error %v, want %v", err, tc.wantErr) } diff --git a/pkg/utils/overrider/overrider.go b/pkg/utils/overrider/overrider.go index 3521565fa..e67d8c0bb 100644 --- a/pkg/utils/overrider/overrider.go +++ 
b/pkg/utils/overrider/overrider.go @@ -172,7 +172,7 @@ func PickFromResourceMatchedOverridesForTargetCluster( croFiltered := make([]*placementv1alpha1.ClusterResourceOverrideSnapshot, 0, len(croList)) for i, cro := range croList { - matched, err := isClusterMatched(cluster, cro.Spec.OverrideSpec.Policy) + matched, err := isClusterMatched(&cluster, cro.Spec.OverrideSpec.Policy) if err != nil { klog.ErrorS(err, "Invalid clusterResourceOverride", "clusterResourceOverride", klog.KObj(cro)) return nil, nil, controller.NewUnexpectedBehaviorError(err) @@ -188,7 +188,7 @@ func PickFromResourceMatchedOverridesForTargetCluster( roFiltered := make([]*placementv1alpha1.ResourceOverrideSnapshot, 0, len(roList)) for i, ro := range roList { - matched, err := isClusterMatched(cluster, ro.Spec.OverrideSpec.Policy) + matched, err := isClusterMatched(&cluster, ro.Spec.OverrideSpec.Policy) if err != nil { klog.ErrorS(err, "Invalid resourceOverride", "resourceOverride", klog.KObj(ro)) return nil, nil, controller.NewUnexpectedBehaviorError(err) @@ -216,7 +216,7 @@ func PickFromResourceMatchedOverridesForTargetCluster( return croNames, roNames, nil } -func isClusterMatched(cluster clusterv1beta1.MemberCluster, policy *placementv1alpha1.OverridePolicy) (bool, error) { +func isClusterMatched(cluster *clusterv1beta1.MemberCluster, policy *placementv1alpha1.OverridePolicy) (bool, error) { if policy == nil { return false, errors.New("policy is nil") } @@ -233,7 +233,7 @@ func isClusterMatched(cluster clusterv1beta1.MemberCluster, policy *placementv1a } // IsClusterMatched checks if the cluster is matched with the override rules. 
-func IsClusterMatched(cluster clusterv1beta1.MemberCluster, rule placementv1alpha1.OverrideRule) (bool, error) { +func IsClusterMatched(cluster *clusterv1beta1.MemberCluster, rule placementv1alpha1.OverrideRule) (bool, error) { if rule.ClusterSelector == nil { // it means matching no member clusters return false, nil } diff --git a/pkg/utils/overrider/overrider_test.go b/pkg/utils/overrider/overrider_test.go index 8d3ead5d8..71e5033b5 100644 --- a/pkg/utils/overrider/overrider_test.go +++ b/pkg/utils/overrider/overrider_test.go @@ -1886,7 +1886,7 @@ func TestIsClusterMatched(t *testing.T) { } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - got, err := IsClusterMatched(tc.cluster, tc.rule) + got, err := IsClusterMatched(&tc.cluster, tc.rule) if err != nil { t.Fatalf("IsClusterMatched() got error %v, want nil", err) } diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index dddf87dd4..a9e3418d6 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -116,6 +116,27 @@ func workNamespaceAndConfigMapPlacedOnClusterActual(cluster *framework.Cluster) } } +func configMapPlacedOnClusterActual(cluster *framework.Cluster, wantConfigMap *corev1.ConfigMap) func() error { + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + cmName := types.NamespacedName{Namespace: workNamespaceName, Name: appConfigMapName} + return func() error { + configMap := &corev1.ConfigMap{} + if err := cluster.KubeClient.Get(ctx, cmName, configMap); err != nil { + return err + } + + if diff := cmp.Diff( + configMap, wantConfigMap, + ignoreObjectMetaAutoGeneratedFields, + ignoreObjectMetaAnnotationField, + ); diff != "" { + return fmt.Errorf("app config map diff (-got, +want): %s", diff) + } + return nil + } +} + func workNamespacePlacedOnClusterActual(cluster *framework.Cluster) func() error { workNamespaceName := 
fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) diff --git a/test/e2e/placement_apply_strategy_test.go b/test/e2e/placement_apply_strategy_test.go index 6fa3a9068..045bcb4e9 100644 --- a/test/e2e/placement_apply_strategy_test.go +++ b/test/e2e/placement_apply_strategy_test.go @@ -60,7 +60,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { // Create the CRP. strategy := &placementv1beta1.ApplyStrategy{AllowCoOwnership: true} - createCRP(crpName, strategy) + createCRPWithApplyStrategy(crpName, strategy) }) AfterAll(func() { @@ -122,7 +122,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { // Create the CRP. strategy := &placementv1beta1.ApplyStrategy{AllowCoOwnership: false} - createCRP(crpName, strategy) + createCRPWithApplyStrategy(crpName, strategy) }) AfterAll(func() { @@ -175,7 +175,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { Type: placementv1beta1.ApplyStrategyTypeServerSideApply, AllowCoOwnership: false, } - createCRP(crpName, strategy) + createCRPWithApplyStrategy(crpName, strategy) }) AfterAll(func() { @@ -233,7 +233,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { Type: placementv1beta1.ApplyStrategyTypeServerSideApply, AllowCoOwnership: false, } - createCRP(crpName, strategy) + createCRPWithApplyStrategy(crpName, strategy) }) AfterAll(func() { @@ -368,7 +368,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { ServerSideApplyConfig: &placementv1beta1.ServerSideApplyConfig{ForceConflicts: false}, AllowCoOwnership: true, } - createCRP(crpName, strategy) + createCRPWithApplyStrategy(crpName, strategy) }) AfterAll(func() { @@ -515,7 +515,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { ServerSideApplyConfig: &placementv1beta1.ServerSideApplyConfig{ForceConflicts: true}, AllowCoOwnership: true, } - createCRP(crpName, strategy) + createCRPWithApplyStrategy(crpName, 
strategy) }) AfterAll(func() { @@ -584,7 +584,7 @@ var _ = Describe("validating two CRP selecting the same resources", Ordered, fun BeforeAll(func() { for i := 0; i < 2; i++ { crpName := fmt.Sprintf(crpNameWithSubIndexTemplate, GinkgoParallelProcess(), i) - createCRP(crpName, nil) + createCRPWithApplyStrategy(crpName, nil) } }) @@ -620,7 +620,7 @@ var _ = Describe("validating two CRP selecting the same resources", Ordered, fun It("add a new CRP and selecting the same resources", func() { crpName := fmt.Sprintf(crpNameWithSubIndexTemplate, GinkgoParallelProcess(), 2) - createCRP(crpName, nil) + createCRPWithApplyStrategy(crpName, nil) }) It("should update CRP status as expected", func() { @@ -660,7 +660,7 @@ var _ = Describe("validating two CRP selecting the same resources", Ordered, fun crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) conflictCRPName := fmt.Sprintf(crpNameWithSubIndexTemplate, GinkgoParallelProcess(), 0) BeforeAll(func() { - createCRP(crpName, nil) + createCRPWithApplyStrategy(crpName, nil) }) AfterAll(func() { @@ -680,7 +680,7 @@ var _ = Describe("validating two CRP selecting the same resources", Ordered, fun It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) It("create another CRP with different apply strategy", func() { - createCRP(conflictCRPName, &placementv1beta1.ApplyStrategy{AllowCoOwnership: true}) + createCRPWithApplyStrategy(conflictCRPName, &placementv1beta1.ApplyStrategy{AllowCoOwnership: true}) }) It("should update conflicted CRP status as expected", func() { @@ -740,25 +740,6 @@ var _ = Describe("validating two CRP selecting the same resources", Ordered, fun }) }) -func createCRP(crpName string, applyStrategy *placementv1beta1.ApplyStrategy) { - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers. 
- Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: workResourceSelector(), - Strategy: placementv1beta1.RolloutStrategy{ - ApplyStrategy: applyStrategy, - }, - }, - } - By(fmt.Sprintf("creating placement %s", crpName)) - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) -} - func buildApplyConflictFailedPlacements(generation int64, cluster []string) []placementv1beta1.ResourcePlacementStatus { workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) res := make([]placementv1beta1.ResourcePlacementStatus, 0, len(cluster)) diff --git a/test/e2e/placement_cro_test.go b/test/e2e/placement_cro_test.go index 5c657f845..ab67e5dac 100644 --- a/test/e2e/placement_cro_test.go +++ b/test/e2e/placement_cro_test.go @@ -19,6 +19,9 @@ import ( placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" ) +// TODO: Add more tests to cover the negative cases that override failed, need to make sure +// the CRP status has all the information to debug the issue. + // Note that this container will run in parallel with other containers. 
var _ = Describe("creating clusterResourceOverride (selecting all clusters) to override all resources under the namespace", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) @@ -362,6 +365,81 @@ var _ = Describe("creating clusterResourceOverride with different rules for each }) }) +var _ = Describe("creating clusterResourceOverride with a templated rule with cluster name", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + croName := fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + By("creating work resources") + createWorkResources() + createCRP(crpName) + cro := &placementv1alpha1.ClusterResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: croName, + }, + Spec: placementv1alpha1.ClusterResourceOverrideSpec{ + Placement: &placementv1alpha1.PlacementRef{ + Name: crpName, // assigned CRP name + }, + ClusterResourceSelectors: workResourceSelector(), + Policy: &placementv1alpha1.OverridePolicy{ + OverrideRules: []placementv1alpha1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabelName, + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1alpha1.JSONPatchOverride{ + { + Operator: placementv1alpha1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "test-%s"}`, croTestAnnotationKey, placementv1alpha1.OverrideClusterNameVariable))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating clusterResourceOverride %s", croName)) + Expect(hubClient.Create(ctx, cro)).To(Succeed(), "Failed to create clusterResourceOverride %s", croName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s and related resources", crpName)) + 
ensureCRPAndRelatedResourcesDeletion(crpName, allMemberClusters) + + By(fmt.Sprintf("deleting clusterResourceOverride %s", croName)) + cleanupClusterResourceOverride(croName) + }) + + It("should update CRP status as expected", func() { + wantCRONames := []string{fmt.Sprintf(placementv1alpha1.OverrideSnapshotNameFmt, croName, 0)} + crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", wantCRONames, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have override annotations on the member clusters", func() { + for _, cluster := range allMemberClusters { + wantAnnotations := map[string]string{croTestAnnotationKey: fmt.Sprintf("test-%s", cluster.ClusterName)} + Expect(validateAnnotationOfWorkNamespaceOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of work namespace on %s", cluster.ClusterName) + Expect(validateOverrideAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) + } + }) +}) + var _ = Describe("creating clusterResourceOverride with incorrect path", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) croName := fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()) diff --git a/test/e2e/placement_ro_test.go b/test/e2e/placement_ro_test.go index 0a5d40283..5ec4aa785 100644 --- a/test/e2e/placement_ro_test.go +++ b/test/e2e/placement_ro_test.go @@ -28,14 +28,18 @@ var _ = Describe("creating resourceOverride (selecting all clusters) to override BeforeAll(func() { By("creating work resources") createWorkResources() - - // Create the ro before crp so that the observed resource index is predictable. 
+ // Create the CRP. + createCRP(crpName) + // Create the ro. ro := &placementv1alpha1.ResourceOverride{ ObjectMeta: metav1.ObjectMeta{ Name: roName, Namespace: roNamespace, }, Spec: placementv1alpha1.ResourceOverrideSpec{ + Placement: &placementv1alpha1.PlacementRef{ + Name: crpName, // assigned CRP name + }, ResourceSelectors: configMapSelector(), Policy: &placementv1alpha1.OverridePolicy{ OverrideRules: []placementv1alpha1.OverrideRule{ @@ -57,21 +61,6 @@ var _ = Describe("creating resourceOverride (selecting all clusters) to override } By(fmt.Sprintf("creating resourceOverride %s", roName)) Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) - - // Create the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers. - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: workResourceSelector(), - }, - } - By(fmt.Sprintf("creating placement %s", crpName)) - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) }) AfterAll(func() { @@ -192,19 +181,7 @@ var _ = Describe("creating resourceOverride with multiple jsonPatchOverrides to Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) // Create the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers. 
- Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: workResourceSelector(), - }, - } - By(fmt.Sprintf("creating placement %s", crpName)) - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) + createCRP(crpName) }) AfterAll(func() { @@ -262,14 +239,18 @@ var _ = Describe("creating resourceOverride with different rules for each cluste BeforeAll(func() { By("creating work resources") createWorkResources() - - // Create the ro before crp so that the observed resource index is predictable. + // Create the CRP. + createCRP(crpName) + // Create the ro. ro := &placementv1alpha1.ResourceOverride{ ObjectMeta: metav1.ObjectMeta{ Name: roName, Namespace: roNamespace, }, Spec: placementv1alpha1.ResourceOverrideSpec{ + Placement: &placementv1alpha1.PlacementRef{ + Name: crpName, // assigned CRP name + }, ResourceSelectors: configMapSelector(), Policy: &placementv1alpha1.OverridePolicy{ OverrideRules: []placementv1alpha1.OverrideRule{ @@ -333,21 +314,6 @@ var _ = Describe("creating resourceOverride with different rules for each cluste } By(fmt.Sprintf("creating resourceOverride %s", roName)) Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) - - // Create the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers. 
- Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: workResourceSelector(), - }, - } - By(fmt.Sprintf("creating placement %s", crpName)) - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) }) AfterAll(func() { @@ -442,19 +408,7 @@ var _ = Describe("creating resourceOverride and clusterResourceOverride, resourc Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) // Create the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers. - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: workResourceSelector(), - }, - } - By(fmt.Sprintf("creating placement %s", crpName)) - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) + createCRP(crpName) }) AfterAll(func() { @@ -502,14 +456,16 @@ var _ = Describe("creating resourceOverride with incorrect path", Ordered, func( BeforeAll(func() { By("creating work resources") createWorkResources() - - // Create the ro before crp so that the observed resource index is predictable. + // Create the ro. 
ro := &placementv1alpha1.ResourceOverride{ ObjectMeta: metav1.ObjectMeta{ Name: roName, Namespace: roNamespace, }, Spec: placementv1alpha1.ResourceOverrideSpec{ + Placement: &placementv1alpha1.PlacementRef{ + Name: crpName, // assigned CRP name + }, ResourceSelectors: configMapSelector(), Policy: &placementv1alpha1.OverridePolicy{ OverrideRules: []placementv1alpha1.OverrideRule{ @@ -532,20 +488,8 @@ var _ = Describe("creating resourceOverride with incorrect path", Ordered, func( By(fmt.Sprintf("creating resourceOverride %s", roName)) Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) - // Create the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers. - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: workResourceSelector(), - }, - } - By(fmt.Sprintf("creating placement %s", crpName)) - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) + // Create the CRP later so that failed override won't block the rollout + createCRP(crpName) }) AfterAll(func() { @@ -576,14 +520,18 @@ var _ = Describe("creating resourceOverride and resource becomes invalid after o BeforeAll(func() { By("creating work resources") createWorkResources() - - // Create the ro before crp so that the observed resource index is predictable. + // Create the CRP. + createCRP(crpName) + // Create the ro. 
ro := &placementv1alpha1.ResourceOverride{ ObjectMeta: metav1.ObjectMeta{ Name: roName, Namespace: roNamespace, }, Spec: placementv1alpha1.ResourceOverrideSpec{ + Placement: &placementv1alpha1.PlacementRef{ + Name: crpName, // assigned CRP name + }, ResourceSelectors: configMapSelector(), Policy: &placementv1alpha1.OverridePolicy{ OverrideRules: []placementv1alpha1.OverrideRule{ @@ -605,21 +553,84 @@ var _ = Describe("creating resourceOverride and resource becomes invalid after o } By(fmt.Sprintf("creating resourceOverride %s", roName)) Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + }) - // Create the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{ + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s and related resources", crpName)) + ensureCRPAndRelatedResourcesDeletion(crpName, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, roNamespace) + }) + + It("should update CRP status as expected", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: roNamespace, Name: fmt.Sprintf(placementv1alpha1.OverrideSnapshotNameFmt, roName, 0)}, + } + crpStatusUpdatedActual := crpStatusWithWorkSynchronizedUpdatedFailedActual(workResourceIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + // This check will ignore the annotation of resources. 
+ It("should not place the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) +}) + +var _ = Describe("creating resourceOverride with a templated rules with cluster name to override configMap", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + roNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + By("creating work resources") + createWorkResources() + + // Create the ro before crp so that the observed resource index is predictable. + ro := &placementv1alpha1.ResourceOverride{ ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers. - Finalizers: []string{customDeletionBlockerFinalizer}, + Name: roName, + Namespace: roNamespace, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: workResourceSelector(), + Spec: placementv1alpha1.ResourceOverrideSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1alpha1.OverridePolicy{ + OverrideRules: []placementv1alpha1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabelName, + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1alpha1.JSONPatchOverride{ + { + Operator: placementv1alpha1.JSONPatchOverrideOpReplace, + Path: "/data/data", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s"`, placementv1alpha1.OverrideClusterNameVariable))}, + }, + { + Operator: placementv1alpha1.JSONPatchOverrideOpAdd, + Path: "/data/newField", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"new-%s"`, placementv1alpha1.OverrideClusterNameVariable))}, + }, 
+ }, + }, + }, + }, }, } - By(fmt.Sprintf("creating placement %s", crpName)) - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + + // Create the CRP. + createCRP(crpName) }) AfterAll(func() { @@ -634,12 +645,28 @@ var _ = Describe("creating resourceOverride and resource becomes invalid after o wantRONames := []placementv1beta1.NamespacedName{ {Namespace: roNamespace, Name: fmt.Sprintf(placementv1alpha1.OverrideSnapshotNameFmt, roName, 0)}, } - crpStatusUpdatedActual := crpStatusWithWorkSynchronizedUpdatedFailedActual(workResourceIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) }) - // This check will ignore the annotation of resources. 
- It("should not place the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) + It("should have override configMap on the member clusters", func() { + cmName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + cmNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + for _, cluster := range allMemberClusters { + wantConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + Namespace: cmNamespace, + }, + Data: map[string]string{ + "data": cluster.ClusterName, + "newField": fmt.Sprintf("new-%s", cluster.ClusterName), + }, + } + configMapActual := configMapPlacedOnClusterActual(cluster, wantConfigMap) + Eventually(configMapActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update configmap %s data as expected", cmName) + } + }) }) var _ = Describe("creating resourceOverride with delete configMap", Ordered, func() { @@ -699,19 +726,7 @@ var _ = Describe("creating resourceOverride with delete configMap", Ordered, fun Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) // Create the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers. 
- Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: workResourceSelector(), - }, - } - By(fmt.Sprintf("creating placement %s", crpName)) - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) + createCRP(crpName) }) AfterAll(func() { diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index 99f33e997..c334848f1 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -1212,3 +1212,28 @@ func checkIfStatusErrorWithMessage(err error, errorMsg string) error { } return fmt.Errorf("error message %s not found in error %w", errorMsg, err) } + +// createCRPWithApplyStrategy creates a ClusterResourcePlacement with the given name and apply strategy. +func createCRPWithApplyStrategy(crpName string, applyStrategy *placementv1beta1.ApplyStrategy) { + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: workResourceSelector(), + }, + } + if applyStrategy != nil { + crp.Spec.Strategy.ApplyStrategy = applyStrategy + } + By(fmt.Sprintf("creating placement %s", crpName)) + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) +} + +// createCRP creates a ClusterResourcePlacement with the given name. +func createCRP(crpName string) { + createCRPWithApplyStrategy(crpName, nil) +}