diff --git a/.github/workflows/chart.yml b/.github/workflows/chart.yml index b6664c7c6..9d16e834c 100644 --- a/.github/workflows/chart.yml +++ b/.github/workflows/chart.yml @@ -18,7 +18,7 @@ jobs: deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: submodules: true fetch-depth: 0 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d2f899c22..f9697cf00 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ on: paths-ignore: [docs/**, "**.md", "**.mdx", "**.png", "**.jpg"] env: - GO_VERSION: '1.24.4' + GO_VERSION: '1.24.6' jobs: detect-noop: @@ -40,7 +40,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Set up Ginkgo CLI run: | @@ -70,7 +70,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Install Kind # Before updating the kind version to use, verify that the current kind image @@ -89,12 +89,16 @@ jobs: strategy: fail-fast: false matrix: - customized-settings: [default, joinleave, custom] + customized-settings: [default, resourceplacement, joinleave, custom] include: - customized-settings: default # to shorten the test duration, set the resource snapshot creation interval to 0 resource-snapshot-creation-minimum-interval: 0m resource-changes-collection-duration: 0m + - customized-settings: resourceplacement + # to shorten the test duration, set the resource snapshot creation interval to 0 + resource-snapshot-creation-minimum-interval: 0m + resource-changes-collection-duration: 0m - customized-settings: joinleave # to shorten the test duration, set the resource snapshot creation interval to 0 resource-snapshot-creation-minimum-interval: 0m @@ -121,7 +125,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Install Ginkgo CLI run: | @@ -136,7 +140,9 @@ jobs: - name: Run e2e tests run: | if [ "${{ matrix.customized-settings }}" = "default" ]; then - make e2e-tests LABEL_FILTER="!custom && !joinleave" + make e2e-tests LABEL_FILTER="!custom && !joinleave && !resourceplacement" + elif [ "${{ matrix.customized-settings }}" = "resourceplacement" ]; then + make e2e-tests LABEL_FILTER="!custom && resourceplacement" elif [ "${{ matrix.customized-settings }}" = "joinleave" ]; then make e2e-tests LABEL_FILTER="!custom && joinleave" else diff --git a/.github/workflows/code-lint.yml b/.github/workflows/code-lint.yml index 92d14e2a6..3cfabf9f6 100644 --- a/.github/workflows/code-lint.yml +++ b/.github/workflows/code-lint.yml @@ -14,7 +14,7 @@ on: env: # Common versions - GO_VERSION: '1.24.4' + GO_VERSION: '1.24.6' jobs: @@ -43,7 +43,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: submodules: true @@ -64,7 +64,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: golangci-lint run: make lint diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 6563883d1..fb98847ad 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -38,7 +38,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: 
actions/checkout@v5 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index df3664f5e..207b0b0da 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -16,7 +16,7 @@ jobs: with: egress-policy: audit - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@ff7abcd0c3c05ccf6adc123a8cd1fd4fb30fb493 # v4.1.7 - uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # master with: check_filenames: true diff --git a/.github/workflows/markdown-lint.yml b/.github/workflows/markdown-lint.yml index e65a4999c..337f8be6a 100644 --- a/.github/workflows/markdown-lint.yml +++ b/.github/workflows/markdown-lint.yml @@ -10,7 +10,7 @@ jobs: markdown-link-check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: tcort/github-action-markdown-link-check@v1 with: # this will only show errors in the output diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index ca5094c18..6e15e88fc 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -18,7 +18,7 @@ env: MEMBER_AGENT_IMAGE_NAME: member-agent REFRESH_TOKEN_IMAGE_NAME: refresh-token - GO_VERSION: '1.24.4' + GO_VERSION: '1.24.6' jobs: export-registry: @@ -44,10 +44,10 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Login to ${{ env.REGISTRY }} - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} diff --git a/.github/workflows/upgrade.yml b/.github/workflows/upgrade.yml index 925d17123..d040275e7 100644 --- a/.github/workflows/upgrade.yml +++ b/.github/workflows/upgrade.yml @@ -17,7 +17,7 @@ on: paths-ignore: [docs/**, "**.md", "**.mdx", "**.png", "**.jpg"] env: - GO_VERSION: '1.24.4' + GO_VERSION: '1.24.6' jobs: detect-noop: @@ -44,7 +44,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: # Fetch the history of all branches and tags. # This is needed for the test suite to switch between releases. @@ -127,7 +127,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: # Fetch the history of all branches and tags. # This is needed for the test suite to switch between releases. @@ -210,7 +210,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: # Fetch the history of all branches and tags. # This is needed for the test suite to switch between releases. diff --git a/.golangci.yml b/.golangci.yml index 43b16e201..e0919ecc4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,6 +1,6 @@ run: timeout: 15m - go: '1.24.4' + go: '1.24.6' linters-settings: stylecheck: diff --git a/CLAUDE.md b/CLAUDE.md index 7833c61ec..0f60520ec 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -159,11 +159,16 @@ cmd/memberagent/ # Member agent main and setup ## Testing Patterns ### Unit Tests -- Use `testify` for assertions +- Avoid the use of ‘assert’ libraries. 
- Controllers use `envtest` for integration testing with real etcd - Mock external dependencies with `gomock` - Unit test files: `_test.go` in same directory - Table-driven test style preferred +- Use `cmp.Equal` for equality comparison and `cmp.Diff` to obtain a human-readable diff between objects. +- Test failure messages should print the actual value that the function returned before the wanted value. A usual format is `YourFunc(%v) = %v, want %v` (see the illustrative test sketch below). +- If your function returns a struct, don't write test code that performs an individual comparison for each field of the struct. Instead, construct the struct that you want the function to return, and compare it in one shot using diffs or deep comparisons. The same rule applies to arrays and maps. +- If your struct needs to be compared for approximate equality or some other kind of semantic equality, or it contains fields that cannot be compared for equality (e.g. if one of the fields is an `io.Reader`), tweaking a `cmp.Diff` or `cmp.Equal` comparison with `cmpopts` options such as `cmpopts.IgnoreInterfaces` may meet your needs; otherwise, fall back to whatever comparison works. +- If your function returns multiple values, you don't need to wrap them in a struct before comparing them. Just compare the return values individually and print them. ### Integration Tests - Located in `test/integration/` and `test/scheduler/` @@ -181,6 +186,9 @@ cmd/memberagent/ # Member agent main and setup ### Test Coding Style - Use `want` or `wanted` instead of `expect` or `expected` when creating the desired state +- Comments that are complete sentences should be capitalized and punctuated like standard English sentences. (As an exception, it is okay to begin a sentence with an uncapitalized identifier name if it is otherwise clear. Such cases are probably best done only at the beginning of a paragraph.) +- Comments that are sentence fragments have no such requirements for punctuation or capitalization. +- Documentation comments should always be complete sentences, and as such should always be capitalized and punctuated. Simple end-of-line comments (especially for struct fields) can be simple phrases that assume the field name is the subject. ## Key Patterns diff --git a/Makefile b/Makefile index ce0320121..022c7956f 100644 --- a/Makefile +++ b/Makefile @@ -139,13 +139,14 @@ test: manifests generate fmt vet local-unit-test integration-test ## Run tests. ## ## workaround to bypass the pkg/controllers/workv1alpha1 tests failure +## rollout controller tests need a bit longer to complete, so we increase the timeout ## .PHONY: local-unit-test local-unit-test: $(ENVTEST) ## Run tests. export CGO_ENABLED=1 && \ export KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" && \ go test ./pkg/controllers/workv1alpha1 -race -coverprofile=ut-coverage.xml -covermode=atomic -v && \ go test `go list ./pkg/... ./cmd/... | grep -v pkg/controllers/workv1alpha1` -race -coverpkg=./... -coverprofile=ut-coverage.xml -covermode=atomic -v -timeout=20m .PHONY: integration-test integration-test: $(ENVTEST) ## Run tests.
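To make the cmp-based comparison guidance added to CLAUDE.md above concrete, here is a minimal table-driven test sketch; the `endpoint` type and `parseEndpoint` function are invented purely for illustration (they are not part of this repository). The pattern shown is whole-struct comparison via `cmp.Diff`, with the actual value printed before the wanted one.

```go
package example

import (
	"strconv"
	"strings"
	"testing"

	"github.com/google/go-cmp/cmp"
)

// endpoint and parseEndpoint are hypothetical; they exist only to demonstrate
// the comparison and failure-message style described above.
type endpoint struct {
	Host string
	Port int
}

func parseEndpoint(s string) endpoint {
	host, portStr, _ := strings.Cut(s, ":")
	port, _ := strconv.Atoi(portStr)
	return endpoint{Host: host, Port: port}
}

func TestParseEndpoint(t *testing.T) {
	tests := []struct {
		name  string
		input string
		want  endpoint
	}{
		{
			name:  "host with port",
			input: "example.com:443",
			want:  endpoint{Host: "example.com", Port: 443},
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			got := parseEndpoint(tc.input)
			// Compare the whole struct in one shot rather than field by field,
			// and print the actual value before the wanted value.
			if diff := cmp.Diff(tc.want, got); diff != "" {
				t.Errorf("parseEndpoint(%q) = %v, want %v, diff (-want, +got):\n%s", tc.input, got, tc.want, diff)
			}
		})
	}
}
```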
diff --git a/apis/placement/v1/override_types.go b/apis/placement/v1/override_types.go index e96c3ddad..cf3054f91 100644 --- a/apis/placement/v1/override_types.go +++ b/apis/placement/v1/override_types.go @@ -42,11 +42,13 @@ type ClusterResourceOverride struct { // The ClusterResourceOverride create or update will fail when the resource has been selected by the existing ClusterResourceOverride. // If the resource is selected by both ClusterResourceOverride and ResourceOverride, ResourceOverride will win when resolving // conflicts. +// +kubebuilder:validation:XValidation:rule="(has(oldSelf.placement) && has(self.placement) && oldSelf.placement == self.placement) || (!has(oldSelf.placement) && !has(self.placement))",message="The placement field is immutable" type ClusterResourceOverrideSpec struct { // Placement defines whether the override is applied to a specific placement or not. // If set, the override will trigger the placement rollout immediately when the rollout strategy type is RollingUpdate. // Otherwise, it will be applied to the next rollout. // The recommended way is to set the placement so that the override can be rolled out immediately. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="The placement field is immutable" // +optional Placement *PlacementRef `json:"placement,omitempty"` @@ -66,12 +68,32 @@ type ClusterResourceOverrideSpec struct { Policy *OverridePolicy `json:"policy"` } +// ResourceScope defines the scope of placement reference. +type ResourceScope string + +const ( + // ClusterScoped indicates placement is cluster-scoped. + ClusterScoped ResourceScope = "Cluster" + + // NamespaceScoped indicates placement is namespace-scoped. + NamespaceScoped ResourceScope = "Namespaced" +) + // PlacementRef is the reference to a placement. // For now, we only support ClusterResourcePlacement. type PlacementRef struct { // Name is the reference to the name of placement. // +required + Name string `json:"name"` + // Scope defines the scope of the placement. + // A clusterResourceOverride can only reference a clusterResourcePlacement (cluster-scoped), + // and a resourceOverride can reference either a clusterResourcePlacement or resourcePlacement (namespaced). + // The referenced resourcePlacement must be in the same namespace as the resourceOverride. + // +kubebuilder:validation:Enum=Cluster;Namespaced + // +kubebuilder:default=Cluster + // +optional + Scope ResourceScope `json:"scope,omitempty"` } // OverridePolicy defines how to override the selected resources on the target clusters. @@ -144,6 +166,7 @@ type ResourceOverride struct { // The ResourceOverride create or update will fail when the resource has been selected by the existing ResourceOverride. // If the resource is selected by both ClusterResourceOverride and ResourceOverride, ResourceOverride will win when resolving // conflicts. +// +kubebuilder:validation:XValidation:rule="(has(oldSelf.placement) && has(self.placement) && oldSelf.placement == self.placement) || (!has(oldSelf.placement) && !has(self.placement))",message="The placement field is immutable" type ResourceOverrideSpec struct { // Placement defines whether the override is applied to a specific placement or not. // If set, the override will trigger the placement rollout immediately when the rollout strategy type is RollingUpdate. 
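For orientation, here is a hedged Go sketch of how the new `PlacementRef.Scope` field and the placement-immutability rules added above could be used. The import path `go.goms.io/fleet/apis/placement/v1` is an assumption based on the module layout seen elsewhere in this diff, and the placement name "example-crp" is hypothetical; this is an illustration, not code from this PR.

```go
package main

import (
	"fmt"

	// Assumed import path for the apis/placement/v1 package touched above.
	placementv1 "go.goms.io/fleet/apis/placement/v1"
)

func main() {
	// Pin a ClusterResourceOverride to a specific cluster-scoped placement.
	// Scope defaults to "Cluster"; a ResourceOverride may instead reference a
	// namespaced ResourcePlacement in its own namespace by setting Scope to "Namespaced".
	spec := placementv1.ClusterResourceOverrideSpec{
		Placement: &placementv1.PlacementRef{
			Name:  "example-crp", // hypothetical placement name
			Scope: placementv1.ClusterScoped,
		},
		// The required selector and policy fields are omitted here for brevity.
	}

	// Per the new CEL XValidation rules, the placement reference is immutable:
	// once the override is created with (or without) spec.placement, later updates
	// cannot add, remove, or change it.
	fmt.Printf("override pinned to placement %q (scope: %s)\n", spec.Placement.Name, spec.Placement.Scope)
}
```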
diff --git a/apis/placement/v1alpha1/override_types.go b/apis/placement/v1alpha1/override_types.go index 0dc872fc5..91d381104 100644 --- a/apis/placement/v1alpha1/override_types.go +++ b/apis/placement/v1alpha1/override_types.go @@ -62,7 +62,7 @@ type ClusterResourceOverrideSpec struct { // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=20 // +required - ClusterResourceSelectors []placementv1beta1.ClusterResourceSelector `json:"clusterResourceSelectors"` + ClusterResourceSelectors []placementv1beta1.ResourceSelectorTerm `json:"clusterResourceSelectors"` // Policy defines how to override the selected resources on the target clusters. // +required diff --git a/apis/placement/v1alpha1/zz_generated.deepcopy.go b/apis/placement/v1alpha1/zz_generated.deepcopy.go index 43ddef819..ec4b45d86 100644 --- a/apis/placement/v1alpha1/zz_generated.deepcopy.go +++ b/apis/placement/v1alpha1/zz_generated.deepcopy.go @@ -309,7 +309,7 @@ func (in *ClusterResourceOverrideSpec) DeepCopyInto(out *ClusterResourceOverride } if in.ClusterResourceSelectors != nil { in, out := &in.ClusterResourceSelectors, &out.ClusterResourceSelectors - *out = make([]v1beta1.ClusterResourceSelector, len(*in)) + *out = make([]v1beta1.ResourceSelectorTerm, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/apis/placement/v1beta1/binding_types.go b/apis/placement/v1beta1/binding_types.go index 2de759b69..f12b36605 100644 --- a/apis/placement/v1beta1/binding_types.go +++ b/apis/placement/v1beta1/binding_types.go @@ -28,7 +28,7 @@ const ( // SchedulerBindingCleanupFinalizer is a finalizer added to bindings to ensure we can look up the // corresponding CRP name for deleting bindings to trigger a new scheduling cycle. // TODO: migrate the finalizer to the new name "scheduler-binding-cleanup" in the future. - SchedulerBindingCleanupFinalizer = fleetPrefix + "scheduler-crb-cleanup" + SchedulerBindingCleanupFinalizer = FleetPrefix + "scheduler-crb-cleanup" ) // make sure the BindingObj and BindingObjList interfaces are implemented by the diff --git a/apis/placement/v1beta1/clusterresourceplacement_types.go b/apis/placement/v1beta1/clusterresourceplacement_types.go index 7c8f53f58..98209fe78 100644 --- a/apis/placement/v1beta1/clusterresourceplacement_types.go +++ b/apis/placement/v1beta1/clusterresourceplacement_types.go @@ -29,11 +29,11 @@ import ( const ( // PlacementCleanupFinalizer is a finalizer added by the placement controller to all placement objects, to make sure // that the placement controller can react to placement object deletions if necessary. - PlacementCleanupFinalizer = fleetPrefix + "crp-cleanup" + PlacementCleanupFinalizer = FleetPrefix + "crp-cleanup" // SchedulerCleanupFinalizer is a finalizer added by the scheduler to placement objects, to make sure // that all bindings derived from a placement object can be cleaned up after the placement object is deleted. - SchedulerCleanupFinalizer = fleetPrefix + "scheduler-cleanup" + SchedulerCleanupFinalizer = FleetPrefix + "scheduler-cleanup" ) // make sure the PlacementObj and PlacementObjList interfaces are implemented by the @@ -114,6 +114,8 @@ type ClusterResourcePlacement struct { // The desired state of ClusterResourcePlacement. 
// +kubebuilder:validation:Required // +kubebuilder:validation:XValidation:rule="!((has(oldSelf.policy) && !has(self.policy)) || (has(oldSelf.policy) && has(self.policy) && has(self.policy.placementType) && has(oldSelf.policy.placementType) && self.policy.placementType != oldSelf.policy.placementType))",message="placement type is immutable" + // +kubebuilder:validation:XValidation:rule="!(self.statusReportingScope == 'NamespaceAccessible' && size(self.resourceSelectors.filter(x, x.kind == 'Namespace')) != 1)",message="when statusReportingScope is NamespaceAccessible, exactly one resourceSelector with kind 'Namespace' is required" + // +kubebuilder:validation:XValidation:rule="!has(oldSelf.statusReportingScope) || self.statusReportingScope == oldSelf.statusReportingScope",message="statusReportingScope is immutable" Spec PlacementSpec `json:"spec"` // The observed status of ClusterResourcePlacement. @@ -122,14 +124,13 @@ } // PlacementSpec defines the desired state of ClusterResourcePlacement and ResourcePlacement. -// +kubebuilder:validation:XValidation:rule="!(self.statusReportingScope == 'NamespaceAccessible' && size(self.resourceSelectors.filter(x, x.kind == 'Namespace')) != 1)",message="when statusReportingScope is NamespaceAccessible, exactly one resourceSelector with kind 'Namespace' is required" type PlacementSpec struct { // ResourceSelectors is an array of selectors used to select cluster scoped resources. The selectors are `ORed`. // You can have 1-100 selectors. // +kubebuilder:validation:Required // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=100 - ResourceSelectors []ClusterResourceSelector `json:"resourceSelectors"` + ResourceSelectors []ResourceSelectorTerm `json:"resourceSelectors"` // Policy defines how to select member clusters to place the selected resources. // If unspecified, all the joined member clusters are selected. @@ -170,33 +171,31 @@ func (p *PlacementSpec) Tolerations() []Toleration { return nil } -// TODO: rename this to ResourceSelectorTerm - -// ClusterResourceSelector is used to select resources as the target resources to be placed. +// ResourceSelectorTerm is used to select resources as the target resources to be placed. // All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. -type ClusterResourceSelector struct { - // Group name of the cluster-scoped resource. +type ResourceSelectorTerm struct { + // Group name of the resource to be selected. // Use an empty string to select resources under the core API group (e.g., namespaces). // +kubebuilder:validation:Required Group string `json:"group"` - // Version of the cluster-scoped resource. + // Version of the resource to be selected. // +kubebuilder:validation:Required Version string `json:"version"` - // Kind of the cluster-scoped resource. + // Kind of the resource to be selected. // Note: When `Kind` is `namespace`, by default ALL the resources under the selected namespaces are selected. // +kubebuilder:validation:Required Kind string `json:"kind"` // You can only specify at most one of the following two fields: Name and LabelSelector. - // If none is specified, all the cluster-scoped resources with the given group, version and kind are selected. + // If none is specified, all the resources with the given group, version and kind are selected. - // Name of the cluster-scoped resource. + // Name of the resource to be selected.
// +kubebuilder:validation:Optional Name string `json:"name,omitempty"` - // A label query over all the cluster-scoped resources. Resources matching the query are selected. + // A label query over all the resources to be selected. Resources matching the query are selected. // Note that namespace-scoped resources can't be selected even if they match the query. // +kubebuilder:validation:Optional LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` @@ -1523,7 +1522,7 @@ func (m *ClusterResourcePlacement) SetPlacementStatus(status PlacementStatus) { const ( // ResourcePlacementCleanupFinalizer is a finalizer added by the RP controller to all RPs, to make sure // that the RP controller can react to RP deletions if necessary. - ResourcePlacementCleanupFinalizer = fleetPrefix + "rp-cleanup" + ResourcePlacementCleanupFinalizer = FleetPrefix + "rp-cleanup" ) // +genclient @@ -1614,14 +1613,15 @@ func (rpl *ResourcePlacementList) GetPlacementObjs() []PlacementObj { // +genclient:Namespaced // +kubebuilder:object:root=true // +kubebuilder:resource:scope="Namespaced",shortName=crps,categories={fleet,fleet-placement} -// +kubebuilder:subresource:status // +kubebuilder:storageversion -// +kubebuilder:printcolumn:JSONPath=`.status.observedResourceIndex`,name="Resource-Index",type=string +// +kubebuilder:printcolumn:JSONPath=`.sourceStatus.observedResourceIndex`,name="Resource-Index",type=string +// +kubebuilder:printcolumn:JSONPath=`.lastUpdatedTime`,name="Last-Updated",type=string // +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterResourcePlacementStatus is a namespaced resource that mirrors the PlacementStatus of a corresponding // ClusterResourcePlacement object. This allows namespace-scoped access to cluster-scoped placement status. +// The LastUpdatedTime field is updated whenever the CRPS object is updated. // // This object will be created within the target namespace that contains resources being managed by the CRP. // When multiple ClusterResourcePlacements target the same namespace, each ClusterResourcePlacementStatus within that @@ -1633,11 +1633,16 @@ type ClusterResourcePlacementStatus struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - // The observed status of ClusterResourcePlacementStatus which mirrors the PlacementStatus of the corresponding ClusterResourcePlacement. - // This includes information about the namespace and resources within that namespace that are being managed by the placement. - // The status will show placement details for resources selected by the ClusterResourcePlacement's ResourceSelectors. - // +kubebuilder:validation:Optional - Status PlacementStatus `json:"status,omitempty"` + // Source status copied from the corresponding ClusterResourcePlacement. + // +kubebuilder:validation:Required + PlacementStatus `json:"sourceStatus,omitempty"` + + // LastUpdatedTime is the timestamp when this CRPS object was last updated. + // This field is set to the current time whenever the CRPS object is created or modified. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=date-time + LastUpdatedTime metav1.Time `json:"lastUpdatedTime,omitempty"` } // ClusterResourcePlacementStatusList contains a list of ClusterResourcePlacementStatus.
@@ -1649,18 +1654,6 @@ type ClusterResourcePlacementStatusList struct { Items []ClusterResourcePlacementStatus `json:"items"` } -// SetConditions sets the conditions of the ClusterResourcePlacementStatus. -func (m *ClusterResourcePlacementStatus) SetConditions(conditions ...metav1.Condition) { - for _, c := range conditions { - meta.SetStatusCondition(&m.Status.Conditions, c) - } -} - -// GetCondition returns the condition of the ClusterResourcePlacementStatus objects. -func (m *ClusterResourcePlacementStatus) GetCondition(conditionType string) *metav1.Condition { - return meta.FindStatusCondition(m.Status.Conditions, conditionType) -} - func init() { SchemeBuilder.Register(&ClusterResourcePlacement{}, &ClusterResourcePlacementList{}, &ResourcePlacement{}, &ResourcePlacementList{}, &ClusterResourcePlacementStatus{}, &ClusterResourcePlacementStatusList{}) } diff --git a/apis/placement/v1beta1/commons.go b/apis/placement/v1beta1/commons.go index 49bc1683a..479217dcb 100644 --- a/apis/placement/v1beta1/commons.go +++ b/apis/placement/v1beta1/commons.go @@ -58,32 +58,32 @@ const ( ) const ( - // fleetPrefix is the prefix used for official fleet labels/annotations. + // FleetPrefix is the prefix used for official fleet labels/annotations. // Unprefixed labels/annotations are reserved for end-users // See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#label-selector-and-annotation-conventions - fleetPrefix = "kubernetes-fleet.io/" + FleetPrefix = "kubernetes-fleet.io/" // MemberClusterFinalizer is used to make sure that we handle gc of all the member cluster resources on the hub cluster. - MemberClusterFinalizer = fleetPrefix + "membercluster-finalizer" + MemberClusterFinalizer = FleetPrefix + "membercluster-finalizer" // WorkFinalizer is used by the work generator to make sure that the binding is not deleted until the work objects // it generates are all deleted, or used by the work controller to make sure the work has been deleted in the member // cluster. - WorkFinalizer = fleetPrefix + "work-cleanup" + WorkFinalizer = FleetPrefix + "work-cleanup" // ClusterResourcePlacementStatusCleanupFinalizer is a finalizer added by the controller to all ClusterResourcePlacementStatus objects, to make sure // that the controller can react to ClusterResourcePlacementStatus deletions if necessary. - ClusterResourcePlacementStatusCleanupFinalizer = fleetPrefix + "cluster-resource-placement-status-cleanup" + ClusterResourcePlacementStatusCleanupFinalizer = FleetPrefix + "cluster-resource-placement-status-cleanup" // PlacementTrackingLabel points to the placement that creates this resource binding. // TODO: migrate the label content to "parent-placement" to work with both the PR and CRP - PlacementTrackingLabel = fleetPrefix + "parent-CRP" + PlacementTrackingLabel = FleetPrefix + "parent-CRP" // IsLatestSnapshotLabel indicates if the snapshot is the latest one. - IsLatestSnapshotLabel = fleetPrefix + "is-latest-snapshot" + IsLatestSnapshotLabel = FleetPrefix + "is-latest-snapshot" // FleetResourceLabelKey indicates that the resource is a fleet resource. - FleetResourceLabelKey = fleetPrefix + "is-fleet-resource" + FleetResourceLabelKey = FleetPrefix + "is-fleet-resource" // FirstWorkNameFmt is the format of the name of the work generated with the first resource snapshot. // The name of the first work is {crpName}-work. 
@@ -105,59 +105,59 @@ const ( WorkNameWithEnvelopeCRFmt = "%s-envelope-%s" // ParentClusterResourceOverrideSnapshotHashAnnotation is the annotation to work that contains the hash of the parent cluster resource override snapshot list. - ParentClusterResourceOverrideSnapshotHashAnnotation = fleetPrefix + "parent-cluster-resource-override-snapshot-hash" + ParentClusterResourceOverrideSnapshotHashAnnotation = FleetPrefix + "parent-cluster-resource-override-snapshot-hash" // ParentResourceOverrideSnapshotHashAnnotation is the annotation to work that contains the hash of the parent resource override snapshot list. - ParentResourceOverrideSnapshotHashAnnotation = fleetPrefix + "parent-resource-override-snapshot-hash" + ParentResourceOverrideSnapshotHashAnnotation = FleetPrefix + "parent-resource-override-snapshot-hash" // ParentResourceSnapshotNameAnnotation is the annotation applied to work that contains the name of the master resource snapshot that generates the work. - ParentResourceSnapshotNameAnnotation = fleetPrefix + "parent-resource-snapshot-name" + ParentResourceSnapshotNameAnnotation = FleetPrefix + "parent-resource-snapshot-name" // ParentResourceSnapshotIndexLabel is the label applied to work that contains the index of the resource snapshot that generates the work. - ParentResourceSnapshotIndexLabel = fleetPrefix + "parent-resource-snapshot-index" + ParentResourceSnapshotIndexLabel = FleetPrefix + "parent-resource-snapshot-index" // ParentBindingLabel is the label applied to work that contains the name of the binding that generates the work. - ParentBindingLabel = fleetPrefix + "parent-resource-binding" + ParentBindingLabel = FleetPrefix + "parent-resource-binding" // ParentNamespaceLabel is the label applied to work that contains the namespace of the binding that generates the work. - ParentNamespaceLabel = fleetPrefix + "parent-placement-namespace" + ParentNamespaceLabel = FleetPrefix + "parent-placement-namespace" // CRPGenerationAnnotation indicates the generation of the placement from which an object is derived or last updated. // TODO: rename this variable - CRPGenerationAnnotation = fleetPrefix + "CRP-generation" + CRPGenerationAnnotation = FleetPrefix + "CRP-generation" // EnvelopeConfigMapAnnotation indicates the configmap is an envelope configmap containing resources we need to apply to the member cluster instead of the configMap itself. - EnvelopeConfigMapAnnotation = fleetPrefix + "envelope-configmap" + EnvelopeConfigMapAnnotation = FleetPrefix + "envelope-configmap" // EnvelopeTypeLabel marks the work object as generated from an envelope object. // The value of the annotation is the type of the envelope object. - EnvelopeTypeLabel = fleetPrefix + "envelope-work" + EnvelopeTypeLabel = FleetPrefix + "envelope-work" // EnvelopeNamespaceLabel contains the namespace of the envelope object that the work is generated from. - EnvelopeNamespaceLabel = fleetPrefix + "envelope-namespace" + EnvelopeNamespaceLabel = FleetPrefix + "envelope-namespace" // EnvelopeNameLabel contains the name of the envelope object that the work is generated from. - EnvelopeNameLabel = fleetPrefix + "envelope-name" + EnvelopeNameLabel = FleetPrefix + "envelope-name" // PreviousBindingStateAnnotation records the previous state of a binding. // This is used to remember if an "unscheduled" binding was moved from a "bound" state or a "scheduled" state. 
- PreviousBindingStateAnnotation = fleetPrefix + "previous-binding-state" + PreviousBindingStateAnnotation = FleetPrefix + "previous-binding-state" // ClusterStagedUpdateRunFinalizer is used by the ClusterStagedUpdateRun controller to make sure that the ClusterStagedUpdateRun // object is not deleted until all its dependent resources are deleted. - ClusterStagedUpdateRunFinalizer = fleetPrefix + "stagedupdaterun-finalizer" + ClusterStagedUpdateRunFinalizer = FleetPrefix + "stagedupdaterun-finalizer" // TargetUpdateRunLabel indicates the target update run on a staged run related object. - TargetUpdateRunLabel = fleetPrefix + "targetupdaterun" + TargetUpdateRunLabel = FleetPrefix + "targetupdaterun" // UpdateRunDeleteStageName is the name of delete stage in the staged update run. - UpdateRunDeleteStageName = fleetPrefix + "deleteStage" + UpdateRunDeleteStageName = FleetPrefix + "deleteStage" // IsLatestUpdateRunApprovalLabel indicates if the approval is the latest approval on a staged run. - IsLatestUpdateRunApprovalLabel = fleetPrefix + "isLatestUpdateRunApproval" + IsLatestUpdateRunApprovalLabel = FleetPrefix + "isLatestUpdateRunApproval" // TargetUpdatingStageNameLabel indicates the updating stage name on a staged run related object. - TargetUpdatingStageNameLabel = fleetPrefix + "targetUpdatingStage" + TargetUpdatingStageNameLabel = FleetPrefix + "targetUpdatingStage" // ApprovalTaskNameFmt is the format of the approval task name. ApprovalTaskNameFmt = "%s-%s" diff --git a/apis/placement/v1beta1/override_types.go b/apis/placement/v1beta1/override_types.go index 2245219da..9c5c79495 100644 --- a/apis/placement/v1beta1/override_types.go +++ b/apis/placement/v1beta1/override_types.go @@ -44,6 +44,7 @@ type ClusterResourceOverride struct { // The ClusterResourceOverride create or update will fail when the resource has been selected by the existing ClusterResourceOverride. // If the resource is selected by both ClusterResourceOverride and ResourceOverride, ResourceOverride will win when resolving // conflicts. +// +kubebuilder:validation:XValidation:rule="(has(oldSelf.placement) && has(self.placement) && oldSelf.placement == self.placement) || (!has(oldSelf.placement) && !has(self.placement))",message="The placement field is immutable" type ClusterResourceOverrideSpec struct { // Placement defines whether the override is applied to a specific placement or not. // If set, the override will trigger the placement rollout immediately when the rollout strategy type is RollingUpdate. @@ -61,7 +62,7 @@ type ClusterResourceOverrideSpec struct { // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=20 // +required - ClusterResourceSelectors []ClusterResourceSelector `json:"clusterResourceSelectors"` + ClusterResourceSelectors []ResourceSelectorTerm `json:"clusterResourceSelectors"` // Policy defines how to override the selected resources on the target clusters. // +required @@ -167,6 +168,7 @@ type ResourceOverride struct { // The ResourceOverride create or update will fail when the resource has been selected by the existing ResourceOverride. // If the resource is selected by both ClusterResourceOverride and ResourceOverride, ResourceOverride will win when resolving // conflicts. 
+// +kubebuilder:validation:XValidation:rule="(has(oldSelf.placement) && has(self.placement) && oldSelf.placement == self.placement) || (!has(oldSelf.placement) && !has(self.placement))",message="The placement field is immutable" type ResourceOverrideSpec struct { // Placement defines whether the override is applied to a specific placement or not. // If set, the override will trigger the placement rollout immediately when the rollout strategy type is RollingUpdate. diff --git a/apis/placement/v1beta1/overridesnapshot_types.go b/apis/placement/v1beta1/overridesnapshot_types.go index aec163412..00dc8b470 100644 --- a/apis/placement/v1beta1/overridesnapshot_types.go +++ b/apis/placement/v1beta1/overridesnapshot_types.go @@ -21,17 +21,17 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" const ( // OverrideIndexLabel is the label that indicate the policy snapshot index of a cluster policy. - OverrideIndexLabel = fleetPrefix + "override-index" + OverrideIndexLabel = FleetPrefix + "override-index" // OverrideSnapshotNameFmt is clusterResourceOverrideSnapshot name format: {CROName}-{OverrideSnapshotIndex}. OverrideSnapshotNameFmt = "%s-%d" // OverrideTrackingLabel is the label that points to the cluster resource override that creates a resource snapshot. - OverrideTrackingLabel = fleetPrefix + "parent-resource-override" + OverrideTrackingLabel = FleetPrefix + "parent-resource-override" // OverrideFinalizer is a finalizer added by the override controllers to all override, to make sure // that the override controller can react to override deletions if necessary. - OverrideFinalizer = fleetPrefix + "override-cleanup" + OverrideFinalizer = FleetPrefix + "override-cleanup" ) // +genclient diff --git a/apis/placement/v1beta1/policysnapshot_types.go b/apis/placement/v1beta1/policysnapshot_types.go index c409c068f..bbb94b5bf 100644 --- a/apis/placement/v1beta1/policysnapshot_types.go +++ b/apis/placement/v1beta1/policysnapshot_types.go @@ -26,13 +26,13 @@ import ( const ( // PolicyIndexLabel is the label that indicate the policy snapshot index of a cluster policy. - PolicyIndexLabel = fleetPrefix + "policy-index" + PolicyIndexLabel = FleetPrefix + "policy-index" // PolicySnapshotNameFmt is clusterPolicySnapshot name format: {CRPName}-{PolicySnapshotIndex}. PolicySnapshotNameFmt = "%s-%d" // NumberOfClustersAnnotation is the annotation that indicates how many clusters should be selected for selectN placement type. - NumberOfClustersAnnotation = fleetPrefix + "number-of-clusters" + NumberOfClustersAnnotation = FleetPrefix + "number-of-clusters" ) // make sure the PolicySnapshotObj and PolicySnapshotList interfaces are implemented by the @@ -153,7 +153,7 @@ type SchedulingPolicySnapshotStatus struct { // +patchMergeKey=type // +patchStrategy=merge - // ObservedCRPGeneration is the generation of the CRP which the scheduler uses to perform + // ObservedCRPGeneration is the generation of the resource placement which the scheduler uses to perform // the scheduling cycle and prepare the scheduling status. // +required ObservedCRPGeneration int64 `json:"observedCRPGeneration"` diff --git a/apis/placement/v1beta1/resourcesnapshot_types.go b/apis/placement/v1beta1/resourcesnapshot_types.go index 9cfd91094..c5d616eb6 100644 --- a/apis/placement/v1beta1/resourcesnapshot_types.go +++ b/apis/placement/v1beta1/resourcesnapshot_types.go @@ -27,23 +27,23 @@ import ( const ( // ResourceIndexLabel is the label that indicate the resource snapshot index of a cluster resource snapshot. 
- ResourceIndexLabel = fleetPrefix + "resource-index" + ResourceIndexLabel = FleetPrefix + "resource-index" // ResourceGroupHashAnnotation is the annotation that contains the value of the sha-256 hash // value of all the snapshots belong to the same snapshot index. - ResourceGroupHashAnnotation = fleetPrefix + "resource-hash" + ResourceGroupHashAnnotation = FleetPrefix + "resource-hash" // NumberOfEnvelopedObjectsAnnotation is the annotation that contains the number of the enveloped objects in the resource snapshot group. - NumberOfEnvelopedObjectsAnnotation = fleetPrefix + "number-of-enveloped-object" + NumberOfEnvelopedObjectsAnnotation = FleetPrefix + "number-of-enveloped-object" // NumberOfResourceSnapshotsAnnotation is the annotation that contains the total number of resource snapshots. - NumberOfResourceSnapshotsAnnotation = fleetPrefix + "number-of-resource-snapshots" + NumberOfResourceSnapshotsAnnotation = FleetPrefix + "number-of-resource-snapshots" // SubindexOfResourceSnapshotAnnotation is the annotation to store the subindex of resource snapshot in the group. - SubindexOfResourceSnapshotAnnotation = fleetPrefix + "subindex-of-resource-snapshot" + SubindexOfResourceSnapshotAnnotation = FleetPrefix + "subindex-of-resource-snapshot" // NextResourceSnapshotCandidateDetectionTimeAnnotation is the annotation to store the time of next resourceSnapshot candidate detected by the controller. - NextResourceSnapshotCandidateDetectionTimeAnnotation = fleetPrefix + "next-resource-snapshot-candidate-detection-time" + NextResourceSnapshotCandidateDetectionTimeAnnotation = FleetPrefix + "next-resource-snapshot-candidate-detection-time" // ResourceSnapshotNameFmt is resourcePolicySnapshot name format: {CRPName}-{resourceIndex}-snapshot. ResourceSnapshotNameFmt = "%s-%d-snapshot" diff --git a/apis/placement/v1beta1/work_types.go b/apis/placement/v1beta1/work_types.go index d1339ac7b..4781e72ff 100644 --- a/apis/placement/v1beta1/work_types.go +++ b/apis/placement/v1beta1/work_types.go @@ -40,10 +40,10 @@ import ( // The following definitions are originally declared in the controllers/workv1alpha1/manager.go file. const ( // ManifestHashAnnotation is the annotation that indicates whether the spec of the object has been changed or not. - ManifestHashAnnotation = fleetPrefix + "spec-hash" + ManifestHashAnnotation = FleetPrefix + "spec-hash" // LastAppliedConfigAnnotation is to record the last applied configuration on the object. - LastAppliedConfigAnnotation = fleetPrefix + "last-applied-configuration" + LastAppliedConfigAnnotation = FleetPrefix + "last-applied-configuration" // WorkConditionTypeApplied represents workload in Work is applied successfully on the spoke cluster. 
WorkConditionTypeApplied = "Applied" diff --git a/apis/placement/v1beta1/zz_generated.deepcopy.go b/apis/placement/v1beta1/zz_generated.deepcopy.go index 72b50ef34..05fef4c3d 100644 --- a/apis/placement/v1beta1/zz_generated.deepcopy.go +++ b/apis/placement/v1beta1/zz_generated.deepcopy.go @@ -631,7 +631,7 @@ func (in *ClusterResourceOverrideSpec) DeepCopyInto(out *ClusterResourceOverride } if in.ClusterResourceSelectors != nil { in, out := &in.ClusterResourceSelectors, &out.ClusterResourceSelectors - *out = make([]ClusterResourceSelector, len(*in)) + *out = make([]ResourceSelectorTerm, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -834,7 +834,8 @@ func (in *ClusterResourcePlacementStatus) DeepCopyInto(out *ClusterResourcePlace *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Status.DeepCopyInto(&out.Status) + in.PlacementStatus.DeepCopyInto(&out.PlacementStatus) + in.LastUpdatedTime.DeepCopyInto(&out.LastUpdatedTime) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourcePlacementStatus. @@ -887,26 +888,6 @@ func (in *ClusterResourcePlacementStatusList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterResourceSelector) DeepCopyInto(out *ClusterResourceSelector) { - *out = *in - if in.LabelSelector != nil { - in, out := &in.LabelSelector, &out.LabelSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceSelector. -func (in *ClusterResourceSelector) DeepCopy() *ClusterResourceSelector { - if in == nil { - return nil - } - out := new(ClusterResourceSelector) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterResourceSnapshot) DeepCopyInto(out *ClusterResourceSnapshot) { *out = *in @@ -1542,6 +1523,59 @@ func (in *PatchDetail) DeepCopy() *PatchDetail { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PerClusterPlacementStatus) DeepCopyInto(out *PerClusterPlacementStatus) { + *out = *in + if in.ApplicableResourceOverrides != nil { + in, out := &in.ApplicableResourceOverrides, &out.ApplicableResourceOverrides + *out = make([]NamespacedName, len(*in)) + copy(*out, *in) + } + if in.ApplicableClusterResourceOverrides != nil { + in, out := &in.ApplicableClusterResourceOverrides, &out.ApplicableClusterResourceOverrides + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.FailedPlacements != nil { + in, out := &in.FailedPlacements, &out.FailedPlacements + *out = make([]FailedResourcePlacement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DriftedPlacements != nil { + in, out := &in.DriftedPlacements, &out.DriftedPlacements + *out = make([]DriftedResourcePlacement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DiffedPlacements != nil { + in, out := &in.DiffedPlacements, &out.DiffedPlacements + *out = make([]DiffedResourcePlacement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerClusterPlacementStatus. +func (in *PerClusterPlacementStatus) DeepCopy() *PerClusterPlacementStatus { + if in == nil { + return nil + } + out := new(PerClusterPlacementStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PlacementDisruptionBudgetSpec) DeepCopyInto(out *PlacementDisruptionBudgetSpec) { *out = *in @@ -1666,7 +1700,7 @@ func (in *PlacementSpec) DeepCopyInto(out *PlacementSpec) { *out = *in if in.ResourceSelectors != nil { in, out := &in.ResourceSelectors, &out.ResourceSelectors - *out = make([]ClusterResourceSelector, len(*in)) + *out = make([]ResourceSelectorTerm, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -2263,69 +2297,36 @@ func (in *ResourcePlacementList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PerClusterPlacementStatus) DeepCopyInto(out *PerClusterPlacementStatus) { +func (in *ResourceSelector) DeepCopyInto(out *ResourceSelector) { *out = *in - if in.ApplicableResourceOverrides != nil { - in, out := &in.ApplicableResourceOverrides, &out.ApplicableResourceOverrides - *out = make([]NamespacedName, len(*in)) - copy(*out, *in) - } - if in.ApplicableClusterResourceOverrides != nil { - in, out := &in.ApplicableClusterResourceOverrides, &out.ApplicableClusterResourceOverrides - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.FailedPlacements != nil { - in, out := &in.FailedPlacements, &out.FailedPlacements - *out = make([]FailedResourcePlacement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DriftedPlacements != nil { - in, out := &in.DriftedPlacements, &out.DriftedPlacements - *out = make([]DriftedResourcePlacement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DiffedPlacements != nil { - in, out := &in.DiffedPlacements, &out.DiffedPlacements - *out = make([]DiffedResourcePlacement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePlacementStatus. -func (in *PerClusterPlacementStatus) DeepCopy() *PerClusterPlacementStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSelector. +func (in *ResourceSelector) DeepCopy() *ResourceSelector { if in == nil { return nil } - out := new(PerClusterPlacementStatus) + out := new(ResourceSelector) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceSelector) DeepCopyInto(out *ResourceSelector) { +func (in *ResourceSelectorTerm) DeepCopyInto(out *ResourceSelectorTerm) { *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSelector. -func (in *ResourceSelector) DeepCopy() *ResourceSelector { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSelectorTerm. 
+func (in *ResourceSelectorTerm) DeepCopy() *ResourceSelectorTerm { if in == nil { return nil } - out := new(ResourceSelector) + out := new(ResourceSelectorTerm) in.DeepCopyInto(out) return out } diff --git a/cmd/hubagent/main.go b/cmd/hubagent/main.go index b94c5f526..572653796 100644 --- a/cmd/hubagent/main.go +++ b/cmd/hubagent/main.go @@ -85,9 +85,16 @@ func init() { // +kubebuilder:scaffold:scheme klog.InitFlags(nil) - metrics.Registry.MustRegister(fleetmetrics.JoinResultMetrics, fleetmetrics.LeaveResultMetrics, - fleetmetrics.PlacementApplyFailedCount, fleetmetrics.PlacementApplySucceedCount, - fleetmetrics.SchedulingCycleDurationMilliseconds, fleetmetrics.SchedulerActiveWorkers) + metrics.Registry.MustRegister( + fleetmetrics.JoinResultMetrics, + fleetmetrics.LeaveResultMetrics, + fleetmetrics.PlacementApplyFailedCount, + fleetmetrics.PlacementApplySucceedCount, + fleetmetrics.SchedulingCycleDurationMilliseconds, + fleetmetrics.SchedulerActiveWorkers, + fleetmetrics.FleetPlacementStatusLastTimeStampSeconds, + fleetmetrics.FleetEvictionStatus, + ) } func main() { diff --git a/cmd/hubagent/options/options.go b/cmd/hubagent/options/options.go index ed90921c2..2128aa7bb 100644 --- a/cmd/hubagent/options/options.go +++ b/cmd/hubagent/options/options.go @@ -98,6 +98,8 @@ type Options struct { EnableStagedUpdateRunAPIs bool // EnableEvictionAPIs enables to agents to watch the eviction and placement disruption budget CRs. EnableEvictionAPIs bool + // EnableResourcePlacement enables the agents to watch the ResourcePlacement APIs. + EnableResourcePlacement bool // EnablePprof enables the pprof profiling. EnablePprof bool // PprofPort is the port for pprof profiling. @@ -126,6 +128,7 @@ func NewOptions() *Options { EnableV1Alpha1APIs: false, EnableClusterInventoryAPIs: true, EnableStagedUpdateRunAPIs: true, + EnableResourcePlacement: true, EnablePprof: false, PprofPort: 6065, ResourceSnapshotCreationMinimumInterval: 30 * time.Second, @@ -173,6 +176,7 @@ func (o *Options) AddFlags(flags *flag.FlagSet) { flags.DurationVar(&o.ForceDeleteWaitTime.Duration, "force-delete-wait-time", 15*time.Minute, "The duration the hub agent waits before force deleting a member cluster.") flags.BoolVar(&o.EnableStagedUpdateRunAPIs, "enable-staged-update-run-apis", true, "If set, the agents will watch for the ClusterStagedUpdateRun APIs.") flags.BoolVar(&o.EnableEvictionAPIs, "enable-eviction-apis", true, "If set, the agents will watch for the Eviction and PlacementDisruptionBudget APIs.") + flags.BoolVar(&o.EnableResourcePlacement, "enable-resource-placement", true, "If set, the agents will watch for the ResourcePlacement APIs.") flags.BoolVar(&o.EnablePprof, "enable-pprof", false, "If set, the pprof profiling is enabled.") flags.IntVar(&o.PprofPort, "pprof-port", 6065, "The port for pprof profiling.") flags.BoolVar(&o.DenyModifyMemberClusterLabels, "deny-modify-member-cluster-labels", false, "If set, users not in the system:masters cannot modify member cluster labels.") diff --git a/cmd/hubagent/workload/setup.go b/cmd/hubagent/workload/setup.go index 91e9931a3..5f2e428e8 100644 --- a/cmd/hubagent/workload/setup.go +++ b/cmd/hubagent/workload/setup.go @@ -35,16 +35,16 @@ import ( placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" fleetv1alpha1 "go.goms.io/fleet/apis/v1alpha1" "go.goms.io/fleet/cmd/hubagent/options" + "go.goms.io/fleet/pkg/controllers/bindingwatcher" "go.goms.io/fleet/pkg/controllers/clusterinventory/clusterprofile" - "go.goms.io/fleet/pkg/controllers/clusterresourcebindingwatcher" - 
"go.goms.io/fleet/pkg/controllers/clusterresourceplacement" "go.goms.io/fleet/pkg/controllers/clusterresourceplacementeviction" - "go.goms.io/fleet/pkg/controllers/clusterresourceplacementwatcher" - "go.goms.io/fleet/pkg/controllers/clusterschedulingpolicysnapshot" "go.goms.io/fleet/pkg/controllers/memberclusterplacement" "go.goms.io/fleet/pkg/controllers/overrider" + "go.goms.io/fleet/pkg/controllers/placement" + "go.goms.io/fleet/pkg/controllers/placementwatcher" "go.goms.io/fleet/pkg/controllers/resourcechange" "go.goms.io/fleet/pkg/controllers/rollout" + "go.goms.io/fleet/pkg/controllers/schedulingpolicysnapshot" "go.goms.io/fleet/pkg/controllers/updaterun" "go.goms.io/fleet/pkg/controllers/workgenerator" "go.goms.io/fleet/pkg/resourcewatcher" @@ -53,10 +53,10 @@ import ( "go.goms.io/fleet/pkg/scheduler/framework" "go.goms.io/fleet/pkg/scheduler/profile" "go.goms.io/fleet/pkg/scheduler/queue" - schedulercrbwatcher "go.goms.io/fleet/pkg/scheduler/watchers/clusterresourcebinding" - schedulercrpwatcher "go.goms.io/fleet/pkg/scheduler/watchers/clusterresourceplacement" - schedulercspswatcher "go.goms.io/fleet/pkg/scheduler/watchers/clusterschedulingpolicysnapshot" + schedulerbindingwatcher "go.goms.io/fleet/pkg/scheduler/watchers/binding" "go.goms.io/fleet/pkg/scheduler/watchers/membercluster" + schedulerplacementwatcher "go.goms.io/fleet/pkg/scheduler/watchers/placement" + schedulerspswatcher "go.goms.io/fleet/pkg/scheduler/watchers/schedulingpolicysnapshot" "go.goms.io/fleet/pkg/utils" "go.goms.io/fleet/pkg/utils/controller" "go.goms.io/fleet/pkg/utils/informer" @@ -67,6 +67,8 @@ const ( crpControllerName = "cluster-resource-placement-controller" crpControllerV1Alpha1Name = crpControllerName + "-v1alpha1" crpControllerV1Beta1Name = crpControllerName + "-v1beta1" + rpControllerName = "resource-placement-controller" + placementControllerName = "placement-controller" resourceChangeControllerName = "resource-change-controller" mcPlacementControllerName = "memberCluster-placement-controller" @@ -96,6 +98,14 @@ var ( placementv1beta1.GroupVersion.WithKind(placementv1beta1.ResourceOverrideSnapshotKind), } + // There's a prerequisite that v1Beta1RequiredGVKs must be installed too. 
+ rpRequiredGVKs = []schema.GroupVersionKind{ + placementv1beta1.GroupVersion.WithKind(placementv1beta1.ResourcePlacementKind), + placementv1beta1.GroupVersion.WithKind(placementv1beta1.ResourceBindingKind), + placementv1beta1.GroupVersion.WithKind(placementv1beta1.ResourceSnapshotKind), + placementv1beta1.GroupVersion.WithKind(placementv1beta1.SchedulingPolicySnapshotKind), + } + clusterStagedUpdateRunGVKs = []schema.GroupVersionKind{ placementv1beta1.GroupVersion.WithKind(placementv1beta1.ClusterStagedUpdateRunKind), placementv1beta1.GroupVersion.WithKind(placementv1beta1.ClusterStagedUpdateStrategyKind), @@ -150,10 +160,10 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, validator.ResourceInformer = dynamicInformerManager // webhook needs this to check resource scope validator.RestMapper = mgr.GetRESTMapper() // webhook needs this to validate GVK of resource selector - // Set up a custom controller to reconcile cluster resource placement - crpc := &clusterresourceplacement.Reconciler{ + // Set up a custom controller to reconcile placement objects + pc := &placement.Reconciler{ Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor(crpControllerName), + Recorder: mgr.GetEventRecorderFor(placementControllerName), RestMapper: mgr.GetRESTMapper(), InformerManager: dynamicInformerManager, ResourceConfig: resourceConfig, @@ -167,6 +177,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, rateLimiter := options.DefaultControllerRateLimiter(opts.RateLimiterOpts) var clusterResourcePlacementControllerV1Alpha1 controller.Controller var clusterResourcePlacementControllerV1Beta1 controller.Controller + var resourcePlacementController controller.Controller var memberClusterPlacementController controller.Controller if opts.EnableV1Alpha1APIs { for _, gvk := range v1Alpha1RequiredGVKs { @@ -176,7 +187,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } } klog.Info("Setting up clusterResourcePlacement v1alpha1 controller") - clusterResourcePlacementControllerV1Alpha1 = controller.NewController(crpControllerV1Alpha1Name, controller.NamespaceKeyFunc, crpc.ReconcileV1Alpha1, rateLimiter) + clusterResourcePlacementControllerV1Alpha1 = controller.NewController(crpControllerV1Alpha1Name, controller.NamespaceKeyFunc, pc.ReconcileV1Alpha1, rateLimiter) klog.Info("Setting up member cluster change controller") mcp := &memberclusterplacement.Reconciler{ InformerManager: dynamicInformerManager, @@ -193,9 +204,9 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } } klog.Info("Setting up clusterResourcePlacement v1beta1 controller") - clusterResourcePlacementControllerV1Beta1 = controller.NewController(crpControllerV1Beta1Name, controller.NamespaceKeyFunc, crpc.Reconcile, rateLimiter) + clusterResourcePlacementControllerV1Beta1 = controller.NewController(crpControllerV1Beta1Name, controller.NamespaceKeyFunc, pc.Reconcile, rateLimiter) klog.Info("Setting up clusterResourcePlacement watcher") - if err := (&clusterresourceplacementwatcher.Reconciler{ + if err := (&placementwatcher.Reconciler{ PlacementController: clusterResourcePlacementControllerV1Beta1, }).SetupWithManagerForClusterResourcePlacement(mgr); err != nil { klog.ErrorS(err, "Unable to set up the clusterResourcePlacement watcher") @@ -203,7 +214,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } klog.Info("Setting up clusterResourceBinding watcher") - if err := 
(&clusterresourcebindingwatcher.Reconciler{ + if err := (&bindingwatcher.Reconciler{ PlacementController: clusterResourcePlacementControllerV1Beta1, Client: mgr.GetClient(), }).SetupWithManagerForClusterResourceBinding(mgr); err != nil { @@ -212,7 +223,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } klog.Info("Setting up clusterSchedulingPolicySnapshot watcher") - if err := (&clusterschedulingpolicysnapshot.Reconciler{ + if err := (&schedulingpolicysnapshot.Reconciler{ Client: mgr.GetClient(), PlacementController: clusterResourcePlacementControllerV1Beta1, }).SetupWithManagerForClusterSchedulingPolicySnapshot(mgr); err != nil { @@ -220,7 +231,43 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, return err } - // Set up a new controller to do rollout resources according to CRP rollout strategy + if opts.EnableResourcePlacement { + for _, gvk := range rpRequiredGVKs { + if err = utils.CheckCRDInstalled(discoverClient, gvk); err != nil { + klog.ErrorS(err, "unable to find the required CRD", "GVK", gvk) + return err + } + } + klog.Info("Setting up resourcePlacement controller") + resourcePlacementController = controller.NewController(rpControllerName, controller.NamespaceKeyFunc, pc.Reconcile, rateLimiter) + klog.Info("Setting up resourcePlacement watcher") + if err := (&placementwatcher.Reconciler{ + PlacementController: resourcePlacementController, + }).SetupWithManagerForResourcePlacement(mgr); err != nil { + klog.ErrorS(err, "Unable to set up the resourcePlacement watcher") + return err + } + + klog.Info("Setting up resourceBinding watcher") + if err := (&bindingwatcher.Reconciler{ + PlacementController: resourcePlacementController, + Client: mgr.GetClient(), + }).SetupWithManagerForResourceBinding(mgr); err != nil { + klog.ErrorS(err, "Unable to set up the resourceBinding watcher") + return err + } + + klog.Info("Setting up schedulingPolicySnapshot watcher") + if err := (&schedulingpolicysnapshot.Reconciler{ + Client: mgr.GetClient(), + PlacementController: resourcePlacementController, + }).SetupWithManagerForSchedulingPolicySnapshot(mgr); err != nil { + klog.ErrorS(err, "Unable to set up the schedulingPolicySnapshot watcher") + return err + } + } + + // Set up a new controller to do rollout resources according to CRP/RP rollout strategy klog.Info("Setting up rollout controller") if err := (&rollout.Reconciler{ Client: mgr.GetClient(), @@ -228,10 +275,22 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, MaxConcurrentReconciles: int(math.Ceil(float64(opts.MaxFleetSizeSupported)/30) * math.Ceil(float64(opts.MaxConcurrentClusterPlacement)/10)), InformerManager: dynamicInformerManager, }).SetupWithManagerForClusterResourcePlacement(mgr); err != nil { - klog.ErrorS(err, "Unable to set up rollout controller") + klog.ErrorS(err, "Unable to set up rollout controller for clusterResourcePlacement") return err } + if opts.EnableResourcePlacement { + if err := (&rollout.Reconciler{ + Client: mgr.GetClient(), + UncachedReader: mgr.GetAPIReader(), + MaxConcurrentReconciles: int(math.Ceil(float64(opts.MaxFleetSizeSupported)/30) * math.Ceil(float64(opts.MaxConcurrentClusterPlacement)/10)), + InformerManager: dynamicInformerManager, + }).SetupWithManagerForResourcePlacement(mgr); err != nil { + klog.ErrorS(err, "Unable to set up rollout controller for resourcePlacement") + return err + } + } + if opts.EnableEvictionAPIs { for _, gvk := range evictionGVKs { if err = 
utils.CheckCRDInstalled(discoverClient, gvk); err != nil { @@ -274,10 +333,21 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, MaxConcurrentReconciles: int(math.Ceil(float64(opts.MaxFleetSizeSupported)/10) * math.Ceil(float64(opts.MaxConcurrentClusterPlacement)/10)), InformerManager: dynamicInformerManager, }).SetupWithManagerForClusterResourceBinding(mgr); err != nil { - klog.ErrorS(err, "Unable to set up work generator") + klog.ErrorS(err, "Unable to set up work generator for clusterResourceBinding") return err } + if opts.EnableResourcePlacement { + if err := (&workgenerator.Reconciler{ + Client: mgr.GetClient(), + MaxConcurrentReconciles: int(math.Ceil(float64(opts.MaxFleetSizeSupported)/10) * math.Ceil(float64(opts.MaxConcurrentClusterPlacement)/10)), + InformerManager: dynamicInformerManager, + }).SetupWithManagerForResourceBinding(mgr); err != nil { + klog.ErrorS(err, "Unable to set up work generator for resourceBinding") + return err + } + } + // Set up the scheduler klog.Info("Setting up scheduler") defaultProfile := profile.NewDefaultProfile() @@ -302,7 +372,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, // Set up the watchers for the controller klog.Info("Setting up the clusterResourcePlacement watcher for scheduler") - if err := (&schedulercrpwatcher.Reconciler{ + if err := (&schedulerplacementwatcher.Reconciler{ Client: mgr.GetClient(), SchedulerWorkQueue: defaultSchedulingQueue, }).SetupWithManagerForClusterResourcePlacement(mgr); err != nil { @@ -311,7 +381,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } klog.Info("Setting up the clusterSchedulingPolicySnapshot watcher for scheduler") - if err := (&schedulercspswatcher.Reconciler{ + if err := (&schedulerspswatcher.Reconciler{ Client: mgr.GetClient(), SchedulerWorkQueue: defaultSchedulingQueue, }).SetupWithManagerForClusterSchedulingPolicySnapshot(mgr); err != nil { @@ -320,7 +390,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } klog.Info("Setting up the clusterResourceBinding watcher for scheduler") - if err := (&schedulercrbwatcher.Reconciler{ + if err := (&schedulerbindingwatcher.Reconciler{ Client: mgr.GetClient(), SchedulerWorkQueue: defaultSchedulingQueue, }).SetupWithManagerForClusterResourceBinding(mgr); err != nil { @@ -328,11 +398,41 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, return err } + if opts.EnableResourcePlacement { + klog.Info("Setting up the resourcePlacement watcher for scheduler") + if err := (&schedulerplacementwatcher.Reconciler{ + Client: mgr.GetClient(), + SchedulerWorkQueue: defaultSchedulingQueue, + }).SetupWithManagerForResourcePlacement(mgr); err != nil { + klog.ErrorS(err, "Unable to set up resourcePlacement watcher for scheduler") + return err + } + + klog.Info("Setting up the schedulingPolicySnapshot watcher for scheduler") + if err := (&schedulerspswatcher.Reconciler{ + Client: mgr.GetClient(), + SchedulerWorkQueue: defaultSchedulingQueue, + }).SetupWithManagerForSchedulingPolicySnapshot(mgr); err != nil { + klog.ErrorS(err, "Unable to set up schedulingPolicySnapshot watcher for scheduler") + return err + } + + klog.Info("Setting up the resourceBinding watcher for scheduler") + if err := (&schedulerbindingwatcher.Reconciler{ + Client: mgr.GetClient(), + SchedulerWorkQueue: defaultSchedulingQueue, + }).SetupWithManagerForResourceBinding(mgr); err != nil { + klog.ErrorS(err, "Unable to set up 
resourceBinding watcher for scheduler") + return err + } + } + klog.Info("Setting up the memberCluster watcher for scheduler") if err := (&membercluster.Reconciler{ Client: mgr.GetClient(), SchedulerWorkQueue: defaultSchedulingQueue, ClusterEligibilityChecker: clustereligibilitychecker.New(), + EnableResourcePlacement: opts.EnableResourcePlacement, }).SetupWithManager(mgr); err != nil { klog.ErrorS(err, "Unable to set up memberCluster watcher for scheduler") return err @@ -388,6 +488,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, InformerManager: dynamicInformerManager, PlacementControllerV1Alpha1: clusterResourcePlacementControllerV1Alpha1, PlacementControllerV1Beta1: clusterResourcePlacementControllerV1Beta1, + ResourcePlacementController: resourcePlacementController, } resourceChangeController := controller.NewController(resourceChangeControllerName, controller.ClusterWideKeyFunc, rcr.Reconcile, rateLimiter) @@ -397,7 +498,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, RESTMapper: mgr.GetRESTMapper(), ClusterResourcePlacementControllerV1Alpha1: clusterResourcePlacementControllerV1Alpha1, ClusterResourcePlacementControllerV1Beta1: clusterResourcePlacementControllerV1Beta1, - ResourcePlacementController: nil, // TODO: need to enable the resource placement controller when ready + ResourcePlacementController: resourcePlacementController, ResourceChangeController: resourceChangeController, MemberClusterPlacementController: memberClusterPlacementController, InformerManager: dynamicInformerManager, diff --git a/cmd/memberagent/main.go b/cmd/memberagent/main.go index 6ea7e2023..b927fc216 100644 --- a/cmd/memberagent/main.go +++ b/cmd/memberagent/main.go @@ -99,7 +99,7 @@ var ( workApplierRequeueRateLimiterExponentialBaseForSlowBackoff = flag.Float64("work-applier-requeue-rate-limiter-exponential-base-for-slow-backoff", 1.2, "If set, the work applier will start to back off slowly at this factor after it finished requeueing with fixed delays, until it reaches the slow backoff delay cap. Its value should be larger than 1.0 and no larger than 100.0") workApplierRequeueRateLimiterInitialSlowBackoffDelaySeconds = flag.Float64("work-applier-requeue-rate-limiter-initial-slow-backoff-delay-seconds", 2, "If set, the work applier will start to back off slowly at this delay in seconds.") workApplierRequeueRateLimiterMaxSlowBackoffDelaySeconds = flag.Float64("work-applier-requeue-rate-limiter-max-slow-backoff-delay-seconds", 15, "If set, the work applier will not back off longer than this value in seconds when it is in the slow backoff stage.") - workApplierRequeueRateLimiterExponentialBaseForFastBackoff = flag.Float64("work-applier-requeue-rate-limiter-exponential-base-for-fast-backoff", 1.2, "If set, the work applier will start to back off fast at this factor after it completes the slow backoff stage, until it reaches the fast backoff delay cap. Its value should be larger than the base value for the slow backoff stage.") + workApplierRequeueRateLimiterExponentialBaseForFastBackoff = flag.Float64("work-applier-requeue-rate-limiter-exponential-base-for-fast-backoff", 1.5, "If set, the work applier will start to back off fast at this factor after it completes the slow backoff stage, until it reaches the fast backoff delay cap. 
Its value should be larger than the base value for the slow backoff stage.") workApplierRequeueRateLimiterMaxFastBackoffDelaySeconds = flag.Float64("work-applier-requeue-rate-limiter-max-fast-backoff-delay-seconds", 900, "If set, the work applier will not back off longer than this value in seconds when it is in the fast backoff stage.") workApplierRequeueRateLimiterSkipToFastBackoffForAvailableOrDiffReportedWorkObjs = flag.Bool("work-applier-requeue-rate-limiter-skip-to-fast-backoff-for-available-or-diff-reported-work-objs", true, "If set, the rate limiter will skip the slow backoff stage and start fast backoff immediately for work objects that are available or have diff reported.") ) diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml index dbeb5b4ca..5c92a300d 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml @@ -137,11 +137,24 @@ spec: The recommended way is to set the placement so that the override can be rolled out immediately. properties: name: - description: Name is the reference to the name of placement. + type: string + scope: + default: Cluster + description: |- + Scope defines the scope of the placement. + A clusterResourceOverride can only reference a clusterResourcePlacement (cluster-scoped), + and a resourceOverride can reference either a clusterResourcePlacement or resourcePlacement (namespaced). + The referenced resourcePlacement must be in the same namespace as the resourceOverride. + enum: + - Cluster + - Namespaced type: string required: - name type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: self == oldSelf policy: description: Policy defines how to override the selected resources on the target clusters. @@ -363,6 +376,10 @@ spec: - clusterResourceSelectors - policy type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - spec type: object @@ -404,22 +421,22 @@ spec: We only support Name selector for now. items: description: |- - ClusterResourceSelector is used to select resources as the target resources to be placed. + ResourceSelectorTerm is used to select resources as the target resources to be placed. All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. properties: group: description: |- - Group name of the cluster-scoped resource. + Group name of the be selected resource. Use an empty string to select resources under the core API group (e.g., namespaces). type: string kind: description: |- - Kind of the cluster-scoped resource. + Kind of the to be selected resource. Note: When `Kind` is `namespace`, by default ALL the resources under the selected namespaces are selected. type: string labelSelector: description: |- - A label query over all the cluster-scoped resources. Resources matching the query are selected. + A label query over all the be selected resources. Resources matching the query are selected. Note that namespace-scoped resources can't be selected even if they match the query. properties: matchExpressions: @@ -466,7 +483,7 @@ spec: type: object x-kubernetes-map-type: atomic name: - description: Name of the cluster-scoped resource. 
+ description: Name of the be selected resource. type: string selectionScope: default: NamespaceWithResources @@ -477,7 +494,7 @@ spec: - NamespaceWithResources type: string version: - description: Version of the cluster-scoped resource. + description: Version of the to be selected resource. type: string required: - group @@ -777,22 +794,22 @@ spec: We only support Name selector for now. items: description: |- - ClusterResourceSelector is used to select resources as the target resources to be placed. + ResourceSelectorTerm is used to select resources as the target resources to be placed. All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. properties: group: description: |- - Group name of the cluster-scoped resource. + Group name of the be selected resource. Use an empty string to select resources under the core API group (e.g., namespaces). type: string kind: description: |- - Kind of the cluster-scoped resource. + Kind of the to be selected resource. Note: When `Kind` is `namespace`, by default ALL the resources under the selected namespaces are selected. type: string labelSelector: description: |- - A label query over all the cluster-scoped resources. Resources matching the query are selected. + A label query over all the be selected resources. Resources matching the query are selected. Note that namespace-scoped resources can't be selected even if they match the query. properties: matchExpressions: @@ -839,7 +856,7 @@ spec: type: object x-kubernetes-map-type: atomic name: - description: Name of the cluster-scoped resource. + description: Name of the be selected resource. type: string selectionScope: default: NamespaceWithResources @@ -850,7 +867,7 @@ spec: - NamespaceWithResources type: string version: - description: Version of the cluster-scoped resource. + description: Version of the to be selected resource. type: string required: - group @@ -1105,6 +1122,10 @@ spec: - clusterResourceSelectors - policy type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - spec type: object diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml index 8cf473808..d8289eec7 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml @@ -151,11 +151,24 @@ spec: The recommended way is to set the placement so that the override can be rolled out immediately. properties: name: - description: Name is the reference to the name of placement. + type: string + scope: + default: Cluster + description: |- + Scope defines the scope of the placement. + A clusterResourceOverride can only reference a clusterResourcePlacement (cluster-scoped), + and a resourceOverride can reference either a clusterResourcePlacement or resourcePlacement (namespaced). + The referenced resourcePlacement must be in the same namespace as the resourceOverride. + enum: + - Cluster + - Namespaced type: string required: - name type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: self == oldSelf policy: description: Policy defines how to override the selected resources on the target clusters. 
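For context on the override hunks above: the placement reference now carries a `scope` next to `name`, and the whole `placement` block is immutable once set (enforced by the new CEL rules). Below is a minimal, hedged sketch of a ClusterResourceOverride that uses the new field. The API version, object names, and the `overrideRules`/`jsonPatchOverrides` policy shape are assumptions based on the existing override API, not something this diff confirms; the rule contents are purely illustrative.

```yaml
# Hedged sketch: a ClusterResourceOverride pinned to a cluster-scoped placement.
# Per the new CEL rules, spec.placement cannot be added, removed, or changed after creation.
apiVersion: placement.kubernetes-fleet.io/v1beta1   # assumed version
kind: ClusterResourceOverride
metadata:
  name: example-cro
spec:
  placement:
    name: test-crp          # illustrative CRP name
    scope: Cluster          # default; a clusterResourceOverride may only reference a cluster-scoped placement
  clusterResourceSelectors:
    - group: rbac.authorization.k8s.io
      kind: ClusterRole
      version: v1
      name: secret-reader   # illustrative target resource
  policy:                   # policy shape assumed from the current override API
    overrideRules:
      - clusterSelector:
          clusterSelectorTerms:
            - labelSelector:
                matchLabels:
                  env: prod
        jsonPatchOverrides:
          - op: replace
            path: /rules/0/verbs/0
            value: "get"
```

With the validations added in this diff, an update that edits `spec.placement` (for example, pointing the override at a different placement) should be rejected with "The placement field is immutable".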
@@ -377,6 +390,10 @@ spec: - clusterResourceSelectors - policy type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - overrideHash - overrideSpec @@ -436,22 +453,22 @@ spec: We only support Name selector for now. items: description: |- - ClusterResourceSelector is used to select resources as the target resources to be placed. + ResourceSelectorTerm is used to select resources as the target resources to be placed. All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. properties: group: description: |- - Group name of the cluster-scoped resource. + Group name of the be selected resource. Use an empty string to select resources under the core API group (e.g., namespaces). type: string kind: description: |- - Kind of the cluster-scoped resource. + Kind of the to be selected resource. Note: When `Kind` is `namespace`, by default ALL the resources under the selected namespaces are selected. type: string labelSelector: description: |- - A label query over all the cluster-scoped resources. Resources matching the query are selected. + A label query over all the be selected resources. Resources matching the query are selected. Note that namespace-scoped resources can't be selected even if they match the query. properties: matchExpressions: @@ -498,7 +515,7 @@ spec: type: object x-kubernetes-map-type: atomic name: - description: Name of the cluster-scoped resource. + description: Name of the be selected resource. type: string selectionScope: default: NamespaceWithResources @@ -509,7 +526,7 @@ spec: - NamespaceWithResources type: string version: - description: Version of the cluster-scoped resource. + description: Version of the to be selected resource. type: string required: - group @@ -823,22 +840,22 @@ spec: We only support Name selector for now. items: description: |- - ClusterResourceSelector is used to select resources as the target resources to be placed. + ResourceSelectorTerm is used to select resources as the target resources to be placed. All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. properties: group: description: |- - Group name of the cluster-scoped resource. + Group name of the be selected resource. Use an empty string to select resources under the core API group (e.g., namespaces). type: string kind: description: |- - Kind of the cluster-scoped resource. + Kind of the to be selected resource. Note: When `Kind` is `namespace`, by default ALL the resources under the selected namespaces are selected. type: string labelSelector: description: |- - A label query over all the cluster-scoped resources. Resources matching the query are selected. + A label query over all the be selected resources. Resources matching the query are selected. Note that namespace-scoped resources can't be selected even if they match the query. properties: matchExpressions: @@ -885,7 +902,7 @@ spec: type: object x-kubernetes-map-type: atomic name: - description: Name of the cluster-scoped resource. + description: Name of the be selected resource. type: string selectionScope: default: NamespaceWithResources @@ -896,7 +913,7 @@ spec: - NamespaceWithResources type: string version: - description: Version of the cluster-scoped resource. + description: Version of the to be selected resource. 
type: string required: - group @@ -1151,6 +1168,10 @@ spec: - clusterResourceSelectors - policy type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - overrideHash - overrideSpec diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml index 1d1c20c92..186898038 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml @@ -1589,22 +1589,22 @@ spec: You can have 1-100 selectors. items: description: |- - ClusterResourceSelector is used to select resources as the target resources to be placed. + ResourceSelectorTerm is used to select resources as the target resources to be placed. All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. properties: group: description: |- - Group name of the cluster-scoped resource. + Group name of the be selected resource. Use an empty string to select resources under the core API group (e.g., namespaces). type: string kind: description: |- - Kind of the cluster-scoped resource. + Kind of the to be selected resource. Note: When `Kind` is `namespace`, by default ALL the resources under the selected namespaces are selected. type: string labelSelector: description: |- - A label query over all the cluster-scoped resources. Resources matching the query are selected. + A label query over all the be selected resources. Resources matching the query are selected. Note that namespace-scoped resources can't be selected even if they match the query. properties: matchExpressions: @@ -1651,7 +1651,7 @@ spec: type: object x-kubernetes-map-type: atomic name: - description: Name of the cluster-scoped resource. + description: Name of the be selected resource. type: string selectionScope: default: NamespaceWithResources @@ -1662,7 +1662,7 @@ spec: - NamespaceWithResources type: string version: - description: Version of the cluster-scoped resource. + description: Version of the to be selected resource. type: string required: - group @@ -2022,6 +2022,9 @@ spec: resourceSelector with kind 'Namespace' is required rule: '!(self.statusReportingScope == ''NamespaceAccessible'' && size(self.resourceSelectors.filter(x, x.kind == ''Namespace'')) != 1)' + - message: statusReportingScope is immutable + rule: '!has(oldSelf.statusReportingScope) || self.statusReportingScope + == oldSelf.statusReportingScope' status: description: The observed status of ClusterResourcePlacement. properties: @@ -2105,7 +2108,7 @@ spec: type: string placementStatuses: description: |- - PlacementStatuses contains a list of placement status on the clusters that are selected by PlacementPolicy. + PerClusterPlacementStatuses contains a list of placement status on the clusters that are selected by PlacementPolicy. Each selected cluster according to the observed resource placement is guaranteed to have a corresponding placementStatuses. In the pickN case, there are N placement statuses where N = NumberOfClusters; Or in the pickFixed case, there are N placement statuses where N = ClusterNames. 
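The ClusterResourcePlacement hunks above add an immutability rule for `statusReportingScope` on top of the existing requirement that a `NamespaceAccessible` placement select exactly one Namespace. A minimal sketch that satisfies both CEL rules, modeled on the example manifests this PR adds under examples/resourceplacement/ (the name and namespace are illustrative):

```yaml
apiVersion: placement.kubernetes-fleet.io/v1beta1
kind: ClusterResourcePlacement
metadata:
  name: ns-accessible-crp
spec:
  # Exactly one resourceSelector of kind Namespace is required for NamespaceAccessible.
  resourceSelectors:
    - group: ""
      kind: Namespace
      name: test-ns
      version: v1
  statusReportingScope: NamespaceAccessible   # immutable once set, per the new CEL rule
  policy:
    placementType: PickAll
  strategy:
    type: RollingUpdate
```

With this scope, the placement status is mirrored into a namespaced ClusterResourcePlacementStatus object inside test-ns, which is what the clusterresourceplacementstatuses.yaml changes that follow restructure.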
diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml index 20ed0c16a..b49d38120 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml @@ -19,355 +19,13 @@ spec: singular: clusterresourceplacementstatus scope: Namespaced versions: - - name: v1 - schema: - openAPIV3Schema: - description: ClusterResourcePlacementStatus defines the observed state of - the ClusterResourcePlacement object. - properties: - conditions: - description: Conditions is an array of current observed conditions for - ClusterResourcePlacement. - items: - description: Condition contains details for one aspect of the current - state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedResourceIndex: - description: |- - Resource index logically represents the generation of the selected resources. - We take a new snapshot of the selected resources whenever the selection or their content change. - Each snapshot has a different resource index. - One resource snapshot can contain multiple clusterResourceSnapshots CRs in order to store large amount of resources. - To get clusterResourceSnapshot of a given resource index, use the following command: - `kubectl get ClusterResourceSnapshot --selector=kubernetes-fleet.io/resource-index=$ObservedResourceIndex ` - ObservedResourceIndex is the resource index that the conditions in the ClusterResourcePlacementStatus observe. 
- For example, a condition of `ClusterResourcePlacementWorkSynchronized` type - is observing the synchronization status of the resource snapshot with the resource index $ObservedResourceIndex. - type: string - placementStatuses: - description: |- - PlacementStatuses contains a list of placement status on the clusters that are selected by PlacementPolicy. - Each selected cluster according to the latest resource placement is guaranteed to have a corresponding placementStatuses. - In the pickN case, there are N placement statuses where N = NumberOfClusters; Or in the pickFixed case, there are - N placement statuses where N = ClusterNames. - In these cases, some of them may not have assigned clusters when we cannot fill the required number of clusters. - items: - description: ResourcePlacementStatus represents the placement status - of selected resources for one target cluster. - properties: - applicableClusterResourceOverrides: - description: |- - ApplicableClusterResourceOverrides contains a list of applicable ClusterResourceOverride snapshots associated with - the selected resources. - - This field is alpha-level and is for the override policy feature. - items: - type: string - type: array - applicableResourceOverrides: - description: |- - ApplicableResourceOverrides contains a list of applicable ResourceOverride snapshots associated with the selected - resources. - - This field is alpha-level and is for the override policy feature. - items: - description: NamespacedName comprises a resource name, with a - mandatory namespace. - properties: - name: - description: Name is the name of the namespaced scope resource. - type: string - namespace: - description: Namespace is namespace of the namespaced scope - resource. - type: string - required: - - name - - namespace - type: object - type: array - clusterName: - description: |- - ClusterName is the name of the cluster this resource is assigned to. - If it is not empty, its value should be unique cross all placement decisions for the Placement. - type: string - conditions: - description: Conditions is an array of current observed conditions - for ResourcePlacementStatus. - items: - description: Condition contains details for one aspect of the - current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. 
- maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - failedPlacements: - description: |- - FailedPlacements is a list of all the resources failed to be placed to the given cluster or the resource is unavailable. - Note that we only include 100 failed resource placements even if there are more than 100. - This field is only meaningful if the `ClusterName` is not empty. - items: - description: FailedResourcePlacement contains the failure details - of a failed resource placement. - properties: - condition: - description: The failed condition status. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - envelope: - description: Envelope identifies the envelope object that - contains this resource. - properties: - name: - description: Name of the envelope object. - type: string - namespace: - description: Namespace is the namespace of the envelope - object. Empty if the envelope object is cluster scoped. - type: string - type: - default: ConfigMap - description: Type of the envelope object. - enum: - - ConfigMap - type: string - required: - - name - type: object - group: - description: Group is the group name of the selected resource. - type: string - kind: - description: Kind represents the Kind of the selected resources. 
- type: string - name: - description: Name of the target resource. - type: string - namespace: - description: Namespace is the namespace of the resource. Empty - if the resource is cluster scoped. - type: string - version: - description: Version is the version of the selected resource. - type: string - required: - - condition - - kind - - name - - version - type: object - maxItems: 100 - type: array - type: object - type: array - selectedResources: - description: SelectedResources contains a list of resources selected by - ResourceSelectors. - items: - description: ResourceIdentifier identifies one Kubernetes resource. - properties: - envelope: - description: Envelope identifies the envelope object that contains - this resource. - properties: - name: - description: Name of the envelope object. - type: string - namespace: - description: Namespace is the namespace of the envelope object. - Empty if the envelope object is cluster scoped. - type: string - type: - default: ConfigMap - description: Type of the envelope object. - enum: - - ConfigMap - type: string - required: - - name - type: object - group: - description: Group is the group name of the selected resource. - type: string - kind: - description: Kind represents the Kind of the selected resources. - type: string - name: - description: Name of the target resource. - type: string - namespace: - description: Namespace is the namespace of the resource. Empty if - the resource is cluster scoped. - type: string - version: - description: Version is the version of the selected resource. - type: string - required: - - kind - - name - - version - type: object - type: array - type: object - served: true - storage: false - additionalPrinterColumns: - - jsonPath: .status.observedResourceIndex + - jsonPath: .sourceStatus.observedResourceIndex name: Resource-Index type: string + - jsonPath: .lastUpdatedTime + name: Last-Updated + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date @@ -377,6 +35,7 @@ spec: description: |- ClusterResourcePlacementStatus is a namespaced resource that mirrors the PlacementStatus of a corresponding ClusterResourcePlacement object. This allows namespace-scoped access to cluster-scoped placement status. + The LastUpdatedTime field is updated whenever the CRPS object is updated. This object will be created within the target namespace that contains resources being managed by the CRP. When multiple ClusterResourcePlacements target the same namespace, each ClusterResourcePlacementStatus within that @@ -400,13 +59,16 @@ spec: In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string + lastUpdatedTime: + description: |- + LastUpdatedTime is the timestamp when this CRPS object was last updated. + This field is set to the current time whenever the CRPS object is created or modified. + format: date-time + type: string metadata: type: object - status: - description: |- - The observed status of ClusterResourcePlacementStatus which mirrors the PlacementStatus of the corresponding ClusterResourcePlacement. - This includes information about the namespace and resources within that namespace that are being managed by the placement. - The status will show placement details for resources selected by the ClusterResourcePlacement's ResourceSelectors. + sourceStatus: + description: Source status copied from the corresponding ClusterResourcePlacement. 
properties: conditions: description: |- @@ -488,7 +150,7 @@ spec: type: string placementStatuses: description: |- - PlacementStatuses contains a list of placement status on the clusters that are selected by PlacementPolicy. + PerClusterPlacementStatuses contains a list of placement status on the clusters that are selected by PlacementPolicy. Each selected cluster according to the observed resource placement is guaranteed to have a corresponding placementStatuses. In the pickN case, there are N placement statuses where N = NumberOfClusters; Or in the pickFixed case, there are N placement statuses where N = ClusterNames. @@ -1005,8 +667,10 @@ spec: type: object type: array type: object + required: + - lastUpdatedTime + - sourceStatus type: object served: true storage: true - subresources: - status: {} + subresources: {} diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterschedulingpolicysnapshots.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterschedulingpolicysnapshots.yaml index fe9057f48..ffa988280 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterschedulingpolicysnapshots.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterschedulingpolicysnapshots.yaml @@ -1187,7 +1187,7 @@ spec: x-kubernetes-list-type: map observedCRPGeneration: description: |- - ObservedCRPGeneration is the generation of the CRP which the scheduler uses to perform + ObservedCRPGeneration is the generation of the resource placement which the scheduler uses to perform the scheduling cycle and prepare the scheduling status. format: int64 type: integer diff --git a/config/crd/bases/placement.kubernetes-fleet.io_resourceoverrides.yaml b/config/crd/bases/placement.kubernetes-fleet.io_resourceoverrides.yaml index ffd438cc0..dc538218c 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_resourceoverrides.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_resourceoverrides.yaml @@ -52,7 +52,17 @@ spec: The recommended way is to set the placement so that the override can be rolled out immediately. properties: name: - description: Name is the reference to the name of placement. + type: string + scope: + default: Cluster + description: |- + Scope defines the scope of the placement. + A clusterResourceOverride can only reference a clusterResourcePlacement (cluster-scoped), + and a resourceOverride can reference either a clusterResourcePlacement or resourcePlacement (namespaced). + The referenced resourcePlacement must be in the same namespace as the resourceOverride. 
+ enum: + - Cluster + - Namespaced type: string required: - name @@ -311,6 +321,10 @@ spec: - policy - resourceSelectors type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - spec type: object @@ -931,6 +945,10 @@ spec: - policy - resourceSelectors type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - spec type: object diff --git a/config/crd/bases/placement.kubernetes-fleet.io_resourceoverridesnapshots.yaml b/config/crd/bases/placement.kubernetes-fleet.io_resourceoverridesnapshots.yaml index dc4e56c14..0d5279618 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_resourceoverridesnapshots.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_resourceoverridesnapshots.yaml @@ -66,7 +66,17 @@ spec: The recommended way is to set the placement so that the override can be rolled out immediately. properties: name: - description: Name is the reference to the name of placement. + type: string + scope: + default: Cluster + description: |- + Scope defines the scope of the placement. + A clusterResourceOverride can only reference a clusterResourcePlacement (cluster-scoped), + and a resourceOverride can reference either a clusterResourcePlacement or resourcePlacement (namespaced). + The referenced resourcePlacement must be in the same namespace as the resourceOverride. + enum: + - Cluster + - Namespaced type: string required: - name @@ -325,6 +335,10 @@ spec: - policy - resourceSelectors type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - overrideHash - overrideSpec @@ -981,6 +995,10 @@ spec: - policy - resourceSelectors type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - overrideHash - overrideSpec diff --git a/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml b/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml index 07f092ef7..743ad2355 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml @@ -524,22 +524,22 @@ spec: You can have 1-100 selectors. items: description: |- - ClusterResourceSelector is used to select resources as the target resources to be placed. + ResourceSelectorTerm is used to select resources as the target resources to be placed. All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. properties: group: description: |- - Group name of the cluster-scoped resource. + Group name of the be selected resource. Use an empty string to select resources under the core API group (e.g., namespaces). type: string kind: description: |- - Kind of the cluster-scoped resource. + Kind of the to be selected resource. Note: When `Kind` is `namespace`, by default ALL the resources under the selected namespaces are selected. 
type: string labelSelector: description: |- - A label query over all the cluster-scoped resources. Resources matching the query are selected. + A label query over all the be selected resources. Resources matching the query are selected. Note that namespace-scoped resources can't be selected even if they match the query. properties: matchExpressions: @@ -586,7 +586,7 @@ spec: type: object x-kubernetes-map-type: atomic name: - description: Name of the cluster-scoped resource. + description: Name of the be selected resource. type: string selectionScope: default: NamespaceWithResources @@ -597,7 +597,7 @@ spec: - NamespaceWithResources type: string version: - description: Version of the cluster-scoped resource. + description: Version of the to be selected resource. type: string required: - group @@ -948,11 +948,6 @@ spec: required: - resourceSelectors type: object - x-kubernetes-validations: - - message: when statusReportingScope is NamespaceAccessible, exactly one - resourceSelector with kind 'Namespace' is required - rule: '!(self.statusReportingScope == ''NamespaceAccessible'' && size(self.resourceSelectors.filter(x, - x.kind == ''Namespace'')) != 1)' status: description: The observed status of ResourcePlacement. properties: @@ -1036,7 +1031,7 @@ spec: type: string placementStatuses: description: |- - PlacementStatuses contains a list of placement status on the clusters that are selected by PlacementPolicy. + PerClusterPlacementStatuses contains a list of placement status on the clusters that are selected by PlacementPolicy. Each selected cluster according to the observed resource placement is guaranteed to have a corresponding placementStatuses. In the pickN case, there are N placement statuses where N = NumberOfClusters; Or in the pickFixed case, there are N placement statuses where N = ClusterNames. diff --git a/config/crd/bases/placement.kubernetes-fleet.io_schedulingpolicysnapshots.yaml b/config/crd/bases/placement.kubernetes-fleet.io_schedulingpolicysnapshots.yaml index 4406774f9..b0b6c5ba1 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_schedulingpolicysnapshots.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_schedulingpolicysnapshots.yaml @@ -571,7 +571,7 @@ spec: x-kubernetes-list-type: map observedCRPGeneration: description: |- - ObservedCRPGeneration is the generation of the CRP which the scheduler uses to perform + ObservedCRPGeneration is the generation of the resource placement which the scheduler uses to perform the scheduling cycle and prepare the scheduling status. 
format: int64 type: integer diff --git a/docker/crd-installer.Dockerfile b/docker/crd-installer.Dockerfile index 95c99b884..0f90216b3 100644 --- a/docker/crd-installer.Dockerfile +++ b/docker/crd-installer.Dockerfile @@ -1,5 +1,5 @@ # Build the crdinstaller binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.4 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.6 AS builder WORKDIR /workspace # Copy the Go Modules manifests diff --git a/docker/hub-agent.Dockerfile b/docker/hub-agent.Dockerfile index 48eb7b3e4..7db074fb5 100644 --- a/docker/hub-agent.Dockerfile +++ b/docker/hub-agent.Dockerfile @@ -1,5 +1,5 @@ # Build the hubagent binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.4 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.6 AS builder WORKDIR /workspace # Copy the Go Modules manifests diff --git a/docker/member-agent.Dockerfile b/docker/member-agent.Dockerfile index 4761ac3af..26d88e874 100644 --- a/docker/member-agent.Dockerfile +++ b/docker/member-agent.Dockerfile @@ -1,5 +1,5 @@ # Build the memberagent binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.4 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.6 AS builder WORKDIR /workspace # Copy the Go Modules manifests diff --git a/docker/refresh-token.Dockerfile b/docker/refresh-token.Dockerfile index 1b59d1ca8..d08165ab3 100644 --- a/docker/refresh-token.Dockerfile +++ b/docker/refresh-token.Dockerfile @@ -1,5 +1,5 @@ # Build the refreshtoken binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.4 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.6 AS builder WORKDIR /workspace # Copy the Go Modules manifests diff --git a/examples/resourceplacement/rp-cm.yaml b/examples/resourceplacement/rp-cm.yaml new file mode 100644 index 000000000..ebd0946eb --- /dev/null +++ b/examples/resourceplacement/rp-cm.yaml @@ -0,0 +1,21 @@ +# This tests selecting a single resource in a namespace, +# and applying it to all clusters. +# Prerequisite: create a configMap named "test-cm" in namespace "test-ns". +apiVersion: placement.kubernetes-fleet.io/v1beta1 +kind: ResourcePlacement +metadata: + name: rp-cm + namespace: test-ns +spec: + resourceSelectors: + - group: "" + kind: ConfigMap + name: test-cm + version: v1 + policy: + placementType: PickAll + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 1 diff --git a/examples/resourceplacement/rp-deploy.yaml b/examples/resourceplacement/rp-deploy.yaml new file mode 100644 index 000000000..2d4f32220 --- /dev/null +++ b/examples/resourceplacement/rp-deploy.yaml @@ -0,0 +1,26 @@ +# This tests selecting multiple resources in a namespace, +# and only applying to a subset of clusters. +# Prerequisite: create and expose a deployment named "test-nginx" in namespace "test-ns". +apiVersion: placement.kubernetes-fleet.io/v1beta1 +kind: ResourcePlacement +metadata: + name: rp-nginx + namespace: test-ns +spec: + resourceSelectors: + - group: apps + kind: Deployment + name: test-nginx + version: v1 + - group: "" + kind: Service + name: test-nginx + version: v1 + policy: + placementType: PickN + numberOfClusters: 2 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 1 diff --git a/examples/resourceplacement/test-crp.yaml b/examples/resourceplacement/test-crp.yaml new file mode 100644 index 000000000..b497c9b1c --- /dev/null +++ b/examples/resourceplacement/test-crp.yaml @@ -0,0 +1,20 @@ +# This tests a CRP selecting a namespace only. 
+# Prerequisite: create a namespace named "test-ns". +apiVersion: placement.kubernetes-fleet.io/v1beta1 +kind: ClusterResourcePlacement +metadata: + name: ns-only-crp +spec: + resourceSelectors: + - group: "" + kind: Namespace + name: test-ns + version: v1 + selectionScope: NamespaceOnly # only namespace itself is placed, no resources within the namespace + policy: + placementType: PickAll + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 1 diff --git a/go.mod b/go.mod index fac062218..b441c34ef 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,6 @@ module go.goms.io/fleet -go 1.24.4 - -toolchain go1.24.6 +go 1.24.6 require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 diff --git a/hack/loadtest/util/help.go b/hack/loadtest/util/help.go index ec6aad494..70b6c280a 100644 --- a/hack/loadtest/util/help.go +++ b/hack/loadtest/util/help.go @@ -218,7 +218,7 @@ func createCRP(crp *v1beta1.ClusterResourcePlacement, crpFile string, crpName st crp.Name = crpName if useTestResources { - crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, v1beta1.ClusterResourceSelector{ + crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, v1beta1.ResourceSelectorTerm{ Group: "", Version: "v1", Kind: "Namespace", diff --git a/pkg/authtoken/providers/azure/azure_msi.go b/pkg/authtoken/providers/azure/azure_msi.go index fdcab7700..980e4c42c 100644 --- a/pkg/authtoken/providers/azure/azure_msi.go +++ b/pkg/authtoken/providers/azure/azure_msi.go @@ -18,6 +18,7 @@ package azure import ( "context" "fmt" + "net/http" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" @@ -50,7 +51,14 @@ func New(clientID, scope string) authtoken.Provider { // FetchToken gets a new token to make request to the associated fleet' hub cluster. func (a *AuthTokenProvider) FetchToken(ctx context.Context) (authtoken.AuthToken, error) { token := authtoken.AuthToken{} - opts := &azidentity.ManagedIdentityCredentialOptions{ID: azidentity.ClientID(a.ClientID)} + + httpClient := &http.Client{} + opts := &azidentity.ManagedIdentityCredentialOptions{ + ClientOptions: azcore.ClientOptions{ + Transport: httpClient, + }, + ID: azidentity.ClientID(a.ClientID), + } klog.V(2).InfoS("FetchToken", "client ID", a.ClientID) credential, err := azidentity.NewManagedIdentityCredential(opts) @@ -68,6 +76,16 @@ func (a *AuthTokenProvider) FetchToken(ctx context.Context) (authtoken.AuthToken }) if err != nil { klog.ErrorS(err, "Failed to GetToken", "scope", a.Scope) + // We may race at startup with a sidecar which inserts an iptables rule + // to intercept IMDS calls. If we get here before such an iptables rule + // is inserted, we will inadvertently connect to real IMDS, which won't + // be able to service our request. IMDS does not set 'Connection: + // close' on 400 errors. Default Go HTTP client behavior will keep the + // underlying TCP connection open for reuse, unaffected by iptables, + // causing all further requests to continue to be sent to real IMDS and + // fail. If an error is returned from the IMDS call, explicitly close the + // connection used by the HTTP client. 
+ httpClient.CloseIdleConnections() } return err }) diff --git a/pkg/controllers/clusterresourcebindingwatcher/suite_test.go b/pkg/controllers/bindingwatcher/suite_test.go similarity index 97% rename from pkg/controllers/clusterresourcebindingwatcher/suite_test.go rename to pkg/controllers/bindingwatcher/suite_test.go index 1a12e699b..5d85b0b8f 100644 --- a/pkg/controllers/clusterresourcebindingwatcher/suite_test.go +++ b/pkg/controllers/bindingwatcher/suite_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourcebindingwatcher +package bindingwatcher import ( "context" @@ -52,7 +52,7 @@ var ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "ClusterResourceBinding Watcher Suite") + RunSpecs(t, "Binding Watcher Suite") } var _ = BeforeSuite(func() { diff --git a/pkg/controllers/clusterresourcebindingwatcher/watcher.go b/pkg/controllers/bindingwatcher/watcher.go similarity index 97% rename from pkg/controllers/clusterresourcebindingwatcher/watcher.go rename to pkg/controllers/bindingwatcher/watcher.go index 97a67043e..483a023c0 100644 --- a/pkg/controllers/clusterresourcebindingwatcher/watcher.go +++ b/pkg/controllers/bindingwatcher/watcher.go @@ -14,8 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package clusterresourcebindingwatcher features a controller to watch the clusterResourceBinding and resourceBinding changes. -package clusterresourcebindingwatcher +// Package bindingwatcher features a controller to watch the clusterResourceBinding and resourceBinding changes. +package bindingwatcher import ( "context" diff --git a/pkg/controllers/clusterresourcebindingwatcher/watcher_integration_test.go b/pkg/controllers/bindingwatcher/watcher_integration_test.go similarity index 99% rename from pkg/controllers/clusterresourcebindingwatcher/watcher_integration_test.go rename to pkg/controllers/bindingwatcher/watcher_integration_test.go index a34fb3646..67adca8e2 100644 --- a/pkg/controllers/clusterresourcebindingwatcher/watcher_integration_test.go +++ b/pkg/controllers/bindingwatcher/watcher_integration_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -package clusterresourcebindingwatcher +package bindingwatcher import ( "fmt" @@ -40,7 +40,7 @@ const ( testReason1 = "testReason1" testReason2 = "testReason2" - eventuallyTimeout = time.Second * 10 + eventuallyTimeout = time.Second * 20 consistentlyDuration = time.Second * 10 interval = time.Millisecond * 250 ) diff --git a/pkg/controllers/clusterresourceplacementeviction/controller.go b/pkg/controllers/clusterresourceplacementeviction/controller.go index d07db954d..cd31a3c68 100644 --- a/pkg/controllers/clusterresourceplacementeviction/controller.go +++ b/pkg/controllers/clusterresourceplacementeviction/controller.go @@ -35,10 +35,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/metrics" bindingutils "go.goms.io/fleet/pkg/utils/binding" "go.goms.io/fleet/pkg/utils/condition" "go.goms.io/fleet/pkg/utils/controller" - "go.goms.io/fleet/pkg/utils/controller/metrics" "go.goms.io/fleet/pkg/utils/defaulter" evictionutils "go.goms.io/fleet/pkg/utils/eviction" ) diff --git a/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go b/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go index d57b0128d..9583bb512 100644 --- a/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go +++ b/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go @@ -33,8 +33,8 @@ import ( ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/metrics" "go.goms.io/fleet/pkg/utils/condition" - "go.goms.io/fleet/pkg/utils/controller/metrics" testutilseviction "go.goms.io/fleet/test/utils/eviction" ) @@ -492,7 +492,7 @@ var _ = Describe("Test ClusterResourcePlacementEviction Controller", func() { PlacementType: placementv1beta1.PickFixedPlacementType, ClusterNames: []string{"test-cluster-1"}, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -537,7 +537,7 @@ var _ = Describe("Test ClusterResourcePlacementEviction Controller", func() { // Create the CRP. 
By("Create ClusterResourcePlacement", func() { crp := buildTestPickAllCRP(crpName) - crp.Spec.ResourceSelectors = []placementv1beta1.ClusterResourceSelector{ + crp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -709,7 +709,7 @@ func buildTestPickNCRP(crpName string, clusterCount int32) placementv1beta1.Clus PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(clusterCount), }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", diff --git a/pkg/controllers/clusterresourceplacementeviction/controller_test.go b/pkg/controllers/clusterresourceplacementeviction/controller_test.go index 16c45b455..95fed8601 100644 --- a/pkg/controllers/clusterresourceplacementeviction/controller_test.go +++ b/pkg/controllers/clusterresourceplacementeviction/controller_test.go @@ -38,8 +38,8 @@ import ( ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/metrics" "go.goms.io/fleet/pkg/utils/condition" - "go.goms.io/fleet/pkg/utils/controller/metrics" "go.goms.io/fleet/pkg/utils/defaulter" ) diff --git a/pkg/controllers/clusterresourceplacementeviction/suite_test.go b/pkg/controllers/clusterresourceplacementeviction/suite_test.go index dc34eaa86..13576f72d 100644 --- a/pkg/controllers/clusterresourceplacementeviction/suite_test.go +++ b/pkg/controllers/clusterresourceplacementeviction/suite_test.go @@ -19,6 +19,7 @@ package clusterresourceplacementeviction import ( "context" "flag" + "os" "path/filepath" "testing" @@ -33,9 +34,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/metrics" ) var ( @@ -47,6 +50,13 @@ var ( cancel context.CancelFunc ) +func TestMain(m *testing.M) { + // Register here as the metric is both tested in ginkgo tests and go unit tests. 
+ ctrlmetrics.Registry.MustRegister(metrics.FleetEvictionStatus) + + os.Exit(m.Run()) +} + func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) diff --git a/pkg/controllers/overrider/clusterresource_controller_integration_test.go b/pkg/controllers/overrider/clusterresource_controller_integration_test.go index 5f1cdccb9..ac5685435 100644 --- a/pkg/controllers/overrider/clusterresource_controller_integration_test.go +++ b/pkg/controllers/overrider/clusterresource_controller_integration_test.go @@ -35,7 +35,7 @@ import ( func getClusterResourceOverrideSpec() placementv1beta1.ClusterResourceOverrideSpec { return placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", diff --git a/pkg/controllers/clusterresourceplacement/cluster_selector.go b/pkg/controllers/placement/cluster_selector.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/cluster_selector.go rename to pkg/controllers/placement/cluster_selector.go index 7e2537f62..ab2654bda 100644 --- a/pkg/controllers/clusterresourceplacement/cluster_selector.go +++ b/pkg/controllers/placement/cluster_selector.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( "fmt" diff --git a/pkg/controllers/clusterresourceplacement/cluster_selector_test.go b/pkg/controllers/placement/cluster_selector_test.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/cluster_selector_test.go rename to pkg/controllers/placement/cluster_selector_test.go index 962556b04..8cf8552d7 100644 --- a/pkg/controllers/clusterresourceplacement/cluster_selector_test.go +++ b/pkg/controllers/placement/cluster_selector_test.go @@ -1,4 +1,4 @@ -package clusterresourceplacement +package placement import ( "testing" diff --git a/pkg/controllers/clusterresourceplacement/controller.go b/pkg/controllers/placement/controller.go similarity index 98% rename from pkg/controllers/clusterresourceplacement/controller.go rename to pkg/controllers/placement/controller.go index a4b35c896..b93844a47 100644 --- a/pkg/controllers/clusterresourceplacement/controller.go +++ b/pkg/controllers/placement/controller.go @@ -14,8 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package clusterresourceplacement features a controller to reconcile the clusterResourcePlacement changes. -package clusterresourceplacement +// Package placement features a controller to reconcile the clusterResourcePlacement or resourcePlacement changes. 
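// --- Illustrative sketch (not part of this diff) ---------------------------
// Once TestMain registers metrics.FleetEvictionStatus with the controller-runtime
// registry (as added above in suite_test.go), both the Ginkgo specs and plain
// `go test` cases can inspect the registered metric families. The helper below is
// hypothetical and only shows the gathering pattern.
package clusterresourceplacementeviction

import (
	"testing"

	ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
)

func logRegisteredMetricFamilies(t *testing.T) {
	t.Helper()
	families, err := ctrlmetrics.Registry.Gather()
	if err != nil {
		t.Fatalf("failed to gather metrics from the controller-runtime registry: %v", err)
	}
	for _, mf := range families {
		t.Logf("metric family %q has %d series", mf.GetName(), len(mf.GetMetric()))
	}
}
// ---------------------------------------------------------------------------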
+package placement import ( "context" @@ -37,11 +37,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/metrics" "go.goms.io/fleet/pkg/scheduler/queue" "go.goms.io/fleet/pkg/utils/annotations" "go.goms.io/fleet/pkg/utils/condition" "go.goms.io/fleet/pkg/utils/controller" - "go.goms.io/fleet/pkg/utils/controller/metrics" "go.goms.io/fleet/pkg/utils/defaulter" "go.goms.io/fleet/pkg/utils/labels" "go.goms.io/fleet/pkg/utils/resource" @@ -112,7 +112,7 @@ func (r *Reconciler) handleDelete(ctx context.Context, placementObj fleetv1beta1 return ctrl.Result{}, err } // change the metric to include the namespace of the placement - metrics.FleetPlacementStatusLastTimeStampSeconds.DeletePartialMatch(prometheus.Labels{"name": placementObj.GetName()}) + metrics.FleetPlacementStatusLastTimeStampSeconds.DeletePartialMatch(prometheus.Labels{"namespace": placementObj.GetNamespace(), "name": placementObj.GetName()}) controllerutil.RemoveFinalizer(placementObj, fleetv1beta1.PlacementCleanupFinalizer) if err := r.Client.Update(ctx, placementObj); err != nil { klog.ErrorS(err, "Failed to remove placement finalizer", "placement", placementKObj) @@ -1238,7 +1238,7 @@ func emitPlacementStatusMetric(placementObj fleetv1beta1.PlacementObj) { status = string(cond.Status) reason = cond.Reason } - metrics.FleetPlacementStatusLastTimeStampSeconds.WithLabelValues(placementObj.GetName(), strconv.FormatInt(placementObj.GetGeneration(), 10), scheduledConditionType, status, reason).SetToCurrentTime() + metrics.FleetPlacementStatusLastTimeStampSeconds.WithLabelValues(placementObj.GetNamespace(), placementObj.GetName(), strconv.FormatInt(placementObj.GetGeneration(), 10), scheduledConditionType, status, reason).SetToCurrentTime() return } @@ -1252,12 +1252,12 @@ func emitPlacementStatusMetric(placementObj fleetv1beta1.PlacementObj) { status = string(cond.Status) reason = cond.Reason } - metrics.FleetPlacementStatusLastTimeStampSeconds.WithLabelValues(placementObj.GetName(), strconv.FormatInt(placementObj.GetGeneration(), 10), conditionType, status, reason).SetToCurrentTime() + metrics.FleetPlacementStatusLastTimeStampSeconds.WithLabelValues(placementObj.GetNamespace(), placementObj.GetName(), strconv.FormatInt(placementObj.GetGeneration(), 10), conditionType, status, reason).SetToCurrentTime() return } } // Emit the "Completed" condition metric to indicate that the placement has completed. // This condition is used solely for metric reporting purposes.
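// --- Illustrative sketch (not part of this diff) ---------------------------
// The WithLabelValues calls above now pass the namespace as the first label value.
// The actual definition of FleetPlacementStatusLastTimeStampSeconds lives in
// pkg/metrics and is not shown in this diff; a definition compatible with the new
// call sites could look roughly like the following. The metric name, help text,
// and the names of the last two labels are assumptions inferred from the callers.
package metrics

import "github.com/prometheus/client_golang/prometheus"

var FleetPlacementStatusLastTimeStampSeconds = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "fleet_placement_status_last_timestamp_seconds", // assumed name
		Help: "Last update timestamp of a placement status condition.",
	},
	// Label order must match the WithLabelValues calls in emitPlacementStatusMetric.
	[]string{"namespace", "name", "generation", "conditionType", "status", "reason"},
)
// ---------------------------------------------------------------------------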
- metrics.FleetPlacementStatusLastTimeStampSeconds.WithLabelValues(placementObj.GetName(), strconv.FormatInt(placementObj.GetGeneration(), 10), "Completed", string(metav1.ConditionTrue), "Completed").SetToCurrentTime() + metrics.FleetPlacementStatusLastTimeStampSeconds.WithLabelValues(placementObj.GetNamespace(), placementObj.GetName(), strconv.FormatInt(placementObj.GetGeneration(), 10), "Completed", string(metav1.ConditionTrue), "Completed").SetToCurrentTime() } diff --git a/pkg/controllers/clusterresourceplacement/controller_integration_test.go b/pkg/controllers/placement/controller_integration_test.go similarity index 97% rename from pkg/controllers/clusterresourceplacement/controller_integration_test.go rename to pkg/controllers/placement/controller_integration_test.go index 908c72b95..8ed199cd7 100644 --- a/pkg/controllers/clusterresourceplacement/controller_integration_test.go +++ b/pkg/controllers/placement/controller_integration_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( "fmt" @@ -35,9 +35,9 @@ import ( ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/metrics" "go.goms.io/fleet/pkg/utils" "go.goms.io/fleet/pkg/utils/condition" - "go.goms.io/fleet/pkg/utils/controller/metrics" "go.goms.io/fleet/pkg/utils/resource" metricsUtils "go.goms.io/fleet/test/utils/metrics" ) @@ -388,7 +388,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Name: testCRPName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -475,6 +475,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -487,6 +488,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType))}, @@ -531,6 +533,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -543,6 +546,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: 
ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -639,6 +643,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -651,6 +656,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType))}, @@ -754,6 +760,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { By("Ensure placement status metric was emitted") wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType))}, @@ -847,6 +854,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -859,6 +867,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType))}, @@ -962,6 +971,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { By("Ensure placement status metric was emitted for 1st generation") wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType))}, @@ -976,7 +986,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { By("Update CRP spec to add 
another resource selector") gotCRP.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, - placementv1beta1.ClusterResourceSelector{ + placementv1beta1.ResourceSelectorTerm{ Group: corev1.GroupName, Version: "v1", Kind: "Namespace", @@ -1024,6 +1034,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { // In this case we have 2 metrics for Scheduled condition type as crp generation goes from 1 to 2. wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -1145,6 +1156,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { // In this case we have 2 metrics for different condition types as crp updates and its generation goes from 1 to 2. wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType))}, @@ -1266,6 +1278,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -1278,6 +1291,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType))}, @@ -1290,6 +1304,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType))}, @@ -1333,6 +1348,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { By("Ensure placement status applied metric was emitted") wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(crp.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementAppliedConditionType))}, @@ -1375,6 +1391,7 
@@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { By("Ensure placement status completed metric was emitted") wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To("Completed")}, @@ -1400,7 +1417,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Name: testCRPName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -1560,6 +1577,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -1572,6 +1590,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType))}, @@ -1584,6 +1603,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType))}, @@ -1627,6 +1647,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { By("Ensure placement status metric for reportDiff was emitted") wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementDiffReportedConditionType))}, @@ -1743,6 +1764,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -1755,6 +1777,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: 
ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType))}, @@ -1767,6 +1790,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType))}, @@ -1884,6 +1908,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To("Completed")}, @@ -1918,7 +1943,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Name: testCRPName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -1954,6 +1979,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -1980,7 +2006,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Name: testCRPName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -2036,6 +2062,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -2111,6 +2138,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { By("Ensure placement status metric for rollout external was emitted") wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType))}, diff --git 
a/pkg/controllers/clusterresourceplacement/controller_test.go b/pkg/controllers/placement/controller_test.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/controller_test.go rename to pkg/controllers/placement/controller_test.go index 2f725fa98..0f07e9958 100644 --- a/pkg/controllers/clusterresourceplacement/controller_test.go +++ b/pkg/controllers/placement/controller_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( "context" @@ -105,7 +105,7 @@ func clusterResourcePlacementForTest() *fleetv1beta1.ClusterResourcePlacement { Generation: placementGeneration, }, Spec: fleetv1beta1.PlacementSpec{ - ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ + ResourceSelectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", diff --git a/pkg/controllers/clusterresourceplacement/placement_controllerv1alpha1.go b/pkg/controllers/placement/placement_controllerv1alpha1.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/placement_controllerv1alpha1.go rename to pkg/controllers/placement/placement_controllerv1alpha1.go index 85b1fe755..fe2995e96 100644 --- a/pkg/controllers/clusterresourceplacement/placement_controllerv1alpha1.go +++ b/pkg/controllers/placement/placement_controllerv1alpha1.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( "context" diff --git a/pkg/controllers/clusterresourceplacement/placement_status.go b/pkg/controllers/placement/placement_status.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/placement_status.go rename to pkg/controllers/placement/placement_status.go index d4d12702b..20c981f2b 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status.go +++ b/pkg/controllers/placement/placement_status.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( "context" diff --git a/pkg/controllers/clusterresourceplacement/placement_status_test.go b/pkg/controllers/placement/placement_status_test.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/placement_status_test.go rename to pkg/controllers/placement/placement_status_test.go index 4f88bc6ea..a08fd6611 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status_test.go +++ b/pkg/controllers/placement/placement_status_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package clusterresourceplacement +package placement import ( "context" @@ -5968,7 +5968,7 @@ func TestSetPlacementStatusForClusterResourcePlacement(t *testing.T) { Name: testCRPName, }, Spec: fleetv1beta1.PlacementSpec{ - ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ + ResourceSelectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -6647,7 +6647,7 @@ func TestSetResourcePlacementStatus(t *testing.T) { Namespace: testRPNamespace, }, Spec: fleetv1beta1.PlacementSpec{ - ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ + ResourceSelectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", diff --git a/pkg/controllers/clusterresourceplacement/resource_selector.go b/pkg/controllers/placement/resource_selector.go similarity index 80% rename from pkg/controllers/clusterresourceplacement/resource_selector.go rename to pkg/controllers/placement/resource_selector.go index bf18e840f..b7ffb6226 100644 --- a/pkg/controllers/clusterresourceplacement/resource_selector.go +++ b/pkg/controllers/placement/resource_selector.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( "fmt" @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" @@ -83,7 +84,7 @@ var ( // selectResources selects the resources according to the placement resourceSelectors. // It also generates an array of manifests obj based on the selected resources. func (r *Reconciler) selectResources(placement *fleetv1alpha1.ClusterResourcePlacement) ([]workv1alpha1.Manifest, error) { - selectedObjects, err := r.gatherSelectedResource(placement.GetName(), convertResourceSelector(placement.Spec.ResourceSelectors)) + selectedObjects, err := r.gatherSelectedResource(types.NamespacedName{Name: placement.GetName()}, convertResourceSelector(placement.Spec.ResourceSelectors)) if err != nil { return nil, err } @@ -112,10 +113,10 @@ func (r *Reconciler) selectResources(placement *fleetv1alpha1.ClusterResourcePla // Note: temporary solution to share the same set of utils between v1alpha1 and v1beta1 APIs so that v1alpha1 implementation // won't be broken. v1alpha1 implementation should be removed when new API is ready. // The clusterResourceSelect has no changes between different versions. -func convertResourceSelector(old []fleetv1alpha1.ClusterResourceSelector) []fleetv1beta1.ClusterResourceSelector { - res := make([]fleetv1beta1.ClusterResourceSelector, len(old)) +func convertResourceSelector(old []fleetv1alpha1.ClusterResourceSelector) []fleetv1beta1.ResourceSelectorTerm { + res := make([]fleetv1beta1.ResourceSelectorTerm, len(old)) for i, item := range old { - res[i] = fleetv1beta1.ClusterResourceSelector{ + res[i] = fleetv1beta1.ResourceSelectorTerm{ Group: item.Group, Version: item.Version, Kind: item.Kind, @@ -128,8 +129,7 @@ func convertResourceSelector(old []fleetv1alpha1.ClusterResourceSelector) []flee } // gatherSelectedResource gets all the resources according to the resource selector. 
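// --- Illustrative sketch (not part of this diff) ---------------------------
// With the signature change below, the same selector-gathering path serves both
// cluster-scoped and namespaced placements: an empty Namespace in the key marks a
// ClusterResourcePlacement, a non-empty one marks a ResourcePlacement. The calls
// here are only a sketch; `r` is assumed to be a fully configured *Reconciler and
// the placement names are hypothetical.
package placement

import (
	"k8s.io/apimachinery/pkg/types"

	fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
)

func exampleGatherCalls(r *Reconciler, selectors []fleetv1beta1.ResourceSelectorTerm) error {
	// Cluster-scoped placement: no namespace in the key, so namespace-scoped
	// resources in the selectors are rejected as a user error.
	if _, err := r.gatherSelectedResource(types.NamespacedName{Name: "example-crp"}, selectors); err != nil {
		return err
	}
	// Namespaced placement: the key carries the namespace and listers are scoped
	// to it, so cluster-scoped resources in the selectors are rejected instead.
	_, err := r.gatherSelectedResource(types.NamespacedName{Name: "example-rp", Namespace: "example-ns"}, selectors)
	return err
}
// ---------------------------------------------------------------------------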
-// TODO: treat the RP selector differently to not allow RP to select cluster scoped resources -func (r *Reconciler) gatherSelectedResource(placement string, selectors []fleetv1beta1.ClusterResourceSelector) ([]*unstructured.Unstructured, error) { +func (r *Reconciler) gatherSelectedResource(placementKey types.NamespacedName, selectors []fleetv1beta1.ResourceSelectorTerm) ([]*unstructured.Unstructured, error) { var resources []*unstructured.Unstructured var resourceMap = make(map[fleetv1beta1.ResourceIdentifier]bool) for _, selector := range selectors { @@ -145,10 +145,10 @@ func (r *Reconciler) gatherSelectedResource(placement string, selectors []fleetv } var objs []runtime.Object var err error - if gvk == utils.NamespaceGVK { - objs, err = r.fetchNamespaceResources(selector, placement) + if gvk == utils.NamespaceGVK && placementKey.Namespace == "" && selector.SelectionScope != fleetv1beta1.NamespaceOnly { + objs, err = r.fetchNamespaceResources(selector, placementKey.Name) } else { - objs, err = r.fetchClusterScopedResources(selector, placement) + objs, err = r.fetchResources(selector, placementKey) } if err != nil { return nil, err @@ -164,7 +164,7 @@ func (r *Reconciler) gatherSelectedResource(placement string, selectors []fleetv } if _, exist := resourceMap[ri]; exist { err = fmt.Errorf("found duplicate resource %+v", ri) - klog.ErrorS(err, "user selected one resource more than once", "resource", ri, "placement", placement) + klog.ErrorS(err, "User selected one resource more than once", "resource", ri, "placement", placementKey) return nil, controller.NewUserError(err) } resourceMap[ri] = true @@ -230,16 +230,16 @@ func buildApplyOrderMap() map[string]int { return ordering } -// fetchClusterScopedResources retrieves the objects based on the selector. -func (r *Reconciler) fetchClusterScopedResources(selector fleetv1beta1.ClusterResourceSelector, placeName string) ([]runtime.Object, error) { - klog.V(2).InfoS("start to fetch the cluster scoped resources by the selector", "selector", selector) +// fetchResources retrieves the objects based on the selector. +func (r *Reconciler) fetchResources(selector fleetv1beta1.ResourceSelectorTerm, placementKey types.NamespacedName) ([]runtime.Object, error) { + klog.V(2).InfoS("Start to fetch resources by the selector", "selector", selector, "placement", placementKey) gk := schema.GroupKind{ Group: selector.Group, Kind: selector.Kind, } restMapping, err := r.RestMapper.RESTMapping(gk, selector.Version) if err != nil { - return nil, controller.NewUserError(fmt.Errorf("invalid placement %s, failed to get GVR of the selector: %w", placeName, err)) + return nil, controller.NewUserError(fmt.Errorf("invalid placement %s, failed to get GVR of the selector: %w", placementKey, err)) } gvr := restMapping.Resource gvk := schema.GroupVersionKind{ @@ -247,26 +247,47 @@ func (r *Reconciler) fetchClusterScopedResources(selector fleetv1beta1.ClusterRe Version: selector.Version, Kind: selector.Kind, } - if !r.InformerManager.IsClusterScopedResources(gvk) { - return nil, controller.NewUserError(fmt.Errorf("invalid placement %s: %+v is not a cluster scoped resource", placeName, restMapping.Resource)) + + isNamespacedResource := !r.InformerManager.IsClusterScopedResources(gvk) + if isNamespacedResource && placementKey.Namespace == "" { + // If it's a namespace-scoped resource but placement has no namespace, return error. 
+ err := fmt.Errorf("invalid placement %s: cannot select namespace-scoped resource %v in a clusterResourcePlacement", placementKey, gvr) + klog.ErrorS(err, "Invalid resource selector", "selector", selector) + return nil, controller.NewUserError(err) + } else if !isNamespacedResource && placementKey.Namespace != "" { + // If it's a cluster-scoped resource but placement has a namespace, return error. + err := fmt.Errorf("invalid placement %s: cannot select cluster-scoped resource %v in a resourcePlacement", placementKey, gvr) + klog.ErrorS(err, "Invalid resource selector", "selector", selector) + return nil, controller.NewUserError(err) } + if !r.InformerManager.IsInformerSynced(gvr) { - return nil, controller.NewExpectedBehaviorError(fmt.Errorf("informer cache for %+v is not synced yet", restMapping.Resource)) + err := fmt.Errorf("informer cache for %+v is not synced yet", restMapping.Resource) + klog.ErrorS(err, "Informer cache is not synced", "gvr", gvr, "placement", placementKey) + return nil, controller.NewExpectedBehaviorError(err) } lister := r.InformerManager.Lister(gvr) + // TODO: validator should enforce the mutual exclusiveness between the `name` and `labelSelector` fields if len(selector.Name) != 0 { - obj, err := lister.Get(selector.Name) + var obj runtime.Object + var err error + + if isNamespacedResource { + obj, err = lister.ByNamespace(placementKey.Namespace).Get(selector.Name) + } else { + obj, err = lister.Get(selector.Name) + } + if err != nil { - klog.ErrorS(err, "cannot get the resource", "gvr", gvr, "name", selector.Name) + klog.ErrorS(err, "Cannot get the resource", "gvr", gvr, "name", selector.Name, "namespace", placementKey.Namespace) return nil, controller.NewAPIServerError(true, client.IgnoreNotFound(err)) } - uObj := obj.DeepCopyObject().(*unstructured.Unstructured) - if uObj.GetDeletionTimestamp() != nil { - // skip a to be deleted namespace - klog.V(2).InfoS("skip the deleting cluster scoped resources by the selector", - "selector", selector, "placeName", placeName, "resource name", uObj.GetName()) + if uObj := obj.DeepCopyObject().(*unstructured.Unstructured); uObj.GetDeletionTimestamp() != nil { + // skip a to be deleted resource + klog.V(2).InfoS("Skip the deleting resource by the selector", + "selector", selector, "placement", placementKey, "resourceName", uObj.GetName()) return []runtime.Object{}, nil } return []runtime.Object{obj}, nil @@ -282,18 +303,26 @@ func (r *Reconciler) fetchClusterScopedResources(selector fleetv1beta1.ClusterRe return nil, controller.NewUnexpectedBehaviorError(fmt.Errorf("cannot convert the label selector to a selector: %w", err)) } } + var selectedObjs []runtime.Object - objects, err := lister.List(labelSelector) + var objects []runtime.Object + + if isNamespacedResource { + objects, err = lister.ByNamespace(placementKey.Namespace).List(labelSelector) + } else { + objects, err = lister.List(labelSelector) + } if err != nil { - return nil, controller.NewAPIServerError(true, fmt.Errorf("cannot list all the objects: %w", err)) + klog.ErrorS(err, "Cannot list all the objects", "gvr", gvr, "labelSelector", labelSelector, "placement", placementKey) + return nil, controller.NewAPIServerError(true, err) } + // go ahead and claim all objects by adding a finalizer and insert the placement in its annotation for i := 0; i < len(objects); i++ { - uObj := objects[i].DeepCopyObject().(*unstructured.Unstructured) - if uObj.GetDeletionTimestamp() != nil { - // skip a to be deleted namespace - klog.V(2).InfoS("skip the deleting cluster scoped 
resources by the selector", - "selector", selector, "placeName", placeName, "resource name", uObj.GetName()) + if uObj := objects[i].DeepCopyObject().(*unstructured.Unstructured); uObj.GetDeletionTimestamp() != nil { + // skip a to be deleted resource + klog.V(2).InfoS("Skip the deleting resource by the selector", + "selector", selector, "placement", placementKey, "resourceName", uObj.GetName()) continue } selectedObjs = append(selectedObjs, objects[i]) @@ -303,13 +332,13 @@ func (r *Reconciler) fetchClusterScopedResources(selector fleetv1beta1.ClusterRe } // fetchNamespaceResources retrieves all the objects for a ResourceSelectorTerm that is for namespace. -func (r *Reconciler) fetchNamespaceResources(selector fleetv1beta1.ClusterResourceSelector, placeName string) ([]runtime.Object, error) { +func (r *Reconciler) fetchNamespaceResources(selector fleetv1beta1.ResourceSelectorTerm, placementName string) ([]runtime.Object, error) { klog.V(2).InfoS("start to fetch the namespace resources by the selector", "selector", selector) var resources []runtime.Object if len(selector.Name) != 0 { // just a single namespace - objs, err := r.fetchAllResourcesInOneNamespace(selector.Name, placeName) + objs, err := r.fetchAllResourcesInOneNamespace(selector.Name, placementName) if err != nil { klog.ErrorS(err, "failed to fetch all the selected resource in a namespace", "namespace", selector.Name) return nil, err @@ -330,7 +359,8 @@ func (r *Reconciler) fetchNamespaceResources(selector fleetv1beta1.ClusterResour } namespaces, err := r.InformerManager.Lister(utils.NamespaceGVR).List(labelSelector) if err != nil { - return nil, controller.NewAPIServerError(true, fmt.Errorf("cannot list all the namespaces given the label selector: %w", err)) + klog.ErrorS(err, "Cannot list all the namespaces by the label selector", "labelSelector", labelSelector, "placement", placementName) + return nil, controller.NewAPIServerError(true, err) } for _, namespace := range namespaces { @@ -338,7 +368,7 @@ func (r *Reconciler) fetchNamespaceResources(selector fleetv1beta1.ClusterResour if err != nil { return nil, controller.NewUnexpectedBehaviorError(fmt.Errorf("cannot get the name of a namespace object: %w", err)) } - objs, err := r.fetchAllResourcesInOneNamespace(ns.GetName(), placeName) + objs, err := r.fetchAllResourcesInOneNamespace(ns.GetName(), placementName) if err != nil { klog.ErrorS(err, "failed to fetch all the selected resource in a namespace", "namespace", ns.GetName()) return nil, err @@ -384,10 +414,17 @@ func (r *Reconciler) fetchAllResourcesInOneNamespace(namespaceName string, place lister := r.InformerManager.Lister(gvr) objs, err := lister.ByNamespace(namespaceName).List(labels.Everything()) if err != nil { - return nil, controller.NewAPIServerError(true, fmt.Errorf("cannot list all the objects of type %+v in namespace %s: %w", gvr, namespaceName, err)) + klog.ErrorS(err, "Cannot list all the objects in namespace", "gvr", gvr, "namespace", namespaceName) + return nil, controller.NewAPIServerError(true, err) } for _, obj := range objs { uObj := obj.DeepCopyObject().(*unstructured.Unstructured) + if uObj.GetDeletionTimestamp() != nil { + // skip a to be deleted resource + klog.V(2).InfoS("skip the deleting resource by the selector", + "placeName", placeName, "namespace", namespaceName, "object", klog.KObj(uObj)) + continue + } shouldInclude, err := utils.ShouldPropagateObj(r.InformerManager, uObj) if err != nil { klog.ErrorS(err, "cannot determine if we should propagate an object", "object", klog.KObj(uObj)) @@ 
-520,8 +557,10 @@ func generateResourceContent(object *unstructured.Unstructured) (*fleetv1beta1.R // It also returns the number of envelope configmaps so the CRP controller can have the right expectation of the number of work objects. func (r *Reconciler) selectResourcesForPlacement(placementObj fleetv1beta1.PlacementObj) (int, []fleetv1beta1.ResourceContent, []fleetv1beta1.ResourceIdentifier, error) { envelopeObjCount := 0 - placementSpec := placementObj.GetPlacementSpec() - selectedObjects, err := r.gatherSelectedResource(placementObj.GetName(), placementSpec.ResourceSelectors) + selectedObjects, err := r.gatherSelectedResource(types.NamespacedName{ + Name: placementObj.GetName(), + Namespace: placementObj.GetNamespace(), + }, placementObj.GetPlacementSpec().ResourceSelectors) if err != nil { return 0, nil, nil, err } diff --git a/pkg/controllers/clusterresourceplacement/resource_selector_test.go b/pkg/controllers/placement/resource_selector_test.go similarity index 55% rename from pkg/controllers/clusterresourceplacement/resource_selector_test.go rename to pkg/controllers/placement/resource_selector_test.go index 0140ba14f..08b3103ed 100644 --- a/pkg/controllers/clusterresourceplacement/resource_selector_test.go +++ b/pkg/controllers/placement/resource_selector_test.go @@ -14,9 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( + "errors" "math/rand" "testing" "time" @@ -26,15 +27,21 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" utilrand "k8s.io/apimachinery/pkg/util/rand" "k8s.io/utils/ptr" workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/utils" + "go.goms.io/fleet/pkg/utils/controller" + testinformer "go.goms.io/fleet/test/utils/informer" ) func TestGenerateManifest(t *testing.T) { @@ -807,6 +814,956 @@ func createResourceContentForTest(t *testing.T, obj interface{}) *fleetv1beta1.R } } +func TestGatherSelectedResource(t *testing.T) { + // Common test deployment object used across multiple test cases. + testDeployment := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "test-ns", + }, + }, + } + testDeployment.SetGroupVersionKind(utils.DeploymentGVK) + + // Common test configmap object used across multiple test cases. 
+ testConfigMap := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "test-configmap", + "namespace": "test-ns", + }, + }, + } + testConfigMap.SetGroupVersionKind(utils.ConfigMapGVK) + + kubeRootCAConfigMap := &unstructured.Unstructured{ // reserved configmap object + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "kube-root-ca.crt", + "namespace": "test-ns", + }, + }, + } + kubeRootCAConfigMap.SetGroupVersionKind(utils.ConfigMapGVK) + + // Common test deployment object in deleting state. + testDeletingDeployment := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deleting-deployment", + "namespace": "test-ns", + "deletionTimestamp": "2025-01-01T00:00:00Z", + "labels": map[string]interface{}{ + "tier": "api", + }, + }, + }, + } + testDeletingDeployment.SetGroupVersionKind(utils.DeploymentGVK) + + // Common test deployment with app=frontend label. + testFrontendDeployment := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "frontend-deployment", + "namespace": "test-ns", + "labels": map[string]interface{}{ + "app": "frontend", + "tier": "web", + }, + }, + }, + } + testFrontendDeployment.SetGroupVersionKind(utils.DeploymentGVK) + + // Common test deployment with app=backend label. + testBackendDeployment := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "backend-deployment", + "namespace": "test-ns", + "labels": map[string]interface{}{ + "app": "backend", + "tier": "api", + }, + }, + }, + } + testBackendDeployment.SetGroupVersionKind(utils.DeploymentGVK) + + // Common test namespace object (cluster-scoped). + testNamespace := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": "test-ns", + "labels": map[string]interface{}{ + "environment": "test", + }, + }, + }, + } + testNamespace.SetGroupVersionKind(utils.NamespaceGVK) + + testDeletingNamespace := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": "deleting-ns", + "labels": map[string]interface{}{ + "environment": "test", + }, + "deletionTimestamp": "2025-01-01T00:00:00Z", + }, + }, + } + testDeletingNamespace.SetGroupVersionKind(utils.NamespaceGVK) + + prodNamespace := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": "prod-ns", + "labels": map[string]interface{}{ + "environment": "production", + }, + }, + }, + } + prodNamespace.SetGroupVersionKind(utils.NamespaceGVK) + + // Common test cluster role object (cluster-scoped). + testClusterRole := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRole", + "metadata": map[string]interface{}{ + "name": "test-cluster-role", + }, + }, + } + testClusterRole.SetGroupVersionKind(utils.ClusterRoleGVK) + + // Common test cluster role object #2 (cluster-scoped). 
+ testClusterRole2 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRole", + "metadata": map[string]interface{}{ + "name": "test-cluster-role-2", + }, + }, + } + testClusterRole2.SetGroupVersionKind(utils.ClusterRoleGVK) + + kubeSystemNamespace := &unstructured.Unstructured{ // reserved namespace object + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": "kube-system", + "labels": map[string]interface{}{ + "environment": "test", + }, + }, + }, + } + kubeSystemNamespace.SetGroupVersionKind(utils.NamespaceGVK) + + tests := []struct { + name string + placementName types.NamespacedName + selectors []fleetv1beta1.ResourceSelectorTerm + resourceConfig *utils.ResourceConfig + informerManager *testinformer.FakeManager + want []*unstructured.Unstructured + wantError error + }{ + { + name: "should handle empty selectors", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{}, + want: nil, + }, + { + name: "should skip disabled resources", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(true), // make this allow list - nothing is allowed + want: nil, + }, + { + name: "should return error for cluster-scoped resource", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + Name: "test-clusterrole", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{}, + }, + want: nil, + wantError: controller.ErrUserError, + }, + { + name: "should handle single resource selection successfully", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, + }, + } + }(), + want: []*unstructured.Unstructured{testDeployment}, + wantError: nil, + }, + { + name: "should return empty result when informer manager returns not found error", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: { + Objects: []runtime.Object{}, + Err: apierrors.NewNotFound(schema.GroupResource{Group: "apps", Resource: "deployments"}, 
"test-deployment"), + }, + }, + } + }(), + want: nil, // should return nil when informer returns not found error + }, + { + name: "should return error when informer manager returns non-NotFound error", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: { + Objects: []runtime.Object{}, + Err: errors.New("connection timeout"), + }, + }, + } + }(), + wantError: controller.ErrUnexpectedBehavior, + }, + { + name: "should return error using label selector when informer manager returns error", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: { + Objects: []runtime.Object{}, + Err: apierrors.NewNotFound(schema.GroupResource{Group: "apps", Resource: "deployments"}, "test-deployment"), + }, + }, + } + }(), + wantError: controller.ErrAPIServerError, + }, + { + name: "should return only non-deleting resources when mixed with deleting resources", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", // non-deleting deployment + }, + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deleting-deployment", // deleting deployment + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + }, + } + }(), + want: []*unstructured.Unstructured{testDeployment}, + wantError: nil, + }, + { + name: "should handle resource selection successfully by using label selector", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "frontend", + }, + }, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testFrontendDeployment, testBackendDeployment, testDeployment}}, + }, + } + }(), + want: []*unstructured.Unstructured{testFrontendDeployment}, + wantError: nil, + }, + { + name: "should handle label selector with MatchExpressions", + placementName: 
types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "tier", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"web", "api"}, + }, + }, + }, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testFrontendDeployment, testBackendDeployment, testDeployment, testDeletingDeployment}}, + }, + } + }(), + want: []*unstructured.Unstructured{testBackendDeployment, testFrontendDeployment}, // should return both deployments (order may vary) + wantError: nil, + }, + { + name: "should detect duplicate resources", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", // same deployment selected twice + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, + }, + } + }(), + wantError: controller.ErrUserError, + }, + { + name: "should sort resources according to apply order", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-configmap", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap}}, + }, + } + }(), + // ConfigMap should come first according to apply order. 
+ want: []*unstructured.Unstructured{testConfigMap, testDeployment}, + }, + // tests for cluster-scoped placements + { + name: "should return error for namespace-scoped resource for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{}, + }, + want: nil, + wantError: controller.ErrUserError, + }, + { + name: "should sort resources for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + // Empty name means select all ClusterRoles (or use label selector). + }, + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.ClusterRoleGVR: {Objects: []runtime.Object{testClusterRole, testClusterRole2}}, + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace}}, + }, + } + }(), + // Namespace should come first according to apply order (namespace comes before ClusterRole). + // Both ClusterRoles should be included since we're selecting all ClusterRoles with empty name. + want: []*unstructured.Unstructured{testNamespace, testClusterRole, testClusterRole2}, + }, + { + name: "should select resources by name for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + Name: "test-cluster-role", + }, + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.ClusterRoleGVR: {Objects: []runtime.Object{testClusterRole, testClusterRole2}}, + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace}}, + }, + } + }(), + // Namespace should come first according to apply order (namespace comes before ClusterRole). 
+ want: []*unstructured.Unstructured{testNamespace, testClusterRole}, + }, + { + name: "should select namespaces and its children resources by using label selector for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "environment": "test", + }, + }, + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + // Should select only non-reserved namespaces with matching labels and their children resources + want: []*unstructured.Unstructured{testNamespace, testConfigMap, testDeployment}, + }, + { + name: "should skip the resource for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "environment": "test", + }, + }, + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: func() *utils.ResourceConfig { + cfg := utils.NewResourceConfig(false) + cfg.AddGroupVersionKind(utils.DeploymentGVK) + return cfg + }(), + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + // should skip the deployment resource since it is not allowed by resource config + want: []*unstructured.Unstructured{testNamespace, testConfigMap}, + }, + { + name: "should select namespaces using nil label selector for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, 
kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + // Should select only non-reserved namespaces with matching labels and their child resources + want: []*unstructured.Unstructured{prodNamespace, testNamespace, testConfigMap, testDeployment}, + }, + { + name: "should select only namespaces for namespace only scope for a namespace", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceOnly, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + // Should select only the namespace with name "test-ns" and none of its child resources + want: []*unstructured.Unstructured{testNamespace}, + }, + { + name: "should select only namespaces for namespace only scope for namespaces with labels", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + SelectionScope: fleetv1beta1.NamespaceOnly, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + // Should select only non-deleting namespaces with matching labels and none of their child resources + want: []*unstructured.Unstructured{prodNamespace, testNamespace}, + }, + { + name: "should return error if a resourceplacement selects namespaces even for namespace only scope", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceOnly, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, 
kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + wantError: controller.ErrUserError, + }, + { + name: "should return error when selecting a reserved namespace for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "environment": "test", + }, + }, + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace, kubeSystemNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + wantError: controller.ErrUserError, + }, + { + name: "should return empty result when informer manager returns not found error for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: { + Objects: []runtime.Object{}, + Err: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "namespaces"}, "test-ns"), + }, + }, + } + }(), + want: nil, // should return nil when informer returns not found error + }, + { + name: "should return error when informer manager returns non-NotFound error (getting namespace) for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: { + Objects: []runtime.Object{}, + Err: errors.New("connection timeout"), + }, + }, + } + }(), + wantError: controller.ErrUnexpectedBehavior, + }, + { + name: "should return error using label selector when informer manager returns error (getting namespace) for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return 
&testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: { + Objects: []runtime.Object{}, + Err: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "namespaces"}, "test-ns"), + }, + }, + } + }(), + wantError: controller.ErrAPIServerError, + }, + { + name: "should return error when informer manager returns non-NotFound error (getting deployment) for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace, kubeSystemNamespace}}, + utils.DeploymentGVR: { + Objects: []runtime.Object{}, + Err: errors.New("connection timeout"), + }, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR}, + } + }(), + wantError: controller.ErrUnexpectedBehavior, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &Reconciler{ + ResourceConfig: tt.resourceConfig, + InformerManager: tt.informerManager, + RestMapper: newFakeRESTMapper(), + } + + got, err := r.gatherSelectedResource(tt.placementName, tt.selectors) + if gotErr, wantErr := err != nil, tt.wantError != nil; gotErr != wantErr || !errors.Is(err, tt.wantError) { + t.Fatalf("gatherSelectedResource() = %v, want error %v", err, tt.wantError) + } + if tt.wantError != nil { + return + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("gatherSelectedResource() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +// fakeRESTMapper is a minimal RESTMapper implementation for testing +type fakeRESTMapper struct { + mappings map[schema.GroupKind]*meta.RESTMapping +} + +// newFakeRESTMapper creates a new fakeRESTMapper with default mappings +func newFakeRESTMapper() *fakeRESTMapper { + return &fakeRESTMapper{ + mappings: map[schema.GroupKind]*meta.RESTMapping{ + {Group: "", Kind: "Namespace"}: { + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}, + }, + {Group: "apps", Kind: "Deployment"}: { + Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, + }, + {Group: "", Kind: "ConfigMap"}: { + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}, + }, + {Group: "", Kind: "Node"}: { + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"}, + }, + {Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}: { + Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"}, + }, + }, + } +} + +func (f *fakeRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + if mapping, exists := f.mappings[gk]; exists { + return mapping, nil + } + return nil, errors.New("resource not found") +} + +func (f *fakeRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + mapping, err := f.RESTMapping(gk, versions...) 
+ if err != nil { + return nil, err + } + return []*meta.RESTMapping{mapping}, nil +} + +func (f *fakeRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + return input, nil +} + +func (f *fakeRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return []schema.GroupVersionResource{input}, nil +} + +func (f *fakeRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + switch { + case resource.Group == "" && resource.Resource == "namespaces": + return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}, nil + case resource.Group == "apps" && resource.Resource == "deployments": + return schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, nil + case resource.Group == "" && resource.Resource == "configmaps": + return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, nil + case resource.Group == "" && resource.Resource == "nodes": + return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}, nil + } + return schema.GroupVersionKind{}, errors.New("kind not found") +} + +func (f *fakeRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + kind, err := f.KindFor(resource) + if err != nil { + return nil, err + } + return []schema.GroupVersionKind{kind}, nil +} + +func (f *fakeRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + return resource, nil +} + func TestSortResources(t *testing.T) { // Create the ingressClass object ingressClass := &unstructured.Unstructured{ diff --git a/pkg/controllers/clusterresourceplacement/suite_test.go b/pkg/controllers/placement/suite_test.go similarity index 90% rename from pkg/controllers/clusterresourceplacement/suite_test.go rename to pkg/controllers/placement/suite_test.go index 8b377183e..fca00ca98 100644 --- a/pkg/controllers/clusterresourceplacement/suite_test.go +++ b/pkg/controllers/placement/suite_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -package clusterresourceplacement +package placement import ( "context" @@ -35,13 +35,15 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/manager" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/cmd/hubagent/options" - "go.goms.io/fleet/pkg/controllers/clusterresourcebindingwatcher" - "go.goms.io/fleet/pkg/controllers/clusterresourceplacementwatcher" - "go.goms.io/fleet/pkg/controllers/clusterschedulingpolicysnapshot" + "go.goms.io/fleet/pkg/controllers/bindingwatcher" + "go.goms.io/fleet/pkg/controllers/placementwatcher" + "go.goms.io/fleet/pkg/controllers/schedulingpolicysnapshot" + "go.goms.io/fleet/pkg/metrics" "go.goms.io/fleet/pkg/utils" "go.goms.io/fleet/pkg/utils/controller" "go.goms.io/fleet/pkg/utils/informer" @@ -63,7 +65,7 @@ const ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "ClusterResourcePlacement Controller Suite") + RunSpecs(t, "Placement Controller Suite") } var _ = BeforeSuite(func() { @@ -133,23 +135,26 @@ var _ = BeforeSuite(func() { crpController := controller.NewController(controllerName, controller.NamespaceKeyFunc, reconciler.Reconcile, rateLimiter) // Set up the watchers - err = (&clusterschedulingpolicysnapshot.Reconciler{ + err = (&schedulingpolicysnapshot.Reconciler{ Client: mgr.GetClient(), PlacementController: crpController, }).SetupWithManagerForClusterSchedulingPolicySnapshot(mgr) Expect(err).Should(Succeed(), "failed to create clusterSchedulingPolicySnapshot watcher") - err = (&clusterresourceplacementwatcher.Reconciler{ + err = (&placementwatcher.Reconciler{ PlacementController: crpController, }).SetupWithManagerForClusterResourcePlacement(mgr) Expect(err).Should(Succeed(), "failed to create clusterResourcePlacement watcher") - err = (&clusterresourcebindingwatcher.Reconciler{ + err = (&bindingwatcher.Reconciler{ Client: mgr.GetClient(), PlacementController: crpController, }).SetupWithManagerForClusterResourceBinding(mgr) Expect(err).Should(Succeed(), "failed to create clusterResourceBinding watcher") + // Register metrics. + ctrlmetrics.Registry.MustRegister(metrics.FleetPlacementStatusLastTimeStampSeconds) + ctx, cancel = context.WithCancel(context.TODO()) // Run the controller manager go func() { diff --git a/pkg/controllers/clusterresourceplacement/work_propagation.go b/pkg/controllers/placement/work_propagation.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/work_propagation.go rename to pkg/controllers/placement/work_propagation.go index c29ad4720..2e35eeae9 100644 --- a/pkg/controllers/clusterresourceplacement/work_propagation.go +++ b/pkg/controllers/placement/work_propagation.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package clusterresourceplacement +package placement import ( "context" diff --git a/pkg/controllers/clusterresourceplacementwatcher/suite_test.go b/pkg/controllers/placementwatcher/suite_test.go similarity index 97% rename from pkg/controllers/clusterresourceplacementwatcher/suite_test.go rename to pkg/controllers/placementwatcher/suite_test.go index d5f5c5010..018ded36c 100644 --- a/pkg/controllers/clusterresourceplacementwatcher/suite_test.go +++ b/pkg/controllers/placementwatcher/suite_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacementwatcher +package placementwatcher import ( "context" @@ -53,7 +53,7 @@ var ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "ClusterResourcePlacement Watcher Suite") + RunSpecs(t, "Placement Watcher Suite") } var _ = BeforeSuite(func() { diff --git a/pkg/controllers/clusterresourceplacementwatcher/watcher.go b/pkg/controllers/placementwatcher/watcher.go similarity index 93% rename from pkg/controllers/clusterresourceplacementwatcher/watcher.go rename to pkg/controllers/placementwatcher/watcher.go index 51e3cec75..1ebd913bf 100644 --- a/pkg/controllers/clusterresourceplacementwatcher/watcher.go +++ b/pkg/controllers/placementwatcher/watcher.go @@ -14,8 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package clusterresourceplacementwatcher features a controller to watch the clusterResourcePlacement and resourcePlacement changes. -package clusterresourceplacementwatcher +// Package placementwatcher features a controller to watch the clusterResourcePlacement and resourcePlacement changes. +package placementwatcher import ( "context" diff --git a/pkg/controllers/clusterresourceplacementwatcher/watcher_integration_test.go b/pkg/controllers/placementwatcher/watcher_integration_test.go similarity index 97% rename from pkg/controllers/clusterresourceplacementwatcher/watcher_integration_test.go rename to pkg/controllers/placementwatcher/watcher_integration_test.go index 0f4d8f57f..82c0bf2f2 100644 --- a/pkg/controllers/clusterresourceplacementwatcher/watcher_integration_test.go +++ b/pkg/controllers/placementwatcher/watcher_integration_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -package clusterresourceplacementwatcher +package placementwatcher import ( "time" @@ -45,7 +45,7 @@ func clusterResourcePlacementForTest() *fleetv1beta1.ClusterResourcePlacement { Name: testCRPName, }, Spec: fleetv1beta1.PlacementSpec{ - ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ + ResourceSelectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -67,7 +67,7 @@ func resourcePlacementForTest() *fleetv1beta1.ResourcePlacement { Namespace: testNamespace, }, Spec: fleetv1beta1.PlacementSpec{ - ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ + ResourceSelectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", diff --git a/pkg/controllers/resourcechange/resourcechange_controller.go b/pkg/controllers/resourcechange/resourcechange_controller.go index 7db6f7c26..de8fb281c 100644 --- a/pkg/controllers/resourcechange/resourcechange_controller.go +++ b/pkg/controllers/resourcechange/resourcechange_controller.go @@ -115,22 +115,21 @@ func (r *Reconciler) handleDeletedResource(key keys.ClusterWideKey, isClusterSco } // handleUpdatedResourceForClusterResourcePlacement handles the updated resource for cluster resource placement. -func (r *Reconciler) handleUpdatedResourceForClusterResourcePlacement(key keys.ClusterWideKey, clusterObj runtime.Object, isClusterScoped bool) error { +func (r *Reconciler) handleUpdatedResourceForClusterResourcePlacement(key keys.ClusterWideKey, obj runtime.Object, isClusterScoped bool) error { if isClusterScoped { klog.V(2).InfoS("Find clusterResourcePlacement that selects the cluster scoped object", "obj", key) - return r.triggerAffectedPlacementsForUpdatedRes(key, clusterObj.(*unstructured.Unstructured), true) + return r.triggerAffectedPlacementsForUpdatedRes(key, obj.(*unstructured.Unstructured), true) } klog.V(2).InfoS("Find namespace that contains the namespace scoped object", "obj", key) // we will use the parent namespace object to search for the affected placements - var err error - clusterObj, err = r.InformerManager.Lister(utils.NamespaceGVR).Get(key.Namespace) + nsObj, err := r.InformerManager.Lister(utils.NamespaceGVR).Get(key.Namespace) if err != nil { klog.ErrorS(err, "Failed to find the namespace the resource belongs to", "obj", key) return client.IgnoreNotFound(err) } klog.V(2).InfoS("Find clusterResourcePlacement that selects the namespace", "obj", key) - if err := r.triggerAffectedPlacementsForUpdatedRes(key, clusterObj.(*unstructured.Unstructured), true); err != nil { + if err := r.triggerAffectedPlacementsForUpdatedRes(key, nsObj.(*unstructured.Unstructured), true); err != nil { klog.ErrorS(err, "Failed to trigger affected placements for updated cluster resource", "obj", key) return err } @@ -293,7 +292,7 @@ func (r *Reconciler) getUnstructuredObject(objectKey keys.ClusterWideKey) (runti } // triggerAffectedPlacementsForUpdatedRes find the affected placements for a given updated cluster scoped or namespace scoped resources. -// If the key is namespace scoped, res will be the namespace object. +// If the key is namespace scoped, res will be the namespace object for the clusterResourcePlacement. // If triggerCRP is true, it will trigger the cluster resource placement controller, otherwise it will trigger the resource placement controller. 
func (r *Reconciler) triggerAffectedPlacementsForUpdatedRes(key keys.ClusterWideKey, res *unstructured.Unstructured, triggerCRP bool) error { if triggerCRP { @@ -327,7 +326,7 @@ func (r *Reconciler) triggerAffectedPlacementsForUpdatedRes(key keys.ClusterWide } // Find all matching CRPs. - matchedCRPs := collectAllAffectedPlacementsV1Beta1(key.Namespace == "", res, convertToClusterResourcePlacements(crpList)) + matchedCRPs := collectAllAffectedPlacementsV1Beta1(key, res, convertToClusterResourcePlacements(crpList)) if len(matchedCRPs) == 0 { klog.V(2).InfoS("Change in object does not affect any v1beta1 cluster resource placement", "obj", key) return nil @@ -351,7 +350,7 @@ func (r *Reconciler) triggerAffectedPlacementsForUpdatedRes(key keys.ClusterWide } // Find all matching ResourcePlacements. - matchedRPs := collectAllAffectedPlacementsV1Beta1(key.Namespace == "", res, convertToResourcePlacements(rpList)) + matchedRPs := collectAllAffectedPlacementsV1Beta1(key, res, convertToResourcePlacements(rpList)) if len(matchedRPs) == 0 { klog.V(2).InfoS("Change in object does not affect any resource placement", "obj", key) return nil @@ -405,21 +404,22 @@ func collectAllAffectedPlacementsV1Alpha1(res *unstructured.Unstructured, crpLis return placements } -func isSelectNamespaceOnly(selector placementv1beta1.ClusterResourceSelector) bool { +func isSelectNamespaceOnly(selector placementv1beta1.ResourceSelectorTerm) bool { return selector.Group == "" && selector.Version == "v1" && selector.Kind == "Namespace" && selector.SelectionScope == placementv1beta1.NamespaceOnly } -// collectAllAffectedPlacementsV1Beta1 goes through all v1beta1 placements and collect the ones whose resource selector matches the object given its gvk -func collectAllAffectedPlacementsV1Beta1(isClusterScoped bool, res *unstructured.Unstructured, placementList []placementv1beta1.PlacementObj) map[string]bool { +// collectAllAffectedPlacementsV1Beta1 goes through all v1beta1 placements and collect the ones whose resource selector matches the object given its gvk. +// If the key is namespace scoped, res will be the namespace object for the clusterResourcePlacement. +func collectAllAffectedPlacementsV1Beta1(key keys.ClusterWideKey, res *unstructured.Unstructured, placementList []placementv1beta1.PlacementObj) map[string]bool { placements := make(map[string]bool) for _, placement := range placementList { match := false // find the placements selected this resource (before this change) - // For the resource placement, we do not compare the namespace in the selectedResources status. - // We assume the namespace is the same as the resource placement's namespace. + // If the namespaced scope resource is in the clusterResourcePlacement status and placement is namespaceOnly, + // the placement should be triggered to create a new resourceSnapshot. 
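// A concrete example of the key-driven match (key construction mirrors the test fixtures added
// later in this change; the actual comparison is the loop that follows): an update to the apps/v1
// Deployment "test-deployment" in namespace "test-namespace" is matched via the status path only
// when that exact identifier, namespace included, appears in SelectedResources; selector-based
// matching is handled separately below.
//
//	key := keys.ClusterWideKey{ResourceIdentifier: fleetv1alpha1.ResourceIdentifier{
//		Group: "apps", Version: "v1", Kind: "Deployment",
//		Name: "test-deployment", Namespace: "test-namespace",
//	}}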
for _, selectedRes := range placement.GetPlacementStatus().SelectedResources { - if selectedRes.Group == res.GroupVersionKind().Group && selectedRes.Version == res.GroupVersionKind().Version && - selectedRes.Kind == res.GroupVersionKind().Kind && selectedRes.Name == res.GetName() { + if selectedRes.Group == key.GroupVersionKind().Group && selectedRes.Version == key.GroupVersionKind().Version && + selectedRes.Kind == key.GroupVersionKind().Kind && selectedRes.Name == key.Name && selectedRes.Namespace == key.Namespace { placements[placement.GetName()] = true match = true break @@ -434,9 +434,9 @@ func collectAllAffectedPlacementsV1Beta1(isClusterScoped bool, res *unstructured // will validate the resource placement's namespace matches the resource's namespace. for _, selector := range placement.GetPlacementSpec().ResourceSelectors { // For the clusterResourcePlacement, we skip the namespace scoped resources if the placement is cluster scoped. - if !isClusterScoped && isSelectNamespaceOnly(selector) && placement.GetNamespace() == "" { + if key.Namespace != "" && isSelectNamespaceOnly(selector) && placement.GetNamespace() == "" { // If the selector is namespace only, we skip the namespace scoped resources. - klog.V(2).InfoS("Skipping namespace scoped resource for namespace only selector", "obj", klog.KRef(res.GetNamespace(), res.GetName()), "selector", selector, "placement", klog.KObj(placement)) + klog.V(2).InfoS("Skipping namespace scoped resource for namespace only selector", "key", key, "obj", klog.KRef(res.GetNamespace(), res.GetName()), "selector", selector, "placement", klog.KObj(placement)) continue } @@ -491,7 +491,7 @@ func matchSelectorGVKV1Alpha1(targetGVK schema.GroupVersionKind, selector fleetv selector.Kind == targetGVK.Kind } -func matchSelectorGVKV1Beta1(targetGVK schema.GroupVersionKind, selector placementv1beta1.ClusterResourceSelector) bool { +func matchSelectorGVKV1Beta1(targetGVK schema.GroupVersionKind, selector placementv1beta1.ResourceSelectorTerm) bool { return selector.Group == targetGVK.Group && selector.Version == targetGVK.Version && selector.Kind == targetGVK.Kind } @@ -506,7 +506,7 @@ func matchSelectorLabelSelectorV1Alpha1(targetLabels map[string]string, selector return s.Matches(labels.Set(targetLabels)) } -func matchSelectorLabelSelectorV1Beta1(targetLabels map[string]string, selector placementv1beta1.ClusterResourceSelector) bool { +func matchSelectorLabelSelectorV1Beta1(targetLabels map[string]string, selector placementv1beta1.ResourceSelectorTerm) bool { if selector.LabelSelector == nil { // if the labelselector not set, it means select all return true diff --git a/pkg/controllers/resourcechange/resourcechange_controller_test.go b/pkg/controllers/resourcechange/resourcechange_controller_test.go index 2ee8cd831..e27d17581 100644 --- a/pkg/controllers/resourcechange/resourcechange_controller_test.go +++ b/pkg/controllers/resourcechange/resourcechange_controller_test.go @@ -26,15 +26,11 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" appv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/tools/cache" ctrl "sigs.k8s.io/controller-runtime" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" @@ -43,6 +39,7 @@ import 
( "go.goms.io/fleet/pkg/utils/controller" "go.goms.io/fleet/pkg/utils/informer" "go.goms.io/fleet/pkg/utils/keys" + testinformer "go.goms.io/fleet/test/utils/informer" ) var _ controller.Controller = &fakeController{} @@ -71,104 +68,6 @@ func (w *fakeController) Enqueue(obj interface{}) { w.QueueObj = append(w.QueueObj, obj.(string)) } -// fakeLister is a simple fake lister for testing -type fakeLister struct { - objects []runtime.Object - err error -} - -func (f *fakeLister) List(_ labels.Selector) ([]runtime.Object, error) { - if f.err != nil { - return nil, f.err - } - return f.objects, nil -} - -func (f *fakeLister) Get(name string) (runtime.Object, error) { - if f.err != nil { - return nil, f.err - } - for _, obj := range f.objects { - if obj.(*unstructured.Unstructured).GetName() == name { - return obj, nil - } - } - return nil, apierrors.NewNotFound(schema.GroupResource{Resource: "test"}, name) -} - -func (f *fakeLister) ByNamespace(namespace string) cache.GenericNamespaceLister { - return &fakeNamespaceLister{objects: f.objects, namespace: namespace, err: f.err} -} - -// fakeNamespaceLister implements cache.GenericNamespaceLister -type fakeNamespaceLister struct { - objects []runtime.Object - namespace string - err error -} - -func (f *fakeNamespaceLister) List(_ labels.Selector) ([]runtime.Object, error) { - if f.err != nil { - return nil, f.err - } - return f.objects, nil -} - -func (f *fakeNamespaceLister) Get(name string) (runtime.Object, error) { - if f.err != nil { - return nil, f.err - } - for _, obj := range f.objects { - if obj.(*unstructured.Unstructured).GetName() == name { - return obj, nil - } - } - return nil, apierrors.NewNotFound(schema.GroupResource{Resource: "test"}, name) -} - -// fakeInformerManager is a test-specific informer manager -type fakeInformerManager struct { - listers map[schema.GroupVersionResource]*fakeLister -} - -func (f *fakeInformerManager) AddDynamicResources(_ []informer.APIResourceMeta, _ cache.ResourceEventHandler, _ bool) { -} - -func (f *fakeInformerManager) AddStaticResource(_ informer.APIResourceMeta, _ cache.ResourceEventHandler) { -} - -func (f *fakeInformerManager) IsInformerSynced(_ schema.GroupVersionResource) bool { - return true -} - -func (f *fakeInformerManager) Start() { -} - -func (f *fakeInformerManager) Stop() { -} - -func (f *fakeInformerManager) Lister(gvr schema.GroupVersionResource) cache.GenericLister { - if lister, exists := f.listers[gvr]; exists { - return lister - } - return &fakeLister{objects: []runtime.Object{}} -} - -func (f *fakeInformerManager) GetNameSpaceScopedResources() []schema.GroupVersionResource { - return nil -} - -func (f *fakeInformerManager) IsClusterScopedResources(_ schema.GroupVersionKind) bool { - return true -} - -func (f *fakeInformerManager) WaitForCacheSync() { -} - -func (f *fakeInformerManager) GetClient() dynamic.Interface { - return nil -} - func TestFindPlacementsSelectedDeletedResV1Alpha1(t *testing.T) { deletedRes := fleetv1alpha1.ResourceIdentifier{ Group: "abc", @@ -772,20 +671,41 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: "test-nameSpace", + Name: "test-namespace", Labels: map[string]string{ "region": rand.String(10), "version": rand.String(4), }, }, } + + // Common ResourceIdentifier for Namespace tests (cluster-scoped) + namespaceResourceIdentifier := fleetv1alpha1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-namespace", + } + + // Common 
ResourceIdentifier for namespace-scoped resource tests + namespaceScopedResourceIdentifier := fleetv1alpha1.ResourceIdentifier{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "test-namespace", + } + tests := map[string]struct { - isClusterScoped bool - res *corev1.Namespace - crpList []*placementv1beta1.ClusterResourcePlacement - wantCRP map[string]bool + key keys.ClusterWideKey + res *corev1.Namespace + crpList []*placementv1beta1.ClusterResourcePlacement + wantCRP map[string]bool }{ "match a place with the matching label": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -793,7 +713,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -809,6 +729,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "Skip a placement with selecting ns only": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceScopedResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -816,7 +739,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -833,30 +756,34 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: make(map[string]bool), }, "does not match a place with no selector": { - isClusterScoped: true, - res: matchRes, + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, + res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, }, }, }, wantCRP: make(map[string]bool), }, "match a place with the name selector": { - isClusterScoped: true, - res: matchRes, + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, + res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -870,6 +797,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "match a place with a match Expressions label": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -877,7 +807,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + 
ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -898,6 +828,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "match a place with a single matching label": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -905,7 +838,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -921,6 +854,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "does not match a place with a miss matching label": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -928,7 +864,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -948,6 +884,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: make(map[string]bool), }, "match a place with multiple matching resource selectors": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -955,7 +894,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -984,6 +923,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "match a place with only one matching resource selectors": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -991,7 +933,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -1021,6 +963,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "match a place with a miss matching label but was selected": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -1029,7 +974,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing }, Spec: placementv1beta1.PlacementSpec{ // the mis-matching resource selector - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: 
[]placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -1065,6 +1010,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "does not match a place with a miss matching label and was not selected": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -1072,7 +1020,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -1101,6 +1049,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: make(map[string]bool), }, "don't select placement with name, nil label selector for namespace with different name": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -1108,7 +1059,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -1122,6 +1073,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: make(map[string]bool), }, "select placement with empty name, nil label selector for namespace": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -1129,7 +1083,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -1142,6 +1096,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "match placement through status SelectedResources when selector does not match": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -1150,7 +1107,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing }, Spec: placementv1beta1.PlacementSpec{ // Selector that does not match the resource - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -1180,6 +1137,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"status-matched-placement": true}, }, "does not match placement with different GVK selector": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -1187,7 +1147,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-not-selected", }, Spec: 
placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -1202,6 +1162,124 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing }, wantCRP: make(map[string]bool), }, + "match ClusterResourcePlacement with previously selected resource": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceScopedResourceIdentifier, + }, + res: matchRes, + crpList: []*placementv1beta1.ClusterResourcePlacement{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "crp-with-selected-resource", + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: corev1.GroupName, + Version: "v1", + Kind: matchRes.Kind, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "nonexistent": "label", + }, + }, + }, + }, + }, + Status: placementv1beta1.PlacementStatus{ + SelectedResources: []placementv1beta1.ResourceIdentifier{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "test-namespace", + }, + }, + }, + }, + }, + wantCRP: map[string]bool{"crp-with-selected-resource": true}, + }, + "match ClusterResourcePlacement (even with namespace only) with previously selected resource": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceScopedResourceIdentifier, + }, + res: matchRes, + crpList: []*placementv1beta1.ClusterResourcePlacement{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "crp-with-selected-resource", + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: corev1.GroupName, + Version: "v1", + Kind: matchRes.Kind, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "nonexistent": "label", + }, + }, + SelectionScope: placementv1beta1.NamespaceOnly, + }, + }, + }, + Status: placementv1beta1.PlacementStatus{ + SelectedResources: []placementv1beta1.ResourceIdentifier{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "test-namespace", + }, + }, + }, + }, + }, + wantCRP: map[string]bool{"crp-with-selected-resource": true}, + }, + "does not match ClusterResourcePlacement with previously selected resource when namespace is different": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceScopedResourceIdentifier, + }, + res: matchRes, + crpList: []*placementv1beta1.ClusterResourcePlacement{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "crp-with-selected-resource", + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: corev1.GroupName, + Version: "v1", + Kind: matchRes.Kind, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "nonexistent": "label", + }, + }, + }, + }, + }, + Status: placementv1beta1.PlacementStatus{ + SelectedResources: []placementv1beta1.ResourceIdentifier{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "different-namespace", + }, + }, + }, + }, + }, + wantCRP: make(map[string]bool), + }, } for name, tt := range tests { t.Run(name, func(t *testing.T) { @@ -1212,7 +1290,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing } uRes, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(tt.res) clusterPlacements := convertToClusterResourcePlacements(crpList) - got := 
collectAllAffectedPlacementsV1Beta1(tt.isClusterScoped, &unstructured.Unstructured{Object: uRes}, clusterPlacements) + got := collectAllAffectedPlacementsV1Beta1(tt.key, &unstructured.Unstructured{Object: uRes}, clusterPlacements) if !reflect.DeepEqual(got, tt.wantCRP) { t.Errorf("test case `%s` got = %v, wantResult %v", name, got, tt.wantCRP) } @@ -1243,12 +1321,25 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Kind: "Deployment", }) + // Common ResourceIdentifier for Deployment tests + deploymentResourceIdentifier := fleetv1alpha1.ResourceIdentifier{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "test-namespace", + } + tests := map[string]struct { + key keys.ClusterWideKey res *unstructured.Unstructured rpList []*placementv1beta1.ResourcePlacement wantRP map[string]bool }{ "match ResourcePlacement with the matching label": { + key: keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1257,7 +1348,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1275,6 +1366,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { wantRP: map[string]bool{"resource-selected": true}, }, "does not match ResourcePlacement with no selector": { + key: keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1283,13 +1377,16 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, }, }, }, wantRP: make(map[string]bool), }, "match ResourcePlacement with the name selector": { + key: keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1298,7 +1395,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1313,6 +1410,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { wantRP: map[string]bool{"resource-selected": true}, }, "does not match ResourcePlacement with different name": { + key: keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1321,7 +1421,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1335,6 +1435,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { wantRP: make(map[string]bool), }, "match ResourcePlacement with previously selected resource": { + key: 
keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1343,7 +1446,8 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + // Selector that does not match the resource + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1357,6 +1461,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { }, }, Status: placementv1beta1.PlacementStatus{ + // But the resource is in the selected resources status SelectedResources: []placementv1beta1.ResourceIdentifier{ { Group: "apps", @@ -1372,6 +1477,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { wantRP: map[string]bool{"resource-selected": true}, }, "select ResourcePlacement with empty name, nil label selector for deployment": { + key: keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1380,7 +1488,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1393,6 +1501,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { wantRP: map[string]bool{"resource-selected": true}, }, "does not match ResourcePlacement with different GVK selector": { + key: keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1401,7 +1512,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1419,7 +1530,10 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { }, wantRP: make(map[string]bool), }, - "match ResourcePlacement through status SelectedResources when selector does not match": { + "does not match ResourcePlacement through status SelectedResources when name is different": { + key: keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1429,7 +1543,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { }, Spec: placementv1beta1.PlacementSpec{ // Selector that does not match the resource - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1449,14 +1563,14 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Group: "apps", Version: "v1", Kind: "Deployment", - Name: "test-deployment", + Name: "different-deployment", Namespace: "test-namespace", }, }, }, }, }, - wantRP: map[string]bool{"status-matched-rp": true}, + wantRP: make(map[string]bool), }, } @@ -1468,7 +1582,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { rpList = append(rpList, &unstructured.Unstructured{Object: uMap}) } 
resourcePlacements := convertToResourcePlacements(rpList) - got := collectAllAffectedPlacementsV1Beta1(false, tt.res, resourcePlacements) + got := collectAllAffectedPlacementsV1Beta1(tt.key, tt.res, resourcePlacements) if !reflect.DeepEqual(got, tt.wantRP) { t.Errorf("test case `%s` got = %v, wantResult %v", name, got, tt.wantRP) } @@ -1540,7 +1654,7 @@ func TestHandleUpdatedResource(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1561,7 +1675,7 @@ func TestHandleUpdatedResource(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1576,22 +1690,22 @@ func TestHandleUpdatedResource(t *testing.T) { }, } - defaultFakeInformerManager := &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + defaultFakeInformerManager := &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ {Group: "placement.kubernetes-fleet.io", Version: "v1beta1", Resource: "clusterresourceplacements"}: { - objects: func() []runtime.Object { + Objects: func() []runtime.Object { uMap, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(testCRP) return []runtime.Object{&unstructured.Unstructured{Object: uMap}} }(), }, {Group: "placement.kubernetes-fleet.io", Version: "v1beta1", Resource: "resourceplacements"}: { - objects: func() []runtime.Object { + Objects: func() []runtime.Object { uMap, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(testRP) return []runtime.Object{&unstructured.Unstructured{Object: uMap}} }(), }, {Group: "", Version: "v1", Resource: "namespaces"}: { - objects: func() []runtime.Object { + Objects: func() []runtime.Object { uObj, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(testNamespace) return []runtime.Object{&unstructured.Unstructured{Object: uObj}} }(), @@ -1684,16 +1798,16 @@ func TestHandleUpdatedResource(t *testing.T) { }, clusterObj: testDeployment, isClusterScoped: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ {Group: "placement.kubernetes-fleet.io", Version: "v1beta1", Resource: "clusterresourceplacements"}: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, {Group: "placement.kubernetes-fleet.io", Version: "v1beta1", Resource: "resourceplacements"}: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, {Group: "", Version: "v1", Resource: "namespaces"}: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, }, }, @@ -1714,19 +1828,19 @@ func TestHandleUpdatedResource(t *testing.T) { }, clusterObj: testDeployment, isClusterScoped: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ {Group: "placement.kubernetes-fleet.io", Version: "v1beta1", Resource: "clusterresourceplacements"}: { - objects: func() []runtime.Object { + Objects: func() []runtime.Object { uMap, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(testCRP) return 
[]runtime.Object{&unstructured.Unstructured{Object: uMap}} }(), }, {Group: "placement.kubernetes-fleet.io", Version: "v1beta1", Resource: "resourceplacements"}: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, {Group: "", Version: "v1", Resource: "namespaces"}: { - objects: func() []runtime.Object { + Objects: func() []runtime.Object { uObj, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(testNamespace) return []runtime.Object{&unstructured.Unstructured{Object: uObj}} }(), @@ -1842,10 +1956,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, }, }, @@ -1862,10 +1976,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, }, }, @@ -1883,10 +1997,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, }, }, @@ -1904,10 +2018,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, }, }, @@ -1924,11 +2038,11 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{}, - err: errors.New("test lister error"), + Objects: []runtime.Object{}, + Err: errors.New("test lister error"), }, }, }, @@ -1947,11 +2061,11 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ResourcePlacementGVR: { - objects: []runtime.Object{}, - err: errors.New("test lister error"), + Objects: []runtime.Object{}, + Err: errors.New("test lister error"), }, }, }, @@ -1969,10 +2083,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ 
utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, }, }, @@ -1990,10 +2104,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, }, }, @@ -2010,10 +2124,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: func() []runtime.Object { + Objects: func() []runtime.Object { // Create multiple CRPs that have selected this namespace testCRP1 := &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ @@ -2089,10 +2203,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ResourcePlacementGVR: { - objects: func() []runtime.Object { + Objects: func() []runtime.Object { // Create multiple ResourcePlacements that have selected this deployment testRP1 := &placementv1beta1.ResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ @@ -2210,7 +2324,7 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -2231,7 +2345,7 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -2275,13 +2389,13 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { }, }, resource: testNamespace, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, }, }, @@ -2300,13 +2414,13 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { }, }, resource: createDeploymentUnstructured(createTestDeployment()), - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, }, }, @@ -2348,7 +2462,7 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: 
[]placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -2365,13 +2479,13 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { uMap, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(nonMatchingRP) rpObjects := []runtime.Object{&unstructured.Unstructured{Object: uMap}} - return &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + return &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, }, } @@ -2404,13 +2518,13 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { } return createNamespaceUnstructured(otherResource) }(), - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, utils.ResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, }, }, @@ -2435,7 +2549,7 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { Name: "test-crp-2", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -2451,13 +2565,13 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { } uMap1, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(testClusterResourcePlacement) uMap2, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(testCRP2) - return &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + return &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{&unstructured.Unstructured{Object: uMap1}, &unstructured.Unstructured{Object: uMap2}}, + Objects: []runtime.Object{&unstructured.Unstructured{Object: uMap1}, &unstructured.Unstructured{Object: uMap2}}, }, utils.ResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, }, } @@ -2478,13 +2592,13 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { }, }, resource: testNamespace, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, }, }, @@ -2510,7 +2624,7 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { Name: "crp-namespace-only", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -2528,13 +2642,13 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { uMap, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(crpWithNamespaceOnlySelector) crpObjects := []runtime.Object{&unstructured.Unstructured{Object: uMap}} - return &fakeInformerManager{ - listers: 
map[schema.GroupVersionResource]*fakeLister{ + return &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, utils.ResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, }, } @@ -2588,6 +2702,13 @@ func TestHandleDeletedResource(t *testing.T) { Name: "test-namespace", Namespace: "", }, + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "test-namespace", + }, }, }, } @@ -2643,10 +2764,10 @@ func TestHandleDeletedResource(t *testing.T) { }, }, isClusterScoped: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, }, }, @@ -2665,10 +2786,10 @@ func TestHandleDeletedResource(t *testing.T) { }, }, isClusterScoped: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, }, }, @@ -2688,16 +2809,16 @@ func TestHandleDeletedResource(t *testing.T) { }, }, isClusterScoped: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, utils.NamespaceGVR: { - objects: []runtime.Object{testNamespace}, + Objects: []runtime.Object{testNamespace}, }, }, }, @@ -2717,14 +2838,14 @@ func TestHandleDeletedResource(t *testing.T) { }, }, isClusterScoped: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{}, - err: errors.New("CRP lister error"), + Objects: []runtime.Object{}, + Err: errors.New("CRP lister error"), }, utils.NamespaceGVR: { - objects: []runtime.Object{testNamespace}, + Objects: []runtime.Object{testNamespace}, }, }, }, @@ -2744,14 +2865,14 @@ func TestHandleDeletedResource(t *testing.T) { }, }, isClusterScoped: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, utils.ResourcePlacementGVR: { - objects: []runtime.Object{}, - err: errors.New("RP lister error"), + Objects: []runtime.Object{}, + Err: errors.New("RP lister error"), }, }, }, @@ -2770,11 +2891,11 @@ func TestHandleDeletedResource(t *testing.T) { }, }, isClusterScoped: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: 
[]runtime.Object{}, - err: errors.New("CRP lister error"), + Objects: []runtime.Object{}, + Err: errors.New("CRP lister error"), }, }, }, @@ -2801,7 +2922,7 @@ func TestHandleDeletedResource(t *testing.T) { Name: "crp-namespace-only", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -2813,16 +2934,16 @@ func TestHandleDeletedResource(t *testing.T) { } uMap, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(crpWithNamespaceOnlySelector) crpObjects := []runtime.Object{&unstructured.Unstructured{Object: uMap}} - return &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + return &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, utils.NamespaceGVR: { - objects: []runtime.Object{testNamespace}, + Objects: []runtime.Object{testNamespace}, }, }, } @@ -2872,11 +2993,11 @@ func TestHandleDeletedResource(t *testing.T) { func TestIsSelectNamespaceOnly(t *testing.T) { tests := map[string]struct { - selector placementv1beta1.ClusterResourceSelector + selector placementv1beta1.ResourceSelectorTerm want bool }{ "namespace with namespace only scope": { - selector: placementv1beta1.ClusterResourceSelector{ + selector: placementv1beta1.ResourceSelectorTerm{ Group: "", Version: "v1", Kind: "Namespace", @@ -2885,7 +3006,7 @@ func TestIsSelectNamespaceOnly(t *testing.T) { want: true, }, "namespace with namespace with resources scope": { - selector: placementv1beta1.ClusterResourceSelector{ + selector: placementv1beta1.ResourceSelectorTerm{ Group: "", Version: "v1", Kind: "Namespace", @@ -2894,7 +3015,7 @@ func TestIsSelectNamespaceOnly(t *testing.T) { want: false, }, "configmap with namespace only scope": { - selector: placementv1beta1.ClusterResourceSelector{ + selector: placementv1beta1.ResourceSelectorTerm{ Group: "", Version: "v1", Kind: "ConfigMap", @@ -2903,7 +3024,7 @@ func TestIsSelectNamespaceOnly(t *testing.T) { want: false, }, "deployment with namespace only scope": { - selector: placementv1beta1.ClusterResourceSelector{ + selector: placementv1beta1.ResourceSelectorTerm{ Group: "apps", Version: "v1", Kind: "Deployment", @@ -2912,7 +3033,7 @@ func TestIsSelectNamespaceOnly(t *testing.T) { want: false, }, "namespace with wrong group": { - selector: placementv1beta1.ClusterResourceSelector{ + selector: placementv1beta1.ResourceSelectorTerm{ Group: "core", Version: "v1", Kind: "Namespace", @@ -2921,7 +3042,7 @@ func TestIsSelectNamespaceOnly(t *testing.T) { want: false, }, "namespace with wrong version": { - selector: placementv1beta1.ClusterResourceSelector{ + selector: placementv1beta1.ResourceSelectorTerm{ Group: "", Version: "v2", Kind: "Namespace", @@ -2930,7 +3051,7 @@ func TestIsSelectNamespaceOnly(t *testing.T) { want: false, }, "namespace with default selection scope (NamespaceWithResources)": { - selector: placementv1beta1.ClusterResourceSelector{ + selector: placementv1beta1.ResourceSelectorTerm{ Group: "", Version: "v1", Kind: "Namespace", diff --git a/pkg/controllers/rollout/controller_integration_test.go b/pkg/controllers/rollout/controller_integration_test.go index cfdd88b0c..61d48051b 100644 --- a/pkg/controllers/rollout/controller_integration_test.go +++ 
b/pkg/controllers/rollout/controller_integration_test.go @@ -25,6 +25,8 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -32,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" + clusterv1beta1 "go.goms.io/fleet/apis/cluster/v1beta1" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/pkg/utils" "go.goms.io/fleet/pkg/utils/condition" @@ -44,7 +47,7 @@ const ( consistentTimeout = time.Second * 60 consistentInterval = time.Second * 5 customBindingFinalizer = "custom-binding-finalizer" - testNamespace = "test-namespace" + testNamespace = "app" // to align with the test resources in rollout/manifests ) var ( @@ -314,6 +317,85 @@ var _ = Describe("Test the rollout Controller", func() { }, timeout, interval).Should(Succeed(), "Failed to verify that all the bindings have their status refreshed") }) + It("should trigger binding rollout for clusterResourceOverrideSnapshot but not resourceOverrideSnapshot with Namespaced scope", func() { + // Create a CRP. + targetClusterCount := int32(2) + rolloutCRP = clusterResourcePlacementForTest( + testCRPName, + createPlacementPolicyForTest(placementv1beta1.PickNPlacementType, targetClusterCount), + createPlacementRolloutStrategyForTest(placementv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)) + Expect(k8sClient.Create(ctx, rolloutCRP)).Should(Succeed(), "Failed to create CRP") + + // Create a master cluster resource snapshot. + resourceSnapshot := generateClusterResourceSnapshot(rolloutCRP.Name, 0, true) + Expect(k8sClient.Create(ctx, resourceSnapshot)).Should(Succeed(), "Failed to create cluster resource snapshot") + + // Create bindings. + clusters := make([]string, targetClusterCount) + for i := 0; i < int(targetClusterCount); i++ { + clusters[i] = "cluster-" + utils.RandStr() + binding := generateClusterResourceBinding(placementv1beta1.BindingStateScheduled, resourceSnapshot.Name, clusters[i]) + Expect(k8sClient.Create(ctx, binding)).Should(Succeed(), "Failed to create cluster resource binding") + bindings = append(bindings, binding) + + memberCluster := generateMemberCluster(i, clusters[i]) + Expect(k8sClient.Create(ctx, memberCluster)).Should(Succeed(), "Failed to create member cluster") + } + + // Verify that all the bindings are rolled out initially. + verifyBindingsRolledOut(controller.ConvertCRBArrayToBindingObjs(bindings), resourceSnapshot, timeout) + + // Mark the bindings to be available. + for _, binding := range bindings { + markBindingAvailable(binding, true) + } + + // Create a resourceOverrideSnapshot with the same placement name but Namespaced scope and verify bindings are not updated. + testROName1 := "ro" + utils.RandStr() + resourceOverrideSnapshot1 := generateResourceOverrideSnapshot(testROName1, testCRPName, placementv1beta1.NamespaceScoped) + By(fmt.Sprintf("Creating resourceOverrideSnapshot %s to refer a resourcePlacement", resourceOverrideSnapshot1.Name)) + Expect(k8sClient.Create(ctx, resourceOverrideSnapshot1)).Should(Succeed(), "Failed to create resource override snapshot") + + // Verify bindings are NOT updated (rollout not triggered) by resourceOverrideSnapshot. 
+ verifyBindingsNotUpdatedWithOverridesConsistently(controller.ConvertCRBArrayToBindingObjs(bindings), nil, nil) + + // Create a clusterResourceOverrideSnapshot and verify it triggers rollout. + testCROName := "cro" + utils.RandStr() + clusterResourceOverrideSnapshot := generateClusterResourceOverrideSnapshot(testCROName, testCRPName) + By(fmt.Sprintf("Creating clusterResourceOverrideSnapshot %s to refer the clusterResourcePlacement", clusterResourceOverrideSnapshot.Name)) + Expect(k8sClient.Create(ctx, clusterResourceOverrideSnapshot)).Should(Succeed(), "Failed to create cluster resource override snapshot") + + // Verify bindings are updated, note that both clusterResourceOverrideSnapshot and resourceOverrideSnapshot are set in the bindings. + waitUntilRolloutCompleted(controller.ConvertCRBArrayToBindingObjs(bindings), []string{clusterResourceOverrideSnapshot.Name}, nil) + + // Create another resourceOverrideSnapshot referencing the same CRP and verify bindings are updated again. + testROName2 := "ro" + utils.RandStr() + resourceOverrideSnapshot2 := generateResourceOverrideSnapshot(testROName2, testCRPName, placementv1beta1.ClusterScoped) + By(fmt.Sprintf("Creating resourceOverrideSnapshot %s to refer a clusterResourcePlacement", resourceOverrideSnapshot2.Name)) + Expect(k8sClient.Create(ctx, resourceOverrideSnapshot2)).Should(Succeed(), "Failed to create resource override snapshot") + + // Verify bindings are updated, note that both clusterResourceOverrideSnapshot and resourceOverrideSnapshot are set in the bindings. + waitUntilRolloutCompleted(controller.ConvertCRBArrayToBindingObjs(bindings), []string{clusterResourceOverrideSnapshot.Name}, + []placementv1beta1.NamespacedName{ + {Name: resourceOverrideSnapshot2.Name, Namespace: resourceOverrideSnapshot2.Namespace}, + }, + ) + + // Clean up the override snapshots. + Expect(k8sClient.Delete(ctx, resourceOverrideSnapshot1)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, clusterResourceOverrideSnapshot)).Should(Succeed()) + + // Clean up the member clusters. 
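// MemberCluster objects are cluster-scoped and were created above with randomized names,
// so they are deleted explicitly here; otherwise they would presumably leak into later
// specs that share the same test environment.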
+ for _, cluster := range clusters { + memberCluster := &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster, + }, + } + Expect(k8sClient.Delete(ctx, memberCluster)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{})) + } + }) + It("Should rollout all the selected bindings when the rollout strategy is not set", func() { // create CRP var targetCluster int32 = 11 @@ -619,7 +701,7 @@ var _ = Describe("Test the rollout Controller", func() { By("Verified that the rollout is finally unblocked") }) - It("Should rollout both the old applied and failed to apply bond the new resources", func() { + It("Should rollout both the old applied and failed to apply bound the new resources", func() { // create CRP var targetCluster int32 = 5 rolloutCRP = clusterResourcePlacementForTest(testCRPName, @@ -1481,7 +1563,7 @@ var _ = Describe("Test the rollout Controller for ResourcePlacement", func() { By("Verified that the rollout is finally unblocked") }) - It("Should rollout both the old applied and failed to apply bond the new resources", func() { + It("Should rollout both the old applied and failed to apply bound the new resources", func() { // create RP var targetCluster int32 = 5 rolloutRP = resourcePlacementForTest(testNamespace, testRPName, @@ -1538,6 +1620,79 @@ var _ = Describe("Test the rollout Controller for ResourcePlacement", func() { return allMatch }, 5*defaultUnavailablePeriod*time.Second, interval).Should(BeTrue(), "rollout controller should roll all the bindings to use the latest resource snapshot") }) + + It("should trigger binding rollout for resourceOverrideSnapshot but not clusterResourceOverrideSnapshot", func() { + // Create a RP. + targetClusterCount := int32(2) + rolloutRP = resourcePlacementForTest( + testNamespace, testRPName, + createPlacementPolicyForTest(placementv1beta1.PickNPlacementType, targetClusterCount), + createPlacementRolloutStrategyForTest(placementv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)) + Expect(k8sClient.Create(ctx, rolloutRP)).Should(Succeed(), "Failed to create RP") + + // Create a master resource snapshot. + resourceSnapshot := generateResourceSnapshot(rolloutRP.Namespace, rolloutRP.Name, 0, true) + Expect(k8sClient.Create(ctx, resourceSnapshot)).Should(Succeed(), "Failed to create resource snapshot") + + // Create bindings. + clusters := make([]string, targetClusterCount) + for i := 0; i < int(targetClusterCount); i++ { + clusters[i] = "cluster-" + utils.RandStr() + binding := generateResourceBinding(placementv1beta1.BindingStateScheduled, resourceSnapshot.Name, clusters[i], testNamespace) + Expect(k8sClient.Create(ctx, binding)).Should(Succeed(), "Failed to create resource binding") + bindings = append(bindings, binding) + + memberCluster := generateMemberCluster(i, clusters[i]) + Expect(k8sClient.Create(ctx, memberCluster)).Should(Succeed(), "Failed to create member cluster") + } + + // Verify that all the bindings are rolled out initially. + verifyBindingsRolledOut(controller.ConvertRBArrayToBindingObjs(bindings), resourceSnapshot, timeout) + + // Mark the bindings to be available. + for _, binding := range bindings { + markBindingAvailable(binding, true) + } + + // Create a clusterResourceOverrideSnapshot and a resourceOverrideSnapshot with cluster-scope placement and verify bindings are not updated. 
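// The expectations below rely on how an override snapshot is matched to a placement: by the
// referenced placement name and its scope. Both the ClusterResourceOverrideSnapshot and the
// ClusterScoped ResourceOverrideSnapshot created next reference a cluster-scoped placement
// named testRPName rather than this namespaced ResourcePlacement, so neither should move
// these ResourceBindings; only the NamespaceScoped override created afterwards should.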
+ testCROName := "cro" + utils.RandStr() + clusterResourceOverrideSnapshot := generateClusterResourceOverrideSnapshot(testCROName, testRPName) + By(fmt.Sprintf("Creating cluster resource override snapshot %s", clusterResourceOverrideSnapshot.Name)) + Expect(k8sClient.Create(ctx, clusterResourceOverrideSnapshot)).Should(Succeed(), "Failed to create cluster resource override snapshot") + + testROName1 := "ro" + utils.RandStr() + resourceOverrideSnapshot1 := generateResourceOverrideSnapshot(testROName1, testRPName, placementv1beta1.ClusterScoped) + By(fmt.Sprintf("Creating resource override snapshot %s", resourceOverrideSnapshot1.Name)) + Expect(k8sClient.Create(ctx, resourceOverrideSnapshot1)).Should(Succeed(), "Failed to create resource override snapshot") + + // Verify bindings are NOT updated (rollout not triggered) by clusterResourceOverrideSnapshot. + verifyBindingsNotUpdatedWithOverridesConsistently(controller.ConvertRBArrayToBindingObjs(bindings), nil, nil) + + // Create a resourceOverrideSnapshot and verify it triggers rollout. + testROName2 := "ro" + utils.RandStr() + resourceOverrideSnapshot2 := generateResourceOverrideSnapshot(testROName2, testRPName, placementv1beta1.NamespaceScoped) + By(fmt.Sprintf("Creating resource override snapshot %s", resourceOverrideSnapshot2.Name)) + Expect(k8sClient.Create(ctx, resourceOverrideSnapshot2)).Should(Succeed(), "Failed to create resource override snapshot") + + waitUntilRolloutCompleted(controller.ConvertRBArrayToBindingObjs(bindings), nil, []placementv1beta1.NamespacedName{ + {Name: resourceOverrideSnapshot2.Name, Namespace: resourceOverrideSnapshot2.Namespace}, + }) + + // Clean up the override snapshots. + Expect(k8sClient.Delete(ctx, resourceOverrideSnapshot1)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, resourceOverrideSnapshot2)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, clusterResourceOverrideSnapshot)).Should(Succeed()) + + // Clean up the member clusters. 
+ for _, cluster := range clusters { + memberCluster := &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster, + }, + } + Expect(k8sClient.Delete(ctx, memberCluster)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{})) + } + }) }) func resourcePlacementForTest(namespace, rpName string, policy *placementv1beta1.PlacementPolicy, strategy placementv1beta1.RolloutStrategy) *placementv1beta1.ResourcePlacement { @@ -1547,7 +1702,7 @@ func resourcePlacementForTest(namespace, rpName string, policy *placementv1beta1 Namespace: namespace, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "v1", Version: "v1", @@ -1609,6 +1764,92 @@ func verifyBindingsRolledOut(bindings []placementv1beta1.BindingObj, masterSnaps }, timeout, interval).Should(Succeed(), "rollout controller should roll out all the bindings") } +func verifyBindingsNotUpdatedWithOverridesConsistently( + bindings []placementv1beta1.BindingObj, + wantClusterResourceOverrideSnapshots []string, + wantResourceOverrideSnapshots []placementv1beta1.NamespacedName, +) { + Consistently(func() error { + for _, binding := range bindings { + bindingKey := types.NamespacedName{Name: binding.GetName(), Namespace: binding.GetNamespace()} + if _, err := checkIfBindingUpdatedWithOverrides(bindingKey, wantClusterResourceOverrideSnapshots, wantResourceOverrideSnapshots); err != nil { + return fmt.Errorf("binding %s should not be updated with overrides: %w", bindingKey, err) + } + } + return nil + }, consistentTimeout, interval).Should(Succeed(), "Bindings should not be updated with new overrides consistently") +} + +func waitUntilRolloutCompleted( + bindings []placementv1beta1.BindingObj, + wantClusterResourceOverrideSnapshots []string, + wantResourceOverrideSnapshots []placementv1beta1.NamespacedName, +) { + notUpdatedBindings := make(map[types.NamespacedName]bool, len(bindings)) + for _, binding := range bindings { + notUpdatedBindings[types.NamespacedName{Name: binding.GetName(), Namespace: binding.GetNamespace()}] = true + } + + for len(notUpdatedBindings) > 0 { + // In each round, try to find a binding that has been updated and update it to available so rollout can proceed. + var gotBinding placementv1beta1.BindingObj + var err error + Eventually(func() error { + for bindingKey := range notUpdatedBindings { + gotBinding, err = checkIfBindingUpdatedWithOverrides(bindingKey, wantClusterResourceOverrideSnapshots, wantResourceOverrideSnapshots) + if err != nil { + continue // current binding not updated yet, continue to check the next one. + } + delete(notUpdatedBindings, bindingKey) + return nil // found an updated binding, can exit this round. + } + return fmt.Errorf("failed to find a binding with updated overrides") + }, timeout, interval).Should(Succeed(), "One of the bindings should be updated with overrides") + // Mark the binding as available so rollout can proceed. 
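// Presumably the default rolling update strategy only lets a limited number of bindings be
// unavailable at a time, so each round marks the freshly updated binding available before
// the controller will roll out the next one.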
+ markBindingAvailable(gotBinding, true) + } +} + +func checkIfBindingUpdatedWithOverrides( + bindingKey types.NamespacedName, + wantClusterResourceOverrideSnapshots []string, + wantResourceOverrideSnapshots []placementv1beta1.NamespacedName, +) (placementv1beta1.BindingObj, error) { + var gotBinding placementv1beta1.BindingObj + if bindingKey.Namespace == "" { + gotBinding = &placementv1beta1.ClusterResourceBinding{} + } else { + gotBinding = &placementv1beta1.ResourceBinding{} + } + if err := k8sClient.Get(ctx, bindingKey, gotBinding); err != nil { + return gotBinding, fmt.Errorf("failed to get binding %s: %w", bindingKey, err) + } + + // Check that RolloutStarted condition is True. + if !condition.IsConditionStatusTrue(gotBinding.GetCondition(string(placementv1beta1.ResourceBindingRolloutStarted)), gotBinding.GetGeneration()) { + return gotBinding, fmt.Errorf("binding %s RolloutStarted condition is not True", bindingKey) + } + + // Check that override snapshots in spec are the want ones. + cmpOptions := []cmp.Option{ + cmpopts.EquateEmpty(), + cmpopts.SortSlices(func(a, b string) bool { return a < b }), + cmpopts.SortSlices(func(a, b placementv1beta1.NamespacedName) bool { + if a.Namespace == b.Namespace { + return a.Name < b.Name + } + return a.Namespace < b.Namespace + }), + } + if !cmp.Equal(gotBinding.GetBindingSpec().ClusterResourceOverrideSnapshots, wantClusterResourceOverrideSnapshots, cmpOptions...) || + !cmp.Equal(gotBinding.GetBindingSpec().ResourceOverrideSnapshots, wantResourceOverrideSnapshots, cmpOptions...) { + return gotBinding, fmt.Errorf("binding %s override snapshots mismatch: want %v and %v, got %v and %v", bindingKey, + wantClusterResourceOverrideSnapshots, wantResourceOverrideSnapshots, + gotBinding.GetBindingSpec().ClusterResourceOverrideSnapshots, gotBinding.GetBindingSpec().ResourceOverrideSnapshots) + } + return gotBinding, nil +} + func markBindingAvailable(binding placementv1beta1.BindingObj, trackable bool) { Eventually(func() error { reason := "trackable" @@ -1707,7 +1948,8 @@ func generateClusterResourceSnapshot(testCRPName string, resourceIndex int, isLa placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(isLatest), }, Annotations: map[string]string{ - placementv1beta1.ResourceGroupHashAnnotation: "hash", + placementv1beta1.ResourceGroupHashAnnotation: "hash", + placementv1beta1.NumberOfResourceSnapshotsAnnotation: "1", }, }, } @@ -1734,7 +1976,8 @@ func generateResourceSnapshot(namespace, testRPName string, resourceIndex int, i placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(isLatest), }, Annotations: map[string]string{ - placementv1beta1.ResourceGroupHashAnnotation: "hash", + placementv1beta1.ResourceGroupHashAnnotation: "hash", + placementv1beta1.NumberOfResourceSnapshotsAnnotation: "1", }, }, } @@ -1750,3 +1993,116 @@ func generateResourceSnapshot(namespace, testRPName string, resourceIndex int, i } return resourceSnapshot } + +func generateMemberCluster(idx int, clusterName string) *clusterv1beta1.MemberCluster { + clusterLabels := map[string]string{ + "index": strconv.Itoa(idx), + } + return &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Labels: clusterLabels, + }, + Spec: clusterv1beta1.MemberClusterSpec{ + Identity: rbacv1.Subject{ + Name: "testUser", + Kind: "ServiceAccount", + Namespace: utils.FleetSystemNamespace, + }, + HeartbeatPeriodSeconds: 60, + }, + } +} + +func generateClusterResourceOverrideSnapshot(testCROName, testPlacementName string) 
*placementv1beta1.ClusterResourceOverrideSnapshot { + return &placementv1beta1.ClusterResourceOverrideSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, testCROName, 0), + Labels: map[string]string{ + placementv1beta1.OverrideIndexLabel: "0", + placementv1beta1.IsLatestSnapshotLabel: "true", + placementv1beta1.OverrideTrackingLabel: testCROName, + }, + }, + Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ + OverrideHash: []byte("cluster-override-hash"), + OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: testPlacementName, + Scope: placementv1beta1.ClusterScoped, + }, + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/labels/test", + Value: apiextensionsv1.JSON{Raw: []byte(`"test"`)}, + }, + }, + }, + }, + }, + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "app", // from manifests/test_namespace.yaml + }, + }, + }, + }, + } +} + +func generateResourceOverrideSnapshot(testROName, testPlacementName string, scope placementv1beta1.ResourceScope) *placementv1beta1.ResourceOverrideSnapshot { + return &placementv1beta1.ResourceOverrideSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, testROName, 0), + Namespace: testNamespace, + Labels: map[string]string{ + placementv1beta1.OverrideIndexLabel: "0", + placementv1beta1.IsLatestSnapshotLabel: "true", + placementv1beta1.OverrideTrackingLabel: testROName, + }, + }, + Spec: placementv1beta1.ResourceOverrideSnapshotSpec{ + OverrideHash: []byte("resource-override-hash"), + OverrideSpec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: testPlacementName, + Scope: scope, + }, + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/labels/test", + Value: apiextensionsv1.JSON{Raw: []byte(`"test"`)}, + }, + }, + }, + }, + }, + ResourceSelectors: []placementv1beta1.ResourceSelector{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-configmap", + }, + }, + }, + }, + } +} diff --git a/pkg/controllers/rollout/controller_test.go b/pkg/controllers/rollout/controller_test.go index 7db16c30a..5d2c9ca25 100644 --- a/pkg/controllers/rollout/controller_test.go +++ b/pkg/controllers/rollout/controller_test.go @@ -2613,7 +2613,7 @@ func clusterResourcePlacementForTest(crpName string, policy *placementv1beta1.Pl Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", diff --git a/pkg/controllers/rollout/suite_test.go b/pkg/controllers/rollout/suite_test.go index d71697d40..6b1ed737a 100644 --- a/pkg/controllers/rollout/suite_test.go +++ b/pkg/controllers/rollout/suite_test.go @@ -29,6 
+29,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/klog/v2" @@ -42,6 +43,8 @@ import ( clusterv1beta1 "go.goms.io/fleet/apis/cluster/v1beta1" placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/utils" + "go.goms.io/fleet/pkg/utils/informer" ) var ( @@ -114,17 +117,29 @@ var _ = BeforeSuite(func() { } Expect(k8sClient.Create(ctx, namespace)).Should(Succeed()) + // setup informer manager for the reconciler + dynamicClient, err := dynamic.NewForConfig(cfg) + Expect(err).Should(Succeed()) + dynamicInformerManager := informer.NewInformerManager(dynamicClient, 0, ctx.Done()) + dynamicInformerManager.AddStaticResource(informer.APIResourceMeta{ + GroupVersionKind: utils.NamespaceGVK, + GroupVersionResource: utils.NamespaceGVR, + IsClusterScoped: true, + }, nil) + // setup our cluster scoped reconciler err = (&Reconciler{ - Client: k8sClient, - UncachedReader: mgr.GetAPIReader(), + Client: k8sClient, + UncachedReader: mgr.GetAPIReader(), + InformerManager: dynamicInformerManager, }).SetupWithManagerForClusterResourcePlacement(mgr) Expect(err).Should(Succeed()) // setup our namespace scoped reconciler err = (&Reconciler{ - Client: k8sClient, - UncachedReader: mgr.GetAPIReader(), + Client: k8sClient, + UncachedReader: mgr.GetAPIReader(), + InformerManager: dynamicInformerManager, }).SetupWithManagerForResourcePlacement(mgr) Expect(err).Should(Succeed()) diff --git a/pkg/controllers/clusterschedulingpolicysnapshot/controller.go b/pkg/controllers/schedulingpolicysnapshot/controller.go similarity index 96% rename from pkg/controllers/clusterschedulingpolicysnapshot/controller.go rename to pkg/controllers/schedulingpolicysnapshot/controller.go index 52dd9377e..484636b80 100644 --- a/pkg/controllers/clusterschedulingpolicysnapshot/controller.go +++ b/pkg/controllers/schedulingpolicysnapshot/controller.go @@ -14,8 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package clusterschedulingpolicysnapshot features a controller to reconcile the clusterSchedulingPolicySnapshot object. -package clusterschedulingpolicysnapshot +// Package schedulingpolicysnapshot features a controller to reconcile the clusterSchedulingPolicySnapshot or the schedulingPolicySnapshot objects. +package schedulingpolicysnapshot import ( "context" diff --git a/pkg/controllers/clusterschedulingpolicysnapshot/controller_integration_test.go b/pkg/controllers/schedulingpolicysnapshot/controller_integration_test.go similarity index 99% rename from pkg/controllers/clusterschedulingpolicysnapshot/controller_integration_test.go rename to pkg/controllers/schedulingpolicysnapshot/controller_integration_test.go index 796edad7c..da6a66e59 100644 --- a/pkg/controllers/clusterschedulingpolicysnapshot/controller_integration_test.go +++ b/pkg/controllers/schedulingpolicysnapshot/controller_integration_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package clusterschedulingpolicysnapshot +package schedulingpolicysnapshot import ( "time" diff --git a/pkg/controllers/clusterschedulingpolicysnapshot/suite_test.go b/pkg/controllers/schedulingpolicysnapshot/suite_test.go similarity index 97% rename from pkg/controllers/clusterschedulingpolicysnapshot/suite_test.go rename to pkg/controllers/schedulingpolicysnapshot/suite_test.go index 915cb2b25..2832989c7 100644 --- a/pkg/controllers/clusterschedulingpolicysnapshot/suite_test.go +++ b/pkg/controllers/schedulingpolicysnapshot/suite_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterschedulingpolicysnapshot +package schedulingpolicysnapshot import ( "context" @@ -54,7 +54,7 @@ var ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "ClusterSchedulingPolicySnapshot Controller Suite") + RunSpecs(t, "SchedulingPolicySnapshot Controller Suite") } var _ = BeforeSuite(func() { diff --git a/pkg/controllers/updaterun/controller.go b/pkg/controllers/updaterun/controller.go index d07987e4e..ca50c2d0d 100644 --- a/pkg/controllers/updaterun/controller.go +++ b/pkg/controllers/updaterun/controller.go @@ -41,10 +41,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/metrics" "go.goms.io/fleet/pkg/utils" "go.goms.io/fleet/pkg/utils/condition" "go.goms.io/fleet/pkg/utils/controller" - "go.goms.io/fleet/pkg/utils/controller/metrics" "go.goms.io/fleet/pkg/utils/informer" ) diff --git a/pkg/controllers/updaterun/controller_integration_test.go b/pkg/controllers/updaterun/controller_integration_test.go index 86bc9d7a1..e829d48fe 100644 --- a/pkg/controllers/updaterun/controller_integration_test.go +++ b/pkg/controllers/updaterun/controller_integration_test.go @@ -43,9 +43,9 @@ import ( clusterv1beta1 "go.goms.io/fleet/apis/cluster/v1beta1" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/metrics" "go.goms.io/fleet/pkg/utils" "go.goms.io/fleet/pkg/utils/condition" - "go.goms.io/fleet/pkg/utils/controller/metrics" metricsutils "go.goms.io/fleet/test/utils/metrics" ) @@ -350,7 +350,7 @@ func generateTestClusterResourcePlacement() *placementv1beta1.ClusterResourcePla Name: testCRPName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -607,7 +607,7 @@ func generateTestClusterResourceOverride() *placementv1beta1.ClusterResourceOver }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", diff --git a/pkg/controllers/updaterun/suite_test.go b/pkg/controllers/updaterun/suite_test.go index e9ece84d6..ab4e712bf 100644 --- a/pkg/controllers/updaterun/suite_test.go +++ b/pkg/controllers/updaterun/suite_test.go @@ -35,11 +35,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" clusterv1beta1 "go.goms.io/fleet/apis/cluster/v1beta1" placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1" 
placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/metrics" "go.goms.io/fleet/pkg/utils" "go.goms.io/fleet/pkg/utils/informer" ) @@ -96,11 +98,11 @@ var _ = BeforeSuite(func() { }) Expect(err).Should(Succeed()) - // make sure the k8s client is same as the controller client, or we can have cache delay + // Make sure the k8s client is same as the controller client, or we can have cache delay. By("set k8s client same as the controller manager") k8sClient = mgr.GetClient() - // setup informer manager for the reconciler + // Setup informer manager for the reconciler. dynamicClient, err := dynamic.NewForConfig(cfg) Expect(err).Should(Succeed()) dynamicInformerManager := informer.NewInformerManager(dynamicClient, 0, ctx.Done()) @@ -110,13 +112,16 @@ var _ = BeforeSuite(func() { IsClusterScoped: true, }, nil) - // setup our main reconciler + // Setup our main reconciler. err = (&Reconciler{ Client: k8sClient, InformerManager: dynamicInformerManager, }).SetupWithManager(mgr) Expect(err).Should(Succeed()) + // Register metrics. + ctrlmetrics.Registry.MustRegister(metrics.FleetUpdateRunStatusLastTimestampSeconds) + go func() { defer GinkgoRecover() err = mgr.Start(ctx) diff --git a/pkg/controllers/workapplier/availability_tracker.go b/pkg/controllers/workapplier/availability_tracker.go index 43fa634b8..52bfbc7e9 100644 --- a/pkg/controllers/workapplier/availability_tracker.go +++ b/pkg/controllers/workapplier/availability_tracker.go @@ -48,10 +48,10 @@ func (r *Reconciler) trackInMemberClusterObjAvailability(ctx context.Context, bu doWork := func(pieces int) { bundle := bundles[pieces] - if !isManifestObjectApplied(bundle.applyResTyp) { + if !isManifestObjectApplied(bundle.applyOrReportDiffResTyp) { // The manifest object in the bundle has not been applied yet. No availability check // is needed. - bundle.availabilityResTyp = ManifestProcessingAvailabilityResultTypeSkipped + bundle.availabilityResTyp = AvailabilityResultTypeSkipped // Note that some of the objects might have failed the pre-processing stage and do not // even have a GVR or a manifest object. @@ -69,7 +69,7 @@ func (r *Reconciler) trackInMemberClusterObjAvailability(ctx context.Context, bu if err != nil { // An unexpected error has occurred during the availability check. bundle.availabilityErr = err - bundle.availabilityResTyp = ManifestProcessingAvailabilityResultTypeFailed + bundle.availabilityResTyp = AvailabilityResultTypeFailed klog.ErrorS(err, "Failed to track the availability of the applied object in the member cluster", "work", workRef, "GVR", *bundle.gvr, "inMemberClusterObj", klog.KObj(bundle.inMemberClusterObj)) @@ -108,11 +108,11 @@ func trackInMemberClusterObjAvailabilityByGVR( if isDataResource(*gvr) { klog.V(2).InfoS("The object from the member cluster is a data object, consider it to be immediately available", "gvr", *gvr, "inMemberClusterObj", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Cannot determine the availability of the object from the member cluster; untrack its availability", "gvr", *gvr, "resource", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeNotTrackable, nil + return AvailabilityResultTypeNotTrackable, nil } } @@ -123,7 +123,7 @@ func trackDeploymentAvailability(inMemberClusterObj *unstructured.Unstructured) // Normally this branch should never run. 
wrappedErr := fmt.Errorf("failed to convert the unstructured object to a deployment: %w", err) _ = controller.NewUnexpectedBehaviorError(wrappedErr) - return ManifestProcessingAvailabilityResultTypeFailed, wrappedErr + return AvailabilityResultTypeFailed, wrappedErr } // Check if the deployment is available. @@ -136,10 +136,10 @@ func trackDeploymentAvailability(inMemberClusterObj *unstructured.Unstructured) requiredReplicas == deploy.Status.UpdatedReplicas && deploy.Status.UnavailableReplicas == 0 { klog.V(2).InfoS("Deployment is available", "deployment", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Deployment is not ready yet, will check later to see if it becomes available", "deployment", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeNotYetAvailable, nil + return AvailabilityResultTypeNotYetAvailable, nil } // trackStatefulSetAvailability tracks the availability of a stateful set in the member cluster. @@ -149,7 +149,7 @@ func trackStatefulSetAvailability(inMemberClusterObj *unstructured.Unstructured) // Normally this branch should never run. wrappedErr := fmt.Errorf("failed to convert the unstructured object to a stateful set: %w", err) _ = controller.NewUnexpectedBehaviorError(wrappedErr) - return ManifestProcessingAvailabilityResultTypeFailed, wrappedErr + return AvailabilityResultTypeFailed, wrappedErr } // Check if the stateful set is available. @@ -165,10 +165,10 @@ func trackStatefulSetAvailability(inMemberClusterObj *unstructured.Unstructured) statefulSet.Status.CurrentReplicas == statefulSet.Status.UpdatedReplicas && statefulSet.Status.CurrentRevision == statefulSet.Status.UpdateRevision { klog.V(2).InfoS("StatefulSet is available", "statefulSet", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Stateful set is not ready yet, will check later to see if it becomes available", "statefulSet", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeNotYetAvailable, nil + return AvailabilityResultTypeNotYetAvailable, nil } // trackDaemonSetAvailability tracks the availability of a daemon set in the member cluster. @@ -178,7 +178,7 @@ func trackDaemonSetAvailability(inMemberClusterObj *unstructured.Unstructured) ( wrappedErr := fmt.Errorf("failed to convert the unstructured object to a daemon set: %w", err) _ = controller.NewUnexpectedBehaviorError(wrappedErr) // Normally this branch should never run. - return ManifestProcessingAvailabilityResultTypeFailed, wrappedErr + return AvailabilityResultTypeFailed, wrappedErr } // Check if the daemonSet is available. 
@@ -190,10 +190,10 @@ func trackDaemonSetAvailability(inMemberClusterObj *unstructured.Unstructured) ( daemonSet.Status.NumberAvailable == daemonSet.Status.DesiredNumberScheduled && daemonSet.Status.CurrentNumberScheduled == daemonSet.Status.UpdatedNumberScheduled { klog.V(2).InfoS("DaemonSet is available", "daemonSet", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Daemon set is not ready yet, will check later to see if it becomes available", "daemonSet", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeNotYetAvailable, nil + return AvailabilityResultTypeNotYetAvailable, nil } // trackServiceAvailability tracks the availability of a service in the member cluster. @@ -202,7 +202,7 @@ func trackServiceAvailability(inMemberClusterObj *unstructured.Unstructured) (Ma if err := runtime.DefaultUnstructuredConverter.FromUnstructured(inMemberClusterObj.Object, &svc); err != nil { wrappedErr := fmt.Errorf("failed to convert the unstructured object to a service: %w", err) _ = controller.NewUnexpectedBehaviorError(wrappedErr) - return ManifestProcessingAvailabilityResultTypeFailed, wrappedErr + return AvailabilityResultTypeFailed, wrappedErr } switch svc.Spec.Type { case "": @@ -214,25 +214,25 @@ func trackServiceAvailability(inMemberClusterObj *unstructured.Unstructured) (Ma // IP assigned. if len(svc.Spec.ClusterIPs) > 0 && len(svc.Spec.ClusterIPs[0]) > 0 { klog.V(2).InfoS("Service is available", "service", klog.KObj(inMemberClusterObj), "serviceType", svc.Spec.Type) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Service is not ready yet, will check later to see if it becomes available", "service", klog.KObj(inMemberClusterObj), "serviceType", svc.Spec.Type) - return ManifestProcessingAvailabilityResultTypeNotYetAvailable, nil + return AvailabilityResultTypeNotYetAvailable, nil case corev1.ServiceTypeLoadBalancer: // Fleet considers a loadBalancer service to be available if it has at least one load // balancer IP or hostname assigned. if len(svc.Status.LoadBalancer.Ingress) > 0 && (len(svc.Status.LoadBalancer.Ingress[0].IP) > 0 || len(svc.Status.LoadBalancer.Ingress[0].Hostname) > 0) { klog.V(2).InfoS("Service is available", "service", klog.KObj(inMemberClusterObj), "serviceType", svc.Spec.Type) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Service is not ready yet, will check later to see if it becomes available", "service", klog.KObj(inMemberClusterObj), "serviceType", svc.Spec.Type) - return ManifestProcessingAvailabilityResultTypeNotYetAvailable, nil + return AvailabilityResultTypeNotYetAvailable, nil } // we don't know how to track the availability of when the service type is externalName klog.V(2).InfoS("Cannot determine the availability of external name services; untrack its availability", "service", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeNotTrackable, nil + return AvailabilityResultTypeNotTrackable, nil } // trackCRDAvailability tracks the availability of a custom resource definition in the member cluster. 
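Across availability_tracker.go this patch only shortens the result constant names (the ManifestProcessingAvailabilityResultType* prefix becomes AvailabilityResultType*); the ManifestProcessingAvailabilityResultType type and each tracker's decision logic are unchanged. A rough sketch of the renamed constant set follows, assuming a string-backed type and using placeholder values, since the actual declaration sits outside these hunks:

	// Sketch only: the identifiers are taken from the hunks; the underlying type's
	// kind and the literal values here are placeholders, not the real declaration.
	const (
		AvailabilityResultTypeSkipped         ManifestProcessingAvailabilityResultType = "Skipped"
		AvailabilityResultTypeFailed          ManifestProcessingAvailabilityResultType = "Failed"
		AvailabilityResultTypeAvailable       ManifestProcessingAvailabilityResultType = "Available"
		AvailabilityResultTypeNotYetAvailable ManifestProcessingAvailabilityResultType = "NotYetAvailable"
		AvailabilityResultTypeNotTrackable    ManifestProcessingAvailabilityResultType = "NotTrackable"
	)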
@@ -241,32 +241,32 @@ func trackCRDAvailability(inMemberClusterObj *unstructured.Unstructured) (Manife if err := runtime.DefaultUnstructuredConverter.FromUnstructured(inMemberClusterObj.Object, &crd); err != nil { wrappedErr := fmt.Errorf("failed to convert the unstructured object to a custom resource definition: %w", err) _ = controller.NewUnexpectedBehaviorError(wrappedErr) - return ManifestProcessingAvailabilityResultTypeFailed, wrappedErr + return AvailabilityResultTypeFailed, wrappedErr } // If both conditions are True, the CRD has become available. if apiextensionshelpers.IsCRDConditionTrue(&crd, apiextensionsv1.Established) && apiextensionshelpers.IsCRDConditionTrue(&crd, apiextensionsv1.NamesAccepted) { klog.V(2).InfoS("CustomResourceDefinition is available", "customResourceDefinition", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Custom resource definition is not ready yet, will check later to see if it becomes available", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeNotYetAvailable, nil + return AvailabilityResultTypeNotYetAvailable, nil } // trackPDBAvailability tracks the availability of a pod disruption budget in the member cluster func trackPDBAvailability(curObj *unstructured.Unstructured) (ManifestProcessingAvailabilityResultType, error) { var pdb policyv1.PodDisruptionBudget if err := runtime.DefaultUnstructuredConverter.FromUnstructured(curObj.Object, &pdb); err != nil { - return ManifestProcessingAvailabilityResultTypeFailed, controller.NewUnexpectedBehaviorError(err) + return AvailabilityResultTypeFailed, controller.NewUnexpectedBehaviorError(err) } // Check if conditions are up-to-date if poddisruptionbudget.ConditionsAreUpToDate(&pdb) { klog.V(2).InfoS("PodDisruptionBudget is available", "pdb", klog.KObj(curObj)) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Still need to wait for PodDisruptionBudget to be available", "pdb", klog.KObj(curObj)) - return ManifestProcessingAvailabilityResultTypeNotYetAvailable, nil + return AvailabilityResultTypeNotYetAvailable, nil } // isDataResource checks if the resource is a data resource; such resources are diff --git a/pkg/controllers/workapplier/availability_tracker_test.go b/pkg/controllers/workapplier/availability_tracker_test.go index f15c9c545..ffe220d2e 100644 --- a/pkg/controllers/workapplier/availability_tracker_test.go +++ b/pkg/controllers/workapplier/availability_tracker_test.go @@ -224,39 +224,39 @@ func TestTrackDeploymentAvailability(t *testing.T) { } testCases := []struct { - name string - deploy *appsv1.Deployment - wantManifestProcessingAvailabilityResultType ManifestProcessingAvailabilityResultType + name string + deploy *appsv1.Deployment + wantAvailabilityResultType ManifestProcessingAvailabilityResultType }{ { - name: "available deployment (w/ fixed replica count)", - deploy: availableDeployWithFixedReplicaCount, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available deployment (w/ fixed replica count)", + deploy: availableDeployWithFixedReplicaCount, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available deployment (w/ default replica count)", - deploy: availableDeployWithDefaultReplicaCount, - wantManifestProcessingAvailabilityResultType: 
ManifestProcessingAvailabilityResultTypeAvailable, + name: "available deployment (w/ default replica count)", + deploy: availableDeployWithDefaultReplicaCount, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "unavailable deployment with stale status", - deploy: unavailableDeployWithStaleStatus, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable deployment with stale status", + deploy: unavailableDeployWithStaleStatus, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable deployment with not enough available replicas", - deploy: unavailableDeployWithNotEnoughAvailableReplicas, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable deployment with not enough available replicas", + deploy: unavailableDeployWithNotEnoughAvailableReplicas, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable deployment with not enough updated replicas", - deploy: unavailableDeployWithNotEnoughUpdatedReplicas, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable deployment with not enough updated replicas", + deploy: unavailableDeployWithNotEnoughUpdatedReplicas, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable deployment with unavailable replicas", - deploy: unavailableDeployWithMoreReplicasThanRequired, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable deployment with unavailable replicas", + deploy: unavailableDeployWithMoreReplicasThanRequired, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, } @@ -266,8 +266,8 @@ func TestTrackDeploymentAvailability(t *testing.T) { if err != nil { t.Fatalf("trackDeploymentAvailability() = %v, want no error", err) } - if gotResTyp != tc.wantManifestProcessingAvailabilityResultType { - t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantManifestProcessingAvailabilityResultType) + if gotResTyp != tc.wantAvailabilityResultType { + t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantAvailabilityResultType) } }) } @@ -341,39 +341,39 @@ func TestTrackStatefulSetAvailability(t *testing.T) { } testCases := []struct { - name string - statefulSet *appsv1.StatefulSet - wantManifestProcessingAvailabilityResultType ManifestProcessingAvailabilityResultType + name string + statefulSet *appsv1.StatefulSet + wantAvailabilityResultType ManifestProcessingAvailabilityResultType }{ { - name: "available stateful set (w/ fixed replica count)", - statefulSet: availableStatefulSetWithFixedReplicaCount, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available stateful set (w/ fixed replica count)", + statefulSet: availableStatefulSetWithFixedReplicaCount, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available stateful set (w/ default replica count)", - statefulSet: availableStatefulSetWithDefaultReplicaCount, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available stateful set (w/ default replica count)", + statefulSet: availableStatefulSetWithDefaultReplicaCount, + wantAvailabilityResultType: 
AvailabilityResultTypeAvailable, }, { - name: "unavailable stateful set with stale status", - statefulSet: unavailableStatefulSetWithStaleStatus, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable stateful set with stale status", + statefulSet: unavailableStatefulSetWithStaleStatus, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable stateful set with not enough available replicas", - statefulSet: unavailableStatefulSetWithNotEnoughAvailableReplicas, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable stateful set with not enough available replicas", + statefulSet: unavailableStatefulSetWithNotEnoughAvailableReplicas, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable stateful set with not enough current replicas", - statefulSet: unavailableStatefulSetWithNotEnoughCurrentReplicas, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable stateful set with not enough current replicas", + statefulSet: unavailableStatefulSetWithNotEnoughCurrentReplicas, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable stateful set with not latest revision", - statefulSet: unavailableStatefulSetWithNotLatestRevision, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable stateful set with not latest revision", + statefulSet: unavailableStatefulSetWithNotLatestRevision, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, } @@ -383,8 +383,8 @@ func TestTrackStatefulSetAvailability(t *testing.T) { if err != nil { t.Fatalf("trackStatefulSetAvailability() = %v, want no error", err) } - if gotResTyp != tc.wantManifestProcessingAvailabilityResultType { - t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantManifestProcessingAvailabilityResultType) + if gotResTyp != tc.wantAvailabilityResultType { + t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantAvailabilityResultType) } }) } @@ -427,29 +427,29 @@ func TestTrackDaemonSetAvailability(t *testing.T) { } testCases := []struct { - name string - daemonSet *appsv1.DaemonSet - wantManifestProcessingAvailabilityResultType ManifestProcessingAvailabilityResultType + name string + daemonSet *appsv1.DaemonSet + wantAvailabilityResultType ManifestProcessingAvailabilityResultType }{ { - name: "available daemon set", - daemonSet: availableDaemonSet, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available daemon set", + daemonSet: availableDaemonSet, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "unavailable daemon set with stale status", - daemonSet: unavailableDaemonSetWithStaleStatus, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable daemon set with stale status", + daemonSet: unavailableDaemonSetWithStaleStatus, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable daemon set with not enough available pods", - daemonSet: unavailableDaemonSetWithNotEnoughAvailablePods, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + 
name: "unavailable daemon set with not enough available pods", + daemonSet: unavailableDaemonSetWithNotEnoughAvailablePods, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable daemon set with not enough updated pods", - daemonSet: unavailableDaemonSetWithNotEnoughUpdatedPods, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable daemon set with not enough updated pods", + daemonSet: unavailableDaemonSetWithNotEnoughUpdatedPods, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, } @@ -459,8 +459,8 @@ func TestTrackDaemonSetAvailability(t *testing.T) { if err != nil { t.Fatalf("trackDaemonSetAvailability() = %v, want no error", err) } - if gotResTyp != tc.wantManifestProcessingAvailabilityResultType { - t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantManifestProcessingAvailabilityResultType) + if gotResTyp != tc.wantAvailabilityResultType { + t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantAvailabilityResultType) } }) } @@ -469,9 +469,9 @@ func TestTrackDaemonSetAvailability(t *testing.T) { // TestTrackServiceAvailability tests the trackServiceAvailability function. func TestTrackServiceAvailability(t *testing.T) { testCases := []struct { - name string - service *corev1.Service - wantManifestProcessingAvailabilityResultType ManifestProcessingAvailabilityResultType + name string + service *corev1.Service + wantAvailabilityResultType ManifestProcessingAvailabilityResultType }{ { name: "untrackable service (external name type)", @@ -485,7 +485,7 @@ func TestTrackServiceAvailability(t *testing.T) { ClusterIPs: []string{"192.168.1.1"}, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotTrackable, + wantAvailabilityResultType: AvailabilityResultTypeNotTrackable, }, { name: "available default typed service (IP assigned)", @@ -499,7 +499,7 @@ func TestTrackServiceAvailability(t *testing.T) { ClusterIPs: []string{"192.168.1.1"}, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { name: "available ClusterIP service (IP assigned)", @@ -514,7 +514,7 @@ func TestTrackServiceAvailability(t *testing.T) { ClusterIPs: []string{"192.168.1.1"}, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { name: "available headless service", @@ -528,7 +528,7 @@ func TestTrackServiceAvailability(t *testing.T) { ClusterIPs: []string{"None"}, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { name: "available node port service (IP assigned)", @@ -543,7 +543,7 @@ func TestTrackServiceAvailability(t *testing.T) { ClusterIPs: []string{"192.168.1.1"}, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { name: "unavailable ClusterIP service (no IP assigned)", @@ -557,7 +557,7 @@ func TestTrackServiceAvailability(t *testing.T) { ClusterIP: "13.6.2.2", }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + wantAvailabilityResultType: 
AvailabilityResultTypeNotYetAvailable, }, { name: "available LoadBalancer service (IP assigned)", @@ -579,7 +579,7 @@ func TestTrackServiceAvailability(t *testing.T) { }, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { name: "available LoadBalancer service (hostname assigned)", @@ -601,7 +601,7 @@ func TestTrackServiceAvailability(t *testing.T) { }, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { name: "unavailable LoadBalancer service (ingress not ready)", @@ -619,7 +619,7 @@ func TestTrackServiceAvailability(t *testing.T) { }, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, } @@ -629,8 +629,8 @@ func TestTrackServiceAvailability(t *testing.T) { if err != nil { t.Errorf("trackServiceAvailability() = %v, want no error", err) } - if gotResTyp != tc.wantManifestProcessingAvailabilityResultType { - t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantManifestProcessingAvailabilityResultType) + if gotResTyp != tc.wantAvailabilityResultType { + t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantAvailabilityResultType) } }) } @@ -681,24 +681,24 @@ func TestTrackCRDAvailability(t *testing.T) { } testCases := []struct { - name string - crd *apiextensionsv1.CustomResourceDefinition - wantManifestProcessingAvailabilityResultType ManifestProcessingAvailabilityResultType + name string + crd *apiextensionsv1.CustomResourceDefinition + wantAvailabilityResultType ManifestProcessingAvailabilityResultType }{ { - name: "available CRD", - crd: availableCRD, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available CRD", + crd: availableCRD, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "unavailable CRD (not established)", - crd: unavailableCRDNotEstablished, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable CRD (not established)", + crd: unavailableCRDNotEstablished, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable CRD (name not accepted)", - crd: unavailableCRDNameNotAccepted, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable CRD (name not accepted)", + crd: unavailableCRDNameNotAccepted, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, } @@ -708,8 +708,8 @@ func TestTrackCRDAvailability(t *testing.T) { if err != nil { t.Fatalf("trackCRDAvailability() = %v, want no error", err) } - if gotResTyp != tc.wantManifestProcessingAvailabilityResultType { - t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantManifestProcessingAvailabilityResultType) + if gotResTyp != tc.wantAvailabilityResultType { + t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantAvailabilityResultType) } }) } @@ -768,24 +768,24 @@ func TestTrackPDBAvailability(t *testing.T) { } testCases := []struct { - name string - pdb *policyv1.PodDisruptionBudget - wantManifestProcessingAvailabilityResultType 
ManifestProcessingAvailabilityResultType + name string + pdb *policyv1.PodDisruptionBudget + wantAvailabilityResultType ManifestProcessingAvailabilityResultType }{ { - name: "available PDB", - pdb: availablePDB, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available PDB", + pdb: availablePDB, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "unavailable PDB (insufficient pods)", - pdb: unavailablePDBInsufficientPods, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable PDB (insufficient pods)", + pdb: unavailablePDBInsufficientPods, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable PDB (stale condition)", - pdb: unavailablePDBStaleCondition, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable PDB (stale condition)", + pdb: unavailablePDBStaleCondition, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, } @@ -795,8 +795,8 @@ func TestTrackPDBAvailability(t *testing.T) { if err != nil { t.Fatalf("trackPDBAvailability() = %v, want no error", err) } - if gotResTyp != tc.wantManifestProcessingAvailabilityResultType { - t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantManifestProcessingAvailabilityResultType) + if gotResTyp != tc.wantAvailabilityResultType { + t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantAvailabilityResultType) } }) } @@ -876,124 +876,124 @@ func TestTrackInMemberClusterObjAvailabilityByGVR(t *testing.T) { } testCases := []struct { - name string - gvr schema.GroupVersionResource - inMemberClusterObj *unstructured.Unstructured - wantManifestProcessingAvailabilityResultType ManifestProcessingAvailabilityResultType + name string + gvr schema.GroupVersionResource + inMemberClusterObj *unstructured.Unstructured + wantAvailabilityResultType ManifestProcessingAvailabilityResultType }{ { - name: "available deployment", - gvr: utils.DeploymentGVR, - inMemberClusterObj: toUnstructured(t, availableDeploy), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available deployment", + gvr: utils.DeploymentGVR, + inMemberClusterObj: toUnstructured(t, availableDeploy), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available stateful set", - gvr: utils.StatefulSetGVR, - inMemberClusterObj: toUnstructured(t, availableStatefulSet), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available stateful set", + gvr: utils.StatefulSetGVR, + inMemberClusterObj: toUnstructured(t, availableStatefulSet), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available service", - gvr: utils.ServiceGVR, - inMemberClusterObj: toUnstructured(t, availableSvc), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available service", + gvr: utils.ServiceGVR, + inMemberClusterObj: toUnstructured(t, availableSvc), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available daemon set", - gvr: utils.DaemonSetGVR, - inMemberClusterObj: toUnstructured(t, availableDaemonSet), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available daemon 
set", + gvr: utils.DaemonSetGVR, + inMemberClusterObj: toUnstructured(t, availableDaemonSet), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available custom resource definition", - gvr: utils.CustomResourceDefinitionGVR, - inMemberClusterObj: toUnstructured(t, availableCRD), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available custom resource definition", + gvr: utils.CustomResourceDefinitionGVR, + inMemberClusterObj: toUnstructured(t, availableCRD), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "data object (namespace)", - gvr: utils.NamespaceGVR, - inMemberClusterObj: toUnstructured(t, ns.DeepCopy()), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "data object (namespace)", + gvr: utils.NamespaceGVR, + inMemberClusterObj: toUnstructured(t, ns.DeepCopy()), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "data object (config map)", - gvr: utils.ConfigMapGVR, - inMemberClusterObj: toUnstructured(t, cm), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "data object (config map)", + gvr: utils.ConfigMapGVR, + inMemberClusterObj: toUnstructured(t, cm), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "untrackable object (job)", - gvr: utils.JobGVR, - inMemberClusterObj: toUnstructured(t, untrackableJob), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotTrackable, + name: "untrackable object (job)", + gvr: utils.JobGVR, + inMemberClusterObj: toUnstructured(t, untrackableJob), + wantAvailabilityResultType: AvailabilityResultTypeNotTrackable, }, { - name: "available service account", - gvr: utils.ServiceAccountGVR, - inMemberClusterObj: toUnstructured(t, &corev1.ServiceAccount{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available service account", + gvr: utils.ServiceAccountGVR, + inMemberClusterObj: toUnstructured(t, &corev1.ServiceAccount{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available network policy", - gvr: utils.NetworkPolicyGVR, - inMemberClusterObj: toUnstructured(t, &networkingv1.NetworkPolicy{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available network policy", + gvr: utils.NetworkPolicyGVR, + inMemberClusterObj: toUnstructured(t, &networkingv1.NetworkPolicy{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available csi driver", - gvr: utils.CSIDriverGVR, - inMemberClusterObj: toUnstructured(t, &storagev1.CSIDriver{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available csi driver", + gvr: utils.CSIDriverGVR, + inMemberClusterObj: toUnstructured(t, &storagev1.CSIDriver{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available csi node", - gvr: utils.CSINodeGVR, - inMemberClusterObj: toUnstructured(t, &storagev1.CSINode{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available csi node", + gvr: utils.CSINodeGVR, + inMemberClusterObj: toUnstructured(t, &storagev1.CSINode{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available storage class", - gvr: 
utils.StorageClassGVR, - inMemberClusterObj: toUnstructured(t, &storagev1.StorageClass{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available storage class", + gvr: utils.StorageClassGVR, + inMemberClusterObj: toUnstructured(t, &storagev1.StorageClass{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available csi storage capacity", - gvr: utils.CSIStorageCapacityGVR, - inMemberClusterObj: toUnstructured(t, &storagev1.CSIStorageCapacity{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available csi storage capacity", + gvr: utils.CSIStorageCapacityGVR, + inMemberClusterObj: toUnstructured(t, &storagev1.CSIStorageCapacity{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available controller revision", - gvr: utils.ControllerRevisionGVR, - inMemberClusterObj: toUnstructured(t, &appsv1.ControllerRevision{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available controller revision", + gvr: utils.ControllerRevisionGVR, + inMemberClusterObj: toUnstructured(t, &appsv1.ControllerRevision{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available ingress class", - gvr: utils.IngressClassGVR, - inMemberClusterObj: toUnstructured(t, &networkingv1.IngressClass{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available ingress class", + gvr: utils.IngressClassGVR, + inMemberClusterObj: toUnstructured(t, &networkingv1.IngressClass{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available limit range", - gvr: utils.LimitRangeGVR, - inMemberClusterObj: toUnstructured(t, &corev1.LimitRange{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available limit range", + gvr: utils.LimitRangeGVR, + inMemberClusterObj: toUnstructured(t, &corev1.LimitRange{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available resource quota", - gvr: utils.ResourceQuotaGVR, - inMemberClusterObj: toUnstructured(t, &corev1.ResourceQuota{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available resource quota", + gvr: utils.ResourceQuotaGVR, + inMemberClusterObj: toUnstructured(t, &corev1.ResourceQuota{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available priority class", - gvr: utils.PriorityClassGVR, - inMemberClusterObj: toUnstructured(t, &schedulingv1.PriorityClass{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available priority class", + gvr: utils.PriorityClassGVR, + inMemberClusterObj: toUnstructured(t, &schedulingv1.PriorityClass{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, } @@ -1003,8 +1003,8 @@ func TestTrackInMemberClusterObjAvailabilityByGVR(t *testing.T) { if err != nil { t.Fatalf("trackInMemberClusterObjAvailabilityByGVR() = %v, want no error", err) } - if gotResTyp != tc.wantManifestProcessingAvailabilityResultType { - t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantManifestProcessingAvailabilityResultType) + if gotResTyp != tc.wantAvailabilityResultType { + t.Errorf("manifestProcessingAvailabilityResultType = %v, want 
%v", gotResTyp, tc.wantAvailabilityResultType) } }) } @@ -1047,36 +1047,36 @@ func TestTrackInMemberClusterObjAvailability(t *testing.T) { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 0, }, - gvr: &utils.DeploymentGVR, - inMemberClusterObj: toUnstructured(t, availableDeploy), - applyResTyp: ManifestProcessingApplyResultTypeApplied, + gvr: &utils.DeploymentGVR, + inMemberClusterObj: toUnstructured(t, availableDeploy), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, // A failed to get applied service. { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 1, }, - gvr: &utils.ServiceGVR, - inMemberClusterObj: nil, - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, + gvr: &utils.ServiceGVR, + inMemberClusterObj: nil, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, }, // An unavailable daemon set. { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 2, }, - gvr: &utils.DaemonSetGVR, - inMemberClusterObj: toUnstructured(t, unavailableDaemonSet), - applyResTyp: ManifestProcessingApplyResultTypeApplied, + gvr: &utils.DaemonSetGVR, + inMemberClusterObj: toUnstructured(t, unavailableDaemonSet), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, // An untrackable job. { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 3, }, - gvr: &utils.JobGVR, - inMemberClusterObj: toUnstructured(t, untrackableJob), - applyResTyp: ManifestProcessingApplyResultTypeApplied, + gvr: &utils.JobGVR, + inMemberClusterObj: toUnstructured(t, untrackableJob), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, }, wantBundles: []*manifestProcessingBundle{ @@ -1084,37 +1084,37 @@ func TestTrackInMemberClusterObjAvailability(t *testing.T) { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 0, }, - gvr: &utils.DeploymentGVR, - inMemberClusterObj: toUnstructured(t, availableDeploy), - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + gvr: &utils.DeploymentGVR, + inMemberClusterObj: toUnstructured(t, availableDeploy), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeAvailable, }, { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 1, }, - gvr: &utils.ServiceGVR, - inMemberClusterObj: nil, - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + gvr: &utils.ServiceGVR, + inMemberClusterObj: nil, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, + availabilityResTyp: AvailabilityResultTypeSkipped, }, { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 2, }, - gvr: &utils.DaemonSetGVR, - inMemberClusterObj: toUnstructured(t, unavailableDaemonSet), - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + gvr: &utils.DaemonSetGVR, + inMemberClusterObj: toUnstructured(t, unavailableDaemonSet), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeNotYetAvailable, }, { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 3, }, - gvr: &utils.JobGVR, - inMemberClusterObj: toUnstructured(t, untrackableJob), - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, + gvr: &utils.JobGVR, + inMemberClusterObj: toUnstructured(t, untrackableJob), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: 
AvailabilityResultTypeNotTrackable, }, }, }, diff --git a/pkg/controllers/workapplier/backoff.go b/pkg/controllers/workapplier/backoff.go index 8c1a6433d..43b4df240 100644 --- a/pkg/controllers/workapplier/backoff.go +++ b/pkg/controllers/workapplier/backoff.go @@ -60,7 +60,7 @@ const ( ) const ( - processingResultStrTpl = "%s,%s,%s" + processingResultStrTpl = "%s,%s" ) // RequeueMultiStageWithExponentialBackoffRateLimiter is a rate limiter that allows requeues of various @@ -287,7 +287,7 @@ func computeProcessingResultHash(work *fleetv1beta1.Work, bundles []*manifestPro // The order of manifests is stable in a bundle. processingResults := make([]string, 0, len(bundles)) for _, bundle := range bundles { - processingResults = append(processingResults, fmt.Sprintf(processingResultStrTpl, bundle.applyResTyp, bundle.availabilityResTyp, bundle.reportDiffResTyp)) + processingResults = append(processingResults, fmt.Sprintf(processingResultStrTpl, bundle.applyOrReportDiffResTyp, bundle.availabilityResTyp)) } processingResHash, err := resource.HashOf(processingResults) diff --git a/pkg/controllers/workapplier/backoff_test.go b/pkg/controllers/workapplier/backoff_test.go index 553883390..75f94b7a4 100644 --- a/pkg/controllers/workapplier/backoff_test.go +++ b/pkg/controllers/workapplier/backoff_test.go @@ -846,7 +846,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, }, wantRequeueDelaySeconds: 5, // Use fixed delay for the third time, since the processing result has changed. @@ -862,7 +862,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, }, wantRequeueDelaySeconds: 10, // Start to slow back off for the third time. @@ -878,7 +878,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, }, wantRequeueDelaySeconds: 20, // The slow back off continues. @@ -894,7 +894,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, }, wantRequeueDelaySeconds: 100, // Start to fast back off again. @@ -910,7 +910,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, }, wantRequeueDelaySeconds: 200, // Reached the max. cap. @@ -926,7 +926,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, }, }, wantRequeueDelaySeconds: 5, // Use fixed delay for the fourth time, since both generation and processing result have changed. 
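The backoff.go change above narrows the per-manifest processing-result string from three fields to two, matching the consolidated apply/report-diff result type. The sketch below shows the shape of that computation under stated assumptions: the repo's resource.HashOf encoding is not reproduced here, so SHA-256 over a JSON encoding stands in purely for illustration.

```go
package sketch

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// bundleResult is an illustrative reduction of manifestProcessingBundle to the two
// fields that now feed the processing-result hash.
type bundleResult struct {
	ApplyOrReportDiffResTyp string
	AvailabilityResTyp      string
}

// computeProcessingResultHashSketch approximates the patched computeProcessingResultHash:
// each manifest contributes an "<applyOrReportDiffResTyp>,<availabilityResTyp>" string
// (the two-verb processingResultStrTpl), and the ordered list is hashed. The order of
// manifests matters, which is why reordering bundles changes the expected hashes in the
// tests below.
func computeProcessingResultHashSketch(bundles []bundleResult) (string, error) {
	results := make([]string, 0, len(bundles))
	for _, b := range bundles {
		results = append(results, fmt.Sprintf("%s,%s", b.ApplyOrReportDiffResTyp, b.AvailabilityResTyp))
	}
	raw, err := json.Marshal(results)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", sha256.Sum256(raw)), nil
}
```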
@@ -976,8 +976,8 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeNotYetAvailable, }, }, wantRequeueDelaySeconds: 5, @@ -1000,8 +1000,8 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeAvailable, }, }, wantRequeueDelaySeconds: 5, // Use fixed delay, since the processing result has changed. @@ -1024,8 +1024,8 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeAvailable, }, }, wantRequeueDelaySeconds: 10, // Start the slow backoff. @@ -1048,8 +1048,8 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeAvailable, }, }, wantRequeueDelaySeconds: 50, // Skip to fast back off. @@ -1072,8 +1072,8 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeAvailable, }, }, wantRequeueDelaySeconds: 200, // Reached the max. cap. @@ -1098,7 +1098,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, }, }, wantRequeueDelaySeconds: 5, // Use fixed delay, since the processing result has changed. @@ -1123,7 +1123,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, }, }, wantRequeueDelaySeconds: 10, // Start the slow backoff. @@ -1148,7 +1148,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, }, }, wantRequeueDelaySeconds: 50, // Skip to fast back off. @@ -1173,7 +1173,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, }, }, wantRequeueDelaySeconds: 200, // Reached the max. cap. 
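The expected delays in these cases (a fixed 5s whenever the processing result changes, a slow geometric stage, a skip to a faster stage, then the 200s cap) exercise the multi-stage requeue rate limiter. The following is a heavily simplified, hypothetical model of that behavior; the field names and factors are illustrative and this is not the repo's RequeueMultiStageWithExponentialBackoffRateLimiter.

```go
package sketch

// backoffSketch models the requeue behavior the test cases above exercise.
type backoffSketch struct {
	fixedDelaySeconds float64 // fixed delay used right after the processing result changes (5s in the tests)
	slowFactor        float64 // growth factor during the slow back off stage
	fastFactor        float64 // larger growth factor once the work settles ("skip to fast back off")
	capSeconds        float64 // upper bound on the delay (200s in the tests)

	lastResultHash string
	current        float64
}

// next returns the requeue delay for the given processing-result hash. A change in the
// hash resets the limiter to the fixed delay; otherwise the delay grows geometrically,
// slow first and then fast, until it reaches the cap.
func (b *backoffSketch) next(resultHash string, fast bool) float64 {
	if resultHash != b.lastResultHash {
		b.lastResultHash = resultHash
		b.current = b.fixedDelaySeconds
		return b.current
	}
	factor := b.slowFactor
	if fast {
		factor = b.fastFactor
	}
	b.current *= factor
	if b.current > b.capSeconds {
		b.current = b.capSeconds
	}
	return b.current
}
```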
@@ -1198,7 +1198,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFoundDiff, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, }, }, wantRequeueDelaySeconds: 5, // Use fixed delay, since the processing result has changed. @@ -1223,7 +1223,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFoundDiff, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, }, }, wantRequeueDelaySeconds: 10, // Start the slow backoff. @@ -1248,7 +1248,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFoundDiff, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, }, }, wantRequeueDelaySeconds: 50, // Skip to fast back off. @@ -1474,111 +1474,111 @@ func TestComputeProcessingResultHash(t *testing.T) { bundles: []*manifestProcessingBundle{ {}, }, - wantHash: "ec6e5a3a69851e2b956b6f682bad1d2355faa874e635b4d2f3e33ce84a8f788a", + wantHash: "9637daf658d40f9ab65fc1f86e78f8496692ec8160389758039f752756f0505a", }, { name: "single manifest, apply op failure (pre-processing)", bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeDecodingErred, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeDecodingErred, }, }, - wantHash: "a4cce45a59ced1c0b218b7e2b07920e6515a0bd4e80141f114cf29a1e2062790", + wantHash: "86ab4bd237c2fa247e493a58e91895fe11e7bd2fcfb422890b8c296eaf6cc4ce", }, { name: "single manifest, apply op failure (processing, no error message)", bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, }, }, - wantHash: "f4610fbac163e867a62672a3e95547e8321fa09709ecac73308dfff8fde49511", + wantHash: "0ecc47caf32d81607057dcfb22f60416fe1f1f7930761edb92d4cb7fee4a075f", }, { name: "single manifest, apply op failure (processing, with error message)", bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, - applyErr: fmt.Errorf("failed to apply manifest"), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, + applyOrReportDiffErr: fmt.Errorf("failed to apply manifest"), }, }, // Note that this expected hash value is the same as the previous one. 
- wantHash: "f4610fbac163e867a62672a3e95547e8321fa09709ecac73308dfff8fde49511", + wantHash: "0ecc47caf32d81607057dcfb22f60416fe1f1f7930761edb92d4cb7fee4a075f", }, { name: "single manifest, availability check failure", bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeNotYetAvailable, }, }, - wantHash: "9110cc26c9559ba84e909593a089fd495eb6e86479c9430d5673229ebe2d1275", + wantHash: "339954d2619310502c70300409bdf65fd6f14d81c12cfade84879e713ea850ea", }, { name: "single manifest, apply op + availability check success", bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeAvailable, }, }, - wantHash: "d922098ce1f87b79fc26fad06355ea4eba77cc5a86e742e9159c58cce5bd4a31", + wantHash: "708387dadaf07f43d46b032c3afb5d984868107b297dad9c99c2d258584d2377", }, { name: "single manifest, diff reporting failure", bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFailed, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToReportDiff, }, }, - wantHash: "dd541a034eb568cf92da960b884dece6d136460399ab68958ce8fc6730c91d45", + wantHash: "c5ffc29f5050ad825711a77012d6be36550035d848deb990082fff196f886906", }, { name: "single manifest, diff reporting success", bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, }, }, - wantHash: "f9b66724190d196e1cf19247a0447a6ed0d71697dcb8016c0bc3b3726a757e1a", + wantHash: "4bc69d33a287d57e25a5406e47722b1cfa3965472cf9324d3ace2302dd0e9f02", }, { name: "multiple manifests (assorted)", bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, - applyErr: fmt.Errorf("failed to apply manifest"), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, + applyOrReportDiffErr: fmt.Errorf("failed to apply manifest"), }, { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeAvailable, }, { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeNotTrackable, }, }, - wantHash: "09c6195d94bfc84cdbb365bb615d3461a457a355b9f74049488a1db38e979018", + wantHash: "1a001803829ef5509d24d60806593cb5fbfb0445d32b9ab1301e5faea57bbaa9", }, { name: "multiple manifests (assorted, different order)", bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeAvailable, }, { - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, - applyErr: fmt.Errorf("failed to apply manifest"), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, + applyOrReportDiffErr: 
fmt.Errorf("failed to apply manifest"), }, { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeNotTrackable, }, }, // Note that different orders of the manifests result in different hashes. - wantHash: "ef1a6e8d207f5b86a8c7f39417eede40abc6e4f1d5ef9feceb5797f14a834f58", + wantHash: "15461229a70cecc0096aea95c08dbda81990985d69bd6f6a4448254461b84886", }, } diff --git a/pkg/controllers/workapplier/controller.go b/pkg/controllers/workapplier/controller.go index 403b0940e..0a8acd89b 100644 --- a/pkg/controllers/workapplier/controller.go +++ b/pkg/controllers/workapplier/controller.go @@ -34,6 +34,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" "k8s.io/utils/ptr" + "k8s.io/utils/set" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -231,9 +232,6 @@ type Reconciler struct { } // NewReconciler returns a new Work object reconciler for the work applier. -// -// TO-DO (chenyu1): evaluate if KubeFleet needs to expose the requeue rate limiter -// parameters as command-line arguments for user-side configuration. func NewReconciler( hubClient client.Client, workNameSpace string, spokeDynamicClient dynamic.Interface, spokeClient client.Client, restMapper meta.RESTMapper, @@ -266,93 +264,105 @@ func NewReconciler( } } -type manifestProcessingAppliedResultType string +type ManifestProcessingApplyOrReportDiffResultType string const ( - // The result types and descriptions for processing failures. - ManifestProcessingApplyResultTypeDecodingErred manifestProcessingAppliedResultType = "DecodingErred" - ManifestProcessingApplyResultTypeFoundGenerateName manifestProcessingAppliedResultType = "FoundGenerateName" - ManifestProcessingApplyResultTypeDuplicated manifestProcessingAppliedResultType = "Duplicated" - ManifestProcessingApplyResultTypeFailedToFindObjInMemberCluster manifestProcessingAppliedResultType = "FailedToFindObjInMemberCluster" - ManifestProcessingApplyResultTypeFailedToTakeOver manifestProcessingAppliedResultType = "FailedToTakeOver" - ManifestProcessingApplyResultTypeNotTakenOver manifestProcessingAppliedResultType = "NotTakenOver" - ManifestProcessingApplyResultTypeFailedToRunDriftDetection manifestProcessingAppliedResultType = "FailedToRunDriftDetection" - ManifestProcessingApplyResultTypeFoundDrifts manifestProcessingAppliedResultType = "FoundDrifts" + // The result types for apply op failures. 
+ ApplyOrReportDiffResTypeDecodingErred ManifestProcessingApplyOrReportDiffResultType = "DecodingErred" + ApplyOrReportDiffResTypeFoundGenerateName ManifestProcessingApplyOrReportDiffResultType = "FoundGenerateName" + ApplyOrReportDiffResTypeDuplicated ManifestProcessingApplyOrReportDiffResultType = "Duplicated" + ApplyOrReportDiffResTypeFailedToFindObjInMemberCluster ManifestProcessingApplyOrReportDiffResultType = "FailedToFindObjInMemberCluster" + ApplyOrReportDiffResTypeFailedToTakeOver ManifestProcessingApplyOrReportDiffResultType = "FailedToTakeOver" + ApplyOrReportDiffResTypeNotTakenOver ManifestProcessingApplyOrReportDiffResultType = "NotTakenOver" + ApplyOrReportDiffResTypeFailedToRunDriftDetection ManifestProcessingApplyOrReportDiffResultType = "FailedToRunDriftDetection" + ApplyOrReportDiffResTypeFoundDrifts ManifestProcessingApplyOrReportDiffResultType = "FoundDrifts" // Note that the reason string below uses the same value as kept in the old work applier. - ManifestProcessingApplyResultTypeFailedToApply manifestProcessingAppliedResultType = "ManifestApplyFailed" + ApplyOrReportDiffResTypeFailedToApply ManifestProcessingApplyOrReportDiffResultType = "ManifestApplyFailed" - // The result type and description for partially successfully processing attempts. - ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection manifestProcessingAppliedResultType = "AppliedWithFailedDriftDetection" + // The result type and description for successful apply ops. + ApplyOrReportDiffResTypeApplied ManifestProcessingApplyOrReportDiffResultType = "Applied" +) + +const ( + // The descriptions for different apply op result types. - ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetectionDescription = "Manifest has been applied successfully, but drift detection has failed" + // The description for partially successful apply ops. + ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection ManifestProcessingApplyOrReportDiffResultType = "AppliedWithFailedDriftDetection" + // The description for successful apply ops. + ApplyOrReportDiffResTypeAppliedDescription = "Manifest has been applied successfully" +) + +const ( + // The result type for diff reporting failures. + ApplyOrReportDiffResTypeFailedToReportDiff ManifestProcessingApplyOrReportDiffResultType = "FailedToReportDiff" - // The result type and description for successful processing attempts. - ManifestProcessingApplyResultTypeApplied manifestProcessingAppliedResultType = "Applied" + // The result type for successful diff reportings. + ApplyOrReportDiffResTypeFoundDiff ManifestProcessingApplyOrReportDiffResultType = "FoundDiff" + ApplyOrReportDiffResTypeNoDiffFound ManifestProcessingApplyOrReportDiffResultType = "NoDiffFound" +) - ManifestProcessingApplyResultTypeAppliedDescription = "Manifest has been applied successfully" +const ( + // The descriptions for different diff reporting result types. + ApplyOrReportDiffResTypeFailedToReportDiffDescription = "Failed to report the diff between the hub cluster and the member cluster (error = %s)" + ApplyOrReportDiffResTypeNoDiffFoundDescription = "No diff has been found between the hub cluster and the member cluster" + ApplyOrReportDiffResTypeFoundDiffDescription = "Diff has been found between the hub cluster and the member cluster" +) - // A special result type for the case where no apply is performed (i.e., the ReportDiff mode). - ManifestProcessingApplyResultTypeNoApplyPerformed manifestProcessingAppliedResultType = "Skipped" +var ( + // A set for all apply related result types. 
+ manifestProcessingApplyResTypSet = set.New( + ApplyOrReportDiffResTypeDecodingErred, + ApplyOrReportDiffResTypeFoundGenerateName, + ApplyOrReportDiffResTypeDuplicated, + ApplyOrReportDiffResTypeFailedToFindObjInMemberCluster, + ApplyOrReportDiffResTypeFailedToTakeOver, + ApplyOrReportDiffResTypeNotTakenOver, + ApplyOrReportDiffResTypeFailedToRunDriftDetection, + ApplyOrReportDiffResTypeFoundDrifts, + ApplyOrReportDiffResTypeFailedToApply, + ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection, + ApplyOrReportDiffResTypeApplied, + ) ) type ManifestProcessingAvailabilityResultType string const ( // The result type for availability check being skipped. - ManifestProcessingAvailabilityResultTypeSkipped ManifestProcessingAvailabilityResultType = "Skipped" + AvailabilityResultTypeSkipped ManifestProcessingAvailabilityResultType = "Skipped" // The result type for availability check failures. - ManifestProcessingAvailabilityResultTypeFailed ManifestProcessingAvailabilityResultType = "Failed" - - // The description for availability check failures. - ManifestProcessingAvailabilityResultTypeFailedDescription = "Failed to track the availability of the applied manifest (error = %s)" + AvailabilityResultTypeFailed ManifestProcessingAvailabilityResultType = "Failed" // The result types for completed availability checks. - ManifestProcessingAvailabilityResultTypeAvailable ManifestProcessingAvailabilityResultType = "Available" + AvailabilityResultTypeAvailable ManifestProcessingAvailabilityResultType = "Available" // Note that the reason string below uses the same value as kept in the old work applier. - ManifestProcessingAvailabilityResultTypeNotYetAvailable ManifestProcessingAvailabilityResultType = "ManifestNotAvailableYet" - - ManifestProcessingAvailabilityResultTypeNotTrackable ManifestProcessingAvailabilityResultType = "NotTrackable" - - // The descriptions for completed availability checks. - ManifestProcessingAvailabilityResultTypeAvailableDescription = "Manifest is available" - ManifestProcessingAvailabilityResultTypeNotYetAvailableDescription = "Manifest is not yet available; Fleet will check again later" - ManifestProcessingAvailabilityResultTypeNotTrackableDescription = "Manifest's availability is not trackable; Fleet assumes that the applied manifest is available" + AvailabilityResultTypeNotYetAvailable ManifestProcessingAvailabilityResultType = "ManifestNotAvailableYet" + AvailabilityResultTypeNotTrackable ManifestProcessingAvailabilityResultType = "NotTrackable" ) -type ManifestProcessingReportDiffResultType string - const ( - // The result type for the cases where ReportDiff mode is not enabled. - ManifestProcessingReportDiffResultTypeNotEnabled ManifestProcessingReportDiffResultType = "NotEnabled" - - // The result type for diff reporting failures. - ManifestProcessingReportDiffResultTypeFailed ManifestProcessingReportDiffResultType = "Failed" - - ManifestProcessingReportDiffResultTypeFailedDescription = "Failed to report the diff between the hub cluster and the member cluster (error = %s)" - - // The result type for completed diff reportings. - ManifestProcessingReportDiffResultTypeFoundDiff ManifestProcessingReportDiffResultType = "FoundDiff" - ManifestProcessingReportDiffResultTypeNoDiffFound ManifestProcessingReportDiffResultType = "NoDiffFound" + // The description for availability check failures. 
+ AvailabilityResultTypeFailedDescription = "Failed to track the availability of the applied manifest (error = %s)" - ManifestProcessingReportDiffResultTypeNoDiffFoundDescription = "No diff has been found between the hub cluster and the member cluster" - ManifestProcessingReportDiffResultTypeFoundDiffDescription = "Diff has been found between the hub cluster and the member cluster" + // The descriptions for completed availability checks. + AvailabilityResultTypeAvailableDescription = "Manifest is available" + AvailabilityResultTypeNotYetAvailableDescription = "Manifest is not yet available; Fleet will check again later" + AvailabilityResultTypeNotTrackableDescription = "Manifest's availability is not trackable; Fleet assumes that the applied manifest is available" ) type manifestProcessingBundle struct { - manifest *fleetv1beta1.Manifest - id *fleetv1beta1.WorkResourceIdentifier - manifestObj *unstructured.Unstructured - inMemberClusterObj *unstructured.Unstructured - gvr *schema.GroupVersionResource - applyResTyp manifestProcessingAppliedResultType - availabilityResTyp ManifestProcessingAvailabilityResultType - reportDiffResTyp ManifestProcessingReportDiffResultType - applyErr error - availabilityErr error - reportDiffErr error - drifts []fleetv1beta1.PatchDetail - diffs []fleetv1beta1.PatchDetail + manifest *fleetv1beta1.Manifest + id *fleetv1beta1.WorkResourceIdentifier + manifestObj *unstructured.Unstructured + inMemberClusterObj *unstructured.Unstructured + gvr *schema.GroupVersionResource + applyOrReportDiffResTyp ManifestProcessingApplyOrReportDiffResultType + availabilityResTyp ManifestProcessingAvailabilityResultType + applyOrReportDiffErr error + availabilityErr error + drifts []fleetv1beta1.PatchDetail + diffs []fleetv1beta1.PatchDetail } // Reconcile implement the control loop logic for Work object. 
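The controller.go hunks above merge the former apply and report-diff result enums into a single ManifestProcessingApplyOrReportDiffResultType and keep the apply-only subset in manifestProcessingApplyResTypSet (built with k8s.io/utils/set). A minimal sketch of how such a combined type can still be partitioned, using a local stand-in type and only a few of the constants from the patch:

```go
package sketch

// applyOrReportDiffResult is a local stand-in for the consolidated
// ManifestProcessingApplyOrReportDiffResultType.
type applyOrReportDiffResult string

const (
	resApplied            applyOrReportDiffResult = "Applied"
	resAppliedWithFDD     applyOrReportDiffResult = "AppliedWithFailedDriftDetection"
	resFailedToApply      applyOrReportDiffResult = "ManifestApplyFailed"
	resFailedToReportDiff applyOrReportDiffResult = "FailedToReportDiff"
	resFoundDiff          applyOrReportDiffResult = "FoundDiff"
	resNoDiffFound        applyOrReportDiffResult = "NoDiffFound"
)

// isApplyResult reports whether a result type originates from the apply path rather
// than the ReportDiff path; the patch keeps the full membership in
// manifestProcessingApplyResTypSet.
func isApplyResult(t applyOrReportDiffResult) bool {
	switch t {
	case resApplied, resAppliedWithFDD, resFailedToApply:
		return true
	default:
		return false
	}
}
```

Carrying a single applyOrReportDiffResTyp (plus applyOrReportDiffErr) in manifestProcessingBundle replaces the two mutually exclusive applyResTyp/reportDiffResTyp fields, which is also why the processing-result string template in backoff.go dropped from three verbs to two.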
diff --git a/pkg/controllers/workapplier/controller_integration_migrated_test.go b/pkg/controllers/workapplier/controller_integration_migrated_test.go index 5fd14a9f7..33f83bfa9 100644 --- a/pkg/controllers/workapplier/controller_integration_migrated_test.go +++ b/pkg/controllers/workapplier/controller_integration_migrated_test.go @@ -83,12 +83,12 @@ var _ = Describe("Work Controller", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), }, } Expect(controller.CompareConditions(expected, resultWork.Status.ManifestConditions[0].Conditions)).Should(BeEmpty()) diff --git a/pkg/controllers/workapplier/controller_integration_test.go b/pkg/controllers/workapplier/controller_integration_test.go index 86d1a5839..2bc4ac9b2 100644 --- a/pkg/controllers/workapplier/controller_integration_test.go +++ b/pkg/controllers/workapplier/controller_integration_test.go @@ -714,13 +714,13 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -739,13 +739,13 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -896,13 +896,13 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -921,13 +921,13 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -1014,13 +1014,13 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: 
fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -1193,13 +1193,13 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -1217,7 +1217,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundGenerateName), + Reason: string(ApplyOrReportDiffResTypeFoundGenerateName), ObservedGeneration: 0, }, }, @@ -1352,13 +1352,13 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -1376,7 +1376,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeDecodingErred), + Reason: string(ApplyOrReportDiffResTypeDecodingErred), }, }, }, @@ -1393,12 +1393,12 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), }, }, }, @@ -1463,6 +1463,161 @@ var _ = Describe("applying manifests", func() { // deletion; consequently this test suite would not attempt so verify its deletion. }) }) + + Context("apply op failure (decoding error)", Ordered, func() { + workName := fmt.Sprintf(workNameTemplate, utils.RandStr()) + // The environment prepared by the envtest package does not support namespace + // deletion; each test case would use a new namespace. + nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr()) + + var regularNS *corev1.Namespace + var malformedConfigMap *corev1.ConfigMap + + BeforeAll(func() { + // Prepare a NS object. + regularNS = ns.DeepCopy() + regularNS.Name = nsName + regularNSJSON := marshalK8sObjJSON(regularNS) + + malformedConfigMap = configMap.DeepCopy() + malformedConfigMap.Namespace = nsName + // This will trigger a decoding error on the work applier side as this API is not registered. + malformedConfigMap.TypeMeta = metav1.TypeMeta{ + APIVersion: "malformed/v10", + Kind: "Unknown", + } + malformedConfigMapJSON := marshalK8sObjJSON(malformedConfigMap) + + // Create a new Work object with all the manifest JSONs and proper apply strategy. 
+ createWorkObject(workName, nil, regularNSJSON, malformedConfigMapJSON) + }) + + It("should add cleanup finalizer to the Work object", func() { + finalizerAddedActual := workFinalizerAddedActual(workName) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add cleanup finalizer to the Work object") + }) + + It("should prepare an AppliedWork object", func() { + appliedWorkCreatedActual := appliedWorkCreatedActual(workName) + Eventually(appliedWorkCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to prepare an AppliedWork object") + + appliedWorkOwnerRef = prepareAppliedWorkOwnerRef(workName) + }) + + It("should not apply malformed manifest", func() { + Consistently(func() error { + configMap := &corev1.ConfigMap{} + objKey := client.ObjectKey{Namespace: nsName, Name: malformedConfigMap.Name} + if err := memberClient.Get(ctx, objKey, configMap); !errors.IsNotFound(err) { + return fmt.Errorf("the config map exists, or an unexpected error has occurred: %w", err) + } + return nil + }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Manifests are applied unexpectedly") + }) + + It("should apply the other manifests", func() { + // Ensure that the NS object has been applied as expected. + regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) + Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") + + Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + }) + + It("should update the Work object status", func() { + // Prepare the status information. + workConds := []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: condition.WorkNotAllManifestsAppliedReason, + }, + } + manifestConds := []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 0, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(AvailabilityResultTypeAvailable), + ObservedGeneration: 0, + }, + }, + }, + { + // Note that this specific decoding error will not block the work applier from extracting + // the GVR, hence the populated API group, version and kind information. + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "malformed", + Version: "v10", + Kind: "Unknown", + Resource: "", + Name: malformedConfigMap.Name, + Namespace: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeDecodingErred), + }, + }, + }, + } + + workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") + }) + + It("should update the AppliedWork object status", func() { + // Prepare the status information. 
+ appliedResourceMeta := []fleetv1beta1.AppliedResourceMeta{ + { + WorkResourceIdentifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + UID: regularNS.UID, + }, + } + + appliedWorkStatusUpdatedActual := appliedWorkStatusUpdated(workName, appliedResourceMeta) + Eventually(appliedWorkStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update appliedWork status") + }) + + AfterAll(func() { + // Delete the Work object and related resources. + deleteWorkObject(workName) + + // Ensure that the AppliedWork object has been removed. + appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName) + Eventually(appliedWorkRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the AppliedWork object") + + workRemovedActual := workRemovedActual(workName) + Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") + + // The environment prepared by the envtest package does not support namespace + // deletion; consequently this test suite would not attempt so verify its deletion. + }) + }) }) var _ = Describe("work applier garbage collection", func() { @@ -1556,13 +1711,13 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -1581,13 +1736,13 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -1827,13 +1982,13 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -1852,13 +2007,13 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -1876,13 +2031,13 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, 
Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -2146,13 +2301,13 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -2171,13 +2326,13 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -2195,13 +2350,13 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -2463,12 +2618,12 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), }, }, }, @@ -2486,13 +2641,13 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 2, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 2, }, }, @@ -2739,13 +2894,13 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -2764,7 +2919,7 @@ var _ = Describe("drift 
detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 1, }, }, @@ -2996,7 +3151,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 0, }, }, @@ -3031,7 +3186,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 1, }, }, @@ -3191,13 +3346,13 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -3216,13 +3371,13 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -3431,13 +3586,13 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -3456,7 +3611,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), ObservedGeneration: 2, }, }, @@ -3594,13 +3749,13 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -3717,7 +3872,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: 
string(ApplyOrReportDiffResTypeFoundDrifts), ObservedGeneration: 0, }, }, @@ -3834,13 +3989,13 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -3960,13 +4115,13 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -4091,13 +4246,13 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -4214,7 +4369,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), ObservedGeneration: 0, }, }, @@ -4321,13 +4476,13 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -4452,13 +4607,13 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -4541,7 +4696,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), ObservedGeneration: 0, }, }, @@ -4615,7 +4770,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: 
string(ApplyOrReportDiffResTypeFoundDrifts), ObservedGeneration: 0, }, }, @@ -4761,7 +4916,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeNotTakenOver), + Reason: string(ApplyOrReportDiffResTypeNotTakenOver), ObservedGeneration: 0, }, }, @@ -4780,13 +4935,13 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -4877,7 +5032,7 @@ var _ = Describe("report diff", func() { It("should not apply the manifests", func() { // Ensure that the NS object has not been applied. regularNSObjectNotAppliedActual := regularNSObjectNotAppliedActual(nsName) - Eventually(regularNSObjectNotAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to avoid applying the namespace object") + Consistently(regularNSObjectNotAppliedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to avoid applying the namespace object") }) It("should update the Work object status", func() { @@ -4903,7 +5058,7 @@ var _ = Describe("report diff", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 0, }, }, @@ -5114,7 +5269,7 @@ var _ = Describe("report diff", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 0, }, }, @@ -5133,7 +5288,7 @@ var _ = Describe("report diff", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 1, }, }, @@ -5214,7 +5369,7 @@ var _ = Describe("report diff", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 0, }, }, @@ -5233,7 +5388,7 @@ var _ = Describe("report diff", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 2, }, }, @@ -5321,7 +5476,7 @@ var _ = Describe("report diff", func() { It("should not apply any manifest", func() { // Verify that the NS manifest has not been applied. - Eventually(func() error { + Consistently(func() error { // Retrieve the NS object. 
updatedNS := &corev1.Namespace{} if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { @@ -5342,10 +5497,10 @@ var _ = Describe("report diff", func() { } return nil - }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to leave the NS object alone") + }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to leave the NS object alone") // Verify that the Deployment manifest has not been applied. - Eventually(func() error { + Consistently(func() error { // Retrieve the Deployment object. updatedDeploy := &appsv1.Deployment{} if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, updatedDeploy); err != nil { @@ -5393,7 +5548,7 @@ var _ = Describe("report diff", func() { return fmt.Errorf("deployment diff (-got +want):\n%s", diff) } return nil - }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to leave the Deployment object alone") + }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to leave the Deployment object alone") }) It("should update the Work object status", func() { @@ -5419,7 +5574,7 @@ var _ = Describe("report diff", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 0, }, }, @@ -5438,7 +5593,7 @@ var _ = Describe("report diff", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 1, }, }, @@ -5486,6 +5641,146 @@ var _ = Describe("report diff", func() { // deletion; consequently this test suite would not attempt so verify its deletion. }) }) + + Context("report diff failure (decoding error)", Ordered, func() { + workName := fmt.Sprintf(workNameTemplate, utils.RandStr()) + // The environment prepared by the envtest package does not support namespace + // deletion; each test case would use a new namespace. + nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr()) + + var regularNS *corev1.Namespace + var malformedConfigMap *corev1.ConfigMap + + BeforeAll(func() { + // Prepare a NS object. + regularNS = ns.DeepCopy() + regularNS.Name = nsName + regularNSJSON := marshalK8sObjJSON(regularNS) + + malformedConfigMap = configMap.DeepCopy() + malformedConfigMap.Namespace = nsName + // This will trigger a decoding error on the work applier side as this API is not registered. + malformedConfigMap.TypeMeta = metav1.TypeMeta{ + APIVersion: "malformed/v10", + Kind: "Unknown", + } + malformedConfigMapJSON := marshalK8sObjJSON(malformedConfigMap) + + // Create a new Work object with all the manifest JSONs and proper apply strategy. 
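Both decoding-error contexts, and the Eventually-to-Consistently switches in this file, rely on the same "never applied" assertion shape: keep requiring a NotFound error for the entire polling window, so a late apply still fails the test. A rough fragment of that pattern, reusing the suite's own fixtures (memberClient, ctx, the polling durations) and the k8s.io/apimachinery/pkg/api/errors package imported as errors:

    // notApplied returns a polling function that succeeds only while the object
    // stays absent from the member cluster.
    notApplied := func(key client.ObjectKey, obj client.Object) func() error {
    	return func() error {
    		if err := memberClient.Get(ctx, key, obj); !errors.IsNotFound(err) {
    			return fmt.Errorf("object %v exists, or an unexpected error has occurred: %w", key, err)
    		}
    		return nil
    	}
    }
    Consistently(notApplied(client.ObjectKey{Namespace: nsName, Name: malformedConfigMap.Name}, &corev1.ConfigMap{}),
    	consistentlyDuration, consistentlyInterval).Should(Succeed(), "the config map has been applied unexpectedly")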
+ applyStrategy := &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + } + createWorkObject(workName, applyStrategy, regularNSJSON, malformedConfigMapJSON) + }) + + It("should add cleanup finalizer to the Work object", func() { + finalizerAddedActual := workFinalizerAddedActual(workName) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add cleanup finalizer to the Work object") + }) + + It("should prepare an AppliedWork object", func() { + appliedWorkCreatedActual := appliedWorkCreatedActual(workName) + Eventually(appliedWorkCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to prepare an AppliedWork object") + + appliedWorkOwnerRef = prepareAppliedWorkOwnerRef(workName) + }) + + It("should not apply any manifest", func() { + Consistently(func() error { + configMap := &corev1.ConfigMap{} + objKey := client.ObjectKey{Namespace: nsName, Name: malformedConfigMap.Name} + if err := memberClient.Get(ctx, objKey, configMap); !errors.IsNotFound(err) { + return fmt.Errorf("the config map exists, or an unexpected error has occurred: %w", err) + } + return nil + }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "The config map has been applied unexpectedly") + + Consistently(regularNSObjectNotAppliedActual(nsName), consistentlyDuration, consistentlyInterval).Should(Succeed(), "The namespace object has been applied unexpectedly") + }) + + It("should update the Work object status", func() { + // Prepare the status information. + workConds := []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeDiffReported, + Status: metav1.ConditionFalse, + Reason: condition.WorkNotAllManifestsDiffReportedReason, + }, + } + manifestConds := []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeDiffReported, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeFoundDiff), + ObservedGeneration: 0, + }, + }, + DiffDetails: &fleetv1beta1.DiffDetails{ + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/", + ValueInHub: "(the whole object)", + }, + }, + }, + }, + { + // Note that this specific decoding error will not block the work applier from extracting + // the GVR, hence the populated API group, version and kind information. + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "malformed", + Version: "v10", + Kind: "Unknown", + Resource: "", + Name: malformedConfigMap.Name, + Namespace: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeDiffReported, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeFailedToReportDiff), + }, + }, + }, + } + + workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") + }) + + It("should update the AppliedWork object status", func() { + // Prepare the status information. + appliedWorkStatusUpdatedActual := appliedWorkStatusUpdated(workName, nil) + Eventually(appliedWorkStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update appliedWork status") + }) + + AfterAll(func() { + // Delete the Work object and related resources. 
+ deleteWorkObject(workName) + + // Ensure that the AppliedWork object has been removed. + appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName) + Eventually(appliedWorkRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the AppliedWork object") + + workRemovedActual := workRemovedActual(workName) + Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") + + // The environment prepared by the envtest package does not support namespace + // deletion; consequently this test suite would not attempt so verify its deletion. + }) + }) }) var _ = Describe("handling different apply strategies", func() { @@ -5659,7 +5954,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 0, }, }, @@ -5678,7 +5973,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 1, }, }, @@ -5755,13 +6050,13 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -5780,13 +6075,13 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 2, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(AvailabilityResultTypeNotYetAvailable), ObservedGeneration: 2, }, }, @@ -5969,13 +6264,13 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -5994,13 +6289,13 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(AvailabilityResultTypeNotYetAvailable), ObservedGeneration: 1, }, }, @@ -6052,7 +6347,7 @@ var _ = 
Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 0, }, }, @@ -6071,7 +6366,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 1, }, }, @@ -6270,7 +6565,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeNotTakenOver), + Reason: string(ApplyOrReportDiffResTypeNotTakenOver), ObservedGeneration: 0, }, }, @@ -6289,7 +6584,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeNotTakenOver), + Reason: string(ApplyOrReportDiffResTypeNotTakenOver), ObservedGeneration: 1, }, }, @@ -6409,13 +6704,13 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -6434,7 +6729,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 1, }, }, @@ -6649,13 +6944,13 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -6674,13 +6969,13 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, diff --git a/pkg/controllers/workapplier/metrics_test.go b/pkg/controllers/workapplier/metrics_test.go index d9baf2f38..4ffcd14c4 100644 --- a/pkg/controllers/workapplier/metrics_test.go +++ b/pkg/controllers/workapplier/metrics_test.go @@ -71,12 +71,12 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { Conditions: []metav1.Condition{ { Type: 
placementv1beta1.WorkConditionTypeApplied, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), Status: metav1.ConditionTrue, }, { Type: placementv1beta1.WorkConditionTypeAvailable, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), Status: metav1.ConditionTrue, }, }, @@ -113,7 +113,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToApply), + Reason: string(ApplyOrReportDiffResTypeFailedToApply), }, }, }, @@ -155,12 +155,12 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { Conditions: []metav1.Condition{ { Type: placementv1beta1.WorkConditionTypeApplied, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), Status: metav1.ConditionTrue, }, { Type: placementv1beta1.WorkConditionTypeAvailable, - Reason: string(ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(AvailabilityResultTypeNotYetAvailable), Status: metav1.ConditionFalse, }, }, @@ -201,7 +201,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), }, }, }, @@ -243,7 +243,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingReportDiffResultTypeFailed), + Reason: string(ApplyOrReportDiffResTypeFailedToReportDiff), }, }, }, @@ -263,12 +263,12 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { fleet_manifest_processing_requests_total{apply_status="Applied",availability_status="Available",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Applied",availability_status="ManifestNotAvailableYet",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="ManifestApplyFailed",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 - fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Failed",drift_detection_status="NotFound"} 1 + fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="FailedToReportDiff",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="NoDiffFound",drift_detection_status="NotFound"} 1 `, }, { - name: "applied failed, found drifts, multiple manifests", + name: "apply op failed, found drifts, multiple manifests", work: &placementv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, @@ -287,7 +287,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeApplied, Status: 
metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), }, }, DriftDetails: &placementv1beta1.DriftDetails{}, @@ -297,12 +297,12 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), }, }, }, @@ -323,7 +323,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { fleet_manifest_processing_requests_total{apply_status="Applied",availability_status="ManifestNotAvailableYet",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="FoundDrifts",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="Found"} 1 fleet_manifest_processing_requests_total{apply_status="ManifestApplyFailed",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 - fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Failed",drift_detection_status="NotFound"} 1 + fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="FailedToReportDiff",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="NoDiffFound",drift_detection_status="NotFound"} 1 `, }, @@ -347,7 +347,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), }, }, DiffDetails: &placementv1beta1.DiffDetails{}, @@ -357,7 +357,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), }, }, }, @@ -379,7 +379,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { fleet_manifest_processing_requests_total{apply_status="FoundDrifts",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="Found"} 1 fleet_manifest_processing_requests_total{apply_status="ManifestApplyFailed",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="Found",diff_reporting_status="FoundDiff",drift_detection_status="NotFound"} 1 - 
fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Failed",drift_detection_status="NotFound"} 1 + fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="FailedToReportDiff",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="NoDiffFound",drift_detection_status="NotFound"} 2 `, }, @@ -441,7 +441,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { fleet_manifest_processing_requests_total{apply_status="FoundDrifts",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="Found"} 1 fleet_manifest_processing_requests_total{apply_status="ManifestApplyFailed",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="Found",diff_reporting_status="FoundDiff",drift_detection_status="NotFound"} 1 - fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Failed",drift_detection_status="NotFound"} 1 + fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="FailedToReportDiff",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="NoDiffFound",drift_detection_status="NotFound"} 2 fleet_manifest_processing_requests_total{apply_status="Unknown",availability_status="Unknown",diff_detection_status="NotFound",diff_reporting_status="Unknown",drift_detection_status="NotFound"} 1 `, @@ -477,17 +477,17 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection), + Reason: string(ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection), }, { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeFailed), + Reason: string(AvailabilityResultTypeFailed), }, { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), }, }, }, @@ -510,7 +510,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { fleet_manifest_processing_requests_total{apply_status="FoundDrifts",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="Found"} 1 fleet_manifest_processing_requests_total{apply_status="ManifestApplyFailed",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="Found",diff_reporting_status="FoundDiff",drift_detection_status="NotFound"} 1 - 
fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Failed",drift_detection_status="NotFound"} 1 + fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="FailedToReportDiff",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="NoDiffFound",drift_detection_status="NotFound"} 2 fleet_manifest_processing_requests_total{apply_status="Unknown",availability_status="Unknown",diff_detection_status="NotFound",diff_reporting_status="Unknown",drift_detection_status="NotFound"} 1 `, diff --git a/pkg/controllers/workapplier/preprocess.go b/pkg/controllers/workapplier/preprocess.go index 4c489319e..8724e255e 100644 --- a/pkg/controllers/workapplier/preprocess.go +++ b/pkg/controllers/workapplier/preprocess.go @@ -61,8 +61,8 @@ func (r *Reconciler) preProcessManifests( bundle.id = buildWorkResourceIdentifier(pieces, gvr, manifestObj) if err != nil { klog.ErrorS(err, "Failed to decode the manifest", "ordinal", pieces, "work", klog.KObj(work)) - bundle.applyErr = fmt.Errorf("failed to decode manifest: %w", err) - bundle.applyResTyp = ManifestProcessingApplyResultTypeDecodingErred + bundle.applyOrReportDiffErr = fmt.Errorf("failed to decode manifest: %w", err) + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeDecodingErred return } @@ -70,8 +70,8 @@ func (r *Reconciler) preProcessManifests( if len(manifestObj.GetGenerateName()) > 0 && len(manifestObj.GetName()) == 0 { // The manifest object has a generate name but no name. klog.V(2).InfoS("Rejected an object with only generate name", "manifestObj", klog.KObj(manifestObj), "work", klog.KObj(work)) - bundle.applyErr = fmt.Errorf("objects with only generate name are not supported") - bundle.applyResTyp = ManifestProcessingApplyResultTypeFoundGenerateName + bundle.applyOrReportDiffErr = fmt.Errorf("objects with only generate name are not supported") + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFoundGenerateName return } @@ -146,7 +146,7 @@ func (r *Reconciler) writeAheadManifestProcessingAttempts( checked := make(map[string]bool, len(bundles)) for idx := range bundles { bundle := bundles[idx] - if bundle.applyErr != nil { + if bundle.applyOrReportDiffErr != nil { // Skip a manifest if it cannot be pre-processed, i.e., it can only be identified by // its ordinal. // @@ -154,7 +154,7 @@ func (r *Reconciler) writeAheadManifestProcessingAttempts( // reconciliation loop), it is just that they are not relevant in the write-ahead // process. 
klog.V(2).InfoS("Skipped a manifest in the write-ahead process as it has failed pre-processing", "work", workRef, - "ordinal", idx, "applyErr", bundle.applyErr, "applyResTyp", bundle.applyResTyp) + "ordinal", idx, "applyErr", bundle.applyOrReportDiffErr, "applyResTyp", bundle.applyOrReportDiffResTyp) continue } @@ -180,8 +180,8 @@ func (r *Reconciler) writeAheadManifestProcessingAttempts( if _, found := checked[wriStr]; found { klog.V(2).InfoS("A duplicate manifest has been found", "ordinal", idx, "work", workRef, "workResourceID", wriStr) - bundle.applyErr = fmt.Errorf("a duplicate manifest has been found") - bundle.applyResTyp = ManifestProcessingApplyResultTypeDuplicated + bundle.applyOrReportDiffErr = fmt.Errorf("a duplicate manifest has been found") + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeDuplicated continue } checked[wriStr] = true diff --git a/pkg/controllers/workapplier/preprocess_test.go b/pkg/controllers/workapplier/preprocess_test.go index a149644ce..acd942d83 100644 --- a/pkg/controllers/workapplier/preprocess_test.go +++ b/pkg/controllers/workapplier/preprocess_test.go @@ -448,14 +448,14 @@ func TestPrepareManifestCondForWA(t *testing.T) { { Identifier: *nsWRI(0, nsName), Conditions: []metav1.Condition{ - manifestAppliedCond(workGeneration, metav1.ConditionTrue, string(ManifestProcessingApplyResultTypeApplied), ManifestProcessingApplyResultTypeAppliedDescription), + manifestAppliedCond(workGeneration, metav1.ConditionTrue, string(ApplyOrReportDiffResTypeApplied), ApplyOrReportDiffResTypeAppliedDescription), }, }, }, wantManifestCondForWA: &fleetv1beta1.ManifestCondition{ Identifier: *nsWRI(0, nsName), Conditions: []metav1.Condition{ - manifestAppliedCond(workGeneration, metav1.ConditionTrue, string(ManifestProcessingApplyResultTypeApplied), ManifestProcessingApplyResultTypeAppliedDescription), + manifestAppliedCond(workGeneration, metav1.ConditionTrue, string(ApplyOrReportDiffResTypeApplied), ApplyOrReportDiffResTypeAppliedDescription), }, }, }, @@ -515,7 +515,7 @@ func TestFindLeftOverManifests(t *testing.T) { { Identifier: *nsWRI(1, nsName1), Conditions: []metav1.Condition{ - manifestAppliedCond(workGeneration0, metav1.ConditionTrue, string(ManifestProcessingApplyResultTypeApplied), ManifestProcessingApplyResultTypeAppliedDescription), + manifestAppliedCond(workGeneration0, metav1.ConditionTrue, string(ApplyOrReportDiffResTypeApplied), ApplyOrReportDiffResTypeAppliedDescription), }, }, }, @@ -530,21 +530,21 @@ func TestFindLeftOverManifests(t *testing.T) { { Identifier: *nsWRI(1, nsName1), Conditions: []metav1.Condition{ - manifestAppliedCond(workGeneration0, metav1.ConditionTrue, string(ManifestProcessingApplyResultTypeApplied), ManifestProcessingApplyResultTypeAppliedDescription), + manifestAppliedCond(workGeneration0, metav1.ConditionTrue, string(ApplyOrReportDiffResTypeApplied), ApplyOrReportDiffResTypeAppliedDescription), }, }, // Manifest condition that corresponds to a previously applied and now gone manifest. { Identifier: *nsWRI(2, nsName2), Conditions: []metav1.Condition{ - manifestAppliedCond(workGeneration0, metav1.ConditionTrue, string(ManifestProcessingApplyResultTypeApplied), ManifestProcessingApplyResultTypeAppliedDescription), + manifestAppliedCond(workGeneration0, metav1.ConditionTrue, string(ApplyOrReportDiffResTypeApplied), ApplyOrReportDiffResTypeAppliedDescription), }, }, // Manifest condition that corresponds to a gone manifest that failed to be applied. 
{ Identifier: *nsWRI(3, nsName3), Conditions: []metav1.Condition{ - manifestAppliedCond(workGeneration0, metav1.ConditionFalse, string(ManifestProcessingApplyResultTypeFailedToApply), ""), + manifestAppliedCond(workGeneration0, metav1.ConditionFalse, string(ApplyOrReportDiffResTypeFailedToApply), ""), }, }, // Manifest condition that corresponds to a gone manifest that has been marked as to be applied (preparing to be processed). diff --git a/pkg/controllers/workapplier/process.go b/pkg/controllers/workapplier/process.go index 7c6e5369d..815c83240 100644 --- a/pkg/controllers/workapplier/process.go +++ b/pkg/controllers/workapplier/process.go @@ -40,7 +40,7 @@ func (r *Reconciler) processManifests( // TODO: We have to apply the namespace/crd/secret/configmap/pvc first // then we can process some of the manifests in parallel. for _, bundle := range bundles { - if bundle.applyErr != nil { + if bundle.applyOrReportDiffErr != nil { // Skip a manifest if it has failed pre-processing. continue } @@ -95,8 +95,8 @@ func (r *Reconciler) processOneManifest( if !canApplyWithOwnership(bundle.inMemberClusterObj, expectedAppliedWorkOwnerRef) { klog.V(2).InfoS("Ownership is not established yet; skip the apply op", "manifestObj", manifestObjRef, "GVR", *bundle.gvr, "work", workRef) - bundle.applyErr = fmt.Errorf("no ownership of the object in the member cluster; takeover is needed") - bundle.applyResTyp = ManifestProcessingApplyResultTypeNotTakenOver + bundle.applyOrReportDiffErr = fmt.Errorf("no ownership of the object in the member cluster; takeover is needed") + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeNotTakenOver return } @@ -109,8 +109,8 @@ func (r *Reconciler) processOneManifest( // Perform the apply op. appliedObj, err := r.apply(ctx, bundle.gvr, bundle.manifestObj, bundle.inMemberClusterObj, work.Spec.ApplyStrategy, expectedAppliedWorkOwnerRef) if err != nil { - bundle.applyErr = fmt.Errorf("failed to apply the manifest: %w", err) - bundle.applyResTyp = ManifestProcessingApplyResultTypeFailedToApply + bundle.applyOrReportDiffErr = fmt.Errorf("failed to apply the manifest: %w", err) + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFailedToApply klog.ErrorS(err, "Failed to apply the manifest", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), "inMemberClusterObj", klog.KObj(bundle.inMemberClusterObj), "expectedAppliedWorkOwnerRef", *expectedAppliedWorkOwnerRef) @@ -138,7 +138,7 @@ func (r *Reconciler) processOneManifest( } // All done. - bundle.applyResTyp = ManifestProcessingApplyResultTypeApplied + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeApplied klog.V(2).InfoS("Manifest processing completed", "manifestObj", manifestObjRef, "GVR", *bundle.gvr, "work", workRef) } @@ -172,8 +172,8 @@ func (r *Reconciler) findInMemberClusterObjectFor( default: // An unexpected error has occurred. 
wrappedErr := controller.NewAPIServerError(true, err) - bundle.applyErr = fmt.Errorf("failed to find the corresponding object for the manifest object in the member cluster: %w", wrappedErr) - bundle.applyResTyp = ManifestProcessingApplyResultTypeFailedToFindObjInMemberCluster + bundle.applyOrReportDiffErr = fmt.Errorf("failed to find the corresponding object for the manifest object in the member cluster: %w", wrappedErr) + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFailedToFindObjInMemberCluster klog.ErrorS(wrappedErr, "Failed to find the corresponding object for the manifest object in the member cluster", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), @@ -204,8 +204,8 @@ func (r *Reconciler) takeOverInMemberClusterObjectIfApplicable( switch { case err != nil: // An unexpected error has occurred. - bundle.applyErr = fmt.Errorf("failed to take over a pre-existing object: %w", err) - bundle.applyResTyp = ManifestProcessingApplyResultTypeFailedToTakeOver + bundle.applyOrReportDiffErr = fmt.Errorf("failed to take over a pre-existing object: %w", err) + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFailedToTakeOver klog.ErrorS(err, "Failed to take over a pre-existing object", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), "inMemberClusterObj", klog.KObj(bundle.inMemberClusterObj), "expectedAppliedWorkOwnerRef", *expectedAppliedWorkOwnerRef) @@ -214,8 +214,8 @@ func (r *Reconciler) takeOverInMemberClusterObjectIfApplicable( // Takeover cannot be performed as configuration differences are found between the manifest // object and the object in the member cluster. bundle.diffs = configDiffs - bundle.applyErr = fmt.Errorf("cannot take over object: configuration differences are found between the manifest object and the corresponding object in the member cluster") - bundle.applyResTyp = ManifestProcessingApplyResultTypeFailedToTakeOver + bundle.applyOrReportDiffErr = fmt.Errorf("cannot take over object: configuration differences are found between the manifest object and the corresponding object in the member cluster") + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFailedToTakeOver klog.V(2).InfoS("Cannot take over object as configuration differences are found between the manifest object and the corresponding object in the member cluster", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), "expectedAppliedWorkOwnerRef", *expectedAppliedWorkOwnerRef) @@ -273,20 +273,16 @@ func (r *Reconciler) reportDiffOnlyIfApplicable( expectedAppliedWorkOwnerRef *metav1.OwnerReference, ) (shouldSkipProcessing bool) { if work.Spec.ApplyStrategy.Type != fleetv1beta1.ApplyStrategyTypeReportDiff { - // ReportDiff mode is not enabled; proceed with the processing. - bundle.reportDiffResTyp = ManifestProcessingReportDiffResultTypeNotEnabled klog.V(2).InfoS("ReportDiff mode is not enabled; skip the step") return false } - bundle.applyResTyp = ManifestProcessingApplyResultTypeNoApplyPerformed - if bundle.inMemberClusterObj == nil { // The object has not created in the member cluster yet. // // In this case, the diff found would be the full object; for simplicity reasons, // Fleet will use a placeholder here rather than including the full JSON representation. - bundle.reportDiffResTyp = ManifestProcessingReportDiffResultTypeFoundDiff + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFoundDiff bundle.diffs = []fleetv1beta1.PatchDetail{ { // The root path. 
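The hunk above cuts off right at the placeholder literal, but the test expectations added earlier in this diff show what it holds: when ReportDiff runs against an object that does not exist in the member cluster yet, a single stand-in entry is recorded instead of the full object JSON. A sketch of that literal, with values taken from those expectations:

    bundle.diffs = []fleetv1beta1.PatchDetail{
    	{
    		// The root path.
    		Path:       "/",
    		ValueInHub: "(the whole object)", // placeholder for the full object
    	},
    }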
@@ -311,8 +307,8 @@ func (r *Reconciler) reportDiffOnlyIfApplicable( switch { case err != nil: // Failed to calculate the configuration diffs. - bundle.reportDiffErr = fmt.Errorf("failed to calculate configuration diffs between the manifest object and the object from the member cluster: %w", err) - bundle.reportDiffResTyp = ManifestProcessingReportDiffResultTypeFailed + bundle.applyOrReportDiffErr = fmt.Errorf("failed to calculate configuration diffs between the manifest object and the object from the member cluster: %w", err) + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFailedToReportDiff klog.ErrorS(err, "Failed to calculate configuration diffs between the manifest object and the object from the member cluster", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), @@ -320,14 +316,14 @@ func (r *Reconciler) reportDiffOnlyIfApplicable( case len(configDiffs) > 0: // Configuration diffs are found. bundle.diffs = configDiffs - bundle.reportDiffResTyp = ManifestProcessingReportDiffResultTypeFoundDiff + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFoundDiff klog.V(2).InfoS("Diff report completed; configuration diffs are found", "diffCount", len(configDiffs), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), "work", klog.KObj(work)) default: // No configuration diffs are found. - bundle.reportDiffResTyp = ManifestProcessingReportDiffResultTypeNoDiffFound + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeNoDiffFound klog.V(2).InfoS("Diff report completed; no configuration diffs are found", "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), "work", klog.KObj(work)) @@ -380,8 +376,8 @@ func (r *Reconciler) performPreApplyDriftDetectionIfApplicable( // For completion purposes, Fleet will still attempt to catch this and // report this as an unexpected error. _ = controller.NewUnexpectedBehaviorError(fmt.Errorf("failed to determine if pre-apply drift detection is needed: %w", err)) - bundle.applyErr = fmt.Errorf("failed to determine if pre-apply drift detection is needed: %w", err) - bundle.applyResTyp = ManifestProcessingApplyResultTypeFailedToRunDriftDetection + bundle.applyOrReportDiffErr = fmt.Errorf("failed to determine if pre-apply drift detection is needed: %w", err) + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFailedToRunDriftDetection return true case !isPreApplyDriftDetectionNeeded: // Drift detection is not needed; proceed with the processing. @@ -396,8 +392,8 @@ func (r *Reconciler) performPreApplyDriftDetectionIfApplicable( switch { case err != nil: // An unexpected error has occurred. - bundle.applyErr = fmt.Errorf("failed to calculate pre-apply drifts between the manifest and the object from the member cluster: %w", err) - bundle.applyResTyp = ManifestProcessingApplyResultTypeFailedToRunDriftDetection + bundle.applyOrReportDiffErr = fmt.Errorf("failed to calculate pre-apply drifts between the manifest and the object from the member cluster: %w", err) + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFailedToRunDriftDetection klog.ErrorS(err, "Failed to calculate pre-apply drifts between the manifest and the object from the member cluster", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), @@ -406,8 +402,8 @@ func (r *Reconciler) performPreApplyDriftDetectionIfApplicable( case len(drifts) > 0: // Drifts are found in the pre-apply drift detection process. 
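For orientation: the hunks above consistently fold the former applyResTyp/applyErr and reportDiffResTyp/reportDiffErr pairs into a single applyOrReportDiffResTyp/applyOrReportDiffErr pair on the per-manifest bundle. A minimal sketch of the assumed bundle shape follows; field and type names are inferred from identifiers in this diff, the module import path is an assumption, and the real definition elsewhere in the workapplier package may carry more fields.

package workapplier

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"

	fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" // import path assumed for this sketch
)

// manifestProcessingBundle (sketch): the per-manifest bookkeeping referenced by the hunks above.
type manifestProcessingBundle struct {
	id                 *fleetv1beta1.WorkResourceIdentifier
	gvr                *schema.GroupVersionResource
	manifestObj        *unstructured.Unstructured
	inMemberClusterObj *unstructured.Unstructured

	// One result type and one error now cover both the apply path and the ReportDiff path
	// (previously applyResTyp/applyErr and reportDiffResTyp/reportDiffErr).
	applyOrReportDiffResTyp ManifestProcessingApplyOrReportDiffResultType
	applyOrReportDiffErr    error

	availabilityResTyp ManifestProcessingAvailabilityResultType
	availabilityErr    error

	drifts []fleetv1beta1.PatchDetail
	diffs  []fleetv1beta1.PatchDetail
}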
bundle.drifts = drifts - bundle.applyErr = fmt.Errorf("cannot apply manifest: drifts are found between the manifest and the object from the member cluster") - bundle.applyResTyp = ManifestProcessingApplyResultTypeFoundDrifts + bundle.applyOrReportDiffErr = fmt.Errorf("cannot apply manifest: drifts are found between the manifest and the object from the member cluster") + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFoundDrifts klog.V(2).InfoS("Cannot apply manifest: drifts are found between the manifest and the object from the member cluster", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), "inMemberClusterObj", klog.KObj(bundle.inMemberClusterObj), "expectedAppliedWorkOwnerRef", *expectedAppliedWorkOwnerRef) @@ -467,10 +463,10 @@ func (r *Reconciler) performPostApplyDriftDetectionIfApplicable( switch { case err != nil: // An unexpected error has occurred. - bundle.applyErr = fmt.Errorf("failed to calculate post-apply drifts between the manifest object and the object from the member cluster: %w", err) + bundle.applyOrReportDiffErr = fmt.Errorf("failed to calculate post-apply drifts between the manifest object and the object from the member cluster: %w", err) // This case counts as a partial error; the apply op has been completed, but Fleet // cannot determine if there are any drifts. - bundle.applyResTyp = ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection klog.ErrorS(err, "Failed to calculate post-apply drifts between the manifest object and the object from the member cluster", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), diff --git a/pkg/controllers/workapplier/status.go b/pkg/controllers/workapplier/status.go index 4422c6d34..aa1dd3d3a 100644 --- a/pkg/controllers/workapplier/status.go +++ b/pkg/controllers/workapplier/status.go @@ -86,6 +86,7 @@ func (r *Reconciler) refreshWorkStatus( } } + isReportDiffModeOn := work.Spec.ApplyStrategy != nil && work.Spec.ApplyStrategy.Type == fleetv1beta1.ApplyStrategyTypeReportDiff for idx := range bundles { bundle := bundles[idx] @@ -102,9 +103,9 @@ func (r *Reconciler) refreshWorkStatus( if bundle.inMemberClusterObj != nil { inMemberClusterObjGeneration = bundle.inMemberClusterObj.GetGeneration() } - setManifestAppliedCondition(manifestCond, bundle.applyResTyp, bundle.applyErr, inMemberClusterObjGeneration) + setManifestAppliedCondition(manifestCond, isReportDiffModeOn, bundle.applyOrReportDiffResTyp, bundle.applyOrReportDiffErr, inMemberClusterObjGeneration) setManifestAvailableCondition(manifestCond, bundle.availabilityResTyp, bundle.availabilityErr, inMemberClusterObjGeneration) - setManifestDiffReportedCondition(manifestCond, bundle.reportDiffResTyp, bundle.reportDiffErr, inMemberClusterObjGeneration) + setManifestDiffReportedCondition(manifestCond, isReportDiffModeOn, bundle.applyOrReportDiffResTyp, bundle.applyOrReportDiffErr, inMemberClusterObjGeneration) // Check if a first drifted timestamp has been set; if not, set it to the current time. firstDriftedTimestamp := &now @@ -151,16 +152,16 @@ func (r *Reconciler) refreshWorkStatus( } // Tally the stats. 
- if isManifestObjectApplied(bundle.applyResTyp) { + if isManifestObjectApplied(bundle.applyOrReportDiffResTyp) { appliedManifestsCount++ } if isAppliedObjectAvailable(bundle.availabilityResTyp) { availableAppliedObjectsCount++ } - if bundle.availabilityResTyp == ManifestProcessingAvailabilityResultTypeNotTrackable { + if bundle.availabilityResTyp == AvailabilityResultTypeNotTrackable { untrackableAppliedObjectsCount++ } - if isManifestObjectDiffReported(bundle.reportDiffResTyp) { + if isManifestObjectDiffReported(bundle.applyOrReportDiffResTyp) { diffReportedObjectsCount++ } } @@ -210,7 +211,7 @@ func (r *Reconciler) refreshAppliedWorkStatus( for idx := range bundles { bundle := bundles[idx] - if isManifestObjectApplied(bundle.applyResTyp) { + if isManifestObjectApplied(bundle.applyOrReportDiffResTyp) { appliedResources = append(appliedResources, fleetv1beta1.AppliedResourceMeta{ WorkResourceIdentifier: *bundle.id, UID: bundle.inMemberClusterObj.GetUID(), @@ -232,34 +233,38 @@ func (r *Reconciler) refreshAppliedWorkStatus( // isManifestObjectAvailable returns if an availability result type indicates that a manifest // object in a bundle is available. func isAppliedObjectAvailable(availabilityResTyp ManifestProcessingAvailabilityResultType) bool { - return availabilityResTyp == ManifestProcessingAvailabilityResultTypeAvailable || availabilityResTyp == ManifestProcessingAvailabilityResultTypeNotTrackable + return availabilityResTyp == AvailabilityResultTypeAvailable || availabilityResTyp == AvailabilityResultTypeNotTrackable } // isManifestObjectDiffReported returns if a diff report result type indicates that a manifest // object has been checked for configuration differences. -func isManifestObjectDiffReported(reportDiffResTyp ManifestProcessingReportDiffResultType) bool { - return reportDiffResTyp == ManifestProcessingReportDiffResultTypeFoundDiff || reportDiffResTyp == ManifestProcessingReportDiffResultTypeNoDiffFound +func isManifestObjectDiffReported(reportDiffResTyp ManifestProcessingApplyOrReportDiffResultType) bool { + return reportDiffResTyp == ApplyOrReportDiffResTypeFoundDiff || reportDiffResTyp == ApplyOrReportDiffResTypeNoDiffFound } // setManifestAppliedCondition sets the Applied condition on an applied manifest. func setManifestAppliedCondition( manifestCond *fleetv1beta1.ManifestCondition, - appliedResTyp manifestProcessingAppliedResultType, - applyError error, + isReportDiffModeOn bool, + applyOrReportDiffResTyp ManifestProcessingApplyOrReportDiffResultType, + applyOrReportDiffError error, inMemberClusterObjGeneration int64, ) { var appliedCond *metav1.Condition - switch appliedResTyp { - case ManifestProcessingApplyResultTypeApplied: + switch { + case isReportDiffModeOn: + // ReportDiff mode is on and no apply op has been performed. In this case, Fleet + // will reset the Applied condition. + case applyOrReportDiffResTyp == ApplyOrReportDiffResTypeApplied: // The manifest has been successfully applied. 
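The condition setters in status.go branch on this consolidated result type. A sketch of the assumed constant set, with names collected from identifiers used in these hunks; the string values are placeholders, and the authoritative declarations (including the matching *Description message constants and the manifestProcessingApplyResTypSet helper used in the sanity check) live elsewhere in the package.

// Sketch only: the constant names appear in this diff; the string values are assumed.
type ManifestProcessingApplyOrReportDiffResultType string

const (
	ApplyOrReportDiffResTypeApplied                         ManifestProcessingApplyOrReportDiffResultType = "Applied"
	ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection ManifestProcessingApplyOrReportDiffResultType = "AppliedWithFailedDriftDetection"
	ApplyOrReportDiffResTypeFailedToApply                   ManifestProcessingApplyOrReportDiffResultType = "FailedToApply"
	ApplyOrReportDiffResTypeFailedToTakeOver                ManifestProcessingApplyOrReportDiffResultType = "FailedToTakeOver"
	ApplyOrReportDiffResTypeNotTakenOver                    ManifestProcessingApplyOrReportDiffResultType = "NotTakenOver"
	ApplyOrReportDiffResTypeFailedToFindObjInMemberCluster  ManifestProcessingApplyOrReportDiffResultType = "FailedToFindObjInMemberCluster"
	ApplyOrReportDiffResTypeFailedToRunDriftDetection       ManifestProcessingApplyOrReportDiffResultType = "FailedToRunDriftDetection"
	ApplyOrReportDiffResTypeFoundDrifts                     ManifestProcessingApplyOrReportDiffResultType = "FoundDrifts"
	ApplyOrReportDiffResTypeDecodingErred                   ManifestProcessingApplyOrReportDiffResultType = "DecodingErred"
	ApplyOrReportDiffResTypeFoundDiff                       ManifestProcessingApplyOrReportDiffResultType = "FoundDiff"
	ApplyOrReportDiffResTypeNoDiffFound                     ManifestProcessingApplyOrReportDiffResultType = "NoDiffFound"
	ApplyOrReportDiffResTypeFailedToReportDiff              ManifestProcessingApplyOrReportDiffResultType = "FailedToReportDiff"
)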
appliedCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), - Message: ManifestProcessingApplyResultTypeAppliedDescription, + Reason: string(ApplyOrReportDiffResTypeApplied), + Message: ApplyOrReportDiffResTypeAppliedDescription, ObservedGeneration: inMemberClusterObjGeneration, } - case ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection: + case applyOrReportDiffResTyp == ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection: // The manifest has been successfully applied, but drift detection has failed. // // At this moment Fleet does not prepare a dedicated condition for drift detection @@ -267,20 +272,35 @@ func setManifestAppliedCondition( appliedCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection), - Message: ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetectionDescription, + Reason: string(ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection), + Message: ApplyOrReportDiffResTypeAppliedWithFailedDriftDetectionDescription, + ObservedGeneration: inMemberClusterObjGeneration, + } + case !manifestProcessingApplyResTypSet.Has(applyOrReportDiffResTyp): + // Do a sanity check: verify that the returned result type is a valid one. + // Normally this branch should never run. + wrappedErr := fmt.Errorf("found an unexpected apply result type %s", applyOrReportDiffResTyp) + klog.ErrorS(wrappedErr, "Failed to set Applied condition", + "workResourceID", manifestCond.Identifier, + "applyOrReportDiffResTyp", applyOrReportDiffResTyp, + "applyOrReportDiffError", applyOrReportDiffError) + _ = controller.NewUnexpectedBehaviorError(wrappedErr) + // The work applier will consider this to be an apply failure. + appliedCond = &metav1.Condition{ + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeFailedToApply), + Message: fmt.Sprintf("An unexpected apply result is yielded (%s, error: %s)", + applyOrReportDiffResTyp, applyOrReportDiffError), ObservedGeneration: inMemberClusterObjGeneration, } - case ManifestProcessingApplyResultTypeNoApplyPerformed: - // ReportDiff mode is on and no apply op has been performed. In this case, Fleet - // will reset the Applied condition. default: // The apply op fails.
appliedCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(appliedResTyp), - Message: fmt.Sprintf("Failed to apply the manifest (error: %s)", applyError), + Reason: string(applyOrReportDiffResTyp), + Message: fmt.Sprintf("Failed to apply the manifest (error: %s)", applyOrReportDiffError), ObservedGeneration: inMemberClusterObjGeneration, } } @@ -289,7 +309,7 @@ func setManifestAppliedCondition( meta.SetStatusCondition(&manifestCond.Conditions, *appliedCond) klog.V(2).InfoS("Applied condition set in ManifestCondition", "workResourceID", manifestCond.Identifier, - "applyResTyp", appliedResTyp, "applyError", applyError, + "applyOrReportDiffResTyp", applyOrReportDiffResTyp, "applyOrReportDiffError", applyOrReportDiffError, "inMemberClusterObjGeneration", inMemberClusterObjGeneration) } else { // As the conditions are ported back; removal must be performed if the Applied @@ -297,7 +317,7 @@ func setManifestAppliedCondition( meta.RemoveStatusCondition(&manifestCond.Conditions, fleetv1beta1.WorkConditionTypeApplied) klog.V(2).InfoS("Applied condition removed from ManifestCondition", "workResourceID", manifestCond.Identifier, - "applyResTyp", appliedResTyp, "applyError", applyError, + "applyOrReportDiffResTyp", applyOrReportDiffResTyp, "applyOrReportDiffError", applyOrReportDiffError, "inMemberClusterObjGeneration", inMemberClusterObjGeneration) } } @@ -311,35 +331,35 @@ func setManifestAvailableCondition( ) { var availableCond *metav1.Condition switch availabilityResTyp { - case ManifestProcessingAvailabilityResultTypeSkipped: + case AvailabilityResultTypeSkipped: // Availability check has been skipped for the manifest as it has not been applied yet. // // In this case, no availability condition is set. - case ManifestProcessingAvailabilityResultTypeFailed: + case AvailabilityResultTypeFailed: // Availability check has failed. availableCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeFailed), - Message: fmt.Sprintf(ManifestProcessingAvailabilityResultTypeFailedDescription, availabilityError), + Reason: string(AvailabilityResultTypeFailed), + Message: fmt.Sprintf(AvailabilityResultTypeFailedDescription, availabilityError), ObservedGeneration: inMemberClusterObjGeneration, } - case ManifestProcessingAvailabilityResultTypeNotYetAvailable: + case AvailabilityResultTypeNotYetAvailable: // The manifest is not yet available. availableCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeNotYetAvailable), - Message: ManifestProcessingAvailabilityResultTypeNotYetAvailableDescription, + Reason: string(AvailabilityResultTypeNotYetAvailable), + Message: AvailabilityResultTypeNotYetAvailableDescription, ObservedGeneration: inMemberClusterObjGeneration, } - case ManifestProcessingAvailabilityResultTypeNotTrackable: + case AvailabilityResultTypeNotTrackable: // Fleet cannot track the availability of the manifest. 
availableCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeNotTrackable), - Message: ManifestProcessingAvailabilityResultTypeNotTrackableDescription, + Reason: string(AvailabilityResultTypeNotTrackable), + Message: AvailabilityResultTypeNotTrackableDescription, ObservedGeneration: inMemberClusterObjGeneration, } default: @@ -347,8 +367,8 @@ func setManifestAvailableCondition( availableCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), - Message: ManifestProcessingAvailabilityResultTypeAvailableDescription, + Reason: string(AvailabilityResultTypeAvailable), + Message: AvailabilityResultTypeAvailableDescription, ObservedGeneration: inMemberClusterObjGeneration, } } @@ -373,43 +393,52 @@ func setManifestAvailableCondition( // setManifestDiffReportedCondition sets the DiffReported condition on a manifest. func setManifestDiffReportedCondition( manifestCond *fleetv1beta1.ManifestCondition, - reportDiffResTyp ManifestProcessingReportDiffResultType, - reportDiffError error, + isReportDiffModeOn bool, + applyOrReportDiffResTyp ManifestProcessingApplyOrReportDiffResultType, + applyOrReportDiffErr error, inMemberClusterObjGeneration int64, ) { var diffReportedCond *metav1.Condition - switch reportDiffResTyp { - case ManifestProcessingReportDiffResultTypeFailed: + switch { + case !isReportDiffModeOn: + // ReportDiff mode is not on; Fleet will remove DiffReported condition. + case applyOrReportDiffResTyp == ApplyOrReportDiffResTypeFailedToReportDiff: // Diff reporting has failed. diffReportedCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingReportDiffResultTypeFailed), - Message: fmt.Sprintf(ManifestProcessingReportDiffResultTypeFailedDescription, reportDiffError), + Reason: string(ApplyOrReportDiffResTypeFailedToReportDiff), + Message: fmt.Sprintf(ApplyOrReportDiffResTypeFailedToReportDiffDescription, applyOrReportDiffErr), ObservedGeneration: inMemberClusterObjGeneration, } - case ManifestProcessingReportDiffResultTypeNotEnabled: - // Diff reporting is not enabled. - // - // For simplicity reasons, the DiffReported condition will only appear when - // the ReportDiff mode is on; in other configurations, the condition will be - // removed. - case ManifestProcessingReportDiffResultTypeNoDiffFound: + case applyOrReportDiffResTyp == ApplyOrReportDiffResTypeNoDiffFound: // No diff has been found. diffReportedCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), - Message: ManifestProcessingReportDiffResultTypeNoDiffFoundDescription, + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), + Message: ApplyOrReportDiffResTypeNoDiffFoundDescription, ObservedGeneration: inMemberClusterObjGeneration, } - case ManifestProcessingReportDiffResultTypeFoundDiff: + case applyOrReportDiffResTyp == ApplyOrReportDiffResTypeFoundDiff: // Found diffs. 
diffReportedCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), - Message: ManifestProcessingReportDiffResultTypeFoundDiffDescription, + Reason: string(ApplyOrReportDiffResTypeFoundDiff), + Message: ApplyOrReportDiffResTypeFoundDiffDescription, + ObservedGeneration: inMemberClusterObjGeneration, + } + default: + // There are cases where the work applier might not be able to complete the diff reporting + // due to failures in the pre-processing or processing stage (e.g., the manifest cannot be decoded, + // or the user sets up a takeover strategy that cannot be completed). This is not considered + // as a system error. + diffReportedCond = &metav1.Condition{ + Type: fleetv1beta1.WorkConditionTypeDiffReported, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeFailedToReportDiff), + Message: fmt.Sprintf("An error blocks the diff reporting process (%s, error: %s)", applyOrReportDiffResTyp, applyOrReportDiffErr), ObservedGeneration: inMemberClusterObjGeneration, } } @@ -418,7 +447,7 @@ func setManifestDiffReportedCondition( meta.SetStatusCondition(&manifestCond.Conditions, *diffReportedCond) klog.V(2).InfoS("DiffReported condition set in ManifestCondition", "workResourceID", manifestCond.Identifier, - "reportDiffResTyp", reportDiffResTyp, "reportDiffError", reportDiffError, + "applyOrReportDiffResTyp", applyOrReportDiffResTyp, "applyOrReportDiffErr", applyOrReportDiffErr, "inMemberClusterObjGeneration", inMemberClusterObjGeneration) } else { // As the conditions are ported back; removal must be performed if the DiffReported @@ -426,7 +455,7 @@ func setManifestDiffReportedCondition( meta.RemoveStatusCondition(&manifestCond.Conditions, fleetv1beta1.WorkConditionTypeDiffReported) klog.V(2).InfoS("DiffReported condition removed from ManifestCondition", "workResourceID", manifestCond.Identifier, - "reportDiffResTyp", reportDiffResTyp, "reportDiffError", reportDiffError, + "applyOrReportDiffResTyp", applyOrReportDiffResTyp, "applyOrReportDiffErr", applyOrReportDiffErr, "inMemberClusterObjGeneration", inMemberClusterObjGeneration) } } diff --git a/pkg/controllers/workapplier/status_test.go b/pkg/controllers/workapplier/status_test.go index 12405fc44..5e12da724 100644 --- a/pkg/controllers/workapplier/status_test.go +++ b/pkg/controllers/workapplier/status_test.go @@ -18,6 +18,7 @@ package workapplier import ( "context" + "fmt" "testing" "time" @@ -93,10 +94,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy1.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy1.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeAvailable, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -129,13 +129,13 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 2, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: 
string(AvailabilityResultTypeAvailable), ObservedGeneration: 2, }, }, @@ -163,10 +163,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy2.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy2.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection, + availabilityResTyp: AvailabilityResultTypeSkipped, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -178,10 +177,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy3.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeFailedToTakeOver, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy3.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToTakeOver, + availabilityResTyp: AvailabilityResultTypeSkipped, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -208,7 +206,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection), + Reason: string(ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection), }, }, }, @@ -226,7 +224,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(ApplyOrReportDiffResTypeFailedToTakeOver), }, }, }, @@ -252,10 +250,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeFailed, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeFailed, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -267,10 +264,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeNotYetAvailable, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -282,10 +278,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "jobs", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: 
ApplyOrReportDiffResTypeApplied, + availabilityResTyp: AvailabilityResultTypeNotTrackable, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -316,12 +311,12 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeFailed), + Reason: string(AvailabilityResultTypeFailed), }, }, }, @@ -339,12 +334,12 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(AvailabilityResultTypeNotYetAvailable), }, }, }, @@ -362,12 +357,12 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeNotTrackable), + Reason: string(AvailabilityResultTypeNotTrackable), }, }, }, @@ -406,7 +401,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), }, }, DriftDetails: &fleetv1beta1.DriftDetails{ @@ -427,7 +422,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(ApplyOrReportDiffResTypeFailedToTakeOver), }, }, DiffDetails: &fleetv1beta1.DiffDetails{ @@ -449,10 +444,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeFoundDrifts, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDrifts, + availabilityResTyp: AvailabilityResultTypeSkipped, drifts: []fleetv1beta1.PatchDetail{ { Path: "/spec/replicas", @@ -470,10 +464,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy2.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeFailedToTakeOver, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy2.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToTakeOver, + availabilityResTyp: AvailabilityResultTypeSkipped, diffs: []fleetv1beta1.PatchDetail{ { Path: "/spec/replicas", @@ -507,7 +500,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: 
string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), }, }, DriftDetails: &fleetv1beta1.DriftDetails{ @@ -534,7 +527,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(ApplyOrReportDiffResTypeFailedToTakeOver), }, }, DiffDetails: &fleetv1beta1.DiffDetails{ @@ -589,7 +582,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), }, }, DriftDetails: &fleetv1beta1.DriftDetails{ @@ -610,10 +603,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeNoApplyPerformed, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFoundDiff, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, + availabilityResTyp: AvailabilityResultTypeSkipped, diffs: []fleetv1beta1.PatchDetail{ { Path: "/x", @@ -646,7 +638,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), }, }, DiffDetails: &fleetv1beta1.DiffDetails{ @@ -700,12 +692,12 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), }, }, }, @@ -723,11 +715,10 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeNoApplyPerformed, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, - diffs: []fleetv1beta1.PatchDetail{}, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, + availabilityResTyp: AvailabilityResultTypeSkipped, + diffs: []fleetv1beta1.PatchDetail{}, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -754,7 +745,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), }, }, }, @@ -799,7 +790,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), }, }, DriftDetails: &fleetv1beta1.DriftDetails{ @@ -819,12 +810,12 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: 
metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), }, }, }, @@ -842,10 +833,10 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeNoApplyPerformed, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFoundDiff, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, + availabilityResTyp: AvailabilityResultTypeSkipped, + diffs: []fleetv1beta1.PatchDetail{ { Path: "/x", @@ -862,10 +853,9 @@ func TestRefreshWorkStatus(t *testing.T) { Name: nsName, Resource: "namespaces", }, - inMemberClusterObj: toUnstructured(t, ns.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeNoApplyPerformed, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + inMemberClusterObj: toUnstructured(t, ns.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, + availabilityResTyp: AvailabilityResultTypeSkipped, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -892,7 +882,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), }, }, DiffDetails: &fleetv1beta1.DiffDetails{ @@ -918,7 +908,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), }, }, }, @@ -963,7 +953,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), }, }, DriftDetails: &fleetv1beta1.DriftDetails{ @@ -983,12 +973,12 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), }, }, }, @@ -1006,10 +996,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeNoApplyPerformed, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFailed, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToReportDiff, + availabilityResTyp: AvailabilityResultTypeSkipped, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -1020,10 +1009,9 @@ func 
TestRefreshWorkStatus(t *testing.T) { Name: nsName, Resource: "namespaces", }, - inMemberClusterObj: toUnstructured(t, ns.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeNoApplyPerformed, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + inMemberClusterObj: toUnstructured(t, ns.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, + availabilityResTyp: AvailabilityResultTypeSkipped, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -1050,7 +1038,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingReportDiffResultTypeFailed), + Reason: string(ApplyOrReportDiffResTypeFailedToReportDiff), }, }, }, @@ -1067,7 +1055,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), }, }, }, @@ -1166,8 +1154,8 @@ func TestRefreshAppliedWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy1), - applyResTyp: ManifestProcessingApplyResultTypeApplied, + inMemberClusterObj: toUnstructured(t, deploy1), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -1178,8 +1166,8 @@ func TestRefreshAppliedWorkStatus(t *testing.T) { Name: nsName, Resource: "namespaces", }, - inMemberClusterObj: toUnstructured(t, ns1), - applyResTyp: ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection, + inMemberClusterObj: toUnstructured(t, ns1), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -1191,8 +1179,8 @@ func TestRefreshAppliedWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy2), - applyResTyp: ManifestProcessingApplyResultTypeFailedToFindObjInMemberCluster, + inMemberClusterObj: toUnstructured(t, deploy2), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToFindObjInMemberCluster, }, }, wantAppliedWorkStatus: &fleetv1beta1.AppliedWorkStatus{ @@ -1258,22 +1246,23 @@ func TestSetManifestAppliedCondition(t *testing.T) { testCases := []struct { name string manifestCond *fleetv1beta1.ManifestCondition - applyResTyp manifestProcessingAppliedResultType - applyErr error + isReportDiffModeOn bool + applyOrReportDiffResTyp ManifestProcessingApplyOrReportDiffResultType + applyOrReportDiffErr error observedInMemberClusterGeneration int64 wantManifestCond *fleetv1beta1.ManifestCondition }{ { name: "applied", manifestCond: &fleetv1beta1.ManifestCondition{}, - applyResTyp: ManifestProcessingApplyResultTypeApplied, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, observedInMemberClusterGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, }, @@ -1286,19 +1275,19 @@ func TestSetManifestAppliedCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + 
Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, }, }, - applyResTyp: ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection, observedInMemberClusterGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection), + Reason: string(ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection), ObservedGeneration: 1, }, }, @@ -1311,19 +1300,19 @@ func TestSetManifestAppliedCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, }, }, - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, observedInMemberClusterGeneration: 2, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToApply), + Reason: string(ApplyOrReportDiffResTypeFailedToApply), ObservedGeneration: 2, }, }, @@ -1336,22 +1325,51 @@ func TestSetManifestAppliedCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, }, }, - applyResTyp: ManifestProcessingApplyResultTypeNoApplyPerformed, + isReportDiffModeOn: true, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, observedInMemberClusterGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{}, }, }, + { + // Normally this should never occur. 
+ name: "encountered an unexpected result type", + manifestCond: &fleetv1beta1.ManifestCondition{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 1, + }, + }, + }, + isReportDiffModeOn: false, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, + applyOrReportDiffErr: nil, + observedInMemberClusterGeneration: 1, + wantManifestCond: &fleetv1beta1.ManifestCondition{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeFailedToApply), + ObservedGeneration: 1, + }, + }, + }, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - setManifestAppliedCondition(tc.manifestCond, tc.applyResTyp, tc.applyErr, tc.observedInMemberClusterGeneration) + setManifestAppliedCondition(tc.manifestCond, tc.isReportDiffModeOn, tc.applyOrReportDiffResTyp, tc.applyOrReportDiffErr, tc.observedInMemberClusterGeneration) if diff := cmp.Diff(tc.manifestCond, tc.wantManifestCond, ignoreFieldConditionLTTMsg); diff != "" { t.Errorf("set manifest cond mismatches (-got, +want):\n%s", diff) } @@ -1372,14 +1390,14 @@ func TestSetManifestAvailableCondition(t *testing.T) { { name: "available", manifestCond: &fleetv1beta1.ManifestCondition{}, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + availabilityResTyp: AvailabilityResultTypeAvailable, inMemberClusterObjGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -1392,19 +1410,19 @@ func TestSetManifestAvailableCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, }, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeFailed, + availabilityResTyp: AvailabilityResultTypeFailed, inMemberClusterObjGeneration: 2, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeFailed), + Reason: string(AvailabilityResultTypeFailed), ObservedGeneration: 2, }, }, @@ -1417,19 +1435,19 @@ func TestSetManifestAvailableCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, }, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + availabilityResTyp: AvailabilityResultTypeNotYetAvailable, inMemberClusterObjGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(AvailabilityResultTypeNotYetAvailable), ObservedGeneration: 1, }, }, @@ -1438,14 +1456,14 @@ func TestSetManifestAvailableCondition(t *testing.T) { { name: "untrackable", 
manifestCond: &fleetv1beta1.ManifestCondition{}, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, + availabilityResTyp: AvailabilityResultTypeNotTrackable, inMemberClusterObjGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeNotTrackable), + Reason: string(AvailabilityResultTypeNotTrackable), ObservedGeneration: 1, }, }, @@ -1458,12 +1476,12 @@ func TestSetManifestAvailableCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeFailed), + Reason: string(AvailabilityResultTypeFailed), ObservedGeneration: 1, }, }, }, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + availabilityResTyp: AvailabilityResultTypeSkipped, inMemberClusterObjGeneration: 2, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{}, @@ -1486,22 +1504,24 @@ func TestSetManifestDiffReportedCondition(t *testing.T) { testCases := []struct { name string manifestCond *fleetv1beta1.ManifestCondition - reportDiffResTyp ManifestProcessingReportDiffResultType - reportDiffError error + isReportDiffModeOn bool + applyOrReportDiffResTyp ManifestProcessingApplyOrReportDiffResultType + applyOrReportDiffErr error inMemberClusterObjGeneration int64 wantManifestCond *fleetv1beta1.ManifestCondition }{ { name: "failed", manifestCond: &fleetv1beta1.ManifestCondition{}, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFailed, + isReportDiffModeOn: true, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToReportDiff, inMemberClusterObjGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingReportDiffResultTypeFailed), + Reason: string(ApplyOrReportDiffResTypeFailedToReportDiff), ObservedGeneration: 1, }, }, @@ -1514,19 +1534,20 @@ func TestSetManifestDiffReportedCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 1, }, }, }, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFoundDiff, + isReportDiffModeOn: true, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, inMemberClusterObjGeneration: 2, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 2, }, }, @@ -1539,19 +1560,20 @@ func TestSetManifestDiffReportedCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 1, }, }, }, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + isReportDiffModeOn: true, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, inMemberClusterObjGeneration: 2, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: 
fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 2, }, }, @@ -1564,22 +1586,50 @@ func TestSetManifestDiffReportedCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 1, }, }, }, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + isReportDiffModeOn: false, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, inMemberClusterObjGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{}, }, }, + { + name: "decoding error", + manifestCond: &fleetv1beta1.ManifestCondition{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeDiffReported, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), + ObservedGeneration: 1, + }, + }, + }, + isReportDiffModeOn: true, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeDecodingErred, + applyOrReportDiffErr: fmt.Errorf("decoding error"), + inMemberClusterObjGeneration: 1, + wantManifestCond: &fleetv1beta1.ManifestCondition{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeDiffReported, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeFailedToReportDiff), + ObservedGeneration: 1, + }, + }, + }, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - setManifestDiffReportedCondition(tc.manifestCond, tc.reportDiffResTyp, tc.reportDiffError, tc.inMemberClusterObjGeneration) + setManifestDiffReportedCondition(tc.manifestCond, tc.isReportDiffModeOn, tc.applyOrReportDiffResTyp, tc.applyOrReportDiffErr, tc.inMemberClusterObjGeneration) if diff := cmp.Diff(tc.manifestCond, tc.wantManifestCond, ignoreFieldConditionLTTMsg); diff != "" { t.Errorf("set manifest cond mismatches (-got, +want):\n%s", diff) } diff --git a/pkg/controllers/workapplier/utils.go b/pkg/controllers/workapplier/utils.go index 02438d208..57d6e8329 100644 --- a/pkg/controllers/workapplier/utils.go +++ b/pkg/controllers/workapplier/utils.go @@ -44,9 +44,9 @@ func formatWRIString(wri *fleetv1beta1.WorkResourceIdentifier) (string, error) { // isManifestObjectApplied returns if an applied result type indicates that a manifest // object in a bundle has been successfully applied. 
-func isManifestObjectApplied(appliedResTyp manifestProcessingAppliedResultType) bool { - return appliedResTyp == ManifestProcessingApplyResultTypeApplied || - appliedResTyp == ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection +func isManifestObjectApplied(appliedResTyp ManifestProcessingApplyOrReportDiffResultType) bool { + return appliedResTyp == ApplyOrReportDiffResTypeApplied || + appliedResTyp == ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection } // isPlacedByFleetInDuplicate checks if the object has already been placed by Fleet via another diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go index 11270922f..63063e20b 100644 --- a/pkg/controllers/workgenerator/controller.go +++ b/pkg/controllers/workgenerator/controller.go @@ -38,7 +38,6 @@ import ( "k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - "k8s.io/utils/ptr" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -326,10 +325,9 @@ func (r *Reconciler) handleDelete(ctx context.Context, resourceBinding fleetv1be return controllerruntime.Result{}, err } - // delete all the listed works - // - // TO-DO: this controller should be able to garbage collect all works automatically via - // background/foreground cascade deletion. This may render the finalizer unnecessary. + // Note: This controller cannot garbage collect all works automatically via background/foreground + // cascade deletion, because the works and the resourceBinding live in different namespaces + // and no ownerReference is set on the works. for workName := range works { work := works[workName] if err := r.Client.Delete(ctx, work); err != nil && !apierrors.IsNotFound(err) { @@ -746,15 +744,8 @@ func generateSnapshotWorkObj(workName string, resourceBinding fleetv1beta1.Bindi fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation: resourceOverrideSnapshotHash, fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: clusterResourceOverrideSnapshotHash, }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: fleetv1beta1.GroupVersion.String(), - Kind: resourceBinding.GetObjectKind().GroupVersionKind().Kind, - Name: resourceBinding.GetName(), - UID: resourceBinding.GetUID(), - BlockOwnerDeletion: ptr.To(true), // make sure that the k8s will call work delete when the binding is deleted - }, - }, + // OwnerReferences cannot be added, as the work and the resourceBinding live in different namespaces. + // The garbage collector would consider the resourceBinding invalid, since it cannot be found in the same namespace.
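Because no ownerReference ties a Work to its differently namespaced binding, cascade deletion cannot clean the Works up, so the finalizer path deletes them explicitly, as the handleDelete hunk above shows. A minimal, self-contained sketch of that cleanup pattern; the function name, the works map type, and the module import path are assumptions for illustration, and error wrapping is simplified.

package workgenerator

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"

	fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" // import path assumed for this sketch
)

// deleteListedWorks mirrors the explicit cleanup in handleDelete: without an ownerReference,
// the garbage collector will not cascade the deletion, so each listed Work is deleted directly.
func deleteListedWorks(ctx context.Context, c client.Client, works map[string]*fleetv1beta1.Work) error {
	for workName := range works {
		work := works[workName]
		if err := c.Delete(ctx, work); err != nil && !apierrors.IsNotFound(err) {
			return fmt.Errorf("failed to delete work %q: %w", workName, err)
		}
	}
	return nil
}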
}, Spec: fleetv1beta1.WorkSpec{ Workload: fleetv1beta1.WorkloadTemplate{ diff --git a/pkg/controllers/workgenerator/controller_integration_test.go b/pkg/controllers/workgenerator/controller_integration_test.go index ef6fe88f6..48cad7d5a 100644 --- a/pkg/controllers/workgenerator/controller_integration_test.go +++ b/pkg/controllers/workgenerator/controller_integration_test.go @@ -336,15 +336,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -430,15 +421,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -632,15 +614,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -673,15 +646,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: envWork.Name, Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -758,15 +722,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -798,15 +753,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: work.Name, Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: 
placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -918,15 +864,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -958,15 +895,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: envWork.Name, Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -1117,15 +1045,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.WorkNameWithSubindexFmt, testCRPName, 1), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentResourceSnapshotIndexLabel: "2", @@ -1192,15 +1111,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.WorkNameWithSubindexFmt, testCRPName, 1), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentResourceSnapshotIndexLabel: "2", @@ -1491,15 +1401,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -1723,15 +1624,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: 
placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -2417,7 +2309,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(workapplier.ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2438,7 +2330,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2503,7 +2395,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingReportDiffResultTypeFailed), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToReportDiff), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2556,7 +2448,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(workapplier.ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2794,7 +2686,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2826,7 +2718,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection), + Reason: string(workapplier.ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2834,7 +2726,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(workapplier.AvailabilityResultTypeAvailable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2900,7 +2792,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToApply), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToApply), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2930,7 +2822,7 @@ var _ = Describe("Test Work Generator 
Controller for clusterResourcePlacement", Condition: metav1.Condition{ Type: string(placementv1beta1.WorkConditionTypeApplied), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2947,7 +2839,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", Condition: metav1.Condition{ Type: string(placementv1beta1.WorkConditionTypeApplied), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToApply), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToApply), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3040,7 +2932,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingApplyResultTypeApplied), + Reason: string(workapplier.ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3050,7 +2942,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", Status: metav1.ConditionTrue, // As explained earlier, for this spec the ConfigMap object is // considered to be untrackable in availability check. - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeNotTrackable), + Reason: string(workapplier.AvailabilityResultTypeNotTrackable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3080,7 +2972,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", Condition: metav1.Condition{ Type: string(placementv1beta1.WorkConditionTypeApplied), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3168,7 +3060,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingApplyResultTypeApplied), + Reason: string(workapplier.ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3176,7 +3068,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(workapplier.AvailabilityResultTypeAvailable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3197,7 +3089,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection), + Reason: string(workapplier.ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3205,7 +3097,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: 
string(workapplier.AvailabilityResultTypeNotYetAvailable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3254,7 +3146,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", Condition: metav1.Condition{ Type: string(placementv1beta1.WorkConditionTypeAvailable), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(workapplier.AvailabilityResultTypeNotYetAvailable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3321,7 +3213,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingApplyResultTypeApplied), + Reason: string(workapplier.ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3329,7 +3221,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(workapplier.AvailabilityResultTypeAvailable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3350,7 +3242,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection), + Reason: string(workapplier.ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3358,7 +3250,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(workapplier.AvailabilityResultTypeAvailable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3457,7 +3349,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingApplyResultTypeApplied), + Reason: string(workapplier.ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3465,7 +3357,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(workapplier.AvailabilityResultTypeAvailable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -4946,15 +4838,6 @@ var _ = Describe("Test Work Generator Controller for ResourcePlacement", func() ObjectMeta: metav1.ObjectMeta{ Name: workName, Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testRPName, placementv1beta1.ParentBindingLabel: binding.Name, diff --git a/pkg/controllers/workgenerator/envelope.go b/pkg/controllers/workgenerator/envelope.go index 
fc3b93e2c..0d2b3049c 100644 --- a/pkg/controllers/workgenerator/envelope.go +++ b/pkg/controllers/workgenerator/envelope.go @@ -26,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/klog/v2" - "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" @@ -214,17 +213,8 @@ func buildNewWorkForEnvelopeCR( fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation: resourceOverrideSnapshotHash, fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: clusterResourceOverrideSnapshotHash, }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: fleetv1beta1.GroupVersion.String(), - Kind: resourceBinding.GetObjectKind().GroupVersionKind().Kind, - Name: resourceBinding.GetName(), - UID: resourceBinding.GetUID(), - // Make sure that the resource binding can only be deleted after - // all of its managed work objects have been deleted. - BlockOwnerDeletion: ptr.To(true), - }, - }, + // OwnerReferences cannot be added, as the namespaces of work and resourceBinding are different. + // Garbage collector will assume the resourceBinding is invalid as it cannot be found in the same namespace. }, Spec: fleetv1beta1.WorkSpec{ Workload: fleetv1beta1.WorkloadTemplate{ diff --git a/pkg/controllers/workgenerator/override_test.go b/pkg/controllers/workgenerator/override_test.go index 79e8c79a2..a0b18498a 100644 --- a/pkg/controllers/workgenerator/override_test.go +++ b/pkg/controllers/workgenerator/override_test.go @@ -61,7 +61,7 @@ func TestFetchClusterResourceOverrideSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -87,7 +87,7 @@ func TestFetchClusterResourceOverrideSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", diff --git a/pkg/controllers/workgenerator/suite_test.go b/pkg/controllers/workgenerator/suite_test.go index 81ef9daa9..d453d457f 100644 --- a/pkg/controllers/workgenerator/suite_test.go +++ b/pkg/controllers/workgenerator/suite_test.go @@ -176,7 +176,7 @@ func createOverrides() { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: utils.NamespaceGVK.Group, Version: utils.NamespaceGVK.Version, @@ -265,7 +265,7 @@ func createOverrides() { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: utils.NamespaceGVK.Group, Version: utils.NamespaceGVK.Version, diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index b92c9c01b..8e4756781 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -43,6 +43,26 @@ var ( Name: 
"placement_apply_succeed_counter", Help: "Number of successfully applied cluster resource placement", }, []string{"name"}) + + // FleetPlacementStatusLastTimeStampSeconds is a prometheus metric which keeps track of the last placement status. + FleetPlacementStatusLastTimeStampSeconds = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "fleet_workload_placement_status_last_timestamp_seconds", + Help: "Last update timestamp of placement status in seconds", + }, []string{"namespace", "name", "generation", "conditionType", "status", "reason"}) + + // FleetEvictionStatus is prometheus metrics which holds the + // status of eviction completion. + FleetEvictionStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "fleet_workload_eviction_complete", + Help: "Last update timestamp of eviction complete status in seconds", + }, []string{"name", "isCompleted", "isValid"}) + + // FleetUpdateRunStatusLastTimestampSeconds is a prometheus metric which holds the + // last update timestamp of update run status in seconds. + FleetUpdateRunStatusLastTimestampSeconds = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "fleet_workload_update_run_status_last_timestamp_seconds", + Help: "Last update timestamp of update run status in seconds", + }, []string{"name", "generation", "condition", "status", "reason"}) ) var ( diff --git a/pkg/propertyprovider/azure/controllers/pod.go b/pkg/propertyprovider/azure/controllers/pod.go index b19423bb3..a5b02d3e9 100644 --- a/pkg/propertyprovider/azure/controllers/pod.go +++ b/pkg/propertyprovider/azure/controllers/pod.go @@ -52,7 +52,10 @@ func (p *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R klog.V(2).InfoS("Reconciliation ends for pod objects in the Azure property provider", "pod", podRef, "latency", latency) }() - // Retrieve the pod object. + // Retrieve the pod object from cache. + // + // Note that the transform func has removed fields that are irrelevant to the pod watcher + // from the retrieved objects at this moment. pod := &corev1.Pod{} if err := p.Client.Get(ctx, req.NamespacedName, pod); err != nil { // Failed to get the pod object. @@ -86,8 +89,12 @@ func (p *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R // This behavior is consistent with how the Kubernetes CLI tool reports requested capacity // on a specific node (`kubectl describe node` command). // - // Note that the tracker will attempt to track the pod even if it has been marked for deletion. + // The tracker will attempt to track the pod even if it has been marked for deletion (when it + // is actually gone, the pod will be untracked). if len(pod.Spec.NodeName) > 0 && pod.Status.Phase != corev1.PodSucceeded && pod.Status.Phase != corev1.PodFailed { + // The pod watcher has field selectors enabled, which will not see pods that should not + // be tracked (e.g., pods that are not assigned to a node, or pods that are in terminal states). + // The check is added here for completeness reasons. 
klog.V(2).InfoS("Attempt to track the pod", "pod", podRef) p.PT.AddOrUpdate(pod) } else { diff --git a/pkg/propertyprovider/azure/provider.go b/pkg/propertyprovider/azure/provider.go index cc98dbe95..9ae1993eb 100644 --- a/pkg/propertyprovider/azure/provider.go +++ b/pkg/propertyprovider/azure/provider.go @@ -24,12 +24,14 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" @@ -88,8 +90,64 @@ var _ propertyprovider.PropertyProvider = &PropertyProvider{} func (p *PropertyProvider) Start(ctx context.Context, config *rest.Config) error { klog.V(2).Info("Starting Azure property provider") + podObj := client.Object(&corev1.Pod{}) mgr, err := ctrl.NewManager(config, ctrl.Options{ Scheme: scheme.Scheme, + Cache: cache.Options{ + ByObject: map[client.Object]cache.ByObject{ + podObj: { + // Set up field selectors so that API server will not send out watch events that + // are not relevant to the pod watcher. This is essentially a trade-off between + // in-memory check overhead and encoding/transmission overhead; for large clusters + // with frequent pod creation/deletion ops, the trade-off seems to be worth it based + // on current experimentation results. + Field: fields.AndSelectors( + fields.OneTermNotEqualSelector("spec.nodeName", ""), + fields.OneTermNotEqualSelector("status.phase", string(corev1.PodSucceeded)), + fields.OneTermNotEqualSelector("status.phase", string(corev1.PodFailed)), + ), + // Drop irrelevant fields from the pod object; this can significantly reduce the + // CPU and memory usage of the pod watcher, as less data is stored in cache. + Transform: func(obj interface{}) (interface{}, error) { + pod, ok := obj.(*corev1.Pod) + if !ok { + return nil, fmt.Errorf("failed to cast object to a pod object") + } + + // The pod watcher only cares about a very limited set of pod fields, + // specifically the pod's current phase, node name, and resource requests. + + // Drop unused metadata fields. + pod.ObjectMeta.Labels = nil + pod.ObjectMeta.Annotations = nil + pod.ObjectMeta.OwnerReferences = nil + pod.ObjectMeta.ManagedFields = nil + + // Drop the rest of the pod status as they are irrelevant to the pod watcher. + pod.Status = corev1.PodStatus{ + Phase: pod.Status.Phase, + } + + // Drop the unwanted pod spec fields. + rebuiltedContainers := make([]corev1.Container, 0, len(pod.Spec.Containers)) + for idx := range pod.Spec.Containers { + c := pod.Spec.Containers[idx] + rebuiltedContainers = append(rebuiltedContainers, corev1.Container{ + Name: c.Name, + Image: c.Image, + Resources: c.Resources, + ResizePolicy: c.ResizePolicy, + }) + } + pod.Spec = corev1.PodSpec{ + NodeName: pod.Spec.NodeName, + Containers: rebuiltedContainers, + } + return pod, nil + }, + }, + }, + }, // Disable metric serving for the Azure property provider controller manager. 
// // Note that this will not stop the metrics from being collected and exported; as they diff --git a/pkg/scheduler/framework/framework.go b/pkg/scheduler/framework/framework.go index 98ed1ef0e..10bb5a601 100644 --- a/pkg/scheduler/framework/framework.go +++ b/pkg/scheduler/framework/framework.go @@ -93,7 +93,7 @@ type Handle interface { type Framework interface { Handle - // RunSchedulingCycleFor performs scheduling for a cluster resource placement, specifically + // RunSchedulingCycleFor performs scheduling for a resource placement, specifically // its associated latest scheduling policy snapshot. RunSchedulingCycleFor(ctx context.Context, placementKey queue.PlacementKey, policy placementv1beta1.PolicySnapshotObj) (result ctrl.Result, err error) } @@ -242,7 +242,7 @@ func (f *framework) ClusterEligibilityChecker() *clustereligibilitychecker.Clust return f.clusterEligibilityChecker } -// RunSchedulingCycleFor performs scheduling for a cluster resource placement +// RunSchedulingCycleFor performs scheduling for a resource placement // (more specifically, its associated scheduling policy snapshot). func (f *framework) RunSchedulingCycleFor(ctx context.Context, placementKey queue.PlacementKey, policy placementv1beta1.PolicySnapshotObj) (result ctrl.Result, err error) { startTime := time.Now() @@ -313,7 +313,7 @@ func (f *framework) RunSchedulingCycleFor(ctx context.Context, placementKey queu // result so that we won't have a ever increasing chain of flip flop bindings. bound, scheduled, obsolete, unscheduled, dangling, deleting := classifyBindings(policy, bindings, clusters) - // Remove scheduler CRB cleanup finalizer on all deleting bindings. + // Remove scheduler binding cleanup finalizer on all deleting bindings. if err := f.updateBindings(ctx, deleting, removeFinalizerAndUpdate); err != nil { klog.ErrorS(err, "Failed to remove finalizers from deleting bindings", "policySnapshot", policyRef) return ctrl.Result{}, err @@ -379,7 +379,7 @@ var markUnscheduledForAndUpdate = func(ctx context.Context, hubClient client.Cli return err } -// removeFinalizerAndUpdate removes scheduler CRB cleanup finalizer from binding and updates it. +// removeFinalizerAndUpdate removes scheduler binding cleanup finalizer from binding and updates it. var removeFinalizerAndUpdate = func(ctx context.Context, hubClient client.Client, binding placementv1beta1.BindingObj) error { controllerutil.RemoveFinalizer(binding, placementv1beta1.SchedulerBindingCleanupFinalizer) err := hubClient.Update(ctx, binding, &client.UpdateOptions{}) @@ -430,7 +430,7 @@ func (f *framework) runSchedulingCycleForPickAllPlacementType( // The scheduler always needs to take action when processing scheduling policies of the PickAll // placement type; enter the actual scheduling stages right away. - klog.V(2).InfoS("Scheduling is always needed for CRPs of the PickAll placement type; entering scheduling stages", "policySnapshot", policyRef) + klog.V(2).InfoS("Scheduling is always needed for placements of the PickAll placement type; entering scheduling stages", "policySnapshot", policyRef) // Run all plugins needed. 
// diff --git a/pkg/scheduler/framework/frameworkutils.go b/pkg/scheduler/framework/frameworkutils.go index 08c7f7d2c..eaa955923 100644 --- a/pkg/scheduler/framework/frameworkutils.go +++ b/pkg/scheduler/framework/frameworkutils.go @@ -67,7 +67,7 @@ func classifyBindings(policy placementv1beta1.PolicySnapshotObj, bindings []plac switch { case !binding.GetDeletionTimestamp().IsZero(): - // we need remove scheduler CRB cleanup finalizer from deleting bindings. + // we need remove scheduler binding cleanup finalizer from deleting bindings. deleting = append(deleting, binding) case bindingSpec.State == placementv1beta1.BindingStateUnscheduled: // we need to remember those bindings so that we will not create another one. @@ -503,7 +503,7 @@ func equalDecisions(current, desired []placementv1beta1.ClusterDecision) bool { // many scheduled or bound bindings it should remove. func shouldDownscale(policy placementv1beta1.PolicySnapshotObj, desired, present, obsolete int) (act bool, count int) { if policy.GetPolicySnapshotSpec().Policy.PlacementType == placementv1beta1.PickNPlacementType && desired <= present { - // Downscale only applies to CRPs of the Pick N placement type; and it only applies when the number of + // Downscale only applies to placements of the Pick N placement type; and it only applies when the number of // clusters requested by the user is less than the number of currently bound + scheduled bindings combined; // or there are the right number of bound + scheduled bindings, yet some obsolete bindings still linger // in the system. @@ -531,7 +531,7 @@ func sortByClusterScoreAndName(bindings []placementv1beta1.BindingObj) (sorted [ switch { case scoreA == nil && scoreB == nil: // Both bindings have no assigned cluster scores; normally this will never happen, - // as for CRPs of the PickN type, the scheduler will always assign cluster scores + // as for placements of the PickN type, the scheduler will always assign cluster scores // to bindings. // // In this case, compare their target cluster names instead. diff --git a/pkg/scheduler/queue/queue.go b/pkg/scheduler/queue/queue.go index cee4da8ca..552fd64fc 100644 --- a/pkg/scheduler/queue/queue.go +++ b/pkg/scheduler/queue/queue.go @@ -38,12 +38,12 @@ type PlacementSchedulingQueueWriter interface { // Add adds a PlacementKey to the work queue. // // Note that this bypasses the rate limiter. - Add(crpKey PlacementKey) + Add(placementKey PlacementKey) // AddRateLimited adds a PlacementKey to the work queue after the rate limiter (if any) // says that it is OK. - AddRateLimited(crpKey PlacementKey) + AddRateLimited(placementKey PlacementKey) // AddAfter adds a PlacementKey to the work queue after a set duration. - AddAfter(crpKey PlacementKey, duration time.Duration) + AddAfter(placementKey PlacementKey, duration time.Duration) } // PlacementSchedulingQueue is an interface which queues PlacementKeys for the scheduler @@ -61,9 +61,9 @@ type PlacementSchedulingQueue interface { // NextPlacementKey returns the next-in-line PlacementKey for the scheduler to consume. NextPlacementKey() (key PlacementKey, closed bool) // Done marks a PlacementKey as done. - Done(crpKey PlacementKey) + Done(placementKey PlacementKey) // Forget untracks a PlacementKey from rate limiter(s) (if any) set up with the queue. 
- Forget(crpKey PlacementKey) + Forget(placementKey PlacementKey) } // simplePlacementSchedulingQueue is a simple implementation of @@ -127,53 +127,53 @@ func (sq *simplePlacementSchedulingQueue) CloseWithDrain() { sq.active.ShutDownWithDrain() } -// NextPlacementKey returns the next ClusterResourcePlacementKey in the work queue for -// the scheduler to process. +// NextPlacementKey returns the next PlacementKey (either clusterResourcePlacementKey or resourcePlacementKey) +// in the work queue for the scheduler to process. // // Note that for now the queue simply wraps a work queue, and consider its state (whether it // is shut down or not) as its own closedness. In the future, when more queues are added, the // queue implementation must manage its own state. func (sq *simplePlacementSchedulingQueue) NextPlacementKey() (key PlacementKey, closed bool) { // This will block on a condition variable if the queue is empty. - crpKey, shutdown := sq.active.Get() + placementKey, shutdown := sq.active.Get() if shutdown { return "", true } - return crpKey.(PlacementKey), false + return placementKey.(PlacementKey), false } -// Done marks a ClusterResourcePlacementKey as done. -func (sq *simplePlacementSchedulingQueue) Done(crpKey PlacementKey) { - sq.active.Done(crpKey) +// Done marks a PlacementKey as done. +func (sq *simplePlacementSchedulingQueue) Done(placementKey PlacementKey) { + sq.active.Done(placementKey) } -// Add adds a ClusterResourcePlacementKey to the work queue. +// Add adds a PlacementKey to the work queue. // // Note that this bypasses the rate limiter (if any). -func (sq *simplePlacementSchedulingQueue) Add(crpKey PlacementKey) { - sq.active.Add(crpKey) +func (sq *simplePlacementSchedulingQueue) Add(placementKey PlacementKey) { + sq.active.Add(placementKey) } -// AddRateLimited adds a ClusterResourcePlacementKey to the work queue after the rate limiter (if any) +// AddRateLimited adds a PlacementKey to the work queue after the rate limiter (if any) // says that it is OK. -func (sq *simplePlacementSchedulingQueue) AddRateLimited(crpKey PlacementKey) { - sq.active.AddRateLimited(crpKey) +func (sq *simplePlacementSchedulingQueue) AddRateLimited(placementKey PlacementKey) { + sq.active.AddRateLimited(placementKey) } -// AddAfter adds a ClusterResourcePlacementKey to the work queue after a set duration. +// AddAfter adds a PlacementKey to the work queue after a set duration. // // Note that this bypasses the rate limiter (if any) -func (sq *simplePlacementSchedulingQueue) AddAfter(crpKey PlacementKey, duration time.Duration) { - sq.active.AddAfter(crpKey, duration) +func (sq *simplePlacementSchedulingQueue) AddAfter(placementKey PlacementKey, duration time.Duration) { + sq.active.AddAfter(placementKey, duration) } -// Forget untracks a ClusterResourcePlacementKey from rate limiter(s) (if any) set up with the queue. -func (sq *simplePlacementSchedulingQueue) Forget(crpKey PlacementKey) { - sq.active.Forget(crpKey) +// Forget untracks a PlacementKey from rate limiter(s) (if any) set up with the queue. +func (sq *simplePlacementSchedulingQueue) Forget(placementKey PlacementKey) { + sq.active.Forget(placementKey) } // NewSimplePlacementSchedulingQueue returns a -// simpleClusterResourcePlacementSchedulingQueue. +// simplePlacementSchedulingQueue. 
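The renamed methods above keep the standard work-queue contract; below is a minimal consumer-loop sketch under that contract, assuming the queue interface also exposes the writer methods and using a hypothetical process func:

import "go.goms.io/fleet/pkg/scheduler/queue"

// runWorker drains PlacementKeys until the queue is closed.
func runWorker(q queue.PlacementSchedulingQueue, process func(queue.PlacementKey) error) {
	for {
		key, closed := q.NextPlacementKey()
		if closed {
			return
		}
		if err := process(key); err != nil {
			// Requeue through the rate limiter on failure.
			q.AddRateLimited(key)
		} else {
			// Stop tracking the key in the rate limiter once handled.
			q.Forget(key)
		}
		q.Done(key)
	}
}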
func NewSimplePlacementSchedulingQueue(opts ...Option) PlacementSchedulingQueue { options := defaultSimplePlacementSchedulingQueueOptions for _, opt := range opts { diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index f73a56b39..3ad1d685d 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -124,14 +124,13 @@ func (s *Scheduler) scheduleOnce(ctx context.Context, worker int) { defer metrics.SchedulerActiveWorkers.WithLabelValues().Add(-1) startTime := time.Now() - placementRef := klog.KRef("", string(placementKey)) - klog.V(2).InfoS("Schedule once started", "placement", placementRef, "worker", worker) + klog.V(2).InfoS("Schedule once started", "placement", placementKey, "worker", worker) defer func() { // Note that the time spent on pulling keys from the work queue (and the time spent on waiting // for a key to arrive) is not counted here, as we cannot reliably distinguish between // system processing latencies and actual duration of placement absence. latency := time.Since(startTime).Milliseconds() - klog.V(2).InfoS("Schedule once completed", "placement", placementRef, "latency", latency, "worker", worker) + klog.V(2).InfoS("Schedule once completed", "placement", placementKey, "latency", latency, "worker", worker) }() // Retrieve the placement object (either ClusterResourcePlacement or ResourcePlacement). @@ -143,18 +142,18 @@ func (s *Scheduler) scheduleOnce(ctx context.Context, worker int) { // has been marked for deletion but does not have the scheduler cleanup finalizer to // the work queue. Such placements needs no further processing any way though, as the absence // of the cleanup finalizer implies that bindings derived from the placement are no longer present. - klog.ErrorS(err, "placement is already deleted", "placement", placementRef) + klog.ErrorS(err, "placement is already deleted", "placement", placementKey) return } if errors.Is(err, controller.ErrUnexpectedBehavior) { // The placement is in an unexpected state; this is a scheduler-side error, and // Note that this is a scheduler-side error, so it does not return an error to the caller. // Raise an alert for it. - klog.ErrorS(err, "Placement is in an unexpected state", "placement", placementRef) + klog.ErrorS(err, "Placement is in an unexpected state", "placement", placementKey) return } // Wrap the error for metrics; this method does not return an error. - klog.ErrorS(controller.NewAPIServerError(true, err), "Failed to get placement", "placement", placementRef) + klog.ErrorS(controller.NewAPIServerError(true, err), "Failed to get placement", "placement", placementKey) // Requeue for later processing. s.queue.AddRateLimited(placementKey) @@ -163,10 +162,10 @@ func (s *Scheduler) scheduleOnce(ctx context.Context, worker int) { // Check if the placement has been marked for deletion, and if it has the scheduler cleanup finalizer. if placement.GetDeletionTimestamp() != nil { - // Use SchedulerCRPCleanupFinalizer consistently for all placement types + // Use SchedulerCleanupFinalizer consistently for all placement types if controllerutil.ContainsFinalizer(placement, fleetv1beta1.SchedulerCleanupFinalizer) { if err := s.cleanUpAllBindingsFor(ctx, placement); err != nil { - klog.ErrorS(err, "Failed to clean up all bindings for placement", "placement", placementRef) + klog.ErrorS(err, "Failed to clean up all bindings for placement", "placement", placementKey) if errors.Is(err, controller.ErrUnexpectedBehavior) { // The placement is in an unexpected state, don't requeue it. 
return @@ -189,7 +188,7 @@ func (s *Scheduler) scheduleOnce(ctx context.Context, worker int) { // Verify that it has an active policy snapshot. latestPolicySnapshot, err := s.lookupLatestPolicySnapshot(ctx, placement) if err != nil { - klog.ErrorS(err, "Failed to lookup latest policy snapshot", "placement", placementRef) + klog.ErrorS(err, "Failed to lookup latest policy snapshot", "placement", placementKey) // No requeue is needed; the scheduler will be triggered again when an active policy // snapshot is created. @@ -200,7 +199,7 @@ func (s *Scheduler) scheduleOnce(ctx context.Context, worker int) { // Add the scheduler cleanup finalizer to the placement (if it does not have one yet). if err := s.addSchedulerCleanUpFinalizer(ctx, placement); err != nil { - klog.ErrorS(err, "Failed to add scheduler cleanup finalizer", "placement", placementRef) + klog.ErrorS(err, "Failed to add scheduler cleanup finalizer", "placement", placementKey) // Requeue for later processing. s.queue.AddRateLimited(placementKey) return @@ -211,18 +210,18 @@ func (s *Scheduler) scheduleOnce(ctx context.Context, worker int) { // Note that the scheduler will enter this cycle as long as the placement is active and an active // policy snapshot has been produced. cycleStartTime := time.Now() - res, err := s.framework.RunSchedulingCycleFor(ctx, controller.GetObjectKeyFromObj(placement), latestPolicySnapshot) + res, err := s.framework.RunSchedulingCycleFor(ctx, placementKey, latestPolicySnapshot) if err != nil { if errors.Is(err, controller.ErrUnexpectedBehavior) { // The placement is in an unexpected state; this is a scheduler-side error, and // Note that this is a scheduler-side error, so it does not return an error to the caller. // Raise an alert for it. - klog.ErrorS(err, "Placement is in an unexpected state", "placement", placementRef) + klog.ErrorS(err, "Placement is in an unexpected state", "placement", placementKey) observeSchedulingCycleMetrics(cycleStartTime, true, false) return } // Requeue for later processing. - klog.ErrorS(err, "Failed to run scheduling cycle", "placement", placementRef) + klog.ErrorS(err, "Failed to run scheduling cycle", "placement", placementKey) s.queue.AddRateLimited(placementKey) observeSchedulingCycleMetrics(cycleStartTime, true, true) return @@ -310,7 +309,7 @@ func (s *Scheduler) cleanUpAllBindingsFor(ctx context.Context, placement fleetv1 return err } - // Remove scheduler CRB cleanup finalizer from deleting bindings. + // Remove scheduler binding cleanup finalizer from deleting bindings. // // Note that once a placement has been marked for deletion, it will no longer enter the scheduling cycle, // so any cleanup finalizer has to be removed here. @@ -335,7 +334,7 @@ func (s *Scheduler) cleanUpAllBindingsFor(ctx context.Context, placement fleetv1 } // All bindings have been deleted; remove the scheduler cleanup finalizer from the placement. - // Use SchedulerCRPCleanupFinalizer consistently for all placement types. + // Use SchedulerCleanupFinalizer consistently for all placement types. 
controllerutil.RemoveFinalizer(placement, fleetv1beta1.SchedulerCleanupFinalizer) if err := s.client.Update(ctx, placement); err != nil { klog.ErrorS(err, "Failed to remove scheduler cleanup finalizer from placement", "placement", placementRef) @@ -376,7 +375,7 @@ func (s *Scheduler) lookupLatestPolicySnapshot(ctx context.Context, placement fl case len(policySnapshots) == 0: // There is no latest policy snapshot associated with the placement; it could happen when // * the placement is newly created; or - // * the new policy snapshots is in the middle of being replaced. + // * the new policy snapshot is in the middle of being replaced. // // Either way, it is out of the scheduler's scope to handle such a case; the scheduler will // be triggered again if the situation is corrected. diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index 91f375c2c..9af4ebdaa 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -69,7 +69,7 @@ func TestAddSchedulerCleanUpFinalizer(t *testing.T) { wantFinalizers []string }{ { - name: "cluster-scoped placement should add CRP finalizer", + name: "cluster-scoped placement should add scheduler cleanup finalizer", placement: func() fleetv1beta1.PlacementObj { return &fleetv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ @@ -80,7 +80,7 @@ func TestAddSchedulerCleanUpFinalizer(t *testing.T) { wantFinalizers: []string{fleetv1beta1.SchedulerCleanupFinalizer}, }, { - name: "namespaced placement should also add CRP finalizer", + name: "namespaced placement should also add scheduler cleanup finalizer", placement: func() fleetv1beta1.PlacementObj { return &fleetv1beta1.ResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ @@ -175,7 +175,7 @@ func TestCleanUpAllBindingsFor(t *testing.T) { wantRemainingBindings: []fleetv1beta1.BindingObj{}, }, { - name: "cluster-scoped placement cleanup without bindings but have CRP cleanup finalizer", + name: "cluster-scoped placement cleanup without bindings but have placement cleanup finalizer", placement: func() fleetv1beta1.PlacementObj { return &fleetv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ @@ -328,7 +328,7 @@ func TestCleanUpAllBindingsFor(t *testing.T) { cmpopts.SortSlices(func(b1, b2 fleetv1beta1.BindingObj) bool { return b1.GetName() < b2.GetName() })); diff != "" { - t.Errorf("Remaining bindings diff (+ got, - want): %s", diff) + t.Errorf("Remaining bindings diff (-got, +want): %s", diff) } }) } @@ -469,6 +469,89 @@ func TestLookupLatestPolicySnapshot(t *testing.T) { }, }, }, + { + name: "cluster-scoped placement should not select namespaced policy snapshot with same placement label", + placement: func() fleetv1beta1.PlacementObj { + return &fleetv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + Finalizers: []string{fleetv1beta1.SchedulerCleanupFinalizer}, + }, + } + }, + policySnapshots: []fleetv1beta1.PolicySnapshotObj{ + &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: policySnapshotName, + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: crpName, + fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + }, + }, + }, + &fleetv1beta1.SchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespaced-policy-snapshot", + Namespace: "test-namespace", + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: crpName, + fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + }, + }, + }, + }, + 
wantPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: policySnapshotName, + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: crpName, + fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + }, + }, + }, + }, + { + name: "namespaced placement should not select cluster-scoped policy snapshot with same placement label", + placement: func() fleetv1beta1.PlacementObj { + return &fleetv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rp", + Namespace: "test-namespace", + }, + } + }, + policySnapshots: []fleetv1beta1.PolicySnapshotObj{ + &fleetv1beta1.SchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: policySnapshotName, + Namespace: "test-namespace", + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: "test-rp", + fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + }, + }, + }, + &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-policy-snapshot", + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: "test-rp", + fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + }, + }, + }, + }, + wantPolicySnapshot: &fleetv1beta1.SchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: policySnapshotName, + Namespace: "test-namespace", + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: "test-rp", + fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + }, + }, + }, + }, } for _, tc := range testCases { diff --git a/pkg/scheduler/watchers/clusterresourcebinding/controller_integration_test.go b/pkg/scheduler/watchers/binding/controller_integration_test.go similarity index 99% rename from pkg/scheduler/watchers/clusterresourcebinding/controller_integration_test.go rename to pkg/scheduler/watchers/binding/controller_integration_test.go index f6b499355..3703b972e 100644 --- a/pkg/scheduler/watchers/clusterresourcebinding/controller_integration_test.go +++ b/pkg/scheduler/watchers/binding/controller_integration_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourcebinding +package binding import ( "fmt" diff --git a/pkg/scheduler/watchers/clusterresourcebinding/suite_test.go b/pkg/scheduler/watchers/binding/suite_test.go similarity index 97% rename from pkg/scheduler/watchers/clusterresourcebinding/suite_test.go rename to pkg/scheduler/watchers/binding/suite_test.go index f64610c3e..157d5c9ea 100644 --- a/pkg/scheduler/watchers/clusterresourcebinding/suite_test.go +++ b/pkg/scheduler/watchers/binding/suite_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package clusterresourcebinding +package binding import ( "context" @@ -49,7 +49,7 @@ var ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Scheduler Source Cluster Resource Binding Controller Suite") + RunSpecs(t, "Scheduler Source Binding Controller Suite") } var _ = BeforeSuite(func() { diff --git a/pkg/scheduler/watchers/clusterresourcebinding/watcher.go b/pkg/scheduler/watchers/binding/watcher.go similarity index 99% rename from pkg/scheduler/watchers/clusterresourcebinding/watcher.go rename to pkg/scheduler/watchers/binding/watcher.go index 9127b971b..ae93a605d 100644 --- a/pkg/scheduler/watchers/clusterresourcebinding/watcher.go +++ b/pkg/scheduler/watchers/binding/watcher.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourcebinding +package binding import ( "context" diff --git a/pkg/scheduler/watchers/membercluster/suite_test.go b/pkg/scheduler/watchers/membercluster/suite_test.go index 3bdd19568..4ae107f5e 100644 --- a/pkg/scheduler/watchers/membercluster/suite_test.go +++ b/pkg/scheduler/watchers/membercluster/suite_test.go @@ -51,7 +51,7 @@ var ( ) var ( - defaultResourceSelectors = []placementv1beta1.ClusterResourceSelector{ + defaultResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ { Group: "core", Kind: "Namespace", @@ -190,6 +190,7 @@ var _ = BeforeSuite(func() { Client: hubClient, SchedulerWorkQueue: schedulerWorkQueue, ClusterEligibilityChecker: clustereligibilitychecker.New(), + EnableResourcePlacement: true, } err = reconciler.SetupWithManager(ctrlMgr) Expect(err).ToNot(HaveOccurred(), "Failed to set up controller with controller manager") diff --git a/pkg/scheduler/watchers/membercluster/utils.go b/pkg/scheduler/watchers/membercluster/utils.go index a95c330f2..7e682fbb5 100644 --- a/pkg/scheduler/watchers/membercluster/utils.go +++ b/pkg/scheduler/watchers/membercluster/utils.go @@ -18,56 +18,82 @@ package membercluster import ( "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/pkg/utils/condition" ) -// isCRPFullyScheduled returns whether a CRP is fully scheduled. -func isCRPFullyScheduled(crp *fleetv1beta1.ClusterResourcePlacement) bool { - // Check the scheduled condition on the CRP to determine if it is fully scheduled. +// isPlacementFullyScheduled returns whether a placement is fully scheduled. +func isPlacementFullyScheduled(placement fleetv1beta1.PlacementObj) bool { + // Check the scheduled condition on the placement to determine if it is fully scheduled. // // Here the controller checks the status rather than listing all the bindings and verify - // if the count matches with the CRP spec as the former approach has less overhead and + // if the count matches with the placement spec as the former approach has less overhead and // (more importantly) avoids leaking scheduler-specific logic into this controller. The - // trade-off is that the controller may consider some fully scheduled CRPs as not fully - // scheduled, if the CRP-side controller(s) cannot update the CRP status in a timely + // trade-off is that the controller may consider some fully scheduled placements as not fully + // scheduled, if the placement-side controller(s) cannot update the placement status in a timely // manner. 
- scheduledCondition := meta.FindStatusCondition(crp.Status.Conditions, string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType)) - // Check if the CRP is fully scheduled, or its scheduled condition is out of date. - return condition.IsConditionStatusTrue(scheduledCondition, crp.Generation) + var scheduledCondition *metav1.Condition + if placement.GetNamespace() == "" { + // Find CRP scheduled condition. + scheduledCondition = meta.FindStatusCondition(placement.GetPlacementStatus().Conditions, string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType)) + } else { + // Find RP scheduled condition. + scheduledCondition = meta.FindStatusCondition(placement.GetPlacementStatus().Conditions, string(fleetv1beta1.ResourcePlacementScheduledConditionType)) + } + // Check if the placement is fully scheduled, or its scheduled condition is out of date. + return condition.IsConditionStatusTrue(scheduledCondition, placement.GetGeneration()) } -// classifyCRPs returns a list of CRPs that are affected by cluster side changes in case 1a) and +// classifyPlacements returns a list of placements that are affected by cluster side changes in case 1a) and // 1b). -func classifyCRPs(crps []fleetv1beta1.ClusterResourcePlacement) (toProcess []fleetv1beta1.ClusterResourcePlacement) { +func classifyPlacements(placements []fleetv1beta1.PlacementObj) (toProcess []fleetv1beta1.PlacementObj) { // Pre-allocate array. - toProcess = make([]fleetv1beta1.ClusterResourcePlacement, 0, len(crps)) + toProcess = make([]fleetv1beta1.PlacementObj, 0, len(placements)) - for idx := range crps { - crp := crps[idx] + for idx := range placements { + placement := placements[idx] switch { - case crp.Spec.Policy == nil: - // CRPs with no placement policy specified are considered to be of the PickAll placement + case placement.GetPlacementSpec().Policy == nil: + // Placements with no placement policy specified are considered to be of the PickAll placement // type and are affected by cluster side changes in case 1a) and 1b). - toProcess = append(toProcess, crp) - case crp.Spec.Policy.PlacementType == fleetv1beta1.PickFixedPlacementType: - if !isCRPFullyScheduled(&crp) { - // Any CRP with an non-empty list of target cluster names can be affected by cluster + toProcess = append(toProcess, placement) + case placement.GetPlacementSpec().Policy.PlacementType == fleetv1beta1.PickFixedPlacementType: + if !isPlacementFullyScheduled(placement) { + // Any Placement with an non-empty list of target cluster names can be affected by cluster // side changes in case 1b), if it is not yet fully scheduled. - toProcess = append(toProcess, crp) + toProcess = append(toProcess, placement) } - case crp.Spec.Policy.PlacementType == fleetv1beta1.PickAllPlacementType: - // CRPs of the PickAll placement type are affected by cluster side changes in case 1a) + case placement.GetPlacementSpec().Policy.PlacementType == fleetv1beta1.PickAllPlacementType: + // Placements of the PickAll placement type are affected by cluster side changes in case 1a) // and 1b). - toProcess = append(toProcess, crp) - case !isCRPFullyScheduled(&crp): - // CRPs of the PickN placement type, which have not been fully scheduled, are affected + toProcess = append(toProcess, placement) + case !isPlacementFullyScheduled(placement): + // Placements of the PickN placement type, which have not been fully scheduled, are affected // by cluster side changes in case 1a) and 1b) listed in the Reconcile func. 
- toProcess = append(toProcess, crp) + toProcess = append(toProcess, placement) } } return toProcess } + +// convertCRPArrayToPlacementObjs converts a slice of ClusterResourcePlacement items to PlacementObj array. +func convertCRPArrayToPlacementObjs(crps []fleetv1beta1.ClusterResourcePlacement) []fleetv1beta1.PlacementObj { + placements := make([]fleetv1beta1.PlacementObj, len(crps)) + for i := range crps { + placements[i] = &crps[i] + } + return placements +} + +// convertRPArrayToPlacementObjs converts a slice of ResourcePlacement items to PlacementObj array. +func convertRPArrayToPlacementObjs(rps []fleetv1beta1.ResourcePlacement) []fleetv1beta1.PlacementObj { + placements := make([]fleetv1beta1.PlacementObj, len(rps)) + for i := range rps { + placements[i] = &rps[i] + } + return placements +} diff --git a/pkg/scheduler/watchers/membercluster/utils_test.go b/pkg/scheduler/watchers/membercluster/utils_test.go index 29d98ddda..0734ecb6e 100644 --- a/pkg/scheduler/watchers/membercluster/utils_test.go +++ b/pkg/scheduler/watchers/membercluster/utils_test.go @@ -26,31 +26,39 @@ import ( ) const ( - crpName = "test-crp" - clusterName1 = "bravelion" - clusterName2 = "jumpingcat" - crpName1 = "crp-1" - crpName2 = "crp-2" - crpName3 = "crp-3" - crpName4 = "crp-4" - crpName5 = "crp-5" - crpName6 = "crp-6" + crpName = "test-crp" + rpName = "test-rp" + clusterName1 = "bravelion" + clusterName2 = "jumpingcat" + crpName1 = "crp-1" + crpName2 = "crp-2" + crpName3 = "crp-3" + crpName4 = "crp-4" + crpName5 = "crp-5" + crpName6 = "crp-6" + rpName1 = "rp-1" + rpName2 = "rp-2" + rpName3 = "rp-3" + rpName4 = "rp-4" + rpName5 = "rp-5" + rpName6 = "rp-6" + testNamespace = "test-namespace" ) var ( numOfClusters = int32(10) ) -// TestIsCRPFullyScheduled tests the isCRPFullyScheduled function. -func TestIsPickNCRPFullyScheduled(t *testing.T) { +// TestIsPlacementFullyScheduled tests the isPlacementFullyScheduled function. 
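A brief note on the two convert helpers added above: they index into the slice (&crps[i]) rather than taking the address of a range variable. An annotated restatement of the same pattern, with the pitfall called out (illustrative only):

import fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"

// toPlacementObjs mirrors convertCRPArrayToPlacementObjs for illustration.
func toPlacementObjs(crps []fleetv1beta1.ClusterResourcePlacement) []fleetv1beta1.PlacementObj {
	placements := make([]fleetv1beta1.PlacementObj, len(crps))
	for i := range crps {
		// Take the address of the actual slice element, not of a loop copy.
		placements[i] = &crps[i]
	}
	return placements
}

// Anti-pattern on Go versions before 1.22:
//   for _, crp := range crps { placements = append(placements, &crp) }
// The loop variable is reused across iterations there, so every appended
// pointer would refer to the same value; even on newer Go it would point
// at a copy rather than the slice element.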
+func TestIsPlacementFullyScheduled(t *testing.T) { testCases := []struct { - name string - crp *placementv1beta1.ClusterResourcePlacement - want bool + name string + placement placementv1beta1.PlacementObj + want bool }{ { - name: "no scheduled condition", - crp: &placementv1beta1.ClusterResourcePlacement{ + name: "no scheduled condition in CRP", + placement: &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -66,8 +74,8 @@ func TestIsPickNCRPFullyScheduled(t *testing.T) { }, }, { - name: "scheduled condition is false", - crp: &placementv1beta1.ClusterResourcePlacement{ + name: "scheduled condition is false in CRP", + placement: &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -88,8 +96,8 @@ func TestIsPickNCRPFullyScheduled(t *testing.T) { }, }, { - name: "scheduled condition is true, observed generation is out of date", - crp: &placementv1beta1.ClusterResourcePlacement{ + name: "scheduled condition is true, observed generation is out of date in CRP", + placement: &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, Generation: 1, @@ -112,8 +120,8 @@ func TestIsPickNCRPFullyScheduled(t *testing.T) { }, }, { - name: "fully scheduled", - crp: &placementv1beta1.ClusterResourcePlacement{ + name: "resourcePlacementScheduled condition is true in CRP (should not happen)", + placement: &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, Generation: 1, @@ -124,6 +132,122 @@ func TestIsPickNCRPFullyScheduled(t *testing.T) { NumberOfClusters: &numOfClusters, }, }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + }, + { + name: "fully scheduled CRP", + placement: &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + want: true, + }, + { + name: "no scheduled condition in RP", + placement: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{}, + }, + }, + }, + { + name: "scheduled condition is false in RP", + placement: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionFalse, + }, + }, + }, + }, + }, + { + name: "scheduled 
condition is true, observed generation is out of date in RP", + placement: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 0, + }, + }, + }, + }, + }, + { + name: "clusterResourcePlacementScheduled condition is true in RP (should not happen)", + placement: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, Status: placementv1beta1.PlacementStatus{ Conditions: []metav1.Condition{ { @@ -134,38 +258,63 @@ func TestIsPickNCRPFullyScheduled(t *testing.T) { }, }, }, + }, + { + name: "fully scheduled RP", + placement: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, want: true, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - scheduled := isCRPFullyScheduled(tc.crp) + scheduled := isPlacementFullyScheduled(tc.placement) if scheduled != tc.want { - t.Errorf("isPickNCRPFullyScheduled() = %v, want %v", scheduled, tc.want) + t.Errorf("isPlacementFullyScheduled() = %v, want %v", scheduled, tc.want) } }) } } -// TestClassifyCRPs tests the classifyCRPs function. -func TestClassifyCRPs(t *testing.T) { +// TestClassifyPlacements tests the classifyPlacements function. 
+func TestClassifyPlacements(t *testing.T) { testCases := []struct { - name string - crps []placementv1beta1.ClusterResourcePlacement - want []placementv1beta1.ClusterResourcePlacement + name string + placements []placementv1beta1.PlacementObj + want []placementv1beta1.PlacementObj }{ { name: "single crp, no policy", - crps: []placementv1beta1.ClusterResourcePlacement{ - { + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, }, }, - want: []placementv1beta1.ClusterResourcePlacement{ - { + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -174,8 +323,8 @@ func TestClassifyCRPs(t *testing.T) { }, { name: "single crp, fixed list of clusters, not fully scheduled", - crps: []placementv1beta1.ClusterResourcePlacement{ - { + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -186,8 +335,8 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - want: []placementv1beta1.ClusterResourcePlacement{ - { + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -201,8 +350,8 @@ func TestClassifyCRPs(t *testing.T) { }, { name: "single crp, fixed list of clusters, fully scheduled", - crps: []placementv1beta1.ClusterResourcePlacement{ - { + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, Generation: 1, @@ -223,12 +372,12 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - want: []placementv1beta1.ClusterResourcePlacement{}, + want: []placementv1beta1.PlacementObj{}, }, { name: "single crp, pick all placement type", - crps: []placementv1beta1.ClusterResourcePlacement{ - { + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -239,8 +388,8 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - want: []placementv1beta1.ClusterResourcePlacement{ - { + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -254,8 +403,8 @@ func TestClassifyCRPs(t *testing.T) { }, { name: "single crp, pick N placement type, not fully scheduled", - crps: []placementv1beta1.ClusterResourcePlacement{ - { + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -267,8 +416,8 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - want: []placementv1beta1.ClusterResourcePlacement{ - { + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -283,8 +432,8 @@ func TestClassifyCRPs(t *testing.T) { }, { name: "single crp, pick N placement type, fully scheduled", - crps: []placementv1beta1.ClusterResourcePlacement{ - { + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, Generation: 1, @@ -306,12 +455,12 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - want: []placementv1beta1.ClusterResourcePlacement{}, + want: []placementv1beta1.PlacementObj{}, }, { - name: "mixed", - crps: []placementv1beta1.ClusterResourcePlacement{ - { + name: "mixed crps", + placements: []placementv1beta1.PlacementObj{ + 
&placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName2, }, @@ -321,12 +470,12 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - { + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName1, }, }, - { + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName5, Generation: 1, @@ -347,7 +496,7 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - { + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName4, }, @@ -357,7 +506,7 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - { + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName3, }, @@ -369,8 +518,8 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - want: []placementv1beta1.ClusterResourcePlacement{ - { + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName2, }, @@ -380,12 +529,12 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - { + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName1, }, }, - { + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName4, }, @@ -395,7 +544,7 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - { + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName3, }, @@ -408,13 +557,547 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - toProcess := classifyCRPs(tc.crps) - if diff := cmp.Diff(toProcess, tc.want); diff != "" { - t.Errorf("classifyCRPs() toProcess (-got, +want): %s", diff) + { + name: "single rp, no policy", + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + }, + }, + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + }, + }, + }, + { + name: "single rp, fixed list of clusters, not fully scheduled", + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1}, + }, + }, + }, + }, + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1}, + }, + }, + }, + }, + }, + { + name: "single rp, fixed list of clusters, fully scheduled", + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1}, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + }, + want: []placementv1beta1.PlacementObj{}, + }, + { + name: "single rp, pick all placement type, fully scheduled", + placements: 
[]placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + }, + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + }, + }, + { + name: "single rp, pick N placement type, not fully scheduled", + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + }, + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + }, + }, + { + name: "single rp, pick N placement type, fully scheduled", + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + }, + want: []placementv1beta1.PlacementObj{}, + }, + { + name: "mixed rps", + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName2, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1}, + }, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName1, + Namespace: testNamespace, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName5, + Namespace: testNamespace, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: 
string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName4, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName3, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + }, + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName2, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1}, + }, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName1, + Namespace: testNamespace, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName4, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName3, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + toProcess := classifyPlacements(tc.placements) + if diff := cmp.Diff(toProcess, tc.want); diff != "" { + t.Errorf("classifyPlacements() toProcess (-got, +want): %s", diff) + } + }) + } +} + +// TestConvertCRPArrayToPlacementObjs tests the convertCRPArrayToPlacementObjs function. 
+func TestConvertCRPArrayToPlacementObjs(t *testing.T) { + testCases := []struct { + name string + crps []placementv1beta1.ClusterResourcePlacement + wantPlacements []placementv1beta1.PlacementObj + }{ + { + name: "empty array", + crps: []placementv1beta1.ClusterResourcePlacement{}, + wantPlacements: []placementv1beta1.PlacementObj{}, + }, + { + name: "single crp", + crps: []placementv1beta1.ClusterResourcePlacement{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + }, + wantPlacements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + }, + }, + { + name: "multiple crps", + crps: []placementv1beta1.ClusterResourcePlacement{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName2, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName3, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1, clusterName2}, + }, + }, + }, + }, + wantPlacements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName2, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName3, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1, clusterName2}, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + placements := convertCRPArrayToPlacementObjs(tc.crps) + + if diff := cmp.Diff(placements, tc.wantPlacements); diff != "" { + t.Errorf("ConvertCRPArrayToPlacementObjs() diff (-got +want):\n%s", diff) + } + }) + } +} + +// TestConvertRPArrayToPlacementObjs tests the convertRPArrayToPlacementObjs function. 
+func TestConvertRPArrayToPlacementObjs(t *testing.T) { + testCases := []struct { + name string + rps []placementv1beta1.ResourcePlacement + wantPlacements []placementv1beta1.PlacementObj + }{ + { + name: "empty array", + rps: []placementv1beta1.ResourcePlacement{}, + wantPlacements: []placementv1beta1.PlacementObj{}, + }, + { + name: "single rp", + rps: []placementv1beta1.ResourcePlacement{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + }, + wantPlacements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + }, + }, + { + name: "multiple rps", + rps: []placementv1beta1.ResourcePlacement{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName2, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName3, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1, clusterName2}, + }, + }, + }, + }, + wantPlacements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName2, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName3, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1, clusterName2}, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + placements := convertRPArrayToPlacementObjs(tc.rps) + + if diff := cmp.Diff(placements, tc.wantPlacements); diff != "" { + t.Errorf("ConvertRPArrayToPlacementObjs() diff (-got +want):\n%s", diff) } }) } diff --git a/pkg/scheduler/watchers/membercluster/watcher.go b/pkg/scheduler/watchers/membercluster/watcher.go index 3b997bb21..2ceaff37a 100644 --- a/pkg/scheduler/watchers/membercluster/watcher.go +++ b/pkg/scheduler/watchers/membercluster/watcher.go @@ -49,6 +49,9 @@ type Reconciler struct { // clusterEligibilityCheck helps check if a cluster is eligible for resource replacement. ClusterEligibilityChecker *clustereligibilitychecker.ClusterEligibilityChecker + + // enableResourcePlacement indicates whether the resource placement controller is enabled. + EnableResourcePlacement bool } // Reconcile reconciles a member cluster. 
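The convertCRPArrayToPlacementObjs and convertRPArrayToPlacementObjs helpers added earlier in this patch (and exercised by the tests above) are structurally identical, and the Reconcile hunk below simply appends their results. The following sketch is illustrative only and not part of the patch: the name convertToPlacementObjs is hypothetical, the import path is omitted, and it assumes, as the patch's own element-address loops imply, that both *ClusterResourcePlacement and *ResourcePlacement satisfy the fleetv1beta1.PlacementObj interface.

// Illustrative sketch only (not part of this patch). fleetv1beta1 aliases the
// same placement v1beta1 API package used in utils.go; import path omitted.
// convertToPlacementObjs is a hypothetical generic helper (Go 1.18+) that
// collapses convertCRPArrayToPlacementObjs and convertRPArrayToPlacementObjs.
func convertToPlacementObjs[T any, PT interface {
	*T
	fleetv1beta1.PlacementObj
}](items []T) []fleetv1beta1.PlacementObj {
	placements := make([]fleetv1beta1.PlacementObj, len(items))
	for i := range items {
		// Take the address of the slice element, mirroring the non-generic helpers.
		placements[i] = PT(&items[i])
	}
	return placements
}

// Hypothetical call sites, mirroring the append in the Reconcile hunk below:
//   placements := append(
//       convertToPlacementObjs(crpList.Items),
//       convertToPlacementObjs(rpList.Items)...)

Whether the extra type parameter is worth it over two small explicit helpers is a matter of taste; the patch's explicit versions are arguably clearer at the call sites.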
@@ -132,35 +135,43 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // Do nothing if there is no error returned. } - // List all CRPs. + // List all placements. // - // Note that this controller reads CRPs from the same cache as the scheduler. + // Note that this controller reads placements from the same cache as the scheduler. crpList := &placementv1beta1.ClusterResourcePlacementList{} if err := r.Client.List(ctx, crpList); err != nil { klog.ErrorS(err, "Failed to list CRPs", "memberCluster", memberClusterRef) return ctrl.Result{}, controller.NewAPIServerError(true, err) } + rpList := &placementv1beta1.ResourcePlacementList{} + if r.EnableResourcePlacement { + // Empty namespace provided to list RPs across all namespaces. + if err := r.Client.List(ctx, rpList, client.InNamespace("")); err != nil { + klog.ErrorS(err, "Failed to list RPs", "memberCluster", memberClusterRef) + return ctrl.Result{}, controller.NewAPIServerError(true, err) + } + } - crps := crpList.Items + placements := append(convertCRPArrayToPlacementObjs(crpList.Items), convertRPArrayToPlacementObjs(rpList.Items)...) if !isMemberClusterMissing && memberCluster.GetDeletionTimestamp().IsZero() { // If the member cluster is set to the left state, the scheduler needs to process all - // CRPs (case 2c)); otherwise, only CRPs of the PickAll type + CRPs of the PickN type, + // placements (case 2c)); otherwise, only placements of the PickAll type + placements of the PickN type, // which have not been fully scheduled, need to be processed (case 1a) and 1b)). - crps = classifyCRPs(crpList.Items) + placements = classifyPlacements(placements) } - // Enqueue the CRPs. + // Enqueue the placements. // - // Note that all the CRPs in the system are enqueued; technically speaking, for situation - // 1a), 1b) and 1c), PickN CRPs that have been fully scheduled needs no further processing, however, + // Note that all the placements in the system are enqueued; technically speaking, for situations + // 1a), 1b) and 1c), PickN placements that have been fully scheduled need no further processing, however, // for simplicity reasons, this controller will not distinguish between the cases. - for idx := range crps { - crp := &crps[idx] + for idx := range placements { + placement := placements[idx] klog.V(2).InfoS( - "Enqueueing CRP for scheduler processing", + "Enqueueing placement for scheduler processing", "memberCluster", memberClusterRef, - "clusterResourcePlacement", klog.KObj(crp)) - r.SchedulerWorkQueue.Add(queue.PlacementKey(crp.Name)) + "placement", klog.KObj(placement)) + r.SchedulerWorkQueue.Add(controller.GetObjectKeyFromObj(placement)) } // The reconciliation loop completes. diff --git a/pkg/scheduler/watchers/clusterresourceplacement/controller_integration_test.go b/pkg/scheduler/watchers/placement/controller_integration_test.go similarity index 99% rename from pkg/scheduler/watchers/clusterresourceplacement/controller_integration_test.go rename to pkg/scheduler/watchers/placement/controller_integration_test.go index c78681f49..361af0333 100644 --- a/pkg/scheduler/watchers/clusterresourceplacement/controller_integration_test.go +++ b/pkg/scheduler/watchers/placement/controller_integration_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License.
*/ -package clusterresourceplacement +package placement import ( "fmt" @@ -48,7 +48,7 @@ const ( ) var ( - resourceSelectors = []fleetv1beta1.ClusterResourceSelector{ + resourceSelectors = []fleetv1beta1.ResourceSelectorTerm{ { Group: "core", Kind: "Namespace", diff --git a/pkg/scheduler/watchers/clusterresourceplacement/suite_test.go b/pkg/scheduler/watchers/placement/suite_test.go similarity index 97% rename from pkg/scheduler/watchers/clusterresourceplacement/suite_test.go rename to pkg/scheduler/watchers/placement/suite_test.go index b24f296df..64e039a88 100644 --- a/pkg/scheduler/watchers/clusterresourceplacement/suite_test.go +++ b/pkg/scheduler/watchers/placement/suite_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( "context" @@ -49,7 +49,7 @@ var ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Scheduler Source Cluster Resource Placement Controller Suite") + RunSpecs(t, "Scheduler Source Placement Controller Suite") } var _ = BeforeSuite(func() { diff --git a/pkg/scheduler/watchers/clusterresourceplacement/watcher.go b/pkg/scheduler/watchers/placement/watcher.go similarity index 97% rename from pkg/scheduler/watchers/clusterresourceplacement/watcher.go rename to pkg/scheduler/watchers/placement/watcher.go index 8e4ef71b2..a9b8d52ce 100644 --- a/pkg/scheduler/watchers/clusterresourceplacement/watcher.go +++ b/pkg/scheduler/watchers/placement/watcher.go @@ -14,9 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package clusterresourceplacement features a controller that enqueues placement objects for the +// Package placement features a controller that enqueues placement objects for the // scheduler to process where the placement object is marked for deletion. -package clusterresourceplacement +package placement import ( "context" diff --git a/pkg/scheduler/watchers/clusterschedulingpolicysnapshot/controller_integration_test.go b/pkg/scheduler/watchers/schedulingpolicysnapshot/controller_integration_test.go similarity index 99% rename from pkg/scheduler/watchers/clusterschedulingpolicysnapshot/controller_integration_test.go rename to pkg/scheduler/watchers/schedulingpolicysnapshot/controller_integration_test.go index 07d0679d0..05cd6c9a1 100644 --- a/pkg/scheduler/watchers/clusterschedulingpolicysnapshot/controller_integration_test.go +++ b/pkg/scheduler/watchers/schedulingpolicysnapshot/controller_integration_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterschedulingpolicysnapshot +package schedulingpolicysnapshot import ( "fmt" diff --git a/pkg/scheduler/watchers/clusterschedulingpolicysnapshot/suite_test.go b/pkg/scheduler/watchers/schedulingpolicysnapshot/suite_test.go similarity index 96% rename from pkg/scheduler/watchers/clusterschedulingpolicysnapshot/suite_test.go rename to pkg/scheduler/watchers/schedulingpolicysnapshot/suite_test.go index 08c6f2096..d7f88ae4c 100644 --- a/pkg/scheduler/watchers/clusterschedulingpolicysnapshot/suite_test.go +++ b/pkg/scheduler/watchers/schedulingpolicysnapshot/suite_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package clusterschedulingpolicysnapshot +package schedulingpolicysnapshot import ( "context" @@ -49,7 +49,7 @@ var ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Scheduler Source Cluster Scheduling Policy Snapshot Controller Suite") + RunSpecs(t, "Scheduler Source Scheduling Policy Snapshot Controller Suite") } var _ = BeforeSuite(func() { diff --git a/pkg/scheduler/watchers/clusterschedulingpolicysnapshot/watcher.go b/pkg/scheduler/watchers/schedulingpolicysnapshot/watcher.go similarity index 98% rename from pkg/scheduler/watchers/clusterschedulingpolicysnapshot/watcher.go rename to pkg/scheduler/watchers/schedulingpolicysnapshot/watcher.go index c2e2c6670..70a01eac4 100644 --- a/pkg/scheduler/watchers/clusterschedulingpolicysnapshot/watcher.go +++ b/pkg/scheduler/watchers/schedulingpolicysnapshot/watcher.go @@ -14,9 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package clusterschedulingpolicysnapshot features a controller that enqueues placement objects for the +// Package schedulingpolicysnapshot features a controller that enqueues placement objects for the // scheduler to process where there is a change in their scheduling policy snapshots. -package clusterschedulingpolicysnapshot +package schedulingpolicysnapshot import ( "context" diff --git a/pkg/utils/controller/metrics/metrics.go b/pkg/utils/controller/metrics/metrics.go index 853df00ab..d68ab3f5d 100644 --- a/pkg/utils/controller/metrics/metrics.go +++ b/pkg/utils/controller/metrics/metrics.go @@ -60,26 +60,6 @@ var ( Name: "fleet_workload_active_workers", Help: "Number of currently used workers per controller", }, []string{"controller"}) - - // FleetPlacementStatusLastTimeStampSeconds is a prometheus metric which keeps track of the last placement status. - FleetPlacementStatusLastTimeStampSeconds = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "fleet_workload_placement_status_last_timestamp_seconds", - Help: "Timestamp in seconds of the last current placement status condition of crp.", - }, []string{"name", "generation", "conditionType", "status", "reason"}) - - // FleetEvictionStatus is prometheus metrics which holds the - // status of eviction completion. - FleetEvictionStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "fleet_workload_eviction_complete", - Help: "Eviction complete status ", - }, []string{"name", "isCompleted", "isValid"}) - - // FleetUpdateRunStatusLastTimestampSeconds is a prometheus metric which holds the - // last update timestamp of update run status in seconds. - FleetUpdateRunStatusLastTimestampSeconds = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "fleet_workload_update_run_status_last_timestamp_seconds", - Help: "Last update timestamp of update run status in seconds", - }, []string{"name", "generation", "condition", "status", "reason"}) ) func init() { @@ -89,8 +69,5 @@ func init() { FleetReconcileTime, FleetWorkerCount, FleetActiveWorkers, - FleetPlacementStatusLastTimeStampSeconds, - FleetEvictionStatus, - FleetUpdateRunStatusLastTimestampSeconds, ) } diff --git a/pkg/utils/controller/placement_resolver.go b/pkg/utils/controller/placement_resolver.go index e37583771..1c6fb60c7 100644 --- a/pkg/utils/controller/placement_resolver.go +++ b/pkg/utils/controller/placement_resolver.go @@ -96,7 +96,11 @@ func GetObjectKeyFromNamespaceName(namespace, name string) string { // ExtractNamespaceNameFromKey resolves a PlacementKey to a (namespace, name) tuple of the placement object. 
func ExtractNamespaceNameFromKey(key queue.PlacementKey) (string, string, error) { - keyStr := string(key) + return ExtractNamespaceNameFromKeyStr(string(key)) +} + +// ExtractNamespaceNameFromKeyStr resolves a PlacementKey string to a (namespace, name) tuple of the placement object. +func ExtractNamespaceNameFromKeyStr(keyStr string) (string, string, error) { // Check if the key contains a namespace separator if strings.Contains(keyStr, namespaceSeparator) { // This is a namespaced ResourcePlacement diff --git a/pkg/utils/overrider/overrider_test.go b/pkg/utils/overrider/overrider_test.go index d2c4f89da..10d8ca80b 100644 --- a/pkg/utils/overrider/overrider_test.go +++ b/pkg/utils/overrider/overrider_test.go @@ -146,7 +146,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -213,7 +213,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -258,7 +258,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -322,7 +322,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -425,7 +425,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -445,7 +445,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -517,7 +517,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -651,7 +651,7 @@ func 
TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { Placement: &placementv1beta1.PlacementRef{ Name: crpName, }, - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -671,7 +671,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -746,7 +746,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -844,7 +844,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -864,7 +864,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -930,7 +930,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -950,7 +950,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1075,7 +1075,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -1098,7 +1098,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { Placement: &placementv1beta1.PlacementRef{ Name: "other-placement", }, - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", diff --git a/pkg/utils/validator/clusterresourceoverride.go b/pkg/utils/validator/clusterresourceoverride.go index 
f3f3921d0..f4cec39af 100644 --- a/pkg/utils/validator/clusterresourceoverride.go +++ b/pkg/utils/validator/clusterresourceoverride.go @@ -51,7 +51,7 @@ func ValidateClusterResourceOverride(cro placementv1beta1.ClusterResourceOverrid // validateClusterResourceSelectors checks if override is selecting resource by name. func validateClusterResourceSelectors(cro placementv1beta1.ClusterResourceOverride) error { - selectorMap := make(map[placementv1beta1.ClusterResourceSelector]bool) + selectorMap := make(map[placementv1beta1.ResourceSelectorTerm]bool) allErr := make([]error, 0) for _, selector := range cro.Spec.ClusterResourceSelectors { // Check if the resource is not being selected by label selector @@ -79,7 +79,7 @@ func validateClusterResourceOverrideResourceLimit(cro placementv1beta1.ClusterRe if croList == nil || len(croList.Items) == 0 { return nil } - overrideMap := make(map[placementv1beta1.ClusterResourceSelector]string) + overrideMap := make(map[placementv1beta1.ResourceSelectorTerm]string) // Add overrides and its selectors to the map for _, override := range croList.Items { selectors := override.Spec.ClusterResourceSelectors diff --git a/pkg/utils/validator/clusterresourceoverride_test.go b/pkg/utils/validator/clusterresourceoverride_test.go index 8278d66aa..e9e97be9a 100644 --- a/pkg/utils/validator/clusterresourceoverride_test.go +++ b/pkg/utils/validator/clusterresourceoverride_test.go @@ -21,7 +21,7 @@ func TestValidateClusterResourceSelectors(t *testing.T) { "resource selected by label selector": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -40,7 +40,7 @@ func TestValidateClusterResourceSelectors(t *testing.T) { "resource selected by empty name": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -55,7 +55,7 @@ func TestValidateClusterResourceSelectors(t *testing.T) { "duplicate resources selected": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -72,12 +72,12 @@ func TestValidateClusterResourceSelectors(t *testing.T) { }, }, wantErrMsg: fmt.Errorf("resource selector %+v already exists, and must be unique", - placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "Kind", Name: "example"}), + placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "Kind", Name: "example"}), }, "resource selected by name": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -92,7 +92,7 @@ func TestValidateClusterResourceSelectors(t *testing.T) { "multiple invalid resources selected": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: 
[]placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -124,9 +124,9 @@ func TestValidateClusterResourceSelectors(t *testing.T) { }, }, }, - wantErrMsg: apierrors.NewAggregate([]error{fmt.Errorf("label selector is not supported for resource selection %+v", placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "Kind", LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"key": "value"}}}), - fmt.Errorf("resource name is required for resource selection %+v", placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "Kind", Name: ""}), - fmt.Errorf("resource selector %+v already exists, and must be unique", placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "Kind", Name: "example"})}), + wantErrMsg: apierrors.NewAggregate([]error{fmt.Errorf("label selector is not supported for resource selection %+v", placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "Kind", LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"key": "value"}}}), + fmt.Errorf("resource name is required for resource selection %+v", placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "Kind", Name: ""}), + fmt.Errorf("resource selector %+v already exists, and must be unique", placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "Kind", Name: "example"})}), }, } for testName, tt := range tests { @@ -155,7 +155,7 @@ func TestValidateClusterResourceOverrideResourceLimit(t *testing.T) { Name: "override-1", }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -174,7 +174,7 @@ func TestValidateClusterResourceOverrideResourceLimit(t *testing.T) { Name: "override-2", }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -192,7 +192,7 @@ func TestValidateClusterResourceOverrideResourceLimit(t *testing.T) { }, overrideCount: 1, wantErrMsg: fmt.Errorf("invalid resource selector %+v: the resource has been selected by both %v and %v, which is not supported", - placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "kind", Name: "example-0"}, "override-2", "override-0"), + placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "kind", Name: "example-0"}, "override-2", "override-0"), }, "one override, which exists": { cro: placementv1beta1.ClusterResourceOverride{ @@ -200,7 +200,7 @@ func TestValidateClusterResourceOverrideResourceLimit(t *testing.T) { Name: "override-1", }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -219,7 +219,7 @@ func TestValidateClusterResourceOverrideResourceLimit(t *testing.T) { Name: "override-2", }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ 
-242,7 +242,7 @@ func TestValidateClusterResourceOverrideResourceLimit(t *testing.T) { Name: fmt.Sprintf("override-%d", i), }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -311,7 +311,7 @@ func TestValidateClusterResourceOverride(t *testing.T) { "valid cluster resource override": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -328,7 +328,7 @@ func TestValidateClusterResourceOverride(t *testing.T) { "invalid cluster resource override - fail validateResourceSelector": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -357,9 +357,9 @@ func TestValidateClusterResourceOverride(t *testing.T) { }, croList: &placementv1beta1.ClusterResourceOverrideList{}, wantErrMsg: apierrors.NewAggregate([]error{fmt.Errorf("resource selector %+v already exists, and must be unique", - placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "kind", Name: "example"}), + placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "kind", Name: "example"}), fmt.Errorf("label selector is not supported for resource selection %+v", - placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "kind", + placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "kind", LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"key": "value"}}})}), }, "invalid cluster resource override - fail ValidateClusterResourceOverrideResourceLimit": { @@ -368,7 +368,7 @@ func TestValidateClusterResourceOverride(t *testing.T) { Name: "override-1", }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -384,7 +384,7 @@ func TestValidateClusterResourceOverride(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{Name: "override-0"}, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -398,12 +398,12 @@ func TestValidateClusterResourceOverride(t *testing.T) { }, }, wantErrMsg: fmt.Errorf("invalid resource selector %+v: the resource has been selected by both %v and %v, which is not supported", - placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "kind", Name: "duplicate-example"}, "override-1", "override-0"), + placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "kind", Name: "duplicate-example"}, "override-1", "override-0"), }, "valid cluster resource override - empty croList": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ 
{ Group: "rbac.authorization.k8s.io", Version: "v1", @@ -420,7 +420,7 @@ func TestValidateClusterResourceOverride(t *testing.T) { "valid cluster resource override - croList nil": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", diff --git a/pkg/utils/validator/clusterresourceplacement_test.go b/pkg/utils/validator/clusterresourceplacement_test.go index bee65cd52..3e8b7d5d5 100644 --- a/pkg/utils/validator/clusterresourceplacement_test.go +++ b/pkg/utils/validator/clusterresourceplacement_test.go @@ -35,7 +35,7 @@ import ( var ( positiveNumberOfClusters int32 = 1 negativeNumberOfClusters int32 = -1 - resourceSelector = placementv1beta1.ClusterResourceSelector{ + resourceSelector = placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole", @@ -222,7 +222,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, }, @@ -239,7 +239,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { Name: "test-crp-with-very-long-name-field-exceeding-DNS1035LabelMaxLength", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, }, @@ -257,7 +257,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -285,7 +285,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -305,7 +305,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Kind: "Deployment", @@ -327,7 +327,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, }, }, resourceInformer: nil, diff --git a/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_mutating_webhook_test.go b/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_mutating_webhook_test.go index e99a81eb5..4f6f1e363 100644 --- a/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_mutating_webhook_test.go +++ 
b/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_mutating_webhook_test.go @@ -46,7 +46,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-no-revisionhistory", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -73,7 +73,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-no-policy", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, // Policy omitted Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -98,7 +98,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-no-strategy", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -112,7 +112,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-no-apply-strategy", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -134,7 +134,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-no-serverside-apply-config", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -162,7 +162,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-no-rolling-update-config", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, ClusterNames: []string{"cluster1", "cluster2"}, @@ -186,7 +186,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-no-toleration-operator", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, Tolerations: []placementv1beta1.Toleration{ @@ -215,7 +215,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-topology-spread-constraints", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, TopologySpreadConstraints: []placementv1beta1.TopologySpreadConstraint{ @@ -243,7 +243,7 @@ func TestMutatingHandle(t *testing.T) { Name: 
"test-crp-all-fields", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(3)), @@ -298,7 +298,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-update-missing", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, // Policy change is immutable NumberOfClusters: ptr.To(int32(3)), @@ -313,7 +313,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-update-change-field", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(5)), // Changed from 3 to 5 diff --git a/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook_test.go b/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook_test.go index dbef757d9..a487f93a3 100644 --- a/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook_test.go +++ b/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook_test.go @@ -25,7 +25,7 @@ import ( ) var ( - resourceSelector = placementv1beta1.ClusterResourceSelector{ + resourceSelector = placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole", @@ -44,7 +44,7 @@ func TestHandle(t *testing.T) { Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -64,7 +64,7 @@ func TestHandle(t *testing.T) { Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -85,7 +85,7 @@ func TestHandle(t *testing.T) { Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -104,7 +104,7 @@ func TestHandle(t *testing.T) { Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, - ResourceSelectors: 
[]placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -123,7 +123,7 @@ func TestHandle(t *testing.T) { Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, }, @@ -135,7 +135,7 @@ func TestHandle(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, Tolerations: []placementv1beta1.Toleration{ @@ -160,7 +160,7 @@ func TestHandle(t *testing.T) { Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -180,7 +180,7 @@ func TestHandle(t *testing.T) { Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -197,7 +197,7 @@ func TestHandle(t *testing.T) { Finalizers: []string{placementv1beta1.PlacementCleanupFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -216,7 +216,7 @@ func TestHandle(t *testing.T) { PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(2)), }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, }, diff --git a/pkg/webhook/clusterresourceplacementdisruptionbudget/clusterresourceplacementdisruptionbudget_validating_webhook_test.go b/pkg/webhook/clusterresourceplacementdisruptionbudget/clusterresourceplacementdisruptionbudget_validating_webhook_test.go index 960bc620f..e4407718f 100644 --- a/pkg/webhook/clusterresourceplacementdisruptionbudget/clusterresourceplacementdisruptionbudget_validating_webhook_test.go +++ b/pkg/webhook/clusterresourceplacementdisruptionbudget/clusterresourceplacementdisruptionbudget_validating_webhook_test.go @@ -121,7 +121,7 @@ func TestHandle(t *testing.T) { 
Name: "pick-all-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -132,7 +132,7 @@ func TestHandle(t *testing.T) { Name: "crp-pickn", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(1)), @@ -144,7 +144,7 @@ func TestHandle(t *testing.T) { Name: "crp-pickfixed", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, ClusterNames: []string{"cluster1", "cluster2"}, diff --git a/pkg/webhook/clusterresourceplacementeviction/clusterresourceplacementeviction_validating_webhook_test.go b/pkg/webhook/clusterresourceplacementeviction/clusterresourceplacementeviction_validating_webhook_test.go index 757740e20..cf10c078f 100644 --- a/pkg/webhook/clusterresourceplacementeviction/clusterresourceplacementeviction_validating_webhook_test.go +++ b/pkg/webhook/clusterresourceplacementeviction/clusterresourceplacementeviction_validating_webhook_test.go @@ -84,7 +84,7 @@ func TestHandle(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -99,7 +99,7 @@ func TestHandle(t *testing.T) { Finalizers: []string{placementv1beta1.PlacementCleanupFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -110,7 +110,7 @@ func TestHandle(t *testing.T) { Name: "crp-pickfixed", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, ClusterNames: []string{"cluster1", "cluster2"}, diff --git a/pkg/webhook/validation/uservalidation.go b/pkg/webhook/validation/uservalidation.go index 535532c13..cde0b38a5 100644 --- a/pkg/webhook/validation/uservalidation.go +++ b/pkg/webhook/validation/uservalidation.go @@ -18,6 +18,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" clusterv1beta1 "go.goms.io/fleet/apis/cluster/v1beta1" + placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" fleetv1alpha1 "go.goms.io/fleet/apis/v1alpha1" "go.goms.io/fleet/pkg/utils" ) @@ -109,14 +110,11 @@ func ValidateFleetMemberClusterUpdate(currentMC, oldMC clusterv1beta1.MemberClus return admission.Denied(err.Error()) } - // Users are no longer allowed to modify labels of fleet member cluster through webhook. 
- // This will be disabled until member labels are accessible through CLI - if denyModifyMemberClusterLabels { - isLabelUpdated := isMapFieldUpdated(currentMC.GetLabels(), oldMC.GetLabels()) - if isLabelUpdated && !isUserInGroup(userInfo, mastersGroup) { - klog.V(2).InfoS(DeniedModifyMemberClusterLabels, "user", userInfo.Username, "groups", userInfo.Groups, "operation", req.Operation, "GVK", req.RequestKind, "subResource", req.SubResource, "namespacedName", namespacedName) - return admission.Denied(DeniedModifyMemberClusterLabels) - } + isLabelUpdated := isMapFieldUpdated(currentMC.GetLabels(), oldMC.GetLabels()) + if isLabelUpdated && !isUserInGroup(userInfo, mastersGroup) && shouldDenyLabelModification(currentMC.GetLabels(), oldMC.GetLabels(), denyModifyMemberClusterLabels) { + // Allow any user to modify kubernetes-fleet.io/* labels, but restrict other label modifications when denyModifyMemberClusterLabels is true. + klog.V(2).InfoS(DeniedModifyMemberClusterLabels, "user", userInfo.Username, "groups", userInfo.Groups, "operation", req.Operation, "GVK", req.RequestKind, "subResource", req.SubResource, "namespacedName", namespacedName) + return admission.Denied(DeniedModifyMemberClusterLabels) + } isAnnotationUpdated := isFleetAnnotationUpdated(currentMC.Annotations, oldMC.Annotations) @@ -179,6 +177,29 @@ func isUserInGroup(userInfo authenticationv1.UserInfo, groupName string) bool { return slices.Contains(userInfo.Groups, groupName) } +// shouldDenyLabelModification returns true if any labels (besides kubernetes-fleet.io/* labels) are being modified and denyModifyMemberClusterLabels is true. +func shouldDenyLabelModification(currentLabels, oldLabels map[string]string, denyModifyMemberClusterLabels bool) bool { + if !denyModifyMemberClusterLabels { + return false + } + for k, v := range currentLabels { + oldV, exists := oldLabels[k] + if !exists || oldV != v { + if !strings.HasPrefix(k, placementv1beta1.FleetPrefix) { + return true + } + } + } + for k := range oldLabels { + if _, exists := currentLabels[k]; !exists { + if !strings.HasPrefix(k, placementv1beta1.FleetPrefix) { + return true + } + } + } + return false +} + // isMemberClusterMapFieldUpdated return true if member cluster label is updated. 
func isMapFieldUpdated(currentMap, oldMap map[string]string) bool { return !reflect.DeepEqual(currentMap, oldMap) diff --git a/pkg/webhook/validation/uservalidation_test.go b/pkg/webhook/validation/uservalidation_test.go index c43a0688d..6ef1dcd8d 100644 --- a/pkg/webhook/validation/uservalidation_test.go +++ b/pkg/webhook/validation/uservalidation_test.go @@ -328,6 +328,106 @@ func TestValidateFleetMemberClusterUpdate(t *testing.T) { }, wantResponse: admission.Allowed(fmt.Sprintf(ResourceAllowedFormat, "nonSystemMastersUser", utils.GenerateGroupString([]string{"system:authenticated"}), admissionv1.Update, &utils.MCMetaGVK, "", types.NamespacedName{Name: "test-mc"})), }, + "allow label modification by any user for kubernetes-fleet.io/* labels": { + denyModifyMemberClusterLabels: true, + oldMC: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mc", + Labels: map[string]string{"kubernetes-fleet.io/some-label": "old-value"}, + Annotations: map[string]string{ + "fleet.azure.com/cluster-resource-id": "test-cluster-resource-id", + }, + }, + }, + newMC: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mc", + Labels: map[string]string{"kubernetes-fleet.io/some-label": "new-value"}, + Annotations: map[string]string{ + "fleet.azure.com/cluster-resource-id": "test-cluster-resource-id", + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Name: "test-mc", + UserInfo: authenticationv1.UserInfo{ + Username: "nonSystemMastersUser", + Groups: []string{"someGroup"}, + }, + RequestKind: &utils.MCMetaGVK, + Operation: admissionv1.Update, + }, + }, + wantResponse: admission.Allowed(fmt.Sprintf(ResourceAllowedFormat, "nonSystemMastersUser", utils.GenerateGroupString([]string{"someGroup"}), + admissionv1.Update, &utils.MCMetaGVK, "", types.NamespacedName{Name: "test-mc"})), + }, + "allow label creation by any user for kubernetes-fleet.io/* labels": { + denyModifyMemberClusterLabels: true, + oldMC: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mc", + Annotations: map[string]string{ + "fleet.azure.com/cluster-resource-id": "test-cluster-resource-id", + }, + }, + }, + newMC: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mc", + Labels: map[string]string{"kubernetes-fleet.io/some-label": "new-value"}, + Annotations: map[string]string{ + "fleet.azure.com/cluster-resource-id": "test-cluster-resource-id", + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Name: "test-mc", + UserInfo: authenticationv1.UserInfo{ + Username: "nonSystemMastersUser", + Groups: []string{"someGroup"}, + }, + RequestKind: &utils.MCMetaGVK, + Operation: admissionv1.Update, + }, + }, + wantResponse: admission.Allowed(fmt.Sprintf(ResourceAllowedFormat, "nonSystemMastersUser", utils.GenerateGroupString([]string{"someGroup"}), + admissionv1.Update, &utils.MCMetaGVK, "", types.NamespacedName{Name: "test-mc"})), + }, + "allow label deletion by any user for kubernetes-fleet.io/* labels": { + denyModifyMemberClusterLabels: true, + oldMC: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mc", + Labels: map[string]string{"kubernetes-fleet.io/some-label": "old-value"}, + Annotations: map[string]string{ + "fleet.azure.com/cluster-resource-id": "test-cluster-resource-id", + }, + }, + }, + newMC: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mc", + Annotations: map[string]string{ + 
"fleet.azure.com/cluster-resource-id": "test-cluster-resource-id", + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Name: "test-mc", + UserInfo: authenticationv1.UserInfo{ + Username: "nonSystemMastersUser", + Groups: []string{"someGroup"}, + }, + RequestKind: &utils.MCMetaGVK, + Operation: admissionv1.Update, + }, + }, + wantResponse: admission.Allowed(fmt.Sprintf(ResourceAllowedFormat, "nonSystemMastersUser", utils.GenerateGroupString([]string{"someGroup"}), + admissionv1.Update, &utils.MCMetaGVK, "", types.NamespacedName{Name: "test-mc"})), + }, } for testName, testCase := range testCases { diff --git a/test/apis/placement/v1beta1/api_validation_integration_test.go b/test/apis/placement/v1beta1/api_validation_integration_test.go index 9d4e0008a..f6ca7a495 100644 --- a/test/apis/placement/v1beta1/api_validation_integration_test.go +++ b/test/apis/placement/v1beta1/api_validation_integration_test.go @@ -27,7 +27,6 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" @@ -42,11 +41,84 @@ const ( invalidupdateRunStageNameTemplate = "stage012345678901234567890123456789012345678901234567890123456789%d%d" approveRequestNameTemplate = "test-approve-request-%d" crpNameTemplate = "test-crp-%d" + rpNameTemplate = "test-rp-%d" croNameTemplate = "test-cro-%d" roNameTemplate = "test-ro-%d" testNamespace = "test-ns" + unknownScope = "UnknownScope" ) +// createValidClusterResourceOverride creates a valid ClusterResourceOverride for testing purposes. +// The placement parameter is optional - pass nil for no placement reference. +func createValidClusterResourceOverride(name string, placement *placementv1beta1.PlacementRef) placementv1beta1.ClusterResourceOverride { + return placementv1beta1.ClusterResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: placementv1beta1.ClusterResourceOverrideSpec{ + Placement: placement, + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + }, + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + OverrideType: placementv1beta1.JSONPatchOverrideType, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/labels/test", + Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, + }, + }, + }, + }, + }, + }, + } +} + +// createValidResourceOverride creates a valid ResourceOverride for testing purposes. +// The placement parameter is optional - pass nil for no placement reference. 
+func createValidResourceOverride(namespace, name string, placement *placementv1beta1.PlacementRef) placementv1beta1.ResourceOverride { + return placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: placement, + ResourceSelectors: []placementv1beta1.ResourceSelector{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + }, + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + OverrideType: placementv1beta1.JSONPatchOverrideType, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/labels/test", + Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, + }, + }, + }, + }, + }, + }, + } +} + var _ = Describe("Test placement v1beta1 API validation", func() { Context("Test ClusterResourcePlacement API validation - invalid cases", func() { var crp placementv1beta1.ClusterResourcePlacement @@ -58,7 +130,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -80,7 +152,6 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of ClusterResourcePlacement with nil policy", func() { - Expect(hubClient.Get(ctx, types.NamespacedName{Name: crpName}, &crp)).Should(Succeed(), "Get CRP call failed") crp.Spec.Policy = nil err := hubClient.Update(ctx, &crp) var statusErr *k8sErrors.StatusError @@ -89,7 +160,6 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of ClusterResourcePlacement with different placement type", func() { - Expect(hubClient.Get(ctx, types.NamespacedName{Name: crpName}, &crp)).Should(Succeed(), "Get CRP call failed") crp.Spec.Policy.PlacementType = placementv1beta1.PickAllPlacementType err := hubClient.Update(ctx, &crp) var statusErr *k8sErrors.StatusError @@ -112,7 +182,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -132,7 +202,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -164,7 +234,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -202,7 +272,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -233,36 +303,91 @@ var _ = Describe("Test placement v1beta1 API validation", func() { } Expect(hubClient.Create(ctx, &crp)).Should(Succeed()) }) + + It("should allow creation of 
ClusterResourcePlacement with empty string as StatusReportingScope and multiple namespace selectors plus other cluster-scoped resources", func() { + crp = placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns-1", + }, + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns-2", + }, + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + Name: "test-cluster-role", + }, + { + Group: "", + Version: "v1", + Kind: "PersistentVolume", + Name: "test-pv", + }, + }, + StatusReportingScope: "", // defaults to ClusterScopeOnly. + }, + } + Expect(hubClient.Create(ctx, &crp)).Should(Succeed()) + }) }) Context("Test ClusterResourcePlacement StatusReportingScope validation - create, deny cases", func() { var crp placementv1beta1.ClusterResourcePlacement crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) - It("should deny creation of ClusterResourcePlacement with StatusReportingScope NamespaceAccessible and multiple namespace selectors", func() { + It("should deny creation of ClusterResourcePlacement with Unknown StatusReportingScope", func() { crp = placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", Kind: "Namespace", Name: "test-ns-1", }, + }, + StatusReportingScope: unknownScope, // Invalid scope + }, + } + err := hubClient.Create(ctx, &crp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create CRP call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("supported values: \"ClusterScopeOnly\", \"NamespaceAccessible\"")) + }) + + It("should deny creation of ClusterResourcePlacement with StatusReportingScope NamespaceAccessible and multiple namespace selectors", func() { + crp = placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", Kind: "Namespace", - Name: "test-ns-2", + Name: "test-ns-1", }, { - Group: "rbac.authorization.k8s.io", + Group: "", Version: "v1", - Kind: "ClusterRole", - Name: "test-cluster-role", + Kind: "Namespace", + Name: "test-ns-2", }, }, StatusReportingScope: placementv1beta1.NamespaceAccessible, @@ -280,7 +405,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -304,7 +429,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) }) - Context("Test ClusterResourcePlacement StatusReportingScope validation - update cases", func() { + Context("Test ClusterResourcePlacement ClusterScopeOnly StatusReportingScope validation - update cases", func() { var crp placementv1beta1.ClusterResourcePlacement crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) @@ -314,7 +439,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -322,6 +447,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: "test-ns-1", }, }, + // By default, StatusReportingScope is ClusterScopeOnly }, } Expect(hubClient.Create(ctx, &crp)).Should(Succeed()) @@ -331,63 +457,85 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Expect(hubClient.Delete(ctx, &crp)).Should(Succeed()) }) - It("should allow update of ClusterResourcePlacement with StatusReportingScope NamespaceAccessible, one namespace selector", func() { - crp.Spec.StatusReportingScope = placementv1beta1.NamespaceAccessible + It("should allow empty string for StatusReportingScope in a ClusterResourcePlacement when StatusReportingScope is not set", func() { + Expect(crp.Spec.StatusReportingScope).To(Equal(placementv1beta1.ClusterScopeOnly), "CRP should have default StatusReportingScope ClusterScopeOnly") + crp.Spec.StatusReportingScope = "" // Empty string should default to ClusterScopeOnly Expect(hubClient.Update(ctx, &crp)).Should(Succeed()) + Expect(crp.Spec.StatusReportingScope).To(Equal(placementv1beta1.ClusterScopeOnly), "CRP should have default StatusReportingScope ClusterScopeOnly") }) - It("should allow update of ClusterResourcePlacement with StatusReportingScope NamespaceAccessible, one namespace plus other cluster-scoped resources", func() { - crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ClusterResourceSelector{ - { - Group: "rbac.authorization.k8s.io", - Version: "v1", - Kind: "ClusterRole", - Name: "test-cluster-role", - }, + It("should allow update of ClusterResourcePlacement which has 
default StatusReportingScope, multiple namespace resource selectors", func() { + crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", - Kind: "PersistentVolume", - Name: "test-pv", + Kind: "Namespace", + Name: "test-ns-2", }, }...) - crp.Spec.StatusReportingScope = placementv1beta1.NamespaceAccessible Expect(hubClient.Update(ctx, &crp)).Should(Succeed()) }) - It("should allow update of ClusterResourcePlacement with StatusReportingScope ClusterScopeOnly, multiple namespace selectors", func() { - crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ClusterResourceSelector{ + It("should allow update of ClusterResourcePlacement with StatusReportingScope ClusterScopeOnly, multiple namespace resource selectors", func() { + crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", Kind: "Namespace", Name: "test-ns-2", }, - { - Group: "rbac.authorization.k8s.io", - Version: "v1", - Kind: "ClusterRole", - Name: "test-cluster-role", - }, - { - Group: "", - Version: "v1", - Kind: "PersistentVolume", - Name: "test-pv", - }, }...) crp.Spec.StatusReportingScope = placementv1beta1.ClusterScopeOnly Expect(hubClient.Update(ctx, &crp)).Should(Succeed()) }) - It("should allow update of ClusterResourcePlacement with default StatusReportingScope, multiple namespace selectors", func() { - crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ClusterResourceSelector{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - Name: "test-ns-2", + It("should deny update of ClusterResourcePlacement StatusReportingScope to NamespaceAccessible due to immutability", func() { + crp.Spec.StatusReportingScope = placementv1beta1.NamespaceAccessible + err := hubClient.Update(ctx, &crp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("statusReportingScope is immutable")) + }) + + It("should deny update of ClusterResourcePlacement StatusReportingScope to unknown scope", func() { + crp.Spec.StatusReportingScope = unknownScope // Invalid scope + err := hubClient.Update(ctx, &crp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("supported values: \"ClusterScopeOnly\", \"NamespaceAccessible\"")) + }) + }) + + Context("Test ClusterResourcePlacement NamespaceAccessible StatusReportingScope validation - update cases", func() { + var crp placementv1beta1.ClusterResourcePlacement + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + + BeforeEach(func() { + crp = placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns-1", + }, + }, + StatusReportingScope: placementv1beta1.NamespaceAccessible, + }, + } + Expect(hubClient.Create(ctx, &crp)).Should(Succeed()) + }) + + AfterEach(func() { + Expect(hubClient.Delete(ctx, &crp)).Should(Succeed()) + }) + + It("should allow update of ClusterResourcePlacement with StatusReportingScope NamespaceAccessible, one namespace plus other cluster-scoped resources", func() { + crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -405,21 +553,14 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of ClusterResourcePlacement with StatusReportingScope NamespaceAccessible and multiple namespace selectors", func() { - crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ClusterResourceSelector{ + crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", Kind: "Namespace", Name: "test-ns-2", }, - { - Group: "rbac.authorization.k8s.io", - Version: "v1", - Kind: "ClusterRole", - Name: "test-cluster-role", - }, }...) - crp.Spec.StatusReportingScope = placementv1beta1.NamespaceAccessible err := hubClient.Update(ctx, &crp) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) @@ -427,7 +568,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of ClusterResourcePlacement with StatusReportingScope NamespaceAccessible, no namespace selectors", func() { - crp.Spec.ResourceSelectors = []placementv1beta1.ClusterResourceSelector{ + crp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -441,12 +582,220 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: "test-pv", }, } - crp.Spec.StatusReportingScope = placementv1beta1.NamespaceAccessible err := hubClient.Update(ctx, &crp) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("when statusReportingScope is NamespaceAccessible, exactly one resourceSelector with kind 'Namespace' is required")) }) + + It("should deny update of ClusterResourcePlacement StatusReportingScope to ClusterScopeOnly due to immutability", func() { + crp.Spec.StatusReportingScope = placementv1beta1.ClusterScopeOnly + err := hubClient.Update(ctx, &crp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("statusReportingScope is immutable")) + }) + + It("should deny update of ClusterResourcePlacement StatusReportingScope to empty string", func() { + crp.Spec.StatusReportingScope = "" + err := hubClient.Update(ctx, &crp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("statusReportingScope is immutable")) + }) + + It("should deny update of ClusterResourcePlacement StatusReportingScope to unknown scope", func() { + crp.Spec.StatusReportingScope = unknownScope // Invalid scope + err := hubClient.Update(ctx, &crp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("supported values: \"ClusterScopeOnly\", \"NamespaceAccessible\"")) + }) + }) + + Context("Test ResourcePlacement StatusReportingScope validation, allow cases", func() { + var rp placementv1beta1.ResourcePlacement + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + + AfterEach(func() { + Expect(hubClient.Delete(ctx, &rp)).Should(Succeed()) + }) + + It("should allow creation of ResourcePlacement with StatusReportingScope NamespaceAccessible, with no namespace resource selected", func() { + rp = placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm-1", + }, + { + Group: "", + Version: "v1", + Kind: "Secret", + Name: "test-secret", + }, + }, + StatusReportingScope: placementv1beta1.NamespaceAccessible, + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed()) + }) + + It("should allow creation of ResourcePlacement with StatusReportingScope ClusterScopeOnly", func() { + rp = placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + { + Group: "", + Version: "v1", + Kind: "Secret", + Name: "test-secret", + }, + }, + StatusReportingScope: placementv1beta1.ClusterScopeOnly, + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed()) + }) + + It("should allow creation of ResourcePlacement with StatusReportingScope 
set to empty string", func() { + rp = placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + }, + StatusReportingScope: "", + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed()) + }) + + It("should allow creation of ResourcePlacement with StatusReportingScope not specified", func() { + rp = placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + }, + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed()) + }) + + It("should allow update of ResourcePlacement StatusReportingScope, no immutability constraint", func() { + rp = placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + }, + StatusReportingScope: placementv1beta1.ClusterScopeOnly, + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed()) + rp.Spec.StatusReportingScope = placementv1beta1.NamespaceAccessible + Expect(hubClient.Update(ctx, &rp)).Should(Succeed()) + }) + }) + + Context("Test ResourcePlacement StatusReportingScope validation, deny cases", func() { + var rp placementv1beta1.ResourcePlacement + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + + It("should deny creation of ResourcePlacement with Unknown StatusReportingScope value", func() { + rp = placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + }, + StatusReportingScope: unknownScope, // Invalid scope + }, + } + err := hubClient.Create(ctx, &rp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create RP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("supported values: \"ClusterScopeOnly\", \"NamespaceAccessible\"")) + }) + + It("should deny update of ResourcePlacement StatusReportingScope to unknown scope due to enum validation", func() { + rp = placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + }, + StatusReportingScope: placementv1beta1.ClusterScopeOnly, + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed()) + rp.Spec.StatusReportingScope = unknownScope // Invalid scope - should fail due to enum validation + err := hubClient.Update(ctx, &rp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update RP call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("supported values: \"ClusterScopeOnly\", \"NamespaceAccessible\"")) + + // Cleanup after the test. + Expect(hubClient.Delete(ctx, &rp)).Should(Succeed()) + }) }) Context("Test ClusterPlacementDisruptionBudget API validation - valid cases", func() { @@ -1036,110 +1385,33 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Context("Test ClusterResourceOverride API validation - valid cases", func() { It("should allow creation of ClusterResourceOverride without placement reference", func() { - cro := placementv1beta1.ClusterResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), - }, - Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, - }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: []placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - }, - }, - }, - }, - }, - } + cro := createValidClusterResourceOverride( + fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), + nil, + ) Expect(hubClient.Create(ctx, &cro)).Should(Succeed()) Expect(hubClient.Delete(ctx, &cro)).Should(Succeed()) }) It("should allow creation of ClusterResourceOverride with cluster-scoped placement reference", func() { - cro := placementv1beta1.ClusterResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), - }, - Spec: placementv1beta1.ClusterResourceOverrideSpec{ - Placement: &placementv1beta1.PlacementRef{ - Name: "test-placement", - Scope: placementv1beta1.ClusterScoped, - }, - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, - }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: []placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - }, - }, - }, - }, + cro := createValidClusterResourceOverride( + fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), + &placementv1beta1.PlacementRef{ + Name: "test-placement", + Scope: placementv1beta1.ClusterScoped, }, - } + ) Expect(hubClient.Create(ctx, &cro)).Should(Succeed()) Expect(hubClient.Delete(ctx, &cro)).Should(Succeed()) }) It("should allow creation of ClusterResourceOverride without specifying scope in placement reference", func() { - cro := placementv1beta1.ClusterResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), - }, - Spec: placementv1beta1.ClusterResourceOverrideSpec{ - Placement: &placementv1beta1.PlacementRef{ - Name: "test-placement", - }, - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, - }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: 
[]placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - }, - }, - }, - }, + cro := createValidClusterResourceOverride( + fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), + &placementv1beta1.PlacementRef{ + Name: "test-placement", }, - } + ) Expect(hubClient.Create(ctx, &cro)).Should(Succeed()) Expect(hubClient.Delete(ctx, &cro)).Should(Succeed()) }) @@ -1147,196 +1419,218 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Context("Test ClusterResourceOverride API validation - invalid cases", func() { It("should deny creation of ClusterResourceOverride with namespaced placement reference", func() { - cro := placementv1beta1.ClusterResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), + cro := createValidClusterResourceOverride( + fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), + &placementv1beta1.PlacementRef{ + Name: "test-placement", + Scope: placementv1beta1.NamespaceScoped, }, - Spec: placementv1beta1.ClusterResourceOverrideSpec{ - Placement: &placementv1beta1.PlacementRef{ - Name: "test-placement", - Scope: placementv1beta1.NamespaceScoped, - }, - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, - }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: []placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - }, - }, - }, - }, - }, - } + ) err := hubClient.Create(ctx, &cro) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create ClusterResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("clusterResourceOverride placement reference cannot be Namespaced scope")) }) + + Context("Test ClusterResourceOverride API validation - placement update invalid cases", func() { + var cro placementv1beta1.ClusterResourceOverride + croName := fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()) + + BeforeEach(func() { + cro = createValidClusterResourceOverride( + croName, + &placementv1beta1.PlacementRef{ + Name: "test-placement", + Scope: placementv1beta1.ClusterScoped, + }, + ) + Expect(hubClient.Create(ctx, &cro)).Should(Succeed()) + }) + + AfterEach(func() { + Expect(hubClient.Delete(ctx, &cro)).Should(Succeed()) + }) + + It("should deny update of ClusterResourceOverride placement name", func() { + cro.Spec.Placement.Name = "different-placement" + err := hubClient.Update(ctx, &cro) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterResourceOverride call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + }) + + It("should deny update of ClusterResourceOverride placement scope", func() { + cro.Spec.Placement.Scope = placementv1beta1.NamespaceScoped + err := hubClient.Update(ctx, &cro) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(ContainSubstring("placement reference cannot be Namespaced scope")) + }) + + It("should deny update of ClusterResourceOverride placement from nil to non-nil", func() { + croWithoutPlacement := createValidClusterResourceOverride( + fmt.Sprintf(croNameTemplate, GinkgoParallelProcess())+"-nil", + nil, + ) + Expect(hubClient.Create(ctx, &croWithoutPlacement)).Should(Succeed()) + + croWithoutPlacement.Spec.Placement = &placementv1beta1.PlacementRef{ + Name: "new-placement", + Scope: placementv1beta1.ClusterScoped, + } + err := hubClient.Update(ctx, &croWithoutPlacement) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + + Expect(hubClient.Delete(ctx, &croWithoutPlacement)).Should(Succeed()) + }) + + It("should deny update of ClusterResourceOverride placement from non-nil to nil", func() { + cro.Spec.Placement = nil + err := hubClient.Update(ctx, &cro) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterResourceOverride call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + }) + }) }) Context("Test ResourceOverride API validation - valid cases", func() { It("should allow creation of ResourceOverride without placement reference", func() { - ro := placementv1beta1.ResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: testNamespace, - Name: fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), - }, - Spec: placementv1beta1.ResourceOverrideSpec{ - ResourceSelectors: []placementv1beta1.ResourceSelector{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, - }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: []placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - }, - }, - }, - }, - }, - } + ro := createValidResourceOverride( + testNamespace, + fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), + nil, + ) Expect(hubClient.Create(ctx, &ro)).Should(Succeed()) Expect(hubClient.Delete(ctx, &ro)).Should(Succeed()) }) It("should allow creation of ResourceOverride with cluster-scoped placement reference", func() { - ro := placementv1beta1.ResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: testNamespace, - Name: fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), - }, - Spec: placementv1beta1.ResourceOverrideSpec{ - Placement: &placementv1beta1.PlacementRef{ - Name: "test-placement", - Scope: placementv1beta1.ClusterScoped, - }, - ResourceSelectors: []placementv1beta1.ResourceSelector{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, - }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: []placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - }, - }, - }, - }, - }, - } + ro := createValidResourceOverride( + testNamespace, + fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), + &placementv1beta1.PlacementRef{ + Name: "test-placement", + Scope: placementv1beta1.ClusterScoped, + }, + ) Expect(hubClient.Create(ctx, &ro)).Should(Succeed()) Expect(hubClient.Delete(ctx, &ro)).Should(Succeed()) }) It("should allow creation of ResourceOverride without specifying scope in placement reference", func() { - ro := placementv1beta1.ResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: testNamespace, - Name: fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), - }, - Spec: placementv1beta1.ResourceOverrideSpec{ - Placement: &placementv1beta1.PlacementRef{ - Name: "test-placement", - }, - ResourceSelectors: []placementv1beta1.ResourceSelector{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, - }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: []placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - 
}, - }, - }, - }, + ro := createValidResourceOverride( + testNamespace, + fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), + &placementv1beta1.PlacementRef{ + Name: "test-placement", }, - } + ) Expect(hubClient.Create(ctx, &ro)).Should(Succeed()) Expect(hubClient.Delete(ctx, &ro)).Should(Succeed()) }) It("should allow creation of ResourceOverride with namespace-scoped placement reference", func() { - ro := placementv1beta1.ResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: testNamespace, - Name: fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), - }, - Spec: placementv1beta1.ResourceOverrideSpec{ - Placement: &placementv1beta1.PlacementRef{ + ro := createValidResourceOverride( + testNamespace, + fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), + &placementv1beta1.PlacementRef{ + Name: "test-placement", + Scope: placementv1beta1.NamespaceScoped, + }, + ) + Expect(hubClient.Create(ctx, &ro)).Should(Succeed()) + Expect(hubClient.Delete(ctx, &ro)).Should(Succeed()) + }) + }) + + Context("Test ResourceOverride API validation - invalid cases", func() { + + Context("Test ResourceOverride API validation - placement update invalid cases", func() { + var ro placementv1beta1.ResourceOverride + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + + BeforeEach(func() { + ro = createValidResourceOverride( + testNamespace, + roName, + &placementv1beta1.PlacementRef{ Name: "test-placement", - Scope: placementv1beta1.NamespaceScoped, - }, - ResourceSelectors: []placementv1beta1.ResourceSelector{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, + Scope: placementv1beta1.ClusterScoped, }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: []placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - }, - }, - }, + ) + Expect(hubClient.Create(ctx, &ro)).Should(Succeed()) + }) + + AfterEach(func() { + Expect(hubClient.Delete(ctx, &ro)).Should(Succeed()) + }) + + It("should deny update of ResourceOverride placement name", func() { + ro.Spec.Placement.Name = "different-placement" + err := hubClient.Update(ctx, &ro) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + }) + + It("should deny update of ResourceOverride placement from nil to non-nil", func() { + roWithoutPlacement := createValidResourceOverride( + testNamespace, + fmt.Sprintf(roNameTemplate, GinkgoParallelProcess())+"-nil", + nil, + ) + Expect(hubClient.Create(ctx, &roWithoutPlacement)).Should(Succeed()) + + roWithoutPlacement.Spec.Placement = &placementv1beta1.PlacementRef{ + Name: "new-placement", + Scope: placementv1beta1.ClusterScoped, + } + err := hubClient.Update(ctx, &roWithoutPlacement) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + + Expect(hubClient.Delete(ctx, &roWithoutPlacement)).Should(Succeed()) + }) + + It("should deny update of ResourceOverride placement from non-nil to nil", func() { + ro.Spec.Placement = nil + err := hubClient.Update(ctx, &ro) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + }) + + It("should deny update of ResourceOverride placement from cluster-scoped to namespace-scoped", func() { + ro.Spec.Placement.Scope = placementv1beta1.NamespaceScoped + err := hubClient.Update(ctx, &ro) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + }) + + It("should deny update of ResourceOverride placement from namespace-scoped to cluster-scoped", func() { + roWithNamespaceScope := createValidResourceOverride( + testNamespace, + fmt.Sprintf(roNameTemplate, GinkgoParallelProcess())+"-ns", + &placementv1beta1.PlacementRef{ + Name: "test-placement", + Scope: placementv1beta1.NamespaceScoped, }, - }, - } - Expect(hubClient.Create(ctx, &ro)).Should(Succeed()) - Expect(hubClient.Delete(ctx, &ro)).Should(Succeed()) + ) + Expect(hubClient.Create(ctx, &roWithNamespaceScope)).Should(Succeed()) + + roWithNamespaceScope.Spec.Placement.Scope = placementv1beta1.ClusterScoped + err := hubClient.Update(ctx, &roWithNamespaceScope) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + + Expect(hubClient.Delete(ctx, &roWithNamespaceScope)).Should(Succeed()) + }) }) }) }) diff --git a/test/e2e/README.md b/test/e2e/README.md index 01e204a58..883185a2f 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -35,16 +35,21 @@ test suites, follow the steps below: ginkgo --label-filter="!custom" -v -p . ``` - or run the custom configuration e2e tests with the following command + or run the custom configuration e2e tests with the following command: ```sh ginkgo --label-filter="custom" -v -p . ``` - or run tests involving member cluster join/leave scenarios with the following command (serially) + or run tests involving member cluster join/leave scenarios with the following command (serially): ```sh ginkgo --label-filter="joinleave" -v . ``` + or run tests related to resourcePlacement (rp) only with the following command: + ```sh + ginkgo --label-filter="resourceplacement" -v -p . + ``` + or create a launch.json in your vscode workspace. 
```yaml { diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index c324dd168..a9600cb64 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -185,6 +185,254 @@ func workNamespacePlacedOnClusterActual(cluster *framework.Cluster) func() error } } +func placementRolloutCompletedConditions(placementKey types.NamespacedName, generation int64, hasOverride bool) []metav1.Condition { + if placementKey.Namespace == "" { + return crpRolloutCompletedConditions(generation, hasOverride) + } else { + return rpRolloutCompletedConditions(generation, hasOverride) + } +} + +func placementScheduledConditions(placementKey types.NamespacedName, generation int64) []metav1.Condition { + if placementKey.Namespace == "" { + return crpScheduledConditions(generation) + } else { + return rpScheduledConditions(generation) + } +} + +func placementSchedulePartiallyFailedConditions(placementKey types.NamespacedName, generation int64) []metav1.Condition { + if placementKey.Namespace == "" { + return crpSchedulePartiallyFailedConditions(generation) + } else { + return rpSchedulePartiallyFailedConditions(generation) + } +} + +func placementScheduleFailedConditions(placementKey types.NamespacedName, generation int64) []metav1.Condition { + if placementKey.Namespace == "" { + return crpScheduleFailedConditions(generation) + } else { + return rpScheduleFailedConditions(generation) + } +} + +func placementOverrideFailedConditions(placementKey types.NamespacedName, generation int64) []metav1.Condition { + if placementKey.Namespace == "" { + return crpOverrideFailedConditions(generation) + } else { + return rpOverrideFailedConditions(generation) + } +} + +func placementWorkSynchronizedFailedConditions(placementKey types.NamespacedName, generation int64, hasOverrides bool) []metav1.Condition { + if placementKey.Namespace == "" { + return crpWorkSynchronizedFailedConditions(generation, hasOverrides) + } else { + return rpWorkSynchronizedFailedConditions(generation, hasOverrides) + } +} + +func placementRolloutStuckConditions(placementKey types.NamespacedName, generation int64) []metav1.Condition { + if placementKey.Namespace == "" { + return crpRolloutStuckConditions(generation) + } else { + return rpRolloutStuckConditions(generation) + } +} + +func rpRolloutCompletedConditions(generation int64, hasOverride bool) []metav1.Condition { + overrideConditionReason := condition.OverrideNotSpecifiedReason + if hasOverride { + overrideConditionReason = condition.OverriddenSucceededReason + } + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: overrideConditionReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementWorkSynchronizedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementAppliedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.ApplySucceededReason, + ObservedGeneration: generation, + }, + { + Type: 
string(placementv1beta1.ResourcePlacementAvailableConditionType), + Status: metav1.ConditionTrue, + Reason: condition.AvailableReason, + ObservedGeneration: generation, + }, + } +} + +func rpSchedulePartiallyFailedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionFalse, + ObservedGeneration: generation, + Reason: scheduler.NotFullyScheduledReason, + }, + { + Type: string(placementv1beta1.ResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementWorkSynchronizedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementAppliedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.ApplySucceededReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementAvailableConditionType), + Status: metav1.ConditionTrue, + Reason: condition.AvailableReason, + ObservedGeneration: generation, + }, + } +} + +func rpScheduleFailedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionFalse, + ObservedGeneration: generation, + Reason: scheduler.NotFullyScheduledReason, + }, + } +} + +func rpScheduledConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + } +} + +func rpRolloutStuckConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: generation, + Reason: scheduler.FullyScheduledReason, + }, + { + Type: string(placementv1beta1.ResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionFalse, + Reason: condition.RolloutNotStartedYetReason, + ObservedGeneration: generation, + }, + } +} + +func rpOverrideFailedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementOverriddenConditionType), + Status: metav1.ConditionFalse, + Reason: condition.OverriddenFailedReason, + ObservedGeneration: generation, + }, + } +} + +func rpWorkSynchronizedFailedConditions(generation int64, hasOverrides bool) []metav1.Condition { + overridenCondReason := condition.OverrideNotSpecifiedReason + if hasOverrides { + overridenCondReason = condition.OverriddenSucceededReason + } + return []metav1.Condition{ + { + Type: 
string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: overridenCondReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementWorkSynchronizedConditionType), + Status: metav1.ConditionFalse, + Reason: condition.WorkNotSynchronizedYetReason, + ObservedGeneration: generation, + }, + } +} + +func crpScheduledConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: generation, + Reason: scheduler.FullyScheduledReason, + }, + } +} + func crpScheduleFailedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { @@ -390,6 +638,45 @@ func crpDiffReportedConditions(generation int64, hasOverride bool) []metav1.Cond } } +func crpDiffReportingFailedConditions(generation int64, hasOverride bool) []metav1.Condition { + overrideConditionReason := condition.OverrideNotSpecifiedReason + if hasOverride { + overrideConditionReason = condition.OverriddenSucceededReason + } + return []metav1.Condition{ + { + Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: overrideConditionReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementDiffReportedConditionType), + Status: metav1.ConditionFalse, + Reason: condition.DiffReportedStatusFalseReason, + ObservedGeneration: generation, + }, + } +} + func crpRolloutCompletedConditions(generation int64, hasOverride bool) []metav1.Condition { overrideConditionReason := condition.OverrideNotSpecifiedReason if hasOverride { @@ -435,7 +722,63 @@ func crpRolloutCompletedConditions(generation int64, hasOverride bool) []metav1. 
} } -func resourcePlacementSyncPendingConditions(generation int64) []metav1.Condition { +func crpWorkSynchronizedFailedConditions(generation int64, hasOverrides bool) []metav1.Condition { + overridenCondReason := condition.OverrideNotSpecifiedReason + if hasOverrides { + overridenCondReason = condition.OverriddenSucceededReason + } + return []metav1.Condition{ + { + Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: overridenCondReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Status: metav1.ConditionFalse, + Reason: condition.WorkNotSynchronizedYetReason, + ObservedGeneration: generation, + }, + } +} + +func crpOverrideFailedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementOverriddenConditionType), + Status: metav1.ConditionFalse, + Reason: condition.OverriddenFailedReason, + ObservedGeneration: generation, + }, + } +} + +func perClusterSyncPendingConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { Type: string(placementv1beta1.PerClusterScheduledConditionType), @@ -452,7 +795,7 @@ func resourcePlacementSyncPendingConditions(generation int64) []metav1.Condition } } -func resourcePlacementRolloutUnknownConditions(generation int64) []metav1.Condition { +func perClusterRolloutUnknownConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { Type: string(placementv1beta1.PerClusterScheduledConditionType), @@ -469,7 +812,7 @@ func resourcePlacementRolloutUnknownConditions(generation int64) []metav1.Condit } } -func resourcePlacementApplyFailedConditions(generation int64) []metav1.Condition { +func perClusterApplyFailedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { Type: string(placementv1beta1.PerClusterScheduledConditionType), @@ -504,7 +847,7 @@ func resourcePlacementApplyFailedConditions(generation int64) []metav1.Condition } } -func resourcePlacementDiffReportedConditions(generation int64) []metav1.Condition { +func perClusterDiffReportedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { Type: string(placementv1beta1.PerClusterScheduledConditionType), @@ -539,7 +882,42 @@ func resourcePlacementDiffReportedConditions(generation int64) []metav1.Conditio } } -func resourcePlacementRolloutCompletedConditions(generation int64, resourceIsTrackable bool, hasOverride bool) []metav1.Condition { +func perClusterDiffReportingFailedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: 
string(placementv1beta1.PerClusterScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.PerClusterRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.PerClusterOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.PerClusterWorkSynchronizedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.AllWorkSyncedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourceBindingDiffReported), + Status: metav1.ConditionFalse, + Reason: condition.WorkNotDiffReportedReason, + ObservedGeneration: generation, + }, + } +} + +func perClusterRolloutCompletedConditions(generation int64, resourceIsTrackable bool, hasOverride bool) []metav1.Condition { availableConditionReason := condition.WorkNotAvailabilityTrackableReason if resourceIsTrackable { availableConditionReason = condition.AllWorkAvailableReason @@ -589,7 +967,7 @@ func resourcePlacementRolloutCompletedConditions(generation int64, resourceIsTra } } -func resourcePlacementScheduleFailedConditions(generation int64) []metav1.Condition { +func perClusterScheduleFailedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { Type: string(placementv1beta1.PerClusterScheduledConditionType), @@ -600,30 +978,7 @@ func resourcePlacementScheduleFailedConditions(generation int64) []metav1.Condit } } -func crpOverrideFailedConditions(generation int64) []metav1.Condition { - return []metav1.Condition{ - { - Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), - Status: metav1.ConditionTrue, - Reason: scheduler.FullyScheduledReason, - ObservedGeneration: generation, - }, - { - Type: string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType), - Status: metav1.ConditionTrue, - Reason: condition.RolloutStartedReason, - ObservedGeneration: generation, - }, - { - Type: string(placementv1beta1.ClusterResourcePlacementOverriddenConditionType), - Status: metav1.ConditionFalse, - Reason: condition.OverriddenFailedReason, - ObservedGeneration: generation, - }, - } -} - -func resourcePlacementOverrideFailedConditions(generation int64) []metav1.Condition { +func perClusterOverrideFailedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { Type: string(placementv1beta1.PerClusterScheduledConditionType), @@ -646,7 +1001,7 @@ func resourcePlacementOverrideFailedConditions(generation int64) []metav1.Condit } } -func resourcePlacementWorkSynchronizedFailedConditions(generation int64, hasOverrides bool) []metav1.Condition { +func perClusterWorkSynchronizedFailedConditions(generation int64, hasOverrides bool) []metav1.Condition { overridenCondReason := condition.OverrideNotSpecifiedReason if hasOverrides { overridenCondReason = condition.OverriddenSucceededReason @@ -679,49 +1034,40 @@ func resourcePlacementWorkSynchronizedFailedConditions(generation int64, hasOver } } -func crpWorkSynchronizedFailedConditions(generation int64, hasOverrides bool) []metav1.Condition { - overridenCondReason := condition.OverrideNotSpecifiedReason - if hasOverrides { - overridenCondReason = condition.OverriddenSucceededReason - } - return []metav1.Condition{ - { - Type: 
string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), - Status: metav1.ConditionTrue, - Reason: scheduler.FullyScheduledReason, - ObservedGeneration: generation, - }, - { - Type: string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType), - Status: metav1.ConditionTrue, - Reason: condition.RolloutStartedReason, - ObservedGeneration: generation, - }, +func workResourceIdentifiers() []placementv1beta1.ResourceIdentifier { + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + + return []placementv1beta1.ResourceIdentifier{ { - Type: string(placementv1beta1.ClusterResourcePlacementOverriddenConditionType), - Status: metav1.ConditionTrue, - Reason: overridenCondReason, - ObservedGeneration: generation, + Kind: "Namespace", + Name: workNamespaceName, + Version: "v1", }, { - Type: string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), - Status: metav1.ConditionFalse, - Reason: condition.WorkNotSynchronizedYetReason, - ObservedGeneration: generation, + Kind: "ConfigMap", + Name: appConfigMapName, + Version: "v1", + Namespace: workNamespaceName, }, } } -func workResourceIdentifiers() []placementv1beta1.ResourceIdentifier { +func workNamespaceIdentifiers() []placementv1beta1.ResourceIdentifier { workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) - appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) - return []placementv1beta1.ResourceIdentifier{ { Kind: "Namespace", Name: workNamespaceName, Version: "v1", }, + } +} + +func appConfigMapIdentifiers() []placementv1beta1.ResourceIdentifier { + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + return []placementv1beta1.ResourceIdentifier{ { Kind: "ConfigMap", Name: appConfigMapName, @@ -737,39 +1083,67 @@ func crpStatusWithOverrideUpdatedActual( wantObservedResourceIndex string, wantClusterResourceOverrides []string, wantResourceOverrides []placementv1beta1.NamespacedName) func() error { - crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())} + return placementStatusWithOverrideUpdatedActual(crpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, + wantObservedResourceIndex, wantClusterResourceOverrides, wantResourceOverrides) +} + +func rpStatusWithOverrideUpdatedActual( + wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + wantSelectedClusters []string, + wantObservedResourceIndex string, + wantClusterResourceOverrides []string, + wantResourceOverrides []placementv1beta1.NamespacedName) func() error { + rpKey := types.NamespacedName{Name: fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()), Namespace: appNamespace().Name} + return placementStatusWithOverrideUpdatedActual(rpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, + wantObservedResourceIndex, wantClusterResourceOverrides, wantResourceOverrides) +} + +func placementStatusWithOverrideUpdatedActual( + placementKey types.NamespacedName, + wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + wantSelectedClusters []string, + wantObservedResourceIndex string, + wantClusterResourceOverrides []string, + wantResourceOverrides []placementv1beta1.NamespacedName, +) func() error { 
return func() error { - crp := &placementv1beta1.ClusterResourcePlacement{} - hasOverride := len(wantResourceOverrides) > 0 || len(wantClusterResourceOverrides) > 0 - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + placement, err := retrievePlacement(placementKey) + if err != nil { return err } + hasOverride := len(wantResourceOverrides) > 0 || len(wantClusterResourceOverrides) > 0 var wantPlacementStatus []placementv1beta1.PerClusterPlacementStatus for _, name := range wantSelectedClusters { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, hasOverride), + Conditions: perClusterRolloutCompletedConditions(placement.GetGeneration(), true, hasOverride), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, ObservedResourceIndex: wantObservedResourceIndex, }) } - wantStatus := placementv1beta1.PlacementStatus{ - Conditions: crpRolloutCompletedConditions(crp.Generation, hasOverride), + wantStatus := &placementv1beta1.PlacementStatus{ + Conditions: placementRolloutCompletedConditions(placementKey, placement.GetGeneration(), hasOverride), PerClusterPlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, ObservedResourceIndex: wantObservedResourceIndex, } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { - return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + if diff := cmp.Diff(placement.GetPlacementStatus(), wantStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("Placement status diff (-got, +want): %s", diff) } return nil } } func crpStatusUpdatedActual(wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, wantSelectedClusters, wantUnselectedClusters []string, wantObservedResourceIndex string) func() error { - crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) - return customizedCRPStatusUpdatedActual(crpName, wantSelectedResourceIdentifiers, wantSelectedClusters, wantUnselectedClusters, wantObservedResourceIndex, true) + crpKey := types.NamespacedName{Name: fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())} + return customizedPlacementStatusUpdatedActual(crpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, wantUnselectedClusters, wantObservedResourceIndex, true) +} + +func rpStatusUpdatedActual(wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, wantSelectedClusters, wantUnselectedClusters []string, wantObservedResourceIndex string) func() error { + rpKey := types.NamespacedName{Name: fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()), Namespace: appNamespace().Name} + return customizedPlacementStatusUpdatedActual(rpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, wantUnselectedClusters, wantObservedResourceIndex, true) } func crpStatusWithOverrideUpdatedFailedActual( @@ -778,77 +1152,118 @@ func crpStatusWithOverrideUpdatedFailedActual( wantObservedResourceIndex string, wantClusterResourceOverrides []string, wantResourceOverrides []placementv1beta1.NamespacedName) func() error { - crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())} + return placementStatusWithOverrideUpdatedFailedActual(crpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, + 
wantObservedResourceIndex, wantClusterResourceOverrides, wantResourceOverrides) +} + +func rpStatusWithOverrideUpdatedFailedActual( + wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + wantSelectedClusters []string, + wantObservedResourceIndex string, + wantClusterResourceOverrides []string, + wantResourceOverrides []placementv1beta1.NamespacedName) func() error { + rpKey := types.NamespacedName{Name: fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()), Namespace: appNamespace().Name} + return placementStatusWithOverrideUpdatedFailedActual(rpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, + wantObservedResourceIndex, wantClusterResourceOverrides, wantResourceOverrides) +} +func placementStatusWithOverrideUpdatedFailedActual( + placementKey types.NamespacedName, + wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + wantSelectedClusters []string, + wantObservedResourceIndex string, + wantClusterResourceOverrides []string, + wantResourceOverrides []placementv1beta1.NamespacedName, +) func() error { return func() error { - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + placement, err := retrievePlacement(placementKey) + if err != nil { return err } - var wantPlacementStatus []placementv1beta1.PerClusterPlacementStatus for _, name := range wantSelectedClusters { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, - Conditions: resourcePlacementOverrideFailedConditions(crp.Generation), + Conditions: perClusterOverrideFailedConditions(placement.GetGeneration()), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, ObservedResourceIndex: wantObservedResourceIndex, }) } - - wantStatus := placementv1beta1.PlacementStatus{ - Conditions: crpOverrideFailedConditions(crp.Generation), + wantStatus := &placementv1beta1.PlacementStatus{ + Conditions: placementOverrideFailedConditions(placementKey, placement.GetGeneration()), PerClusterPlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, ObservedResourceIndex: wantObservedResourceIndex, } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { - return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + if diff := cmp.Diff(placement.GetPlacementStatus(), wantStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("Placement status diff (-got, +want): %s", diff) } return nil } } -func crpStatusWithWorkSynchronizedUpdatedFailedActual( +func rpStatusWithWorkSynchronizedUpdatedFailedActual( wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, wantSelectedClusters []string, wantObservedResourceIndex string, wantClusterResourceOverrides []string, - wantResourceOverrides []placementv1beta1.NamespacedName) func() error { - crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + wantResourceOverrides []placementv1beta1.NamespacedName, +) func() error { + rpKey := types.NamespacedName{Name: fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()), Namespace: appNamespace().Name} + return placementStatusWithWorkSynchronizedUpdatedFailedActual(rpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, + wantObservedResourceIndex, wantClusterResourceOverrides, wantResourceOverrides) +} +func placementStatusWithWorkSynchronizedUpdatedFailedActual( + placementKey types.NamespacedName, + 
wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + wantSelectedClusters []string, + wantObservedResourceIndex string, + wantClusterResourceOverrides []string, + wantResourceOverrides []placementv1beta1.NamespacedName, +) func() error { return func() error { - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + placement, err := retrievePlacement(placementKey) + if err != nil { return err } - var wantPlacementStatus []placementv1beta1.PerClusterPlacementStatus hasOverrides := len(wantResourceOverrides) > 0 || len(wantClusterResourceOverrides) > 0 for _, name := range wantSelectedClusters { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, - Conditions: resourcePlacementWorkSynchronizedFailedConditions(crp.Generation, hasOverrides), + Conditions: perClusterWorkSynchronizedFailedConditions(placement.GetGeneration(), hasOverrides), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, ObservedResourceIndex: wantObservedResourceIndex, }) } - - wantStatus := placementv1beta1.PlacementStatus{ - Conditions: crpWorkSynchronizedFailedConditions(crp.Generation, hasOverrides), + wantStatus := &placementv1beta1.PlacementStatus{ + Conditions: placementWorkSynchronizedFailedConditions(placementKey, placement.GetGeneration(), hasOverrides), PerClusterPlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, ObservedResourceIndex: wantObservedResourceIndex, } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { - return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + if diff := cmp.Diff(placement.GetPlacementStatus(), wantStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("Placement status diff (-got, +want): %s", diff) } return nil } } +func crpStatusWithWorkSynchronizedUpdatedFailedActual( + wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + wantSelectedClusters []string, + wantObservedResourceIndex string, + wantClusterResourceOverrides []string, + wantResourceOverrides []placementv1beta1.NamespacedName) func() error { + crpKey := types.NamespacedName{Name: fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())} + return placementStatusWithWorkSynchronizedUpdatedFailedActual(crpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, + wantObservedResourceIndex, wantClusterResourceOverrides, wantResourceOverrides) +} + func crpStatusWithExternalStrategyActual( wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, wantObservedResourceIndex string, @@ -878,7 +1293,7 @@ func crpStatusWithExternalStrategyActual( // No observed resource index for this cluster, assume rollout is still pending. 
wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, - Conditions: resourcePlacementRolloutUnknownConditions(crp.Generation), + Conditions: perClusterRolloutUnknownConditions(crp.Generation), ObservedResourceIndex: wantObservedResourceIndexPerCluster[i], }) } else { @@ -891,7 +1306,7 @@ func crpStatusWithExternalStrategyActual( if reportDiff { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, - Conditions: resourcePlacementDiffReportedConditions(crp.Generation), + Conditions: perClusterDiffReportedConditions(crp.Generation), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, ObservedResourceIndex: wantObservedResourceIndexPerCluster[i], @@ -928,7 +1343,7 @@ func crpStatusWithExternalStrategyActual( } else { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, hasOverrides), + Conditions: perClusterRolloutCompletedConditions(crp.Generation, true, hasOverrides), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, ObservedResourceIndex: wantObservedResourceIndexPerCluster[i], @@ -952,22 +1367,24 @@ func crpStatusWithExternalStrategyActual( wantStatus.Conditions = crpRolloutPendingDueToExternalStrategyConditions(crp.Generation) } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil } } -func customizedCRPStatusUpdatedActual(crpName string, +func customizedPlacementStatusUpdatedActual( + placementKey types.NamespacedName, wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, wantSelectedClusters, wantUnselectedClusters []string, wantObservedResourceIndex string, - resourceIsTrackable bool) func() error { + resourceIsTrackable bool, +) func() error { return func() error { - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { - return err + placement, err := retrievePlacement(placementKey) + if err != nil { + return fmt.Errorf("failed to get placement %s: %w", placementKey, err) } wantPlacementStatus := []placementv1beta1.PerClusterPlacementStatus{} @@ -975,61 +1392,70 @@ func customizedCRPStatusUpdatedActual(crpName string, wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, ObservedResourceIndex: wantObservedResourceIndex, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, resourceIsTrackable, false), + Conditions: perClusterRolloutCompletedConditions(placement.GetGeneration(), resourceIsTrackable, false), }) } for i := 0; i < len(wantUnselectedClusters); i++ { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ - Conditions: resourcePlacementScheduleFailedConditions(crp.Generation), + Conditions: perClusterScheduleFailedConditions(placement.GetGeneration()), }) } - var wantCRPConditions []metav1.Condition + var wantPlacementConditions []metav1.Condition if len(wantSelectedClusters) > 0 { - wantCRPConditions = crpRolloutCompletedConditions(crp.Generation, false) + wantPlacementConditions = 
placementRolloutCompletedConditions(placementKey, placement.GetGeneration(), false) } else { - wantCRPConditions = []metav1.Condition{ - // we don't set the remaining resource conditions. - { - Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), - Status: metav1.ConditionTrue, - Reason: scheduler.FullyScheduledReason, - ObservedGeneration: crp.Generation, - }, - } + // We don't set the remaining resource conditions. + wantPlacementConditions = placementScheduledConditions(placementKey, placement.GetGeneration()) } if len(wantUnselectedClusters) > 0 { if len(wantSelectedClusters) > 0 { - wantCRPConditions = crpSchedulePartiallyFailedConditions(crp.Generation) + wantPlacementConditions = placementSchedulePartiallyFailedConditions(placementKey, placement.GetGeneration()) } else { // we don't set the remaining resource conditions if there is no clusters to select - wantCRPConditions = crpScheduleFailedConditions(crp.Generation) + wantPlacementConditions = placementScheduleFailedConditions(placementKey, placement.GetGeneration()) } } - // Note that the CRP controller will only keep decisions regarding unselected clusters for a CRP if: + // Note that the placement controller will only keep decisions regarding unselected clusters for a placement if: // - // * The CRP is of the PickN placement type and the required N count cannot be fulfilled; or - // * The CRP is of the PickFixed placement type and the list of target clusters specified cannot be fulfilled. - wantStatus := placementv1beta1.PlacementStatus{ - Conditions: wantCRPConditions, + // * The placement is of the PickN placement type and the required N count cannot be fulfilled; or + // * The placement is of the PickFixed placement type and the list of target clusters specified cannot be fulfilled. 
+ wantStatus := &placementv1beta1.PlacementStatus{ + Conditions: wantPlacementConditions, PerClusterPlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, ObservedResourceIndex: wantObservedResourceIndex, } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { - return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + if diff := cmp.Diff(placement.GetPlacementStatus(), wantStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("Placement status diff (-got, +want): %s", diff) } return nil } } func safeRolloutWorkloadCRPStatusUpdatedActual(wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, failedWorkloadResourceIdentifier placementv1beta1.ResourceIdentifier, wantSelectedClusters []string, wantObservedResourceIndex string, failedResourceObservedGeneration int64) func() error { + crpKey := types.NamespacedName{Name: fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())} + return safeRolloutWorkloadPlacementStatusUpdatedActual(crpKey, wantSelectedResourceIdentifiers, failedWorkloadResourceIdentifier, wantSelectedClusters, wantObservedResourceIndex, failedResourceObservedGeneration) +} + +func safeRolloutWorkloadRPStatusUpdatedActual(wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, failedWorkloadResourceIdentifier placementv1beta1.ResourceIdentifier, wantSelectedClusters []string, wantObservedResourceIndex string, failedResourceObservedGeneration int64) func() error { + rpKey := types.NamespacedName{Name: fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()), Namespace: appNamespace().Name} + return safeRolloutWorkloadPlacementStatusUpdatedActual(rpKey, wantSelectedResourceIdentifiers, failedWorkloadResourceIdentifier, wantSelectedClusters, wantObservedResourceIndex, failedResourceObservedGeneration) +} + +func safeRolloutWorkloadPlacementStatusUpdatedActual( + placementKey types.NamespacedName, + wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + failedWorkloadResourceIdentifier placementv1beta1.ResourceIdentifier, + wantSelectedClusters []string, + wantObservedResourceIndex string, + failedResourceObservedGeneration int64, +) func() error { return func() error { - crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + placement, err := retrievePlacement(placementKey) + if err != nil { return err } @@ -1042,37 +1468,37 @@ func safeRolloutWorkloadCRPStatusUpdatedActual(wantSelectedResourceIdentifiers [ Type: string(placementv1beta1.PerClusterScheduledConditionType), Status: metav1.ConditionTrue, Reason: condition.ScheduleSucceededReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, { Type: string(placementv1beta1.PerClusterRolloutStartedConditionType), Status: metav1.ConditionTrue, Reason: condition.RolloutStartedReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, { Type: string(placementv1beta1.PerClusterOverriddenConditionType), Status: metav1.ConditionTrue, Reason: condition.OverrideNotSpecifiedReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, { Type: string(placementv1beta1.PerClusterWorkSynchronizedConditionType), Status: metav1.ConditionTrue, Reason: condition.AllWorkSyncedReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: 
placement.GetGeneration(), }, { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionTrue, Reason: condition.AllWorkAppliedReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, { Type: string(placementv1beta1.PerClusterAvailableConditionType), Status: metav1.ConditionFalse, Reason: condition.WorkNotAvailableReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, }, FailedPlacements: []placementv1beta1.FailedResourcePlacement{ @@ -1081,7 +1507,7 @@ func safeRolloutWorkloadCRPStatusUpdatedActual(wantSelectedResourceIdentifiers [ Condition: metav1.Condition{ Type: string(placementv1beta1.PerClusterAvailableConditionType), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(workapplier.AvailabilityResultTypeNotYetAvailable), ObservedGeneration: failedResourceObservedGeneration, }, }, @@ -1097,13 +1523,13 @@ func safeRolloutWorkloadCRPStatusUpdatedActual(wantSelectedResourceIdentifiers [ Type: string(placementv1beta1.PerClusterScheduledConditionType), Status: metav1.ConditionTrue, Reason: condition.ScheduleSucceededReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, { Type: string(placementv1beta1.PerClusterRolloutStartedConditionType), Status: metav1.ConditionFalse, Reason: condition.RolloutNotStartedYetReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, }, } @@ -1112,30 +1538,15 @@ func safeRolloutWorkloadCRPStatusUpdatedActual(wantSelectedResourceIdentifiers [ wantPlacementStatus = append(wantPlacementStatus, rolloutBlockedPlacementStatus) } - wantCRPConditions := []metav1.Condition{ - { - Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), - Status: metav1.ConditionTrue, - Reason: scheduler.FullyScheduledReason, - ObservedGeneration: crp.Generation, - }, - { - Type: string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType), - Status: metav1.ConditionFalse, - Reason: condition.RolloutNotStartedYetReason, - ObservedGeneration: crp.Generation, - }, - } - - wantStatus := placementv1beta1.PlacementStatus{ - Conditions: wantCRPConditions, + wantStatus := &placementv1beta1.PlacementStatus{ + Conditions: placementRolloutStuckConditions(placementKey, placement.GetGeneration()), PerClusterPlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, ObservedResourceIndex: wantObservedResourceIndex, } - if diff := cmp.Diff(crp.Status, wantStatus, safeRolloutCRPStatusCmpOptions...); diff != "" { - return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + if diff := cmp.Diff(placement.GetPlacementStatus(), wantStatus, safeRolloutPlacementStatusCmpOptions...); diff != "" { + return fmt.Errorf("Placement status diff (-got, +want): %s", diff) } return nil } @@ -1180,10 +1591,26 @@ func workNamespaceRemovedFromClusterActual(cluster *framework.Cluster) func() er } } -func allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName string) func() error { +// namespacedResourcesRemovedFromClusterActual checks that resources in the specified namespace have been removed from the cluster. +// It checks if the placed configMap is removed by default, as this is tested in most of the test cases. +// For tests with additional resources placed, e.g. deployments, daemonSets, add those to placedResources. 
+func namespacedResourcesRemovedFromClusterActual(cluster *framework.Cluster, placedResources ...client.Object) func() error { + cm := appConfigMap() + placedResources = append(placedResources, &cm) return func() error { - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + for _, resource := range placedResources { + if err := cluster.KubeClient.Get(ctx, types.NamespacedName{Name: resource.GetName(), Namespace: appNamespace().Name}, resource); !errors.IsNotFound(err) { + return fmt.Errorf("%s %s/%s still exists on cluster %s, or the Get call returned an unexpected error: %w", resource.GetObjectKind().GroupVersionKind(), appNamespace().Name, resource.GetName(), cluster.ClusterName, err) + } + } + return nil + } +} + +func allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(placementKey types.NamespacedName) func() error { + return func() error { + placement, err := retrievePlacement(placementKey) + if err != nil { if errors.IsNotFound(err) { return nil } @@ -1191,19 +1618,19 @@ func allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName str } wantFinalizers := []string{customDeletionBlockerFinalizer} - finalizer := crp.Finalizers + finalizer := placement.GetFinalizers() if diff := cmp.Diff(finalizer, wantFinalizers); diff != "" { - return fmt.Errorf("CRP finalizers diff (-got, +want): %s", diff) + return fmt.Errorf("Placement finalizers diff (-got, +want): %s", diff) } return nil } } -func crpRemovedActual(crpName string) func() error { +func placementRemovedActual(placementKey types.NamespacedName) func() error { return func() error { - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, &placementv1beta1.ClusterResourcePlacement{}); !errors.IsNotFound(err) { - return fmt.Errorf("CRP still exists or an unexpected error occurred: %w", err) + if _, err := retrievePlacement(placementKey); !errors.IsNotFound(err) { + return fmt.Errorf("Placement %s still exists or an unexpected error occurred: %w", placementKey, err) } return nil diff --git a/test/e2e/enveloped_object_placement_test.go b/test/e2e/enveloped_object_placement_test.go index e262e2dd7..0e34ae9e8 100644 --- a/test/e2e/enveloped_object_placement_test.go +++ b/test/e2e/enveloped_object_placement_test.go @@ -79,7 +79,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -131,7 +131,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { Namespace: workNamespaceName, }, } - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", true) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", true) Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -176,7 +176,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { }) It("should update CRP status as success again", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "2", true) + crpStatusUpdatedActual := 
customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "2", true) Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -200,7 +200,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP") }) @@ -325,7 +325,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { Condition: metav1.Condition{ Type: string(placementv1beta1.PerClusterAvailableConditionType), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(workapplier.AvailabilityResultTypeNotYetAvailable), ObservedGeneration: 1, }, }, @@ -360,7 +360,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { return err } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -474,7 +474,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementWorkSynchronizedFailedConditions(crp.Generation, false), + Conditions: perClusterWorkSynchronizedFailedConditions(crp.Generation, false), }, }, SelectedResources: []placementv1beta1.ResourceIdentifier{ @@ -493,7 +493,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { }, ObservedResourceIndex: "0", } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -609,12 +609,12 @@ var _ = Describe("Process objects with generate name", Ordered, func() { Condition: metav1.Condition{ Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundGenerateName), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundGenerateName), ObservedGeneration: 0, }, }, }, - Conditions: resourcePlacementApplyFailedConditions(crp.Generation), + Conditions: perClusterApplyFailedConditions(crp.Generation), }, }, SelectedResources: []placementv1beta1.ResourceIdentifier{ @@ -633,7 +633,7 @@ var _ = Describe("Process objects with generate name", Ordered, func() { }, ObservedResourceIndex: "0", } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -689,7 +689,7 @@ func checkForRolloutStuckOnOneFailedClusterStatus(wantSelectedResources []placem Condition: metav1.Condition{ Type: 
placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToApply), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToApply), }, }, } @@ -700,11 +700,11 @@ func checkForRolloutStuckOnOneFailedClusterStatus(wantSelectedResources []placem return err } wantCRPConditions := crpRolloutStuckConditions(crp.Generation) - if diff := cmp.Diff(crp.Status.Conditions, wantCRPConditions, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status.Conditions, wantCRPConditions, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } // check the selected resources is still right - if diff := cmp.Diff(crp.Status.SelectedResources, wantSelectedResources, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status.SelectedResources, wantSelectedResources, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } // check the placement status has a failed placement @@ -720,19 +720,19 @@ func checkForRolloutStuckOnOneFailedClusterStatus(wantSelectedResources []placem for _, placementStatus := range crp.Status.PerClusterPlacementStatuses { // this is the cluster that got the new enveloped resource that was malformed if len(placementStatus.FailedPlacements) != 0 { - if diff := cmp.Diff(placementStatus.FailedPlacements, wantFailedResourcePlacement, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(placementStatus.FailedPlacements, wantFailedResourcePlacement, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } // check that the applied error message is correct if !strings.Contains(placementStatus.FailedPlacements[0].Condition.Message, "field is immutable") { return fmt.Errorf("CRP failed resource placement does not have unsupported scope message") } - if diff := cmp.Diff(placementStatus.Conditions, resourcePlacementApplyFailedConditions(crp.Generation), crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(placementStatus.Conditions, perClusterApplyFailedConditions(crp.Generation), placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } } else { // the cluster is stuck behind a rollout schedule since we now have 1 cluster that is not in applied ready status - if diff := cmp.Diff(placementStatus.Conditions, resourcePlacementSyncPendingConditions(crp.Generation), crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(placementStatus.Conditions, perClusterSyncPendingConditions(crp.Generation), placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } } diff --git a/test/e2e/join_and_leave_test.go b/test/e2e/join_and_leave_test.go index a1e4891c7..10d86c06d 100644 --- a/test/e2e/join_and_leave_test.go +++ b/test/e2e/join_and_leave_test.go @@ -91,7 +91,7 @@ var _ = Describe("Test member cluster join and leave flow", Label("joinleave"), Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -117,7 +117,7 @@ var _ = Describe("Test member cluster join and leave flow", Label("joinleave"), }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, 
allMemberClusterNames, nil, "0", true) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", true) Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -214,7 +214,7 @@ var _ = Describe("Test member cluster join and leave flow", Label("joinleave"), It("Should update CRP status to not placing any resources since all clusters are left", func() { // resourceQuota is enveloped so it's not trackable yet - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, nil, nil, "0", false) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, nil, nil, "0", false) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -234,7 +234,7 @@ var _ = Describe("Test member cluster join and leave flow", Label("joinleave"), }) It("should update CRP status to applied to all clusters again automatically after rejoining", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", true) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", true) Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) }) diff --git a/test/e2e/placement_apply_strategy_test.go b/test/e2e/placement_apply_strategy_test.go index 8ee7357d3..4bdbc3683 100644 --- a/test/e2e/placement_apply_strategy_test.go +++ b/test/e2e/placement_apply_strategy_test.go @@ -82,7 +82,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { AfterAll(func() { By(fmt.Sprintf("deleting placement %s", crpName)) - cleanupCRP(crpName) + cleanupPlacement(types.NamespacedName{Name: crpName}) By("deleting created work resources on member cluster") cleanWorkResourcesOnCluster(allMemberClusters[0]) @@ -116,7 +116,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) @@ -141,7 +141,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { AfterAll(func() { By(fmt.Sprintf("deleting placement %s", crpName)) - cleanupCRP(crpName) + cleanupPlacement(types.NamespacedName{Name: crpName}) }) It("should update CRP status as expected", func() { @@ -170,7 +170,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { It("should remove the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := 
allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -194,7 +194,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { AfterAll(func() { By(fmt.Sprintf("deleting placement %s", crpName)) - cleanupCRP(crpName) + cleanupPlacement(types.NamespacedName{Name: crpName}) }) It("should update CRP status as expected", func() { @@ -223,7 +223,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { It("should remove the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -247,7 +247,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { AfterAll(func() { By(fmt.Sprintf("deleting placement %s", crpName)) - cleanupCRP(crpName) + cleanupPlacement(types.NamespacedName{Name: crpName}) By("deleting created work resources on member cluster") cleanWorkResourcesOnCluster(allMemberClusters[0]) @@ -278,22 +278,22 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { Condition: metav1.Condition{ Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 0, }, }, }, - Conditions: resourcePlacementApplyFailedConditions(crp.Generation), + Conditions: perClusterApplyFailedConditions(crp.Generation), }, { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), + Conditions: perClusterRolloutCompletedConditions(crp.Generation, true, false), }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), + Conditions: perClusterRolloutCompletedConditions(crp.Generation, true, false), }, }, SelectedResources: []placementv1beta1.ResourceIdentifier{ @@ -311,7 +311,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { }, ObservedResourceIndex: "0", } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -336,7 +336,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller 
finalizers from CRP %s", crpName) }) @@ -376,7 +376,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { AfterAll(func() { By(fmt.Sprintf("deleting placement %s", crpName)) - cleanupCRP(crpName) + cleanupPlacement(types.NamespacedName{Name: crpName}) By("deleting created work resources on member cluster") cleanWorkResourcesOnCluster(allMemberClusters[0]) @@ -412,7 +412,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) @@ -505,7 +505,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -516,7 +516,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { Condition: metav1.Condition{ Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, { @@ -529,7 +529,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { Condition: metav1.Condition{ Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, }, @@ -546,7 +546,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { } wantCRPStatus := buildWantCRPStatus(conflictedCRP.Generation) - if diff := cmp.Diff(conflictedCRP.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(conflictedCRP.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -972,7 +972,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -983,7 +983,7 @@ var _ = Describe("switching apply strategies", func() { Condition: metav1.Condition{ Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeNotTakenOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeNotTakenOver), }, }, }, @@ -991,7 +991,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: 
perClusterRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -1005,7 +1005,7 @@ var _ = Describe("switching apply strategies", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1039,7 +1039,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "1", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1050,7 +1050,7 @@ var _ = Describe("switching apply strategies", func() { Condition: metav1.Condition{ Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeNotTakenOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeNotTakenOver), }, }, }, @@ -1058,7 +1058,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "1", - Conditions: resourcePlacementSyncPendingConditions(crpGeneration), + Conditions: perClusterSyncPendingConditions(crpGeneration), }, }, ObservedResourceIndex: "1", @@ -1072,7 +1072,7 @@ var _ = Describe("switching apply strategies", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1110,12 +1110,12 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "1", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), }, { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "1", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1147,7 +1147,7 @@ var _ = Describe("switching apply strategies", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1221,7 +1221,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1256,7 +1256,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: 
resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1299,7 +1299,7 @@ var _ = Describe("switching apply strategies", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1333,7 +1333,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "1", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1354,7 +1354,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "1", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1397,7 +1397,7 @@ var _ = Describe("switching apply strategies", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1435,12 +1435,12 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "1", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "1", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "1", @@ -1454,7 +1454,7 @@ var _ = Describe("switching apply strategies", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil diff --git a/test/e2e/placement_cro_test.go b/test/e2e/placement_cro_test.go index adce9e73b..ee7808962 100644 --- a/test/e2e/placement_cro_test.go +++ b/test/e2e/placement_cro_test.go @@ -48,6 +48,9 @@ var _ = Context("creating clusterResourceOverride (selecting all clusters) to ov Name: croName, }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: crpName, // assigned CRP name + }, ClusterResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ @@ -101,7 +104,7 @@ var _ = Context("creating clusterResourceOverride (selecting all clusters) to ov checkIfOverrideAnnotationsOnAllMemberClusters(true, want) }) - 
It("update cro attached to this CRP only and change annotation value", func() { + It("update cro and change annotation value", func() { Eventually(func() error { cro := &placementv1beta1.ClusterResourceOverride{} if err := hubClient.Get(ctx, types.NamespacedName{Name: croName}, cro); err != nil { @@ -148,7 +151,7 @@ var _ = Context("creating clusterResourceOverride (selecting all clusters) to ov checkIfOverrideAnnotationsOnAllMemberClusters(true, want) }) - It("update cro attached to this CRP only and no updates on the namespace", func() { + It("update cro and no updates on the namespace", func() { Eventually(func() error { cro := &placementv1beta1.ClusterResourceOverride{} if err := hubClient.Get(ctx, types.NamespacedName{Name: croName}, cro); err != nil { @@ -276,25 +279,6 @@ var _ = Context("creating clusterResourceOverride with multiple jsonPatchOverrid wantAnnotations := map[string]string{croTestAnnotationKey: croTestAnnotationValue, croTestAnnotationKey1: croTestAnnotationValue1} checkIfOverrideAnnotationsOnAllMemberClusters(true, wantAnnotations) }) - - It("update cro attached to an invalid CRP", func() { - Eventually(func() error { - cro := &placementv1beta1.ClusterResourceOverride{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: croName}, cro); err != nil { - return err - } - cro.Spec.Placement = &placementv1beta1.PlacementRef{ - Name: "invalid-crp", // assigned CRP name - } - return hubClient.Update(ctx, cro) - }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cro as expected", crpName) - }) - - It("CRP status should not be changed", func() { - wantCRONames := []string{fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, croName, 0)} - crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", wantCRONames, nil) - Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "CRP %s status has been changed", crpName) - }) }) var _ = Context("creating clusterResourceOverride with different rules for each cluster", Ordered, func() { @@ -723,3 +707,246 @@ var _ = Context("creating clusterResourceOverride with delete rules for one clus }, consistentlyDuration, eventuallyInterval).Should(BeTrue(), "Failed to delete work resources on member cluster %s", memberCluster.ClusterName) }) }) + +var _ = Context("creating clusterResourceOverride with cluster-scoped placementRef", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + croName := fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()) + croSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, croName, 0) + + BeforeAll(func() { + By("creating work resources") + createWorkResources() + + // Create the working CRO with proper PlacementRef before CRP so that the observed resource index is predictable. 
+    croWorking := &placementv1beta1.ClusterResourceOverride{
+      ObjectMeta: metav1.ObjectMeta{
+        Name: croName,
+      },
+      Spec: placementv1beta1.ClusterResourceOverrideSpec{
+        Placement: &placementv1beta1.PlacementRef{
+          Name:  crpName, // correct CRP name
+          Scope: placementv1beta1.ClusterScoped,
+        },
+        ClusterResourceSelectors: workResourceSelector(),
+        Policy: &placementv1beta1.OverridePolicy{
+          OverrideRules: []placementv1beta1.OverrideRule{
+            {
+              ClusterSelector: &placementv1beta1.ClusterSelector{
+                ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{},
+              },
+              JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{
+                {
+                  Operator: placementv1beta1.JSONPatchOverrideOpAdd,
+                  Path:     "/metadata/annotations",
+                  Value:    apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, croTestAnnotationKey, croTestAnnotationValue))},
+                },
+              },
+            },
+          },
+        },
+      },
+    }
+    By(fmt.Sprintf("creating working clusterResourceOverride %s", croName))
+    Expect(hubClient.Create(ctx, croWorking)).To(Succeed(), "Failed to create clusterResourceOverride %s", croName)
+
+    // This is to make sure the working CRO snapshot is created before the CRP.
+    Eventually(func() error {
+      croSnap := &placementv1beta1.ClusterResourceOverrideSnapshot{}
+      return hubClient.Get(ctx, types.NamespacedName{Name: croSnapShotName}, croSnap)
+    }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to get CRO snapshot %s", croSnapShotName)
+
+    // Create the CRP.
+    createCRP(crpName)
+  })
+
+  AfterAll(func() {
+    By(fmt.Sprintf("deleting placement %s and related resources", crpName))
+    ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters)
+
+    By(fmt.Sprintf("deleting clusterResourceOverride %s", croName))
+    cleanupClusterResourceOverride(croName)
+  })
+
+  It("should update CRP status as expected", func() {
+    wantCRONames := []string{croSnapShotName}
+    crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", wantCRONames, nil)
+    Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName)
+  })
+
+  // This check will ignore the annotation of resources.
+ It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have override annotations from working CRO on the placed resources", func() { + want := map[string]string{croTestAnnotationKey: croTestAnnotationValue} + checkIfOverrideAnnotationsOnAllMemberClusters(true, want) + }) +}) + +var _ = Context("creating clusterResourceOverride with cluster-scoped placementRef but pointing to a different crp", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + croNotWorkingName := fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()) + fakeCRPName := "fake-crp-name" + + BeforeAll(func() { + By("creating work resources") + createWorkResources() + + // Create the not working CRO with incorrect PlacementRef + croNotWorking := &placementv1beta1.ClusterResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: croNotWorkingName, + }, + Spec: placementv1beta1.ClusterResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: fakeCRPName, // fake CRP name + Scope: placementv1beta1.ClusterScoped, + }, + ClusterResourceSelectors: workResourceSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, croTestAnnotationKey1, croTestAnnotationValue1))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating not working clusterResourceOverride %s", croNotWorkingName)) + Expect(hubClient.Create(ctx, croNotWorking)).To(Succeed(), "Failed to create clusterResourceOverride %s", croNotWorkingName) + + // Create the CRP. + createCRP(crpName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s and related resources", crpName)) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + + By(fmt.Sprintf("deleting clusterResourceOverride %s", croNotWorkingName)) + cleanupClusterResourceOverride(croNotWorkingName) + }) + + It("should update CRP status with no overrides", func() { + crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + // This check will ignore the annotation of resources. 
+ It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should not have annotations from not working CRO on the placed resources", func() { + for _, memberCluster := range allMemberClusters { + Expect(validateNamespaceNoAnnotationOnCluster(memberCluster, croTestAnnotationKey1)).Should(Succeed(), "CRO pointing to a different CRP should not add annotations on %s", memberCluster.ClusterName) + Expect(validateConfigMapNoAnnotationKeyOnCluster(memberCluster, croTestAnnotationKey1)).Should(Succeed(), "CRO pointing to a different CRP should not add annotations on config map on %s", memberCluster.ClusterName) + } + }) +}) + +var _ = Context("creating clusterResourceOverride for a namespace-only CRP", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + croName := fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()) + croSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, croName, 0) + + BeforeAll(func() { + By("creating work resources") + createWorkResources() + + // Create the CRO before CRP so that the observed resource index is predictable. + cro := &placementv1beta1.ClusterResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: croName, + }, + Spec: placementv1beta1.ClusterResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: crpName, // assigned CRP name + Scope: placementv1beta1.ClusterScoped, + }, + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "Namespace", + Version: "v1", + Name: fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()), + }, + }, + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, croTestAnnotationKey, croTestAnnotationValue))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating clusterResourceOverride %s", croName)) + Expect(hubClient.Create(ctx, cro)).To(Succeed(), "Failed to create clusterResourceOverride %s", croName) + + // This is to make sure the CRO snapshot is created before the CRP + Eventually(func() error { + croSnap := &placementv1beta1.ClusterResourceOverrideSnapshot{} + return hubClient.Get(ctx, types.NamespacedName{Name: croSnapShotName}, croSnap) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRO snapshot as expected", croName) + + // Create the namespace-only CRP. 
+ createNamespaceOnlyCRP(crpName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s and related resources", crpName)) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + + By(fmt.Sprintf("deleting clusterResourceOverride %s", croName)) + cleanupClusterResourceOverride(croName) + }) + + It("should update CRP status as expected", func() { + wantCRONames := []string{croSnapShotName} + crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, "0", wantCRONames, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + It("should place only the namespace on member clusters", func() { + for _, memberCluster := range allMemberClusters { + workNamespacePlacedActual := workNamespacePlacedOnClusterActual(memberCluster) + Eventually(workNamespacePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work namespace on member cluster %s", memberCluster.ClusterName) + } + }) + + It("should have override annotations on the namespace only", func() { + want := map[string]string{croTestAnnotationKey: croTestAnnotationValue} + for _, memberCluster := range allMemberClusters { + Expect(validateAnnotationOfWorkNamespaceOnCluster(memberCluster, want)).Should(Succeed(), "Failed to override the annotation of work namespace on %s", memberCluster.ClusterName) + } + }) + + It("should not place configmap or other resources on member clusters", func() { + for _, memberCluster := range allMemberClusters { + // Verify configmap is not placed + Consistently(func() bool { + configMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + configMap := &corev1.ConfigMap{} + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: configMapName}, configMap) + return errors.IsNotFound(err) + }, consistentlyDuration, eventuallyInterval).Should(BeTrue(), "ConfigMap should not be placed on member cluster %s", memberCluster.ClusterName) + } + }) +}) diff --git a/test/e2e/placement_drift_diff_test.go b/test/e2e/placement_drift_diff_test.go index cd4d0131a..d0fb2a87f 100644 --- a/test/e2e/placement_drift_diff_test.go +++ b/test/e2e/placement_drift_diff_test.go @@ -186,7 +186,7 @@ var _ = Describe("take over existing resources", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -199,7 +199,7 @@ var _ = Describe("take over existing resources", func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, }, @@ -225,12 +225,12 @@ var _ = Describe("take over existing resources", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, { ClusterName: memberCluster3WestProdName, 
ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -244,7 +244,7 @@ var _ = Describe("take over existing resources", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -358,7 +358,7 @@ var _ = Describe("take over existing resources", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -370,7 +370,7 @@ var _ = Describe("take over existing resources", func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, { @@ -384,7 +384,7 @@ var _ = Describe("take over existing resources", func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, }, @@ -424,12 +424,12 @@ var _ = Describe("take over existing resources", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -443,7 +443,7 @@ var _ = Describe("take over existing resources", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -563,7 +563,7 @@ var _ = Describe("detect drifts on placed resources", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{ { @@ -585,12 +585,12 @@ var _ = Describe("detect drifts on placed resources", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, { ClusterName: memberCluster3WestProdName, 
ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -604,7 +604,7 @@ var _ = Describe("detect drifts on placed resources", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -702,7 +702,7 @@ var _ = Describe("detect drifts on placed resources", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -715,7 +715,7 @@ var _ = Describe("detect drifts on placed resources", func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, }, @@ -741,12 +741,12 @@ var _ = Describe("detect drifts on placed resources", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -760,7 +760,7 @@ var _ = Describe("detect drifts on placed resources", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -886,7 +886,7 @@ var _ = Describe("detect drifts on placed resources", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -898,7 +898,7 @@ var _ = Describe("detect drifts on placed resources", func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, { @@ -912,7 +912,7 @@ var _ = Describe("detect drifts on placed resources", func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, }, @@ -952,12 +952,12 @@ var _ = Describe("detect drifts on placed 
resources", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -971,7 +971,7 @@ var _ = Describe("detect drifts on placed resources", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1091,7 +1091,7 @@ var _ = Describe("report diff mode", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { @@ -1129,7 +1129,7 @@ var _ = Describe("report diff mode", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { @@ -1164,7 +1164,7 @@ var _ = Describe("report diff mode", func() { { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { @@ -1208,7 +1208,7 @@ var _ = Describe("report diff mode", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1300,14 +1300,14 @@ var _ = Describe("report diff mode", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{}, }, { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { @@ -1342,7 +1342,7 @@ var _ = Describe("report diff mode", func() { { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), FailedPlacements: 
[]placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { @@ -1386,7 +1386,7 @@ var _ = Describe("report diff mode", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1517,7 +1517,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1531,7 +1531,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 1, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, }, @@ -1558,7 +1558,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1572,7 +1572,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 2, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, }, @@ -1599,7 +1599,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1611,7 +1611,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, { @@ -1626,7 +1626,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 2, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, }, @@ -1679,7 +1679,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP 
status diff (-got, +want): %s", diff) } @@ -1764,7 +1764,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1778,7 +1778,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 2, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, }, @@ -1805,7 +1805,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1819,7 +1819,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 3, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, }, @@ -1846,7 +1846,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1858,7 +1858,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, { @@ -1873,7 +1873,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 2, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, }, @@ -1926,7 +1926,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } @@ -2001,21 +2001,21 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: 
[]placementv1beta1.DiffedResourcePlacement{}, }, { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{}, }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -2027,7 +2027,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, { @@ -2042,7 +2042,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 2, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, }, @@ -2095,7 +2095,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -2155,21 +2155,21 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: observedResourceIndex, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{}, }, { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: observedResourceIndex, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{}, }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: observedResourceIndex, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{}, DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{}, @@ -2189,7 +2189,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { // for comparison. 
wantCRPStatus := buildWantCRPStatus(crp.Generation, crp.Status.ObservedResourceIndex) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil diff --git a/test/e2e/placement_negative_cases_test.go b/test/e2e/placement_negative_cases_test.go index cfb25510a..ad81327d6 100644 --- a/test/e2e/placement_negative_cases_test.go +++ b/test/e2e/placement_negative_cases_test.go @@ -12,6 +12,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -23,23 +24,23 @@ import ( ) var _ = Describe("handling errors and failures gracefully", func() { + envelopeName := "wrapper" + wrappedCMName1 := "app-1" + wrappedCMName2 := "app-2" + + cmDataKey := "foo" + cmDataVal1 := "bar" + cmDataVal2 := "baz" + // This test spec uses envelopes for placement as it is a bit tricky to simulate // decoding errors with resources created directly in the hub cluster. // // TO-DO (chenyu1): reserve an API group exclusively on the hub cluster so that - // envelopes do not need to used for this test spec. - Context("pre-processing failure (decoding errors)", Ordered, func() { + // envelopes do not need to be used for this test spec. + Context("pre-processing failure in apply ops (decoding errors)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) - envelopeName := "wrapper" - wrappedCMName1 := "app-1" - wrappedCMName2 := "app-2" - - cmDataKey := "foo" - cmDataVal1 := "bar" - cmDataVal2 := "baz" - BeforeAll(func() { // Use an envelope to create duplicate resource entries. ns := appNamespace() @@ -145,12 +146,12 @@ var _ = Describe("handling errors and failures gracefully", func() { Condition: metav1.Condition{ Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeDecodingErred), + Reason: string(workapplier.ApplyOrReportDiffResTypeDecodingErred), ObservedGeneration: 0, }, }, }, - Conditions: resourcePlacementApplyFailedConditions(crp.Generation), + Conditions: perClusterApplyFailedConditions(crp.Generation), }, }, SelectedResources: []placementv1beta1.ResourceIdentifier{ @@ -169,7 +170,7 @@ var _ = Describe("handling errors and failures gracefully", func() { }, ObservedResourceIndex: "0", } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -217,4 +218,128 @@ var _ = Describe("handling errors and failures gracefully", func() { ensureCRPAndRelatedResourcesDeleted(crpName, []*framework.Cluster{memberCluster1EastProd}) }) }) + + Context("pre-processing failure in report diff mode (decoding errors)", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Use an envelope to create duplicate resource entries. 
+      ns := appNamespace()
+      Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name)
+
+      // Create an envelope resource to wrap the configMaps.
+      resourceEnvelope := &placementv1beta1.ResourceEnvelope{
+        ObjectMeta: metav1.ObjectMeta{
+          Name:      envelopeName,
+          Namespace: ns.Name,
+        },
+        Data: map[string]runtime.RawExtension{},
+      }
+
+      // Create a malformed config map as a wrapped resource.
+      badConfigMap := &corev1.ConfigMap{
+        TypeMeta: metav1.TypeMeta{
+          APIVersion: "malformed/v10",
+          Kind:       "Unknown",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+          Namespace: ns.Name,
+          Name:      wrappedCMName1,
+        },
+        Data: map[string]string{
+          cmDataKey: cmDataVal1,
+        },
+      }
+      badCMBytes, err := json.Marshal(badConfigMap)
+      Expect(err).To(BeNil(), "Failed to marshal configMap %s", badConfigMap.Name)
+      resourceEnvelope.Data["cm1.yaml"] = runtime.RawExtension{Raw: badCMBytes}
+      Expect(hubClient.Create(ctx, resourceEnvelope)).To(Succeed(), "Failed to create resourceEnvelope %s", resourceEnvelope.Name)
+
+      // Create a CRP.
+      crp := &placementv1beta1.ClusterResourcePlacement{
+        ObjectMeta: metav1.ObjectMeta{
+          Name: crpName,
+          // Add a custom finalizer; this would allow us to better observe
+          // the behavior of the controllers.
+          Finalizers: []string{customDeletionBlockerFinalizer},
+        },
+        Spec: placementv1beta1.PlacementSpec{
+          ResourceSelectors: workResourceSelector(),
+          Policy: &placementv1beta1.PlacementPolicy{
+            PlacementType: placementv1beta1.PickFixedPlacementType,
+            ClusterNames: []string{
+              memberCluster1EastProdName,
+            },
+          },
+          Strategy: placementv1beta1.RolloutStrategy{
+            Type: placementv1beta1.RollingUpdateRolloutStrategyType,
+            RollingUpdate: &placementv1beta1.RollingUpdateConfig{
+              UnavailablePeriodSeconds: ptr.To(2),
+            },
+            ApplyStrategy: &placementv1beta1.ApplyStrategy{
+              Type: placementv1beta1.ApplyStrategyTypeReportDiff,
+            },
+          },
+        },
+      }
+      Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP")
+    })
+
+    It("should update CRP status as expected", func() {
+      Eventually(func() error {
+        crp := &placementv1beta1.ClusterResourcePlacement{}
+        if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil {
+          return err
+        }
+
+        wantStatus := placementv1beta1.PlacementStatus{
+          Conditions: crpDiffReportingFailedConditions(crp.Generation, false),
+          PerClusterPlacementStatuses: []placementv1beta1.PerClusterPlacementStatus{
+            {
+              ClusterName:           memberCluster1EastProdName,
+              ObservedResourceIndex: "0",
+              Conditions:            perClusterDiffReportingFailedConditions(crp.Generation),
+            },
+          },
+          SelectedResources: []placementv1beta1.ResourceIdentifier{
+            {
+              Kind:    "Namespace",
+              Name:    workNamespaceName,
+              Version: "v1",
+            },
+            {
+              Group:     placementv1beta1.GroupVersion.Group,
+              Kind:      placementv1beta1.ResourceEnvelopeKind,
+              Version:   placementv1beta1.GroupVersion.Version,
+              Name:      envelopeName,
+              Namespace: workNamespaceName,
+            },
+          },
+          ObservedResourceIndex: "0",
+        }
+        if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" {
+          return fmt.Errorf("CRP status diff (-got, +want): %s", diff)
+        }
+        return nil
+      }, eventuallyDuration*20, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected")
+    })
+
+    It("should not apply any resource", func() {
+      Consistently(func() error {
+        cm := &corev1.ConfigMap{}
+        if err := memberCluster1EastProdClient.Get(ctx, types.NamespacedName{Name: wrappedCMName1, Namespace: workNamespaceName}, cm); !errors.IsNotFound(err) {
+          return fmt.Errorf("the config map exists, or an unexpected error 
has occurred: %w", err) + } + return nil + }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "The malformed configMap has been applied unexpectedly") + + Consistently(workNamespaceRemovedFromClusterActual(memberCluster1EastProd)).Should(Succeed(), "The namespace object has been applied unexpectedly") + }) + + AfterAll(func() { + // Remove the CRP and the namespace from the hub cluster. + ensureCRPAndRelatedResourcesDeleted(crpName, []*framework.Cluster{memberCluster1EastProd}) + }) + }) }) diff --git a/test/e2e/placement_ro_test.go b/test/e2e/placement_ro_test.go index ee7afe42d..ef2a60ac5 100644 --- a/test/e2e/placement_ro_test.go +++ b/test/e2e/placement_ro_test.go @@ -51,9 +51,10 @@ var _ = Context("creating resourceOverride (selecting all clusters) to override }, Spec: placementv1beta1.ResourceOverrideSpec{ Placement: &placementv1beta1.PlacementRef{ - Name: crpName, // assigned CRP name + Name: crpName, // assigned CRP name + Scope: placementv1beta1.ClusterScoped, }, - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -109,7 +110,7 @@ var _ = Context("creating resourceOverride (selecting all clusters) to override checkIfOverrideAnnotationsOnAllMemberClusters(false, want) }) - It("update ro attached to this CRP only and change annotation value", func() { + It("update ro and change annotation value", func() { Eventually(func() error { ro := &placementv1beta1.ResourceOverride{} if err := hubClient.Get(ctx, types.NamespacedName{Name: roName, Namespace: roNamespace}, ro); err != nil { @@ -119,7 +120,7 @@ var _ = Context("creating resourceOverride (selecting all clusters) to override Placement: &placementv1beta1.PlacementRef{ Name: crpName, }, - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -157,7 +158,7 @@ var _ = Context("creating resourceOverride (selecting all clusters) to override checkIfOverrideAnnotationsOnAllMemberClusters(false, want) }) - It("update ro attached to this CRP only and no update on the configmap itself", func() { + It("update ro and no update on the configmap itself", func() { Eventually(func() error { ro := &placementv1beta1.ResourceOverride{} if err := hubClient.Get(ctx, types.NamespacedName{Name: roName, Namespace: roNamespace}, ro); err != nil { @@ -214,7 +215,7 @@ var _ = Context("creating resourceOverride with multiple jsonPatchOverrides to o Namespace: roNamespace, }, Spec: placementv1beta1.ResourceOverrideSpec{ - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -273,28 +274,6 @@ var _ = Context("creating resourceOverride with multiple jsonPatchOverrides to o wantAnnotations := map[string]string{roTestAnnotationKey: roTestAnnotationValue, roTestAnnotationKey1: roTestAnnotationValue1} checkIfOverrideAnnotationsOnAllMemberClusters(false, wantAnnotations) }) - - It("update ro attached to an invalid CRP", func() { - Eventually(func() error { - ro := &placementv1beta1.ResourceOverride{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: roName, Namespace: roNamespace}, ro); err != nil { - return err - } - ro.Spec.Placement = &placementv1beta1.PlacementRef{ - Name: "invalid-crp", // assigned CRP name - } - return hubClient.Update(ctx, ro) - }, 
eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", crpName) - }) - - It("CRP status should not be changed", func() { - wantRONames := []placementv1beta1.NamespacedName{ - {Namespace: roNamespace, Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0)}, - } - crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) - Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "CRP %s status has been changed", crpName) - }) - }) var _ = Context("creating resourceOverride with different rules for each cluster to override configMap", Ordered, func() { @@ -317,7 +296,7 @@ var _ = Context("creating resourceOverride with different rules for each cluster Placement: &placementv1beta1.PlacementRef{ Name: crpName, // assigned CRP name }, - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -452,7 +431,7 @@ var _ = Context("creating resourceOverride and clusterResourceOverride, resource Namespace: roNamespace, }, Spec: placementv1beta1.ResourceOverrideSpec{ - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -543,7 +522,7 @@ var _ = Context("creating resourceOverride with incorrect path", Ordered, func() Placement: &placementv1beta1.PlacementRef{ Name: crpName, // assigned CRP name }, - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -614,7 +593,7 @@ var _ = Context("creating resourceOverride and resource becomes invalid after ov Placement: &placementv1beta1.PlacementRef{ Name: crpName, // assigned CRP name }, - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -674,7 +653,7 @@ var _ = Context("creating resourceOverride with a templated rules with cluster n Namespace: roNamespace, }, Spec: placementv1beta1.ResourceOverrideSpec{ - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -773,7 +752,7 @@ var _ = Context("creating resourceOverride with delete configMap", Ordered, func Namespace: roNamespace, }, Spec: placementv1beta1.ResourceOverrideSpec{ - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -893,7 +872,7 @@ var _ = Context("creating resourceOverride with a templated rules with cluster l Placement: &placementv1beta1.PlacementRef{ Name: crpName, // assigned CRP name }, - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -1005,7 +984,7 @@ var _ = Context("creating resourceOverride with a templated rules with cluster l ObservedGeneration: crp.Generation, }, } - if diff := cmp.Diff(crp.Status.Conditions, wantCondition, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status.Conditions, 
wantCondition, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP condition diff (-got, +want): %s", diff) } return nil @@ -1050,7 +1029,7 @@ var _ = Context("creating resourceOverride with non-exist label", Ordered, func( Placement: &placementv1beta1.PlacementRef{ Name: crpName, // assigned CRP name }, - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -1119,3 +1098,142 @@ var _ = Context("creating resourceOverride with non-exist label", Ordered, func( // This check will ignore the annotation of resources. It("should not place the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) }) + +var _ = Context("creating resourceOverride with namespace scope should not apply override", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + roNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + By("creating work resources") + createWorkResources() + // Create the CRP. + createCRP(crpName) + // Create the ro with namespace scope. + ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: roNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: crpName, // assigned CRP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s and related resources", crpName)) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, roNamespace) + }) + + It("should update CRP status as expected without override", func() { + crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + // This check will ignore the annotation of resources. 
+ It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should not have override annotations on the configmap", func() { + for _, memberCluster := range allMemberClusters { + Expect(validateConfigMapNoAnnotationKeyOnCluster(memberCluster, roTestAnnotationKey)).Should(Succeed(), "Failed to validate no override annotation on config map on %s", memberCluster.ClusterName) + } + }) +}) + +var _ = Context("creating resourceOverride but namespace-only CRP should not apply override", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + roNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + By("creating work resources") + createWorkResources() + // Create the namespace-only CRP. + createNamespaceOnlyCRP(crpName) + // Create the ro with cluster scope referring to the namespace-only CRP. + ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: roNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: crpName, // assigned CRP name + Scope: placementv1beta1.ClusterScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s and related resources", crpName)) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, roNamespace) + }) + + It("should update CRP status as expected without override", func() { + // Since the CRP is namespace-only, configMap is not placed, so no override should be applied. + crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, "0", nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + // This check will verify that only namespace is placed, not the configmap. 
+ It("should place only the namespace on member clusters", checkIfPlacedNamespaceResourceOnAllMemberClusters) + + It("should not place the configmap on member clusters since CRP is namespace-only", func() { + for _, memberCluster := range allMemberClusters { + namespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + configMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + configMap := corev1.ConfigMap{} + err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: namespaceName}, &configMap) + Expect(errors.IsNotFound(err)).To(BeTrue(), "ConfigMap should not be placed on member cluster %s since CRP is namespace-only", memberCluster.ClusterName) + } + }) +}) diff --git a/test/e2e/placement_selecting_resources_test.go b/test/e2e/placement_selecting_resources_test.go index 8624698cd..0f8cba3f1 100644 --- a/test/e2e/placement_selecting_resources_test.go +++ b/test/e2e/placement_selecting_resources_test.go @@ -93,7 +93,7 @@ var _ = Describe("creating CRP and selecting resources by name", Ordered, func() It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -114,7 +114,7 @@ var _ = Describe("creating CRP and selecting resources by label", Ordered, func( Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -157,7 +157,7 @@ var _ = Describe("creating CRP and selecting resources by label", Ordered, func( It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -178,7 +178,7 @@ var _ = Describe("validating CRP when cluster-scoped resources become selected a Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -243,7 +243,7 @@ var _ = Describe("validating CRP when cluster-scoped resources become selected a It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: 
crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -264,7 +264,7 @@ var _ = Describe("validating CRP when cluster-scoped resources become unselected Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -328,7 +328,7 @@ var _ = Describe("validating CRP when cluster-scoped resources become unselected }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -420,7 +420,7 @@ var _ = Describe("validating CRP when cluster-scoped and namespace-scoped resour It("should remove the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -508,7 +508,7 @@ var _ = Describe("validating CRP when adding resources in a matching namespace", It("should remove the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -601,7 +601,7 @@ var _ = Describe("validating CRP when deleting resources in a matching namespace It("should remove the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -620,7 +620,7 @@ var _ = Describe("validating CRP when selecting a reserved resource", Ordered, f Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -641,7 +641,7 @@ var _ = Describe("validating CRP when selecting a reserved resource", 
Ordered, f AfterAll(func() { By(fmt.Sprintf("deleting placement %s", crpName)) - cleanupCRP(crpName) + cleanupPlacement(types.NamespacedName{Name: crpName}) }) It("should update CRP status as expected", func() { @@ -661,7 +661,7 @@ var _ = Describe("validating CRP when selecting a reserved resource", Ordered, f }, }, } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -680,7 +680,7 @@ var _ = Describe("validating CRP when selecting a reserved resource", Ordered, f }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -702,7 +702,7 @@ var _ = Describe("When creating a pickN ClusterResourcePlacement with duplicated Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -743,7 +743,7 @@ var _ = Describe("When creating a pickN ClusterResourcePlacement with duplicated }, }, } - if diff := cmp.Diff(gotCRP.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(gotCRP.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -753,7 +753,7 @@ var _ = Describe("When creating a pickN ClusterResourcePlacement with duplicated It("updating the CRP to select one namespace", func() { gotCRP := &placementv1beta1.ClusterResourcePlacement{} Expect(hubClient.Get(ctx, types.NamespacedName{Name: crpName}, gotCRP)).Should(Succeed(), "Failed to get CRP %s", crpName) - gotCRP.Spec.ResourceSelectors = []placementv1beta1.ClusterResourceSelector{ + gotCRP.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -841,22 +841,22 @@ var _ = Describe("validating CRP when failed to apply resources", Ordered, func( Condition: metav1.Condition{ Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 0, }, }, }, - Conditions: resourcePlacementApplyFailedConditions(crp.Generation), + Conditions: perClusterApplyFailedConditions(crp.Generation), }, { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), + Conditions: perClusterRolloutCompletedConditions(crp.Generation, true, false), }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), + Conditions: perClusterRolloutCompletedConditions(crp.Generation, true, false), }, }, SelectedResources: []placementv1beta1.ResourceIdentifier{ @@ -874,7 +874,7 @@ var _ = Describe("validating CRP when failed to apply 
resources", Ordered, func( }, ObservedResourceIndex: "0", } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -899,7 +899,7 @@ var _ = Describe("validating CRP when failed to apply resources", Ordered, func( }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) @@ -941,7 +941,7 @@ var _ = Describe("validating CRP when placing cluster scope resource (other than Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Kind: "ClusterRole", @@ -1020,7 +1020,7 @@ var _ = Describe("validating CRP when placing cluster scope resource (other than }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -1041,7 +1041,7 @@ var _ = Describe("validating CRP revision history allowing single revision when Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -1078,7 +1078,7 @@ var _ = Describe("validating CRP revision history allowing single revision when return err } - crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, placementv1beta1.ClusterResourceSelector{ + crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, placementv1beta1.ResourceSelectorTerm{ Group: "", Kind: "Namespace", Version: "v1", @@ -1114,7 +1114,7 @@ var _ = Describe("validating CRP revision history allowing single revision when It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -1135,7 +1135,7 @@ var _ = Describe("validating CRP revision history allowing multiple revisions wh Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", 
Kind: "Namespace", @@ -1171,7 +1171,7 @@ var _ = Describe("validating CRP revision history allowing multiple revisions wh return err } - crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, placementv1beta1.ClusterResourceSelector{ + crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, placementv1beta1.ResourceSelectorTerm{ Group: "", Kind: "Namespace", Version: "v1", @@ -1207,7 +1207,7 @@ var _ = Describe("validating CRP revision history allowing multiple revisions wh It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -1232,7 +1232,7 @@ var _ = Describe("validating CRP when selected resources cross the 1MB limit", O PlacementType: placementv1beta1.PickFixedPlacementType, ClusterNames: []string{memberCluster1EastProdName, memberCluster2EastCanaryName}, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -1282,7 +1282,7 @@ var _ = Describe("validating CRP when selected resources cross the 1MB limit", O }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, largeEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -1367,7 +1367,7 @@ var _ = Describe("creating CRP and checking selected resources order", Ordered, Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", diff --git a/test/e2e/placement_with_custom_config_test.go b/test/e2e/placement_with_custom_config_test.go index 5aaff681e..f593386c3 100644 --- a/test/e2e/placement_with_custom_config_test.go +++ b/test/e2e/placement_with_custom_config_test.go @@ -54,7 +54,7 @@ var _ = Describe("validating CRP when using customized resourceSnapshotCreationM Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -137,7 +137,7 @@ var _ = Describe("validating CRP when using customized resourceSnapshotCreationM It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, 
eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -165,7 +165,7 @@ var _ = Describe("validating that CRP status can be updated after updating the r Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -257,7 +257,7 @@ var _ = Describe("validating that CRP status can be updated after updating the r It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) diff --git a/test/e2e/resource_placement_pickall_test.go b/test/e2e/resource_placement_pickall_test.go new file mode 100644 index 000000000..900b37146 --- /dev/null +++ b/test/e2e/resource_placement_pickall_test.go @@ -0,0 +1,867 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + + placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/propertyprovider" + "go.goms.io/fleet/pkg/propertyprovider/azure" + "go.goms.io/fleet/test/e2e/framework" +) + +var _ = Describe("placing namespace-scoped resources using an RP with PickAll policy", Label("resourceplacement"), func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + + BeforeEach(OncePerOrdered, func() { + // Create the resources. + createWorkResources() + + // Create the CRP with Namespace-only selector. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers.
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: namespaceOnlySelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + By("should update CRP status as expected") + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterEach(OncePerOrdered, func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, allMemberClusters) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Context("with no placement policy specified", Ordered, func() { + It("creating the RP should succeed", func() { + // Create the RP in the same namespace selecting namespaced resources with no placement policy. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on all member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + }) + + Context("with no affinities specified", Ordered, func() { + It("creating the RP should succeed", func() { + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on all member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + }) + + Context("with affinities, label selector only, updated", Ordered, func() { + It("creating the RP should succeed", func() { + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabelName, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + envProd, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should place resources on matching clusters", func() { + // Verify that resources have been placed on the matching clusters. 
+ resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1EastProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on matching clusters") + }) + + It("can update the RP", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, rp); err != nil { + return err + } + + rp.Spec.Policy.Affinity = &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionWest, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabelName, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + envProd, + }, + }, + }, + }, + }, + }, + }, + }, + } + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster3WestProdName}, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on matching clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster3WestProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on matching clusters") + }) + + It("should remove resources on previously matched clusters", func() { + checkIfRemovedConfigMapFromMemberClusters([]*framework.Cluster{memberCluster1EastProd}) + }) + }) + + Context("with affinities, label selector only, no matching clusters", Ordered, func() { + It("creating the RP should succeed", func() { + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionWest, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabelName, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + envCanary, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), nil, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should not place resources on any cluster", checkIfRemovedConfigMapFromAllMemberClusters) + }) + + Context("with affinities, metric selector only", Ordered, func() { + It("creating the RP should succeed", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "3", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName, memberCluster3WestProdName}, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on matching clusters", func() { + targetClusters := []*framework.Cluster{memberCluster2EastCanary, memberCluster3WestProd} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("with affinities, metric selector only, updated", Ordered, func() { + It("creating the RP should succeed", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "3", + }, + }, + { + Name: propertyprovider.TotalCPUCapacityProperty, + Operator: placementv1beta1.PropertySelectorLessThan, + Values: []string{ + "10000", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName, memberCluster3WestProdName}, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on matching clusters", func() { + targetClusters := []*framework.Cluster{memberCluster2EastCanary, memberCluster3WestProd} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + + It("can update the RP", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, rp); err != nil { + return err + } + + rp.Spec.Policy.Affinity = &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "3", + }, + }, + { + Name: propertyprovider.TotalCPUCapacityProperty, + Operator: placementv1beta1.PropertySelectorLessThan, + Values: []string{ + "10000", + }, + }, + }, + }, + }, + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorEqualTo, + Values: []string{ + "4", + }, + }, + { + Name: propertyprovider.AvailableMemoryCapacityProperty, + Operator: placementv1beta1.PropertySelectorNotEqualTo, + Values: []string{ + "20000Gi", + }, + }, + }, + }, + 
}, + }, + }, + }, + } + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName, memberCluster3WestProdName}, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on matching clusters", func() { + targetClusters := []*framework.Cluster{memberCluster2EastCanary, memberCluster3WestProd} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("with affinities, metric selector only, no matching clusters", Ordered, func() { + It("creating the RP should succeed", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: azure.PerCPUCoreCostProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "0.01", + }, + }, + { + Name: propertyprovider.AllocatableCPUCapacityProperty, + Operator: placementv1beta1.PropertySelectorGreaterThan, + Values: []string{ + "10000", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), nil, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should not place resources on any cluster", checkIfRemovedConfigMapFromAllMemberClusters) + }) + + Context("with affinities, label and metric selectors", Ordered, func() { + It("creating the RP should succeed", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + }, + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "3", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName}, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on matching clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster2EastCanary) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on matching clusters") + }) + }) + + Context("with affinities, label and metric selectors, updated", Ordered, func() { + It("creating the RP should succeed", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + }, + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.AllocatableCPUCapacityProperty, + Operator: placementv1beta1.PropertySelectorLessThanOrEqualTo, + Values: []string{ + "10000", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName, memberCluster2EastCanaryName}, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on matching clusters", func() { + targetClusters := []*framework.Cluster{memberCluster1EastProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + + It("can update the RP", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, rp); err != nil { + return err + } + + rp.Spec.Policy.Affinity = &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabelName, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + envCanary, + }, + }, + }, + }, + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.AllocatableMemoryCapacityProperty, + Operator: placementv1beta1.PropertySelectorLessThan, + Values: []string{ + "1Ki", + }, + }, + }, + }, + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabelName, + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{ + regionWest, + }, + }, + { + Key: envLabelName, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + envProd, + }, + }, + }, + }, 
+ PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorEqualTo, + Values: []string{ + "2", + }, + }, + { + Name: propertyprovider.TotalMemoryCapacityProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "1Ki", + }, + }, + }, + }, + }, + }, + }, + }, + } + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName}, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on matching clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1EastProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on matching clusters") + }) + + It("should remove resources on previously matched clusters", func() { + checkIfRemovedConfigMapFromMemberClusters([]*framework.Cluster{memberCluster2EastCanary}) + }) + }) + + Context("with affinities, label and metric selectors, no matching clusters", Ordered, func() { + It("creating the RP should succeed", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + }, + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: azure.PerGBMemoryCostProperty, + Operator: placementv1beta1.PropertySelectorEqualTo, + Values: []string{ + "0", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), nil, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should not place resources on any cluster", checkIfRemovedConfigMapFromAllMemberClusters) + }) +}) diff --git a/test/e2e/resource_placement_pickfixed_test.go 
b/test/e2e/resource_placement_pickfixed_test.go new file mode 100644 index 000000000..e1442d766 --- /dev/null +++ b/test/e2e/resource_placement_pickfixed_test.go @@ -0,0 +1,309 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/test/e2e/framework" +) + +var _ = Describe("placing namespace-scoped resources using an RP with PickFixed policy", Label("resourceplacement"), func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Name: rpName, Namespace: appNamespace().Name} + + BeforeEach(OncePerOrdered, func() { + // Create the resources. + createWorkResources() + + // Create the CRP with Namespace-only selector. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: namespaceOnlySelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterEach(OncePerOrdered, func() { + ensureRPAndRelatedResourcesDeleted(rpKey, allMemberClusters) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Context("pick some clusters", Ordered, func() { + It("should create an RP with pickFixed policy successfully", func() { + // Create the RP in the same namespace selecting namespaced resources.
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1EastProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + }) + }) + + Context("refreshing target clusters", Ordered, func() { + It("should create an RP with pickFixed policy successfully", func() { + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should place resources on the specified clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1EastProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters") + }) + + It("update RP to pick a different cluster", func() { + rp := &placementv1beta1.ResourcePlacement{} + Eventually(func() error { + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return err + } + rp.Spec.Policy.ClusterNames = []string{memberCluster2EastCanaryName} + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on newly specified clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster2EastCanary) +
Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters") + }) + + It("should remove resources from previously specified clusters", func() { + checkIfRemovedConfigMapFromMemberClusters([]*framework.Cluster{memberCluster1EastProd}) + }) + }) + + Context("pick unhealthy and non-existent clusters", Ordered, func() { + It("should create RP with pickFixed policy targeting unhealthy and non-existent clusters", func() { + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster4UnhealthyName, + memberCluster5LeftName, + memberCluster6NonExistentName, + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), nil, []string{memberCluster4UnhealthyName, memberCluster5LeftName, memberCluster6NonExistentName}, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + }) + + Context("switch to another cluster to simulate stuck deleting works", Ordered, func() { + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + var currentConfigMap corev1.ConfigMap + + It("should create RP with pickFixed policy successfully", func() { + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on specified clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1EastProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters") + }) + + It("should add finalizer to work resources on the specified clusters", func() { + Eventually(func() error { + if err := memberCluster1EastProd.KubeClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: appConfigMapName}, &currentConfigMap); err != nil { + return err + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to get configmap") + // Add finalizer to block deletion to simulate work stuck + controllerutil.AddFinalizer(&currentConfigMap, "example.com/finalizer") + Expect(memberCluster1EastProd.KubeClient.Update(ctx, &currentConfigMap)).To(Succeed(), "Failed to update configmap with finalizer") + }) + + It("update RP to pick another cluster", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return err + } + rp.Spec.Policy.ClusterNames = []string{memberCluster2EastCanaryName} + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP") + }) + + It("should update RP status as expected", func() { + // should successfully apply to the new cluster + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should have a deletion timestamp on work objects", func() { + work := &placementv1beta1.Work{} + workName := fmt.Sprintf("%s.%s-work", rpKey.Namespace, rpName) + Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: fmt.Sprintf("fleet-member-%s", memberCluster1EastProdName), Name: workName}, work)).Should(Succeed(), "Failed to get work") + Expect(work.DeletionTimestamp).ShouldNot(BeNil(), "Work should have a deletion timestamp") + }) + + It("configmap should still exist on previously specified cluster and be in deleting state", func() { + configMap := &corev1.ConfigMap{} + Expect(memberCluster1EastProd.KubeClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: appConfigMapName}, configMap)).Should(Succeed(), "Failed to get 
configmap") + Expect(configMap.DeletionTimestamp).ShouldNot(BeNil(), "ConfigMap should have a deletion timestamp") + }) + + It("should remove finalizer from work resources on the specified clusters", func() { + configMap := &corev1.ConfigMap{} + Expect(memberCluster1EastProd.KubeClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: appConfigMapName}, configMap)).Should(Succeed(), "Failed to get configmap") + controllerutil.RemoveFinalizer(configMap, "example.com/finalizer") + Expect(memberCluster1EastProd.KubeClient.Update(ctx, configMap)).To(Succeed(), "Failed to update configmap with finalizer") + }) + + It("should remove resources from previously specified clusters", func() { + checkIfRemovedConfigMapFromMemberClusters([]*framework.Cluster{memberCluster1EastProd}) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + }) +}) diff --git a/test/e2e/resource_placement_pickn_test.go b/test/e2e/resource_placement_pickn_test.go new file mode 100644 index 000000000..b08ff148f --- /dev/null +++ b/test/e2e/resource_placement_pickn_test.go @@ -0,0 +1,889 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + + placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/propertyprovider" + "go.goms.io/fleet/pkg/propertyprovider/azure" + "go.goms.io/fleet/test/e2e/framework" +) + +var _ = Describe("placing namespaced scoped resources using a RP with PickN policy", Label("resourceplacement"), func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Name: rpName, Namespace: appNamespace().Name} + + BeforeEach(OncePerOrdered, func() { + // Create the resources. + createWorkResources() + + // Create the CRP with Namespace-only selector. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: namespaceOnlySelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterEach(OncePerOrdered, func() { + ensureRPAndRelatedResourcesDeleted(rpKey, allMemberClusters) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Context("picking N clusters with no affinities/topology spread constraints (pick by cluster names in alphanumeric order)", Ordered, func() { + It("should create rp with pickN policy successfully", func() { + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(1)), + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster3WestProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster3WestProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + }) + }) + + Context("upscaling", Ordered, func() { + It("should create rp with pickN policy for upscaling test", func() { + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(1)), + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should place resources on the picked clusters", func() { + // Verify that resources have been placed on the picked clusters. + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster3WestProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + }) + + It("can upscale", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return err + } + + rp.Spec.Policy.NumberOfClusters = ptr.To(int32(2)) + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to upscale") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster3WestProdName, memberCluster2EastCanaryName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the newly picked clusters", func() { + targetClusters := []*framework.Cluster{memberCluster3WestProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("downscaling", Ordered, func() { + It("should create rp with pickN policy for downscaling test", func() { + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should place resources on the picked clusters", func() { + targetClusters := []*framework.Cluster{memberCluster3WestProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + + It("can downscale", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return err + } + + rp.Spec.Policy.NumberOfClusters = ptr.To(int32(1)) + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to downscale") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster3WestProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the newly picked clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster3WestProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + }) + + It("should remove resources from the downscaled clusters", func() { + checkIfRemovedConfigMapFromMemberClusters([]*framework.Cluster{memberCluster2EastCanary}) + }) + }) + + Context("picking N clusters with affinities and topology spread constraints", Ordered, func() { + It("should create rp with pickN policy and constraints successfully", func() { + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + // Note that due to limitations in the E2E environment, specifically the limited + // number of clusters available, the affinity and topology spread constraints + // specified here are validated only on a very superficial level, i.e., the flow + // functions. For further evaluations, specifically the correctness check + // of the affinity and topology spread constraint logic, see the scheduler + // integration tests. 
+ Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + }, + }, + }, + }, + }, + }, + TopologySpreadConstraints: []placementv1beta1.TopologySpreadConstraint{ + { + MaxSkew: ptr.To(int32(1)), + TopologyKey: envLabelName, + WhenUnsatisfiable: placementv1beta1.DoNotSchedule, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName, memberCluster2EastCanaryName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + targetClusters := []*framework.Cluster{memberCluster1EastProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("affinities and topology spread constraints updated", Ordered, func() { + It("should create rp with initial constraints", func() { + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + }, + }, + }, + }, + }, + }, + TopologySpreadConstraints: []placementv1beta1.TopologySpreadConstraint{ + { + MaxSkew: ptr.To(int32(1)), + TopologyKey: envLabelName, + WhenUnsatisfiable: placementv1beta1.DoNotSchedule, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should place resources on the picked clusters", func() { + targetClusters := []*framework.Cluster{memberCluster1EastProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + + It("can update the RP", func() { + // Specify new affinity and topology spread constraints. 
+ Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return err + } + + rp.Spec.Policy.Affinity = &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []placementv1beta1.PreferredClusterSelector{ + { + Weight: 20, + Preference: placementv1beta1.ClusterSelectorTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabelName: envProd, + }, + }, + }, + }, + }, + }, + } + rp.Spec.Policy.TopologySpreadConstraints = []placementv1beta1.TopologySpreadConstraint{ + { + MaxSkew: ptr.To(int32(1)), + TopologyKey: regionLabelName, + WhenUnsatisfiable: placementv1beta1.ScheduleAnyway, + }, + } + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP with new affinity and topology spread constraints") + }) + + // topology spread constraints takes a bit longer to be applied + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName, memberCluster3WestProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the newly picked clusters", func() { + targetClusters := []*framework.Cluster{memberCluster1EastProd, memberCluster3WestProd} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + + It("should remove resources from the unpicked clusters", func() { + checkIfRemovedConfigMapFromMemberClusters([]*framework.Cluster{memberCluster2EastCanary}) + }) + }) + + Context("not enough clusters to pick", Ordered, func() { + It("should create rp with pickN policy requesting more clusters than available", func() { + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + // This spec uses an RP of the PickN placement type with the number of + // target clusters equal to that of all clusters present in the environment. + // + // This is necessary as the RP controller reports status for unselected clusters + // only in a partial manner; specifically, for an RP of the PickN placement with + // N target clusters but only M matching clusters, only N - M decisions for + // unselected clusters will be reported in the RP status. To avoid + // undeterministic behaviors, here this value is set to make sure that all + // unselected clusters will be included in the status. 
+ NumberOfClusters: ptr.To(int32(5)), + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName, memberCluster2EastCanaryName}, []string{memberCluster3WestProdName, memberCluster4UnhealthyName, memberCluster5LeftName}, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + targetClusters := []*framework.Cluster{memberCluster1EastProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("downscaling to zero", Ordered, func() { + It("should create rp with pickN policy for downscaling to zero test", func() { + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should place resources on the picked clusters", func() { + targetClusters := []*framework.Cluster{memberCluster3WestProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + + It("can downscale", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return err + } + + rp.Spec.Policy.NumberOfClusters = ptr.To(int32(0)) + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to downscale") + }) + + It("should remove resources from the downscaled clusters", func() { + downscaledClusters := []*framework.Cluster{memberCluster3WestProd, memberCluster2EastCanary} + checkIfRemovedConfigMapFromMemberClusters(downscaledClusters) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), nil, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + }) + + Context("picking N clusters with single property sorter", Ordered, func() { + It("should create rp with pickN policy and single property sorter", func() { + // Have to add this check in each It() spec, instead of using BeforeAll(). + // Otherwise, the AfterEach() would be skipped too and the namespace does not get cleaned up. + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + // Note that due to limitations in the E2E environment, specifically the limited + // number of clusters available, the affinity and topology spread constraints + // specified here are validated only on a very superficial level, i.e., the flow + // functions. For further evaluations, specifically the correctness check + // of the affinity and topology spread constraint logic, see the scheduler + // integration tests. 
+ Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []placementv1beta1.PreferredClusterSelector{ + { + Weight: 20, + Preference: placementv1beta1.ClusterSelectorTerm{ + PropertySorter: &placementv1beta1.PropertySorter{ + Name: propertyprovider.NodeCountProperty, + SortOrder: placementv1beta1.Ascending, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName, memberCluster2EastCanaryName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + targetClusters := []*framework.Cluster{memberCluster1EastProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("picking N clusters with multiple property sorters", Ordered, func() { + It("should create rp with pickN policy and multiple property sorters", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + // Note that due to limitations in the E2E environment, specifically the limited + // number of clusters available, the affinity and topology spread constraints + // specified here are validated only on a very superficial level, i.e., the flow + // functions. For further evaluations, specifically the correctness check + // of the affinity and topology spread constraint logic, see the scheduler + // integration tests. 
+ Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []placementv1beta1.PreferredClusterSelector{ + { + Weight: 20, + Preference: placementv1beta1.ClusterSelectorTerm{ + PropertySorter: &placementv1beta1.PropertySorter{ + Name: propertyprovider.NodeCountProperty, + SortOrder: placementv1beta1.Ascending, + }, + }, + }, + { + Weight: 20, + Preference: placementv1beta1.ClusterSelectorTerm{ + PropertySorter: &placementv1beta1.PropertySorter{ + Name: propertyprovider.AvailableMemoryCapacityProperty, + SortOrder: placementv1beta1.Descending, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster3WestProdName, memberCluster2EastCanaryName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + targetClusters := []*framework.Cluster{memberCluster3WestProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("picking N clusters with label selector and property sorter", Ordered, func() { + It("should create rp with pickN policy, label selector and property sorter", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + // Note that due to limitations in the E2E environment, specifically the limited + // number of clusters available, the affinity and topology spread constraints + // specified here are validated only on a very superficial level, i.e., the flow + // functions. For further evaluations, specifically the correctness check + // of the affinity and topology spread constraint logic, see the scheduler + // integration tests. 
+ Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []placementv1beta1.PreferredClusterSelector{ + { + Weight: 20, + Preference: placementv1beta1.ClusterSelectorTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + }, + PropertySorter: &placementv1beta1.PropertySorter{ + Name: propertyprovider.NodeCountProperty, + SortOrder: placementv1beta1.Ascending, + }, + }, + }, + { + Weight: 20, + Preference: placementv1beta1.ClusterSelectorTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabelName: envCanary, + }, + }, + PropertySorter: &placementv1beta1.PropertySorter{ + Name: propertyprovider.AvailableMemoryCapacityProperty, + SortOrder: placementv1beta1.Descending, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName, memberCluster1EastProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + targetClusters := []*framework.Cluster{memberCluster2EastCanary, memberCluster1EastProd} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("picking N clusters with required and preferred affinity terms", Ordered, func() { + It("should create rp with pickN policy, required and preferred affinity terms", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(1)), + // Note that due to limitations in the E2E environment, specifically the limited + // number of clusters available, the affinity and topology spread constraints + // specified here are validated only on a very superficial level, i.e., the flow + // functions. For further evaluations, specifically the correctness check + // of the affinity and topology spread constraint logic, see the scheduler + // integration tests. 
+ Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabelName: envProd, + }, + }, + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: azure.PerCPUCoreCostProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "0", + }, + }, + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorNotEqualTo, + Values: []string{ + "3", + }, + }, + }, + }, + }, + }, + }, + PreferredDuringSchedulingIgnoredDuringExecution: []placementv1beta1.PreferredClusterSelector{ + { + Weight: 30, + Preference: placementv1beta1.ClusterSelectorTerm{ + PropertySorter: &placementv1beta1.PropertySorter{ + Name: propertyprovider.NodeCountProperty, + SortOrder: placementv1beta1.Ascending, + }, + }, + }, + { + Weight: 40, + Preference: placementv1beta1.ClusterSelectorTerm{ + PropertySorter: &placementv1beta1.PropertySorter{ + Name: propertyprovider.AvailableMemoryCapacityProperty, + SortOrder: placementv1beta1.Descending, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster3WestProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + targetClusters := []*framework.Cluster{memberCluster3WestProd} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) +}) diff --git a/test/e2e/resource_placement_ro_test.go b/test/e2e/resource_placement_ro_test.go new file mode 100644 index 000000000..edd6159c4 --- /dev/null +++ b/test/e2e/resource_placement_ro_test.go @@ -0,0 +1,1174 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package e2e + +import ( + "fmt" + + "github.com/google/go-cmp/cmp" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + + placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + scheduler "go.goms.io/fleet/pkg/scheduler/framework" + "go.goms.io/fleet/pkg/utils/condition" +) + +var _ = Describe("placing namespaced scoped resources using a RP with ResourceOverride", Label("resourceplacement"), func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + + BeforeEach(OncePerOrdered, func() { + By("creating work resources") + createWorkResources() + + // Create the CRP with Namespace-only selector. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: namespaceOnlySelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + By("should update CRP status as expected") + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterEach(OncePerOrdered, func() { + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Context("creating resourceOverride (selecting all clusters) to override configMap for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + + // Create the ro. 
+ ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: rpName, // assigned RP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + + By("should update RP status to not select any override") + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, nil) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s/%s status as expected", workNamespace, rpName) + + By("should not have annotations on the configmap") + for _, memberCluster := range allMemberClusters { + Expect(validateConfigMapNoAnnotationKeyOnCluster(memberCluster, roTestAnnotationKey)).Should(Succeed(), "Failed to remove the annotation of config map on %s", memberCluster.ClusterName) + } + + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + }) + + It("should update RP status as expected", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0)}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. 
+ It("should place the resources on all member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have override annotations on the configmap", func() { + want := map[string]string{roTestAnnotationKey: roTestAnnotationValue} + checkIfOverrideAnnotationsOnAllMemberClusters(false, want) + }) + + It("update ro and change annotation value", func() { + Eventually(func() error { + ro := &placementv1beta1.ResourceOverride{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: roName, Namespace: workNamespace}, ro); err != nil { + return err + } + ro.Spec = placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: rpName, // assigned RP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, roTestAnnotationKey, roTestAnnotationValue1))}, + }, + }, + }, + }, + }, + } + return hubClient.Update(ctx, ro) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) + }) + + It("should update RP status as expected after RO update", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 1)}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. 
+ It("should place the selected resources on member clusters after RO update", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have updated override annotations on the configmap", func() { + want := map[string]string{roTestAnnotationKey: roTestAnnotationValue1} + checkIfOverrideAnnotationsOnAllMemberClusters(false, want) + }) + + It("update ro and no update on the configmap itself", func() { + Eventually(func() error { + ro := &placementv1beta1.ResourceOverride{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: roName, Namespace: workNamespace}, ro); err != nil { + return err + } + ro.Spec.Policy.OverrideRules = append(ro.Spec.Policy.OverrideRules, placementv1beta1.OverrideRule{ + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "invalid-key": "invalid-value", + }, + }, + }, + }, + }, + OverrideType: placementv1beta1.DeleteOverrideType, + }) + return hubClient.Update(ctx, ro) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", crpName) + }) + + It("should refresh the RP status even as there is no change on the resources", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 2)}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have override annotations on the configmap", func() { + want := map[string]string{roTestAnnotationKey: roTestAnnotationValue1} + checkIfOverrideAnnotationsOnAllMemberClusters(false, want) + }) + }) + + Context("creating resourceOverride with multiple jsonPatchOverrides to override configMap for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + roSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0) + + BeforeAll(func() { + ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: fmt.Sprintf("/metadata/annotations/%s", roTestAnnotationKey1), + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s"`, roTestAnnotationValue1))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating 
resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + // wait until the snapshot is created so that the observed resource index is predictable. + Eventually(func() error { + roSnap := &placementv1beta1.ResourceOverrideSnapshot{} + return hubClient.Get(ctx, types.NamespacedName{Name: roSnapShotName, Namespace: workNamespace}, roSnap) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) + + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as expected", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: roSnapShotName}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have override annotations on the configmap", func() { + wantAnnotations := map[string]string{roTestAnnotationKey: roTestAnnotationValue, roTestAnnotationKey1: roTestAnnotationValue1} + checkIfOverrideAnnotationsOnAllMemberClusters(false, wantAnnotations) + }) + }) + + Context("creating resourceOverride with different rules for each cluster to override configMap for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + + // Create the ro. 
+ ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: rpName, // assigned RP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{regionLabelName: regionEast, envLabelName: envProd}, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s-0"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + }, + }, + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{regionLabelName: regionEast, envLabelName: envCanary}, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s-1"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + }, + }, + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{regionLabelName: regionWest, envLabelName: envProd}, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s-2"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as expected", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0)}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. 
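Each OverrideRule in the spec above is selected per member cluster by ordinary Kubernetes label selection against the cluster's labels, so exactly one of the three rules (and its "-0"/"-1"/"-2" patch value) applies to each cluster. A small sketch of that matching step, using hypothetical label keys and values ("region"/"env", "east"/"prod") in place of the suite's regionLabelName/envLabelName constants:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Labels of one member cluster; keys and values are placeholders.
	clusterLabels := labels.Set{"region": "east", "env": "prod"}

	// MatchLabels of the three override rules, in the order they are declared.
	rules := []*metav1.LabelSelector{
		{MatchLabels: map[string]string{"region": "east", "env": "prod"}},
		{MatchLabels: map[string]string{"region": "east", "env": "canary"}},
		{MatchLabels: map[string]string{"region": "west", "env": "prod"}},
	}

	for i, ls := range rules {
		sel, err := metav1.LabelSelectorAsSelector(ls)
		if err != nil {
			panic(err)
		}
		if sel.Matches(clusterLabels) {
			// The matching rule's JSON patches are the ones applied for this cluster,
			// which is why each cluster ends up with a differently suffixed annotation.
			fmt.Printf("rule %d applies\n", i)
		}
	}
}
```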
+ It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have override annotations on the configmap", func() { + for i, cluster := range allMemberClusters { + wantAnnotations := map[string]string{roTestAnnotationKey: fmt.Sprintf("%s-%d", roTestAnnotationValue, i)} + Expect(validateOverrideAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) + } + }) + }) + + Context("creating resourceOverride with incorrect path for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + roSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0) + + BeforeAll(func() { + // Create the bad ro. + ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: rpName, // assigned RP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: fmt.Sprintf("/metadata/annotations/%s", roTestAnnotationKey), + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s"`, roTestAnnotationValue))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating the bad resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + // wait until the snapshot is created so that failed override won't block the rollout + Eventually(func() error { + roSnap := &placementv1beta1.ResourceOverrideSnapshot{} + return hubClient.Get(ctx, types.NamespacedName{Name: roSnapShotName, Namespace: workNamespace}, roSnap) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) + + // Create the RP later + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as failed to override", func() { + wantRONames := 
[]placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: roSnapShotName}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedFailedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. + It("should not place the selected resources on member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + }) + + Context("creating resourceOverride and resource becomes invalid after override for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + + // Create the ro. + ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: rpName, // assigned RP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s"`, roTestAnnotationValue))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as expected", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0)}, + } + rpStatusUpdatedActual := rpStatusWithWorkSynchronizedUpdatedFailedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of 
resources. + It("should not place the selected resources on member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + }) + + Context("creating resourceOverride with templated rules with cluster name to override configMap for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + roSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0) + + BeforeAll(func() { + // Create the ro before rp so that the observed resource index is predictable. + ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabelName, + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpReplace, + Path: "/data/data", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s"`, placementv1beta1.OverrideClusterNameVariable))}, + }, + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/data/newField", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"new-%s"`, placementv1beta1.OverrideClusterNameVariable))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + Eventually(func() error { + roSnap := &placementv1beta1.ResourceOverrideSnapshot{} + return hubClient.Get(ctx, types.NamespacedName{Name: roSnapShotName, Namespace: workNamespace}, roSnap) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) + + // Create the RP. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as expected", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: roSnapShotName}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should have override configMap on the member clusters", func() { + cmName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + cmNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + for _, cluster := range allMemberClusters { + wantConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + Namespace: cmNamespace, + }, + Data: map[string]string{ + "data": cluster.ClusterName, + "newField": fmt.Sprintf("new-%s", cluster.ClusterName), + }, + } + configMapActual := configMapPlacedOnClusterActual(cluster, wantConfigMap) + Eventually(configMapActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update configmap %s data as expected", cmName) + } + }) + }) + + Context("creating resourceOverride with delete configMap for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + roSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0) + + BeforeAll(func() { + // Create the ro before rp so that the observed resource index is predictable. 
+ ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{regionLabelName: regionEast}, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + }, + }, + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{regionLabelName: regionWest}, + }, + }, + }, + }, + OverrideType: placementv1beta1.DeleteOverrideType, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + Eventually(func() error { + roSnap := &placementv1beta1.ResourceOverrideSnapshot{} + return hubClient.Get(ctx, types.NamespacedName{Name: roSnapShotName, Namespace: workNamespace}, roSnap) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) + + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as expected", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: roSnapShotName}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should place the configmap on member clusters that are patched", func() { + for idx := 0; idx < 2; idx++ { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } 
+ }) + + It("should have override annotations on the configmap on the member clusters that are patched", func() { + for idx := 0; idx < 2; idx++ { + cluster := allMemberClusters[idx] + wantAnnotations := map[string]string{roTestAnnotationKey: roTestAnnotationValue} + Expect(validateOverrideAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) + } + }) + + It("should not place the configmap on the member clusters that are deleted", func() { + memberCluster := allMemberClusters[2] + Consistently(func() bool { + namespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + configMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + configMap := corev1.ConfigMap{} + err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: namespaceName}, &configMap) + return errors.IsNotFound(err) + }, consistentlyDuration, consistentlyInterval).Should(BeTrue(), "Failed to delete work resources on member cluster %s", memberCluster.ClusterName) + }) + }) + + Context("creating resourceOverride with templated rules with cluster label key replacement for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Create the ro before rp so that the observed resource index is predictable. + ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: rpName, // assigned RP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabelName, + Operator: metav1.LabelSelectorOpExists, + }, + { + Key: envLabelName, + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/data/region", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s%s}"`, placementv1beta1.OverrideClusterLabelKeyVariablePrefix, regionLabelName))}, + }, + { + Operator: placementv1beta1.JSONPatchOverrideOpReplace, + Path: "/data/data", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"newdata-%s%s}"`, placementv1beta1.OverrideClusterLabelKeyVariablePrefix, envLabelName))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + + // Create the RP. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as expected", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0)}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should replace the cluster label key in the configMap", func() { + cmName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + cmNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + for _, cluster := range allMemberClusters { + wantConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + Namespace: cmNamespace, + }, + Data: map[string]string{ + "data": fmt.Sprintf("newdata-%s", labelsByClusterName[cluster.ClusterName][envLabelName]), + "region": labelsByClusterName[cluster.ClusterName][regionLabelName], + }, + } + configMapActual := configMapPlacedOnClusterActual(cluster, wantConfigMap) + Eventually(configMapActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update configmap %s data as expected", cmName) + } + }) + + It("should handle non-existent cluster label key gracefully", func() { + By("Update the ResourceOverride to use a non-existent label key") + Eventually(func() error { + ro := &placementv1beta1.ResourceOverride{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: roName, Namespace: workNamespace}, ro); err != nil { + return err + } + ro.Spec.Policy.OverrideRules[0].JSONPatchOverrides[0].Value = apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%snon-existent-label}"`, placementv1beta1.OverrideClusterLabelKeyVariablePrefix))} + return hubClient.Update(ctx, ro) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update resourceOverride %s with non-existent label key", roName) + + By("Verify the RP status should have one cluster failed to override while the rest stuck in rollout") + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: workNamespace}, rp); err != nil { + return err + } + wantCondition := []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: 
scheduler.FullyScheduledReason, + ObservedGeneration: rp.Generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionFalse, + Reason: condition.RolloutNotStartedYetReason, + ObservedGeneration: rp.Generation, + }, + } + if diff := cmp.Diff(rp.Status.Conditions, wantCondition, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("RP condition diff (-got, +want): %s", diff) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "RP %s failed to show the override failed and stuck in rollout", rpName) + + By("Verify the configMap remains unchanged") + cmName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + cmNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + for _, cluster := range allMemberClusters { + wantConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + Namespace: cmNamespace, + }, + Data: map[string]string{ + "data": fmt.Sprintf("newdata-%s", labelsByClusterName[cluster.ClusterName][envLabelName]), + "region": labelsByClusterName[cluster.ClusterName][regionLabelName], + }, + } + configMapActual := configMapPlacedOnClusterActual(cluster, wantConfigMap) + Consistently(configMapActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "ConfigMap %s should remain unchanged", cmName) + } + }) + }) + + Context("creating resourceOverride with non-exist label for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + roSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0) + + BeforeAll(func() { + // Create the bad ro. 
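The condition comparison above relies on placementStatusCmpOptions to keep the check stable across runs; conceptually it is the usual go-cmp pattern of ignoring volatile condition fields. A standalone illustration with placeholder type/reason strings (the suite's actual option set may differ):

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	got := []metav1.Condition{{Type: "RolloutStarted", Status: metav1.ConditionFalse, Reason: "RolloutNotStartedYet", ObservedGeneration: 2, Message: "waiting"}}
	want := []metav1.Condition{{Type: "RolloutStarted", Status: metav1.ConditionFalse, Reason: "RolloutNotStartedYet", ObservedGeneration: 2}}

	// Ignore fields that change from run to run (timestamps, human-readable messages) so
	// the comparison only pins down type, status, reason, and observed generation.
	opts := []cmp.Option{cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "Message")}
	fmt.Println(cmp.Diff(got, want, opts...) == "") // true
}
```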
+ ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: rpName, // assigned RP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabelName, + Operator: metav1.LabelSelectorOpExists, + }, + { + Key: envLabelName, + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/data/region", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s%s}"`, placementv1beta1.OverrideClusterLabelKeyVariablePrefix, "non-existent-label"))}, + }, + { + Operator: placementv1beta1.JSONPatchOverrideOpReplace, + Path: "/data/data", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"newdata-%s%s}"`, placementv1beta1.OverrideClusterLabelKeyVariablePrefix, envLabelName))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating the bad resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + Eventually(func() error { + roSnap := &placementv1beta1.ResourceOverrideSnapshot{} + return hubClient.Get(ctx, types.NamespacedName{Name: roSnapShotName, Namespace: workNamespace}, roSnap) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) + + // Create the RP later so that failed override won't block the rollout + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as failed to override", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: roSnapShotName}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedFailedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. 
+ It("should not place the selected resources on member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + }) +}) diff --git a/test/e2e/resource_placement_rollout_test.go b/test/e2e/resource_placement_rollout_test.go new file mode 100644 index 000000000..b537b36d3 --- /dev/null +++ b/test/e2e/resource_placement_rollout_test.go @@ -0,0 +1,1193 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + appv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/utils" + "go.goms.io/fleet/pkg/utils/condition" + testv1alpha1 "go.goms.io/fleet/test/apis/v1alpha1" + "go.goms.io/fleet/test/utils/controller" +) + +const ( + valFoo1 = "foo1" + valBar1 = "bar1" +) + +var ( + testDaemonSet appv1.DaemonSet + testStatefulSet appv1.StatefulSet + testService corev1.Service + testJob batchv1.Job + testCustomResource testv1alpha1.TestResource +) + +var _ = Describe("placing namespaced scoped resources using a RP with rollout", Label("resourceplacement"), func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Name: rpName, Namespace: appNamespace().Name} + + BeforeEach(OncePerOrdered, func() { + testDeployment = appv1.Deployment{} + readDeploymentTestManifest(&testDeployment) + testDaemonSet = appv1.DaemonSet{} + readDaemonSetTestManifest(&testDaemonSet) + testStatefulSet = appv1.StatefulSet{} + readStatefulSetTestManifest(&testStatefulSet, false) + testService = corev1.Service{} + readServiceTestManifest(&testService) + testJob = batchv1.Job{} + readJobTestManifest(&testJob) + + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: namespaceOnlySelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + crpStatusUpdatedActual := crpStatusUpdatedActual(nil, allMemberClusterNames, nil, "0") // nil as no resources created yet + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterEach(OncePerOrdered, func() { + // Remove the custom deletion blocker finalizer from the RP and CRP. + ensureRPAndRelatedResourcesDeleted(rpKey, allMemberClusters, &testDeployment, &testDaemonSet, &testStatefulSet, &testService, &testJob) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Context("Test an RP place enveloped objects successfully", Ordered, func() { + workNamespace := appNamespace() + var wantSelectedResources []placementv1beta1.ResourceIdentifier + var testDeploymentEnvelope placementv1beta1.ResourceEnvelope + + BeforeAll(func() { + readEnvelopeResourceTestManifest(&testDeploymentEnvelope) + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testDeploymentEnvelope.Name, + Namespace: workNamespace.Name, + }, + } + }) + + It("Create the wrapped deployment resources in the namespace", func() { + createWrappedResourcesForRollout(&testDeploymentEnvelope, &testDeployment, utils.DeploymentKind, workNamespace) + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("Create the RP that select the enveloped objects", func() { + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace.Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testDeploymentEnvelope.Name, + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(wantSelectedResources, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the 
resources on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := waitForDeploymentPlacementToReady(memberCluster, &testDeployment) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("should mark the work as available", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + var works placementv1beta1.WorkList + listOpts := []client.ListOption{ + client.InNamespace(fmt.Sprintf(utils.NamespaceNameFormat, memberCluster.ClusterName)), + // This test spec runs in parallel with other suites; there might be unrelated + // Work objects in the namespace. + client.MatchingLabels{ + placementv1beta1.PlacementTrackingLabel: rpName, + placementv1beta1.ParentNamespaceLabel: workNamespace.Name, + }, + } + Eventually(func() string { + if err := hubClient.List(ctx, &works, listOpts...); err != nil { + return err.Error() + } + for i := range works.Items { + work := works.Items[i] + wantConditions := []metav1.Condition{ + { + Type: placementv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: condition.WorkAllManifestsAppliedReason, + ObservedGeneration: 1, + }, + { + Type: placementv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: condition.WorkAllManifestsAvailableReason, + ObservedGeneration: 1, + }, + } + diff := controller.CompareConditions(wantConditions, work.Status.Conditions) + if len(diff) != 0 { + return diff + } + } + if len(works.Items) == 0 { + return "no available work found" + } + return "" + }, eventuallyDuration, eventuallyInterval).Should(BeEmpty(), + "work condition mismatch for work %s (-want, +got):", memberCluster.ClusterName) + } + }) + }) + + Context("Test an RP place workload objects successfully, block rollout based on deployment availability", Ordered, func() { + workNamespace := appNamespace() + var wantSelectedResources []placementv1beta1.ResourceIdentifier + + BeforeAll(func() { + // Create the test resources. 
+ wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Group: appv1.SchemeGroupVersion.Group, + Version: appv1.SchemeGroupVersion.Version, + Kind: utils.DeploymentKind, + Name: testDeployment.Name, + Namespace: workNamespace.Name, + }, + } + }) + + It("create the deployment resource in the namespace", func() { + Expect(hubClient.Create(ctx, &workNamespace)).To(Succeed(), "Failed to create namespace %s", workNamespace.Name) + testDeployment.Namespace = workNamespace.Name + Expect(hubClient.Create(ctx, &testDeployment)).To(Succeed(), "Failed to create test deployment %s", testDeployment.Name) + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("create the RP that select the deployment", func() { + rp := buildRPForSafeRollout(workNamespace.Name) + rp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ + { + Group: appv1.SchemeGroupVersion.Group, + Kind: utils.DeploymentKind, + Version: appv1.SchemeGroupVersion.Version, + Name: testDeployment.Name, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(wantSelectedResources, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := waitForDeploymentPlacementToReady(memberCluster, &testDeployment) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("change the image name in deployment, to make it unavailable", func() { + Eventually(func() error { + var dep appv1.Deployment + err := hubClient.Get(ctx, types.NamespacedName{Name: testDeployment.Name, Namespace: testDeployment.Namespace}, &dep) + if err != nil { + return err + } + dep.Spec.Template.Spec.Containers[0].Image = randomImageName + return hubClient.Update(ctx, &dep) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to change the image name in deployment") + }) + + It("should update RP status as expected", func() { + failedDeploymentResourceIdentifier := placementv1beta1.ResourceIdentifier{ + Group: appv1.SchemeGroupVersion.Group, + Version: appv1.SchemeGroupVersion.Version, + Kind: utils.DeploymentKind, + Name: testDeployment.Name, + Namespace: testDeployment.Namespace, + } + rpStatusActual := safeRolloutWorkloadRPStatusUpdatedActual(wantSelectedResources, failedDeploymentResourceIdentifier, allMemberClusterNames, "1", 2) + Eventually(rpStatusActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + }) + + Context("Test an RP place workload objects successfully, block rollout based on daemonset availability", Ordered, func() { + workNamespace := appNamespace() + var wantSelectedResources []placementv1beta1.ResourceIdentifier + var testDaemonSetEnvelope placementv1beta1.ResourceEnvelope + + BeforeAll(func() { + // Create the test 
resources. + readEnvelopeResourceTestManifest(&testDaemonSetEnvelope) + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testDaemonSetEnvelope.Name, + Namespace: workNamespace.Name, + }, + } + }) + + It("create the daemonset resource in the namespace", func() { + createWrappedResourcesForRollout(&testDaemonSetEnvelope, &testDaemonSet, utils.DaemonSetKind, workNamespace) + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("create the RP that select the enveloped daemonset", func() { + rp := buildRPForSafeRollout(workNamespace.Name) + rp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testDaemonSetEnvelope.Name, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(wantSelectedResources, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := waitForDaemonSetPlacementToReady(memberCluster, &testDaemonSet) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("change the image name in daemonset, to make it unavailable", func() { + Eventually(func() error { + testDaemonSet.Spec.Template.Spec.Containers[0].Image = randomImageName + daemonSetByte, err := json.Marshal(testDaemonSet) + if err != nil { + return err + } + testDaemonSetEnvelope.Data["daemonset.yaml"] = runtime.RawExtension{Raw: daemonSetByte} + return hubClient.Update(ctx, &testDaemonSetEnvelope) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to change the image name of daemonset in envelope object") + }) + + It("should update RP status as expected", func() { + failedDaemonSetResourceIdentifier := placementv1beta1.ResourceIdentifier{ + Group: appv1.SchemeGroupVersion.Group, + Version: appv1.SchemeGroupVersion.Version, + Kind: utils.DaemonSetKind, + Name: testDaemonSet.Name, + Namespace: testDaemonSet.Namespace, + Envelope: &placementv1beta1.EnvelopeIdentifier{ + Name: testDaemonSetEnvelope.Name, + Namespace: testDaemonSetEnvelope.Namespace, + Type: placementv1beta1.ResourceEnvelopeType, + }, + } + rpStatusActual := safeRolloutWorkloadRPStatusUpdatedActual(wantSelectedResources, failedDaemonSetResourceIdentifier, allMemberClusterNames, "1", 2) + Eventually(rpStatusActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + }) + + Context("Test an RP place workload objects successfully, block rollout based on statefulset availability", Ordered, func() { + workNamespace := 
appNamespace() + var wantSelectedResources []placementv1beta1.ResourceIdentifier + var testStatefulSetEnvelope placementv1beta1.ResourceEnvelope + + BeforeAll(func() { + // Create the test resources. + readEnvelopeResourceTestManifest(&testStatefulSetEnvelope) + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testStatefulSetEnvelope.Name, + Namespace: workNamespace.Name, + }, + } + }) + + It("create the statefulset resource in the namespace", func() { + createWrappedResourcesForRollout(&testStatefulSetEnvelope, &testStatefulSet, utils.StatefulSetKind, workNamespace) + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("create the RP that select the enveloped statefulset", func() { + rp := buildRPForSafeRollout(workNamespace.Name) + rp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testStatefulSetEnvelope.Name, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(wantSelectedResources, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, 2*workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := waitForStatefulSetPlacementToReady(memberCluster, &testStatefulSet) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("change the image name in statefulset, to make it unavailable", func() { + Eventually(func() error { + testStatefulSet.Spec.Template.Spec.Containers[0].Image = randomImageName + statefulSetByte, err := json.Marshal(testStatefulSet) + if err != nil { + return err + } + testStatefulSetEnvelope.Data["statefulset.yaml"] = runtime.RawExtension{Raw: statefulSetByte} + return hubClient.Update(ctx, &testStatefulSetEnvelope) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to change the image name in statefulset") + }) + + It("should update RP status as expected", func() { + failedStatefulSetResourceIdentifier := placementv1beta1.ResourceIdentifier{ + Group: appv1.SchemeGroupVersion.Group, + Version: appv1.SchemeGroupVersion.Version, + Kind: utils.StatefulSetKind, + Name: testStatefulSet.Name, + Namespace: testStatefulSet.Namespace, + Envelope: &placementv1beta1.EnvelopeIdentifier{ + Name: testStatefulSetEnvelope.Name, + Namespace: testStatefulSetEnvelope.Namespace, + Type: placementv1beta1.ResourceEnvelopeType, + }, + } + rpStatusActual := safeRolloutWorkloadRPStatusUpdatedActual(wantSelectedResources, failedStatefulSetResourceIdentifier, allMemberClusterNames, "1", 2) + Eventually(rpStatusActual, workloadEventuallyDuration, 
eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + }) + + Context("Test an RP place workload objects successfully, block rollout based on service availability", Ordered, func() { + workNamespace := appNamespace() + var wantSelectedResources []placementv1beta1.ResourceIdentifier + + BeforeAll(func() { + // Create the test resources. + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Kind: utils.ServiceKind, + Name: testService.Name, + Version: corev1.SchemeGroupVersion.Version, + Namespace: workNamespace.Name, + }, + } + }) + + It("create the service resource in the namespace", func() { + Expect(hubClient.Create(ctx, &workNamespace)).To(Succeed(), "Failed to create namespace %s", workNamespace.Name) + testService.Namespace = workNamespace.Name + Expect(hubClient.Create(ctx, &testService)).To(Succeed(), "Failed to create test service %s", testService.Name) + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("create the RP that select the service", func() { + rp := buildRPForSafeRollout(workNamespace.Name) + rp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ + { + Kind: utils.ServiceKind, + Version: corev1.SchemeGroupVersion.Version, + Name: testService.Name, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(wantSelectedResources, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := waitForServiceToReady(memberCluster, &testService) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("change service to LoadBalancer, to make it unavailable", func() { + Eventually(func() error { + var service corev1.Service + err := hubClient.Get(ctx, types.NamespacedName{Name: testService.Name, Namespace: testService.Namespace}, &service) + if err != nil { + return err + } + service.Spec.Type = corev1.ServiceTypeLoadBalancer + return hubClient.Update(ctx, &service) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to change the service type to LoadBalancer") + }) + + It("should update RP status as expected", func() { + failedServiceResourceIdentifier := placementv1beta1.ResourceIdentifier{ + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, + Kind: utils.ServiceKind, + Name: testService.Name, + Namespace: testService.Namespace, + } + // failedResourceObservedGeneration is set to 0 because generation is not populated for service. 
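Switching the Service to type LoadBalancer makes it "unavailable" here because a LoadBalancer Service is only considered available once an ingress IP or hostname is populated, and nothing provisions one in the kind-based e2e clusters; other Service types are treated as available on creation. The real rule lives in fleet's work applier; this is only a simplified sketch of the distinction:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// serviceLooksAvailable is a simplified stand-in for the availability rule exercised
// above: LoadBalancer services need at least one ingress endpoint, while other types
// are considered available once created.
func serviceLooksAvailable(svc *corev1.Service) bool {
	if svc.Spec.Type != corev1.ServiceTypeLoadBalancer {
		return true
	}
	return len(svc.Status.LoadBalancer.Ingress) > 0
}

func main() {
	svc := &corev1.Service{Spec: corev1.ServiceSpec{Type: corev1.ServiceTypeLoadBalancer}}
	// No cloud or LB controller in the kind clusters, so Ingress stays empty.
	fmt.Println(serviceLooksAvailable(svc)) // false -> RP reports the service as not yet available
}
```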
+ rpStatusActual := safeRolloutWorkloadRPStatusUpdatedActual(wantSelectedResources, failedServiceResourceIdentifier, allMemberClusterNames, "1", 0) + Eventually(rpStatusActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + }) + + Context("Test an RP place workload successful and update it to be failed and then delete the resource snapshot,"+ + "rollout should eventually be successful after we correct the image", Ordered, func() { + workNamespace := appNamespace() + var wantSelectedResources []placementv1beta1.ResourceIdentifier + + BeforeAll(func() { + // Create the test resources. + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Group: appv1.SchemeGroupVersion.Group, + Version: appv1.SchemeGroupVersion.Version, + Kind: utils.DeploymentKind, + Name: testDeployment.Name, + Namespace: workNamespace.Name, + }, + } + }) + + It("create the deployment resource in the namespace", func() { + Expect(hubClient.Create(ctx, &workNamespace)).To(Succeed(), "Failed to create namespace %s", workNamespace.Name) + testDeployment.Namespace = workNamespace.Name + Expect(hubClient.Create(ctx, &testDeployment)).To(Succeed(), "Failed to create test deployment %s", testDeployment.Name) + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("create the RP that select the deployment", func() { + rp := buildRPForSafeRollout(workNamespace.Name) + rp.Spec.RevisionHistoryLimit = ptr.To(int32(1)) + rp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ + { + Group: appv1.SchemeGroupVersion.Group, + Kind: utils.DeploymentKind, + Version: appv1.SchemeGroupVersion.Version, + Name: testDeployment.Name, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(wantSelectedResources, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := waitForDeploymentPlacementToReady(memberCluster, &testDeployment) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("change the image name in deployment, to make it unavailable", func() { + Eventually(func() error { + var dep appv1.Deployment + err := hubClient.Get(ctx, types.NamespacedName{Name: testDeployment.Name, Namespace: testDeployment.Namespace}, &dep) + if err != nil { + return err + } + dep.Spec.Template.Spec.Containers[0].Image = randomImageName + return hubClient.Update(ctx, &dep) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to change the image name in deployment") + }) + + It("should update RP status on deployment failed as expected", func() { + failedDeploymentResourceIdentifier := placementv1beta1.ResourceIdentifier{ + Group: appv1.SchemeGroupVersion.Group, + Version: appv1.SchemeGroupVersion.Version, + Kind: 
utils.DeploymentKind, + Name: testDeployment.Name, + Namespace: testDeployment.Namespace, + } + rpStatusActual := safeRolloutWorkloadRPStatusUpdatedActual(wantSelectedResources, failedDeploymentResourceIdentifier, allMemberClusterNames, "1", 2) + Eventually(rpStatusActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("update work to trigger a work generator reconcile", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx].ClusterName + namespaceName := fmt.Sprintf(utils.NamespaceNameFormat, memberCluster) + workName := fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, fmt.Sprintf(placementv1beta1.WorkNameBaseFmt, workNamespace.Name, rpName)) + work := placementv1beta1.Work{} + Expect(hubClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: namespaceName}, &work)).Should(Succeed(), "Failed to get the work") + if work.Status.ManifestConditions != nil { + work.Status.ManifestConditions = nil + } else { + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{ + Type: placementv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + Reason: "WorkNotAvailable", + }) + } + Expect(hubClient.Status().Update(ctx, &work)).Should(Succeed(), "Failed to update the work") + } + }) + + It("change the image name in deployment, to roll over the resourcesnapshot", func() { + rsList := &placementv1beta1.ResourceSnapshotList{} + listOptions := &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: rpName}), + Namespace: workNamespace.Name, + } + Expect(hubClient.List(ctx, rsList, listOptions)).Should(Succeed(), "Failed to list the resourcesnapshot") + Expect(len(rsList.Items) == 1).Should(BeTrue()) + oldRS := rsList.Items[0].Name + Expect(hubClient.Get(ctx, types.NamespacedName{Name: testDeployment.Name, Namespace: testDeployment.Namespace}, &testDeployment)).Should(Succeed(), "Failed to get deployment") + testDeployment.Spec.Template.Spec.Containers[0].Image = "extra-snapshot" + Expect(hubClient.Update(ctx, &testDeployment)).Should(Succeed(), "Failed to change the image name in deployment") + // wait for the new resourcesnapshot to be created + Eventually(func() bool { + Expect(hubClient.List(ctx, rsList, listOptions)).Should(Succeed(), "Failed to list the resourcesnapshot") + Expect(len(rsList.Items) == 1).Should(BeTrue()) + return rsList.Items[0].Name != oldRS + }, eventuallyDuration, eventuallyInterval).Should(BeTrue(), "Failed to remove the old resourcesnapshot") + }) + + It("update work to trigger a work generator reconcile", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx].ClusterName + namespaceName := fmt.Sprintf(utils.NamespaceNameFormat, memberCluster) + workName := fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, fmt.Sprintf(placementv1beta1.WorkNameBaseFmt, workNamespace.Name, rpName)) + Eventually(func() error { + work := placementv1beta1.Work{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: namespaceName}, &work); err != nil { + return err + } + if work.Status.ManifestConditions != nil { + work.Status.ManifestConditions = nil + } else { + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{ + Type: placementv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + Reason: "WorkNotAvailable", + }) + } + return hubClient.Status().Update(ctx, &work) + }, eventuallyDuration, 
eventuallyInterval).Should(Succeed(), "Failed to update the work") + } + }) + + It("change the image name in deployment, to make it available again", func() { + Eventually(func() error { + err := hubClient.Get(ctx, types.NamespacedName{Name: testDeployment.Name, Namespace: testDeployment.Namespace}, &testDeployment) + if err != nil { + return err + } + testDeployment.Spec.Template.Spec.Containers[0].Image = "nginx:1.26.2" + return hubClient.Update(ctx, &testDeployment) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to change the image name in deployment") + }) + + It("should place the resources on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := waitForDeploymentPlacementToReady(memberCluster, &testDeployment) + Eventually(workResourcesPlacedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + }) + + Context("Test an RP place workload objects successfully, don't block rollout based on job availability", Ordered, func() { + workNamespace := appNamespace() + var wantSelectedResources []placementv1beta1.ResourceIdentifier + unAvailablePeriodSeconds := 15 + + BeforeAll(func() { + // Create the test resources. + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Group: batchv1.SchemeGroupVersion.Group, + Version: batchv1.SchemeGroupVersion.Version, + Kind: utils.JobKind, + Name: testJob.Name, + Namespace: workNamespace.Name, + }, + } + }) + + It("create the job resource in the namespace", func() { + Expect(hubClient.Create(ctx, &workNamespace)).To(Succeed(), "Failed to create namespace %s", workNamespace.Name) + testJob.Namespace = workNamespace.Name + Expect(hubClient.Create(ctx, &testJob)).To(Succeed(), "Failed to create test job %s", testJob.Name) + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("create the RP that select the job", func() { + rp := buildRPForSafeRollout(workNamespace.Name) + // the job we are trying to propagate takes 10s to complete. MaxUnavailable is set to 1. So setting UnavailablePeriodSeconds to 15s + // so that after each rollout phase we only wait for 15s before proceeding to the next since Job is not trackable, + // we want rollout to finish in a reasonable time. 
+			rp.Spec.Strategy.RollingUpdate.UnavailablePeriodSeconds = ptr.To(unAvailablePeriodSeconds)
+			rp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{
+				{
+					Group: batchv1.SchemeGroupVersion.Group,
+					Kind: utils.JobKind,
+					Version: batchv1.SchemeGroupVersion.Version,
+					Name: testJob.Name,
+				},
+			}
+			Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP")
+		})
+
+		It("should update RP status as expected", func() {
+			rpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(rpKey, wantSelectedResources, allMemberClusterNames, nil, "0", false)
+			Eventually(rpStatusUpdatedActual, 2*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected")
+		})
+
+		It("should place the resources on all member clusters", func() {
+			for idx := range allMemberClusters {
+				memberCluster := allMemberClusters[idx]
+				workResourcesPlacedActual := waitForJobToBePlaced(memberCluster, &testJob)
+				Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName)
+			}
+		})
+
+		It("suspend job", func() {
+			Eventually(func() error {
+				var job batchv1.Job
+				err := hubClient.Get(ctx, types.NamespacedName{Name: testJob.Name, Namespace: testJob.Namespace}, &job)
+				if err != nil {
+					return err
+				}
+				job.Spec.Suspend = ptr.To(true)
+				return hubClient.Update(ctx, &job)
+			}, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to suspend job")
+		})
+
+		// Jobs are not trackable, so we need to wait a bit longer for each rollout.
+		It("should update RP status as expected", func() {
+			rpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(rpKey, wantSelectedResources, allMemberClusterNames, nil, "1", false)
+			Eventually(rpStatusUpdatedActual, 5*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected")
+		})
+	})
+})
+
+// These two test cases need to run in an Ordered container because they both place the same CRD;
+// if they ran in parallel, a resource conflict could occur.
+var _ = Describe("placing namespaced custom resources using a RP with rollout", Label("resourceplacement"), Ordered, func() {
+	crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+	rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
+	rpKey := types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}
+	testCustomResourceKind := ""
+
+	BeforeEach(OncePerOrdered, func() {
+		testCustomResource = testv1alpha1.TestResource{}
+		readTestCustomResource(&testCustomResource)
+		// Initialize the kind here because the object returned after creation has its Kind field emptied.
+		testCustomResourceKind = testCustomResource.Kind
+
+		// Create the test resources; the CRD is already installed in BeforeSuite.
+		workNamespace := appNamespace()
+		Expect(hubClient.Create(ctx, &workNamespace)).To(Succeed(), "Failed to create namespace %s", workNamespace.Name)
+		testCustomResource.Namespace = workNamespace.Name
+		// Create the custom resource at the very beginning because our resource detector runs every 30s to detect new resources,
+		// thus giving it some grace period.
+ Expect(hubClient.Create(ctx, &testCustomResource)).To(Succeed(), "Failed to create test custom resource %s", testCustomResource.GetName()) + + // Create a namespace-only CRP that selects both namespace and CRD for custom resource placement + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: utils.NamespaceKind, + Version: corev1.SchemeGroupVersion.Version, + Name: appNamespace().Name, + SelectionScope: placementv1beta1.NamespaceOnly, + }, + { + Group: utils.CRDMetaGVK.Group, + Kind: utils.CRDMetaGVK.Kind, + Version: utils.CRDMetaGVK.Version, + Name: testResourceCRDName, + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + crpStatusUpdatedActual := crpStatusUpdatedActual([]placementv1beta1.ResourceIdentifier{ + { + Kind: utils.NamespaceKind, + Name: appNamespace().Name, + Version: corev1.SchemeGroupVersion.Version, + }, + { + Group: utils.CRDMetaGVK.Group, + Kind: utils.CRDMetaGVK.Kind, + Name: testResourceCRDName, + Version: utils.CRDMetaGVK.Version, + }, + }, allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterEach(OncePerOrdered, func() { + // Remove the custom deletion blocker finalizer from the RP and CRP. 
+ ensureRPAndRelatedResourcesDeleted(rpKey, allMemberClusters, &testCustomResource) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Context("Test an RP place custom resource successfully, should wait to update resource", Ordered, func() { + var wantSelectedResources []placementv1beta1.ResourceIdentifier + var rp *placementv1beta1.ResourcePlacement + var observedResourceIdx string + unAvailablePeriodSeconds := 30 + workNamespace := appNamespace() + + BeforeAll(func() { + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Group: testv1alpha1.GroupVersion.Group, + Kind: testCustomResourceKind, + Name: testCustomResource.Name, + Version: testv1alpha1.GroupVersion.Version, + Namespace: workNamespace.Name, + }, + } + }) + + It("create the RP that select the custom resource", func() { + rp = &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace.Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: testv1alpha1.GroupVersion.Group, + Kind: testCustomResourceKind, + Version: testv1alpha1.GroupVersion.Version, + Name: testCustomResource.Name, + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 1, + }, + UnavailablePeriodSeconds: ptr.To(unAvailablePeriodSeconds), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + // Wait until all the expected resources have been selected. + // + // This is to address a flakiness situation where it might take a while for Fleet + // to recognize the custom resource (even if it is created before the RP). + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return fmt.Errorf("failed to get RP: %w", err) + } + + if diff := cmp.Diff(rp.Status.SelectedResources, wantSelectedResources, cmpopts.SortSlices(utils.LessFuncResourceIdentifier)); diff != "" { + return fmt.Errorf("selected resources mismatched (-got, +want): %s", diff) + } + // Use the fresh observed resource index to verify the RP status later. 
+ observedResourceIdx = rp.Status.ObservedResourceIndex + return nil + }, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to select all the expected resources") + + rpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(rpKey, wantSelectedResources, []string{memberCluster1EastProdName}, nil, observedResourceIdx, false) + Eventually(rpStatusUpdatedActual, 2*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on member cluster", func() { + workResourcesPlacedActual := waitForTestResourceToBePlaced(memberCluster1EastProd, &testCustomResource) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster1EastProd.ClusterName) + }) + + It("update the custom resource", func() { + Eventually(func() error { + var cr testv1alpha1.TestResource + err := hubClient.Get(ctx, types.NamespacedName{Name: testCustomResource.Name, Namespace: workNamespace.Name}, &cr) + if err != nil { + return err + } + cr.Spec.Foo = valBar1 // Previously was "foo1" + return hubClient.Update(ctx, &cr) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update custom resource") + }) + + It("should not update the resource on member cluster before the unavailable second", func() { + // subtracting 5 seconds because transition between IT takes ~1 second + unavailablePeriod := time.Duration(*rp.Spec.Strategy.RollingUpdate.UnavailablePeriodSeconds)*time.Second - (5 * time.Second) + Consistently(func() bool { + var cr testv1alpha1.TestResource + err := memberCluster1EastProd.KubeClient.Get(ctx, types.NamespacedName{Name: testCustomResource.Name, Namespace: workNamespace.Name}, &cr) + if err != nil { + klog.Errorf("Failed to get custom resource %s/%s: %v", workNamespace.Name, testCustomResource.Name, err) + return false + } + if cr.Spec.Foo == valFoo1 { // Previously was "foo1" + return true + } + return false + }, unavailablePeriod, consistentlyInterval).Should(BeTrue(), "Test resource was updated when it shouldn't be") + }) + + It("should update RP status as expected", func() { + // Refresh the observed resource index. + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return fmt.Errorf("failed to get RP: %w", err) + } + + if rp.Status.ObservedResourceIndex == observedResourceIdx { + // It is expected that the observed resource index has been bumped by 1 + // due to the resource change. + return fmt.Errorf("observed resource index is not updated") + } + // Use the fresh observed resource index to verify the RP status later. 
+ observedResourceIdx = rp.Status.ObservedResourceIndex + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to select all the expected resources") + + rpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(rpKey, wantSelectedResources, []string{memberCluster1EastProdName}, nil, observedResourceIdx, false) + Eventually(rpStatusUpdatedActual, 4*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("delete the RP and related resources", func() { + }) + }) + + Context("Test an RP place custom resource successfully, should wait to update resource on multiple member clusters", Ordered, func() { + workNamespace := appNamespace() + var wantSelectedResources []placementv1beta1.ResourceIdentifier + var rp *placementv1beta1.ResourcePlacement + unAvailablePeriodSeconds := 30 + var observedResourceIdx string + + BeforeAll(func() { + // Create the test resources. + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Group: testv1alpha1.GroupVersion.Group, + Kind: testCustomResourceKind, + Name: testCustomResource.Name, + Version: testv1alpha1.GroupVersion.Version, + Namespace: workNamespace.Name, + }, + } + }) + + It("create the RP that select the custom resource", func() { + rp = &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace.Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: testv1alpha1.GroupVersion.Group, + Kind: testCustomResourceKind, + Version: testv1alpha1.GroupVersion.Version, + Name: testCustomResource.Name, + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 1, + }, + UnavailablePeriodSeconds: ptr.To(unAvailablePeriodSeconds), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return fmt.Errorf("failed to get RP: %w", err) + } + + if diff := cmp.Diff(rp.Status.SelectedResources, wantSelectedResources, cmpopts.SortSlices(utils.LessFuncResourceIdentifier)); diff != "" { + return fmt.Errorf("selected resources mismatched (-got, +want): %s", diff) + } + // Use the fresh observed resource index to verify the RP status later. 
+ observedResourceIdx = rp.Status.ObservedResourceIndex + return nil + }, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to select all the expected resources") + + rpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(rpKey, wantSelectedResources, allMemberClusterNames, nil, observedResourceIdx, false) + Eventually(rpStatusUpdatedActual, 2*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := waitForTestResourceToBePlaced(memberCluster, &testCustomResource) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("update the custom resource", func() { + Eventually(func() error { + var cr testv1alpha1.TestResource + err := hubClient.Get(ctx, types.NamespacedName{Name: testCustomResource.Name, Namespace: workNamespace.Name}, &cr) + if err != nil { + return err + } + cr.Spec.Foo = valBar1 // Previously was "foo1" + return hubClient.Update(ctx, &cr) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update custom resource") + }) + + It("should update one member cluster", func() { + // adding a buffer of 5 seconds + unavailablePeriod := time.Duration(*rp.Spec.Strategy.RollingUpdate.UnavailablePeriodSeconds)*time.Second + (5 * time.Second) + Eventually(func() bool { + // Check the number of clusters meeting the condition + countClustersMeetingCondition := func() int { + count := 0 + for _, cluster := range allMemberClusters { + if !checkCluster(cluster, testCustomResource.Name, workNamespace.Name) { + // resource field updated to "bar1" + count++ + } + } + return count + } + return countClustersMeetingCondition() == 1 + }, unavailablePeriod, eventuallyInterval).Should(BeTrue(), "Test resource was updated when it shouldn't be") + }) + + It("should not rollout update to the next member cluster before unavailable second", func() { + // subtracting a buffer of 5 seconds + unavailablePeriod := time.Duration(*rp.Spec.Strategy.RollingUpdate.UnavailablePeriodSeconds)*time.Second - (5 * time.Second) + Consistently(func() bool { + // Check the number of clusters meeting the condition + countClustersMeetingCondition := func() int { + count := 0 + for _, cluster := range allMemberClusters { + if !checkCluster(cluster, testCustomResource.Name, workNamespace.Name) { + // resource field updated to "bar1" + count++ + } + } + return count + } + return countClustersMeetingCondition() == 1 + }, unavailablePeriod, consistentlyInterval).Should(BeTrue(), "Test resource was updated when it shouldn't be") + }) + + It("should update RP status as expected", func() { + // Refresh the observed resource index. + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return fmt.Errorf("failed to get RP: %w", err) + } + + if rp.Status.ObservedResourceIndex == observedResourceIdx { + // It is expected that the observed resource index has been bumped by 1 + // due to the resource change. + return fmt.Errorf("observed resource index is not updated") + } + // Use the fresh observed resource index to verify the RP status later. 
+ observedResourceIdx = rp.Status.ObservedResourceIndex + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to select all the expected resources") + + rpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(rpKey, wantSelectedResources, allMemberClusterNames, nil, observedResourceIdx, false) + Eventually(rpStatusUpdatedActual, 4*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + }) +}) + +func buildRPForSafeRollout(namespace string) *placementv1beta1.ResourcePlacement { + return &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()), + Namespace: namespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 1, + }, + }, + }, + }, + } +} diff --git a/test/e2e/resource_placement_with_custom_config_test.go b/test/e2e/resource_placement_with_custom_config_test.go new file mode 100644 index 000000000..ef74403e9 --- /dev/null +++ b/test/e2e/resource_placement_with_custom_config_test.go @@ -0,0 +1,209 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package e2e + +import ( + "fmt" + "math" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" +) + +var _ = Describe("validating RP when using customized resourceSnapshotCreationMinimumInterval and resourceChangesCollectionDuration", Label("custom"), Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + + // skip entire suite if interval is zero + BeforeEach(OncePerOrdered, func() { + if resourceSnapshotCreationMinimumInterval == 0 && resourceChangesCollectionDuration == 0 { + Skip("Skipping customized-config placement test when RESOURCE_SNAPSHOT_CREATION_MINIMUM_INTERVAL=0m and RESOURCE_CHANGES_COLLECTION_DURATION=0m") + } + + // Create the resources. + createWorkResources() + + // Create the CRP with Namespace-only selector. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: namespaceOnlySelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + By("should update CRP status as expected") + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + + By("creating RP") + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "ConfigMap", + Version: "v1", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + workNamespaceLabelName: fmt.Sprintf("test-%d", GinkgoParallelProcess()), + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + + }) + + AfterEach(OncePerOrdered, func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, allMemberClusters) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Context("validating RP status and should not update immediately", func() { + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual([]placementv1beta1.ResourceIdentifier{}, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should not place work resources on member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + + It("updating the resources on the hub", func() { + configMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + configMap := &corev1.ConfigMap{} + Expect(hubClient.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: appNamespace().Name}, configMap)).Should(Succeed(), "Failed to get the configMap %s", configMapName) + configMap.Labels = map[string]string{ + workNamespaceLabelName: fmt.Sprintf("test-%d", GinkgoParallelProcess()), + } + Expect(hubClient.Update(ctx, configMap)).Should(Succeed(), "Failed to update configMap %s", configMapName) + + }) + + It("should not update RP status immediately", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual([]placementv1beta1.ResourceIdentifier{}, allMemberClusterNames, nil, "0") + Consistently(rpStatusUpdatedActual, resourceSnapshotDelayDuration-3*time.Second, consistentlyInterval).Should(Succeed(), "RP %s status should be unchanged", rpName) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "1") + 
Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("validating the resourceSnapshots are created", func() { + var resourceSnapshotList placementv1beta1.ResourceSnapshotList + masterResourceSnapshotLabels := client.MatchingLabels{ + placementv1beta1.PlacementTrackingLabel: rpName, + } + Expect(hubClient.List(ctx, &resourceSnapshotList, masterResourceSnapshotLabels, client.InNamespace(appNamespace().Name))).Should(Succeed(), "Failed to list ResourceSnapshots for RP %s", rpName) + Expect(len(resourceSnapshotList.Items)).Should(Equal(2), "Expected 2 ResourceSnapshots for RP %s, got %d", rpName, len(resourceSnapshotList.Items)) + // Use math.Abs to get the absolute value of the time difference in seconds. + snapshotDiffInSeconds := resourceSnapshotList.Items[0].CreationTimestamp.Time.Sub(resourceSnapshotList.Items[1].CreationTimestamp.Time).Seconds() + diff := math.Abs(snapshotDiffInSeconds) + Expect(time.Duration(diff)*time.Second >= resourceSnapshotDelayDuration).To(BeTrue(), "The time difference between ResourceSnapshots should be more than resourceSnapshotDelayDuration") + }) + }) + + Context("validating that RP status can be updated after updating the resources", func() { + It("validating the resourceSnapshots are created", func() { + Eventually(func() error { + var resourceSnapshotList placementv1beta1.ResourceSnapshotList + masterResourceSnapshotLabels := client.MatchingLabels{ + placementv1beta1.PlacementTrackingLabel: rpName, + } + if err := hubClient.List(ctx, &resourceSnapshotList, masterResourceSnapshotLabels, client.InNamespace(appNamespace().Name)); err != nil { + return fmt.Errorf("failed to list ResourceSnapshots for RP %s: %w", rpName, err) + } + if len(resourceSnapshotList.Items) != 1 { + return fmt.Errorf("got %d ResourceSnapshot for RP %s, want 1", len(resourceSnapshotList.Items), rpName) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to wait for ResourceSnapshots to be created") + }) + + It("updating the resources on the hub", func() { + configMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + configMap := &corev1.ConfigMap{} + Expect(hubClient.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: appNamespace().Name}, configMap)).Should(Succeed(), "Failed to get the configMap %s", configMapName) + configMap.Labels = map[string]string{ + workNamespaceLabelName: fmt.Sprintf("test-%d", GinkgoParallelProcess()), + } + Expect(hubClient.Update(ctx, configMap)).Should(Succeed(), "Failed to update configMap %s", configMapName) + + }) + + It("should update RP status for snapshot 0 as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual([]placementv1beta1.ResourceIdentifier{}, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should update RP status for snapshot 1 as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(rpStatusUpdatedActual, resourceSnapshotDelayDuration+eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should place the selected resources on member clusters", 
checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("validating the resourceSnapshots are created", func() { + var resourceSnapshotList placementv1beta1.ResourceSnapshotList + masterResourceSnapshotLabels := client.MatchingLabels{ + placementv1beta1.PlacementTrackingLabel: rpName, + } + Expect(hubClient.List(ctx, &resourceSnapshotList, masterResourceSnapshotLabels, client.InNamespace(appNamespace().Name))).Should(Succeed(), "Failed to list ResourceSnapshots for RP %s", rpName) + Expect(len(resourceSnapshotList.Items)).Should(Equal(2), "Expected 2 ResourceSnapshots for RP %s, got %d", rpName, len(resourceSnapshotList.Items)) + // Use math.Abs to get the absolute value of the time difference in seconds. + snapshotDiffInSeconds := resourceSnapshotList.Items[0].CreationTimestamp.Time.Sub(resourceSnapshotList.Items[1].CreationTimestamp.Time).Seconds() + diff := math.Abs(snapshotDiffInSeconds) + Expect(time.Duration(diff)*time.Second >= resourceSnapshotDelayDuration).To(BeTrue(), "The time difference between ResourceSnapshots should be more than resourceSnapshotDelayDuration") + }) + }) +}) diff --git a/test/e2e/resources_test.go b/test/e2e/resources_test.go index e41ef55d9..0dafedfef 100644 --- a/test/e2e/resources_test.go +++ b/test/e2e/resources_test.go @@ -39,6 +39,7 @@ const ( appDeploymentNameTemplate = "app-deploy-%d" appSecretNameTemplate = "app-secret-%d" // #nosec G101 crpNameTemplate = "crp-%d" + rpNameTemplate = "rp-%d" crpNameWithSubIndexTemplate = "crp-%d-%d" croNameTemplate = "cro-%d" roNameTemplate = "ro-%d" @@ -54,8 +55,20 @@ const ( workNamespaceLabelName = "process" ) -func workResourceSelector() []placementv1beta1.ClusterResourceSelector { - return []placementv1beta1.ClusterResourceSelector{ +func namespaceOnlySelector() []placementv1beta1.ResourceSelectorTerm { + return []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "Namespace", + Version: "v1", + Name: fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()), + SelectionScope: placementv1beta1.NamespaceOnly, + }, + } +} + +func workResourceSelector() []placementv1beta1.ResourceSelectorTerm { + return []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -65,7 +78,18 @@ func workResourceSelector() []placementv1beta1.ClusterResourceSelector { } } -func configMapSelector() []placementv1beta1.ResourceSelector { +func configMapSelector() []placementv1beta1.ResourceSelectorTerm { + return []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "ConfigMap", + Version: "v1", + Name: fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()), + }, + } +} + +func configMapOverrideSelector() []placementv1beta1.ResourceSelector { return []placementv1beta1.ResourceSelector{ { Group: "", @@ -76,8 +100,8 @@ func configMapSelector() []placementv1beta1.ResourceSelector { } } -func invalidWorkResourceSelector() []placementv1beta1.ClusterResourceSelector { - return []placementv1beta1.ClusterResourceSelector{ +func invalidWorkResourceSelector() []placementv1beta1.ResourceSelectorTerm { + return []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", diff --git a/test/e2e/rollout_test.go b/test/e2e/rollout_test.go index ce42b4f74..fee968dd7 100644 --- a/test/e2e/rollout_test.go +++ b/test/e2e/rollout_test.go @@ -286,7 +286,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, 
allMemberClusterNames, nil, "0", true) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", true) Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -370,7 +370,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", true) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", true) Eventually(crpStatusUpdatedActual, 2*workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -693,7 +693,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", false) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", false) Eventually(crpStatusUpdatedActual, 2*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -718,7 +718,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { }) // job is not trackable, so we need to wait for a bit longer for each roll out It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "1", false) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "1", false) Eventually(crpStatusUpdatedActual, 5*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -771,7 +771,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { It("create the CRP that select the namespace and CRD", func() { crp = buildCRPForSafeRollout() - crdClusterResourceSelector := placementv1beta1.ClusterResourceSelector{ + crdClusterResourceSelector := placementv1beta1.ResourceSelectorTerm{ Group: utils.CRDMetaGVK.Group, Kind: utils.CRDMetaGVK.Kind, Version: utils.CRDMetaGVK.Version, @@ -807,7 +807,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { return nil }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to select all the expected resources") - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, []string{memberCluster1EastProdName}, nil, observedResourceIdx, false) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, []string{memberCluster1EastProdName}, nil, observedResourceIdx, false) Eventually(crpStatusUpdatedActual, 2*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -863,7 +863,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() 
{ return nil }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to select all the expected resources") - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, []string{memberCluster1EastProdName}, nil, observedResourceIdx, false) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, []string{memberCluster1EastProdName}, nil, observedResourceIdx, false) Eventually(crpStatusUpdatedActual, 4*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -914,7 +914,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { It("create the CRP that select the namespace and CRD", func() { crp = buildCRPForSafeRollout() - crdClusterResourceSelector := placementv1beta1.ClusterResourceSelector{ + crdClusterResourceSelector := placementv1beta1.ResourceSelectorTerm{ Group: utils.CRDMetaGVK.Group, Kind: utils.CRDMetaGVK.Kind, Version: utils.CRDMetaGVK.Version, @@ -929,7 +929,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", false) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", false) Eventually(crpStatusUpdatedActual, 2*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -992,7 +992,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "1", false) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "1", false) Eventually(crpStatusUpdatedActual, 4*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) diff --git a/test/e2e/setup_test.go b/test/e2e/setup_test.go index f0127b17d..2aa14c39e 100644 --- a/test/e2e/setup_test.go +++ b/test/e2e/setup_test.go @@ -218,11 +218,11 @@ var ( // disappear from the status of the MemberCluster object. 
c.Type == string(clusterv1beta1.ConditionTypeClusterPropertyProviderStarted) }) - ignoreTimeTypeFields = cmpopts.IgnoreTypes(time.Time{}, metav1.Time{}) - ignoreCRPStatusDriftedPlacementsTimestampFields = cmpopts.IgnoreFields(placementv1beta1.DriftedResourcePlacement{}, "ObservationTime", "FirstDriftedObservedTime") - ignoreCRPStatusDiffedPlacementsTimestampFields = cmpopts.IgnoreFields(placementv1beta1.DiffedResourcePlacement{}, "ObservationTime", "FirstDiffedObservedTime") + ignoreTimeTypeFields = cmpopts.IgnoreTypes(time.Time{}, metav1.Time{}) + ignorePlacementStatusDriftedPlacementsTimestampFields = cmpopts.IgnoreFields(placementv1beta1.DriftedResourcePlacement{}, "ObservationTime", "FirstDriftedObservedTime") + ignorePlacementStatusDiffedPlacementsTimestampFields = cmpopts.IgnoreFields(placementv1beta1.DiffedResourcePlacement{}, "ObservationTime", "FirstDiffedObservedTime") - crpStatusCmpOptions = cmp.Options{ + placementStatusCmpOptions = cmp.Options{ cmpopts.SortSlices(lessFuncCondition), cmpopts.SortSlices(lessFuncPlacementStatus), cmpopts.SortSlices(utils.LessFuncResourceIdentifier), @@ -230,14 +230,14 @@ var ( cmpopts.SortSlices(utils.LessFuncDiffedResourcePlacements), cmpopts.SortSlices(utils.LessFuncDriftedResourcePlacements), utils.IgnoreConditionLTTAndMessageFields, - ignoreCRPStatusDriftedPlacementsTimestampFields, - ignoreCRPStatusDiffedPlacementsTimestampFields, + ignorePlacementStatusDriftedPlacementsTimestampFields, + ignorePlacementStatusDiffedPlacementsTimestampFields, cmpopts.EquateEmpty(), } // We don't sort ResourcePlacementStatus by their name since we don't know which cluster will become unavailable first, // prompting the rollout to be blocked for remaining clusters. - safeRolloutCRPStatusCmpOptions = cmp.Options{ + safeRolloutPlacementStatusCmpOptions = cmp.Options{ cmpopts.SortSlices(lessFuncCondition), cmpopts.SortSlices(lessFuncPlacementStatusByConditions), cmpopts.SortSlices(utils.LessFuncResourceIdentifier), diff --git a/test/e2e/updaterun_test.go b/test/e2e/updaterun_test.go index 901d70c44..86eceeec6 100644 --- a/test/e2e/updaterun_test.go +++ b/test/e2e/updaterun_test.go @@ -698,7 +698,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { Namespace: roNamespace, }, Spec: placementv1beta1.ResourceOverrideSpec{ - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index a03f77973..baeadc9fc 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -802,6 +802,16 @@ func checkIfPlacedNamespaceResourceOnAllMemberClusters() { } } +// checkIfRemovedConfigMapFromMemberCluster verifies that the ConfigMap has been removed from the specified member cluster. 
+func checkIfRemovedConfigMapFromMemberClusters(clusters []*framework.Cluster) { + for idx := range clusters { + memberCluster := clusters[idx] + + configMapRemovedActual := namespacedResourcesRemovedFromClusterActual(memberCluster) + Eventually(configMapRemovedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove config map from member cluster %s", memberCluster.ClusterName) + } +} + func checkIfRemovedWorkResourcesFromAllMemberClusters() { checkIfRemovedWorkResourcesFromMemberClusters(allMemberClusters) } @@ -815,6 +825,10 @@ func checkIfRemovedWorkResourcesFromMemberClusters(clusters []*framework.Cluster } } +func checkIfRemovedConfigMapFromAllMemberClusters() { + checkIfRemovedConfigMapFromMemberClusters(allMemberClusters) +} + func checkIfRemovedWorkResourcesFromAllMemberClustersConsistently() { checkIfRemovedWorkResourcesFromMemberClustersConsistently(allMemberClusters) } @@ -849,12 +863,11 @@ func checkNamespaceExistsWithOwnerRefOnMemberCluster(nsName, crpName string) { }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Namespace which is not owned by the CRP should not be deleted") } -// cleanupCRP deletes the CRP and waits until the resources are not found. -func cleanupCRP(name string) { +// cleanupPlacement deletes the placement and waits until the resources are not found. +func cleanupPlacement(placementKey types.NamespacedName) { // TODO(Arvindthiru): There is a conflict which requires the Eventually block, not sure of series of operations that leads to it yet. Eventually(func() error { - crp := &placementv1beta1.ClusterResourcePlacement{} - err := hubClient.Get(ctx, types.NamespacedName{Name: name}, crp) + placement, err := retrievePlacement(placementKey) if k8serrors.IsNotFound(err) { return nil } @@ -862,19 +875,19 @@ func cleanupCRP(name string) { return err } - // Delete the CRP (again, if applicable). + // Delete the placement (again, if applicable). // This helps the After All node to run successfully even if the steps above fail early. - if err = hubClient.Delete(ctx, crp); err != nil { + if err = hubClient.Delete(ctx, placement); err != nil { return err } - crp.Finalizers = []string{} - return hubClient.Update(ctx, crp) - }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to delete CRP %s", name) + placement.SetFinalizers([]string{}) + return hubClient.Update(ctx, placement) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to delete placement %s", placementKey) - // Wait until the CRP is removed. - removedActual := crpRemovedActual(name) - Eventually(removedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove CRP %s", name) + // Wait until the placement is removed. + removedActual := placementRemovedActual(placementKey) + Eventually(removedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove placement %s", placementKey) // Check if work is deleted. Needed to ensure that the Work resource is cleaned up before the next CRP is created. // This is because the Work resource is created with a finalizer that blocks deletion until the all applied work @@ -882,9 +895,13 @@ func cleanupCRP(name string) { // and flakiness in subsequent tests. 
By("Check if work is deleted") var workNS string + workName := fmt.Sprintf("%s-work", placementKey.Name) + if placementKey.Namespace != "" { + workName = fmt.Sprintf("%s.%s", placementKey.Namespace, workName) + } work := &placementv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-work", name), + Name: workName, }, } Eventually(func() bool { @@ -962,7 +979,7 @@ func createClusterResourceOverrides(number int) { Name: fmt.Sprintf(croNameTemplate, i), }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", @@ -1024,11 +1041,11 @@ func ensureCRPAndRelatedResourcesDeleted(crpName string, memberClusters []*frame } // Verify that related finalizers have been removed from the CRP. - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP") // Remove the custom deletion blocker finalizer from the CRP. - cleanupCRP(crpName) + cleanupPlacement(types.NamespacedName{Name: crpName}) // Delete the created resources. cleanupWorkResources() @@ -1105,7 +1122,7 @@ func verifyWorkPropagationAndMarkAsAvailable(memberClusterName, crpName string, Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, LastTransitionTime: metav1.Now(), - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(workapplier.AvailabilityResultTypeAvailable), Message: "Set to be available", ObservedGeneration: w.Generation, }) @@ -1403,6 +1420,29 @@ func createCRP(crpName string) { createCRPWithApplyStrategy(crpName, nil) } +// createNamespaceOnlyCRP creates a ClusterResourcePlacement with namespace-only selector. +func createNamespaceOnlyCRP(crpName string) { + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: namespaceOnlySelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + By(fmt.Sprintf("creating namespace-only placement %s", crpName)) + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create namespace-only CRP %s", crpName) +} + // ensureUpdateRunDeletion deletes the update run with the given name and checks all related approval requests are also deleted. 
func ensureUpdateRunDeletion(updateRunName string) { updateRun := &placementv1beta1.ClusterStagedUpdateRun{ @@ -1427,3 +1467,45 @@ func ensureUpdateRunStrategyDeletion(strategyName string) { removedActual := updateRunStrategyRemovedActual(strategyName) Eventually(removedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "ClusterStagedUpdateStrategy still exists") } + +// ensureRPAndRelatedResourcesDeleted deletes rp and verifies resources in the specified namespace placed by the rp are removed from the cluster. +// It checks if the placed configMap is removed by default, as this is tested in most of the test cases. +// For tests with additional resources placed, e.g. deployments, daemonSets, add those to placedResources. +func ensureRPAndRelatedResourcesDeleted(rpKey types.NamespacedName, memberClusters []*framework.Cluster, placedResources ...client.Object) { + // Delete the ResourcePlacement. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpKey.Name, + Namespace: rpKey.Namespace, + }, + } + Expect(hubClient.Delete(ctx, rp)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{}), "Failed to delete ResourcePlacement") + + // Verify that all resources placed have been removed from specified member clusters. + for idx := range memberClusters { + memberCluster := memberClusters[idx] + + workResourcesRemovedActual := namespacedResourcesRemovedFromClusterActual(memberCluster, placedResources...) + Eventually(workResourcesRemovedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove work resources from member cluster %s", memberCluster.ClusterName) + } + + // Verify that related finalizers have been removed from the ResourcePlacement. + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(rpKey) + Eventually(finalizerRemovedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from ResourcePlacement") + + // Remove the custom deletion blocker finalizer from the ResourcePlacement. 
+ cleanupPlacement(rpKey) +} + +func retrievePlacement(placementKey types.NamespacedName) (placementv1beta1.PlacementObj, error) { + var placement placementv1beta1.PlacementObj + if placementKey.Namespace == "" { + placement = &placementv1beta1.ClusterResourcePlacement{} + } else { + placement = &placementv1beta1.ResourcePlacement{} + } + if err := hubClient.Get(ctx, placementKey, placement); err != nil { + return nil, err + } + return placement, nil +} diff --git a/test/e2e/webhook_test.go b/test/e2e/webhook_test.go index 203d29881..4de88c300 100644 --- a/test/e2e/webhook_test.go +++ b/test/e2e/webhook_test.go @@ -140,7 +140,7 @@ var _ = Describe("webhook tests for CRP CREATE operations", func() { Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "InvalidNamespace", @@ -170,7 +170,7 @@ var _ = Describe("webhook tests for CRP CREATE operations", func() { Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Kind: "Deployment", @@ -813,7 +813,7 @@ var _ = Describe("webhook tests for MC taints", Ordered, func() { var _ = Describe("webhook tests for ClusterResourceOverride CREATE operations", func() { croName := fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()) - selector := placementv1beta1.ClusterResourceSelector{ + selector := placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", Version: "v1", @@ -853,7 +853,7 @@ var _ = Describe("webhook tests for ClusterResourceOverride CREATE operations", It("should deny create CRO with invalid resource selection ", func() { Consistently(func(g Gomega) error { - invalidSelector := placementv1beta1.ClusterResourceSelector{ + invalidSelector := placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", Version: "v1", @@ -862,7 +862,7 @@ var _ = Describe("webhook tests for ClusterResourceOverride CREATE operations", }, SelectionScope: placementv1beta1.NamespaceWithResources, } - invalidSelector1 := placementv1beta1.ClusterResourceSelector{ + invalidSelector1 := placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", Version: "v1", @@ -874,7 +874,7 @@ var _ = Describe("webhook tests for ClusterResourceOverride CREATE operations", Name: croName, }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ invalidSelector, selector, selector, invalidSelector1, }, Policy: policy, @@ -912,7 +912,7 @@ var _ = Describe("webhook tests for ClusterResourceOverride CREATE operation lim Name: "test-cro-101", }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", @@ -964,7 +964,7 @@ var _ = Describe("webhook tests for ClusterResourceOverride CREATE operation lim var _ = Describe("webhook tests for ClusterResourceOverride CREATE operations resource selection limitations", Ordered, Serial, func() { croName := fmt.Sprintf(croNameTemplate, 
GinkgoParallelProcess()) - selector := placementv1beta1.ClusterResourceSelector{ + selector := placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", Version: "v1", @@ -978,7 +978,7 @@ var _ = Describe("webhook tests for ClusterResourceOverride CREATE operations re Name: croName, }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ selector, }, Policy: &placementv1beta1.OverridePolicy{ @@ -1028,7 +1028,7 @@ var _ = Describe("webhook tests for ClusterResourceOverride CREATE operations re Name: fmt.Sprintf("test-cro-%d", GinkgoParallelProcess()), }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ selector, }, Policy: &placementv1beta1.OverridePolicy{ @@ -1097,7 +1097,7 @@ var _ = Describe("webhook tests for CRO UPDATE operations", Ordered, func() { Name: croName, }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", @@ -1138,7 +1138,7 @@ var _ = Describe("webhook tests for CRO UPDATE operations", Ordered, func() { Eventually(func(g Gomega) error { var cro placementv1beta1.ClusterResourceOverride g.Expect(hubClient.Get(ctx, types.NamespacedName{Name: croName}, &cro)).Should(Succeed()) - invalidSelector := placementv1beta1.ClusterResourceSelector{ + invalidSelector := placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", Version: "v1", @@ -1147,7 +1147,7 @@ var _ = Describe("webhook tests for CRO UPDATE operations", Ordered, func() { }, SelectionScope: placementv1beta1.NamespaceWithResources, } - invalidSelector1 := placementv1beta1.ClusterResourceSelector{ + invalidSelector1 := placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", Version: "v1", @@ -1178,7 +1178,7 @@ var _ = Describe("webhook tests for CRO UPDATE operations", Ordered, func() { Name: cro1Name, }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", @@ -1206,7 +1206,7 @@ var _ = Describe("webhook tests for CRO UPDATE operations", Ordered, func() { Expect(hubClient.Create(ctx, cro1)).To(Succeed(), "Failed to create CRO %s", cro1.Name) var cro placementv1beta1.ClusterResourceOverride g.Expect(hubClient.Get(ctx, types.NamespacedName{Name: croName}, &cro)).Should(Succeed()) - selector := placementv1beta1.ClusterResourceSelector{ + selector := placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", Version: "v1", diff --git a/test/scheduler/actuals_test.go b/test/scheduler/actuals_test.go index 86b6299b0..14e48685f 100644 --- a/test/scheduler/actuals_test.go +++ b/test/scheduler/actuals_test.go @@ -25,9 +25,7 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" 
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" @@ -35,18 +33,15 @@ import ( // This file features common actuals (and utilities for generating actuals) in the test suites. -func noBindingsCreatedForCRPActual(crpName string) func() error { +func noBindingsCreatedForPlacementActual(placementKey types.NamespacedName) func() error { return func() error { - // List all bindings associated with the given CRP. - bindingList := &placementv1beta1.ClusterResourceBindingList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) - listOptions := &client.ListOptions{LabelSelector: labelSelector} - if err := hubClient.List(ctx, bindingList, listOptions); err != nil { - return err + bindingList, err := listBindings(placementKey) + if err != nil { + return fmt.Errorf("failed to list bindings for placement %s: %w", placementKey, err) } // Check that the returned list is empty. - if bindingCount := len(bindingList.Items); bindingCount != 0 { + if bindingCount := len(bindingList.GetBindingObjs()); bindingCount != 0 { return fmt.Errorf("%d bindings have been created unexpectedly", bindingCount) } @@ -54,16 +49,23 @@ func noBindingsCreatedForCRPActual(crpName string) func() error { } } -func crpSchedulerFinalizerAddedActual(crpName string) func() error { +func placementSchedulerFinalizerAddedActual(placementKey types.NamespacedName) func() error { return func() error { - // Retrieve the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + // Retrieve the placement. + var placement placementv1beta1.PlacementObj + if placementKey.Namespace == "" { + // Retrieve CRP. + placement = &placementv1beta1.ClusterResourcePlacement{} + } else { + // Retrieve RP. + placement = &placementv1beta1.ResourcePlacement{} + } + if err := hubClient.Get(ctx, types.NamespacedName{Name: placementKey.Name, Namespace: placementKey.Namespace}, placement); err != nil { return err } // Check that the scheduler finalizer has been added. - if !controllerutil.ContainsFinalizer(crp, placementv1beta1.SchedulerCleanupFinalizer) { + if !controllerutil.ContainsFinalizer(placement, placementv1beta1.SchedulerCleanupFinalizer) { return fmt.Errorf("scheduler cleanup finalizer has not been added") } @@ -71,16 +73,23 @@ func crpSchedulerFinalizerAddedActual(crpName string) func() error { } } -func crpSchedulerFinalizerRemovedActual(crpName string) func() error { +func placementSchedulerFinalizerRemovedActual(placementKey types.NamespacedName) func() error { return func() error { - // Retrieve the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + // Retrieve the placement. + var placement placementv1beta1.PlacementObj + if placementKey.Namespace == "" { + // Retrieve CRP. + placement = &placementv1beta1.ClusterResourcePlacement{} + } else { + // Retrieve RP. + placement = &placementv1beta1.ResourcePlacement{} + } + if err := hubClient.Get(ctx, types.NamespacedName{Name: placementKey.Name, Namespace: placementKey.Namespace}, placement); err != nil { return err } - // Check that the scheduler finalizer has been added. - if controllerutil.ContainsFinalizer(crp, placementv1beta1.SchedulerCleanupFinalizer) { + // Check that the scheduler finalizer has been removed. 
+ if controllerutil.ContainsFinalizer(placement, placementv1beta1.SchedulerCleanupFinalizer) { return fmt.Errorf("scheduler cleanup finalizer is still present") } @@ -88,50 +97,72 @@ func crpSchedulerFinalizerRemovedActual(crpName string) func() error { } } -func scheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, crpName, policySnapshotName string) func() error { +func scheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, placementKey types.NamespacedName, policySnapshotName string) func() error { return func() error { - // List all bindings. - bindingList := &placementv1beta1.ClusterResourceBindingList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) - listOptions := &client.ListOptions{LabelSelector: labelSelector} - if err := hubClient.List(ctx, bindingList, listOptions); err != nil { - return err + bindingList, err := listBindings(placementKey) + if err != nil { + return fmt.Errorf("failed to list bindings for placement %s: %w", placementKey, err) } - // Find all the scheduled bindings. - scheduled := []placementv1beta1.ClusterResourceBinding{} + scheduled := []placementv1beta1.BindingObj{} clusterMap := make(map[string]bool) for _, name := range clusters { clusterMap[name] = true } - for _, binding := range bindingList.Items { - if _, ok := clusterMap[binding.Spec.TargetCluster]; ok && binding.Spec.State == placementv1beta1.BindingStateScheduled { + for _, binding := range bindingList.GetBindingObjs() { + if _, ok := clusterMap[binding.GetBindingSpec().TargetCluster]; ok && binding.GetBindingSpec().State == placementv1beta1.BindingStateScheduled { scheduled = append(scheduled, binding) } } // Verify that scheduled bindings are created as expected. - wantScheduled := []placementv1beta1.ClusterResourceBinding{} + wantScheduled := []placementv1beta1.BindingObj{} for _, name := range clusters { score := scoreByCluster[name] - binding := placementv1beta1.ClusterResourceBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: bindingNamePlaceholder, - Labels: map[string]string{ - placementv1beta1.PlacementTrackingLabel: crpName, + var binding placementv1beta1.BindingObj + if placementKey.Namespace == "" { + // Create CRB. + binding = &placementv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: bindingNamePlaceholder, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: placementKey.Name, + }, + Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, + }, + Spec: placementv1beta1.ResourceBindingSpec{ + State: placementv1beta1.BindingStateScheduled, + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: name, + ClusterDecision: placementv1beta1.ClusterDecision{ + ClusterName: name, + Selected: true, + ClusterScore: score, + }, }, - Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, - }, - Spec: placementv1beta1.ResourceBindingSpec{ - State: placementv1beta1.BindingStateScheduled, - SchedulingPolicySnapshotName: policySnapshotName, - TargetCluster: name, - ClusterDecision: placementv1beta1.ClusterDecision{ - ClusterName: name, - Selected: true, - ClusterScore: score, + } + } else { + // Create RB. 
+ binding = &placementv1beta1.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: bindingNamePlaceholder, + Namespace: placementKey.Namespace, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: placementKey.Name, + }, + Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, }, - }, + Spec: placementv1beta1.ResourceBindingSpec{ + State: placementv1beta1.BindingStateScheduled, + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: name, + ClusterDecision: placementv1beta1.ClusterDecision{ + ClusterName: name, + Selected: true, + ClusterScore: score, + }, + }, + } } wantScheduled = append(wantScheduled, binding) } @@ -141,10 +172,10 @@ func scheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, score } // Verify that binding names are formatted correctly. - for _, binding := range bindingList.Items { - wantPrefix := fmt.Sprintf("%s-%s", crpName, binding.Spec.TargetCluster) - if !strings.HasPrefix(binding.Name, wantPrefix) { - return fmt.Errorf("binding name %s is not formatted correctly; want prefix %s", binding.Name, wantPrefix) + for _, binding := range bindingList.GetBindingObjs() { + wantPrefix := fmt.Sprintf("%s-%s", placementKey.Name, binding.GetBindingSpec().TargetCluster) + if !strings.HasPrefix(binding.GetName(), wantPrefix) { + return fmt.Errorf("binding name %s is not formatted correctly; want prefix %s", binding.GetName(), wantPrefix) } } @@ -152,49 +183,74 @@ func scheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, score } } -func boundBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, crpName, policySnapshotName string) func() error { +func boundBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, placementKey types.NamespacedName, policySnapshotName string) func() error { return func() error { - bindingList := &placementv1beta1.ClusterResourceBindingList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) - listOptions := &client.ListOptions{LabelSelector: labelSelector} - if err := hubClient.List(ctx, bindingList, listOptions); err != nil { - return err + bindingList, err := listBindings(placementKey) + if err != nil { + return fmt.Errorf("failed to list bindings for placement %s: %w", placementKey, err) } - bound := []placementv1beta1.ClusterResourceBinding{} + bound := []placementv1beta1.BindingObj{} clusterMap := make(map[string]bool) for _, name := range clusters { clusterMap[name] = true } - for _, binding := range bindingList.Items { - if _, ok := clusterMap[binding.Spec.TargetCluster]; ok && binding.Spec.State == placementv1beta1.BindingStateBound { + for _, binding := range bindingList.GetBindingObjs() { + if _, ok := clusterMap[binding.GetBindingSpec().TargetCluster]; ok && binding.GetBindingSpec().State == placementv1beta1.BindingStateBound { bound = append(bound, binding) } } - wantBound := []placementv1beta1.ClusterResourceBinding{} - for _, name := range clusters { - score := scoreByCluster[name] - binding := placementv1beta1.ClusterResourceBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: bindingNamePlaceholder, - Labels: map[string]string{ - placementv1beta1.PlacementTrackingLabel: crpName, + wantBound := []placementv1beta1.BindingObj{} + if placementKey.Namespace == "" { + for _, name := range clusters { + score := scoreByCluster[name] + binding := 
&placementv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: bindingNamePlaceholder, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: placementKey.Name, + }, + Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, }, - Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, - }, - Spec: placementv1beta1.ResourceBindingSpec{ - State: placementv1beta1.BindingStateBound, - SchedulingPolicySnapshotName: policySnapshotName, - TargetCluster: name, - ClusterDecision: placementv1beta1.ClusterDecision{ - ClusterName: name, - Selected: true, - ClusterScore: score, + Spec: placementv1beta1.ResourceBindingSpec{ + State: placementv1beta1.BindingStateBound, + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: name, + ClusterDecision: placementv1beta1.ClusterDecision{ + ClusterName: name, + Selected: true, + ClusterScore: score, + }, }, - }, + } + wantBound = append(wantBound, binding) + } + } else { + for _, name := range clusters { + score := scoreByCluster[name] + binding := &placementv1beta1.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: bindingNamePlaceholder, + Namespace: placementKey.Namespace, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: placementKey.Name, + }, + Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, + }, + Spec: placementv1beta1.ResourceBindingSpec{ + State: placementv1beta1.BindingStateBound, + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: name, + ClusterDecision: placementv1beta1.ClusterDecision{ + ClusterName: name, + Selected: true, + ClusterScore: score, + }, + }, + } + wantBound = append(wantBound, binding) } - wantBound = append(wantBound, binding) } if diff := cmp.Diff(bound, wantBound, ignoreResourceBindingFields...); diff != "" { @@ -202,10 +258,10 @@ func boundBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCl } // Verify that binding names are formatted correctly. 
- for _, binding := range bindingList.Items { - wantPrefix := fmt.Sprintf("%s-%s", crpName, binding.Spec.TargetCluster) - if !strings.HasPrefix(binding.Name, wantPrefix) { - return fmt.Errorf("binding name %s is not formatted correctly; want prefix %s", binding.Name, wantPrefix) + for _, binding := range bindingList.GetBindingObjs() { + wantPrefix := fmt.Sprintf("%s-%s", placementKey.Name, binding.GetBindingSpec().TargetCluster) + if !strings.HasPrefix(binding.GetName(), wantPrefix) { + return fmt.Errorf("binding name %s is not formatted correctly; want prefix %s", binding.GetName(), wantPrefix) } } @@ -213,49 +269,74 @@ func boundBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCl } } -func unscheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, crpName string, policySnapshotName string) func() error { +func unscheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, placementKey types.NamespacedName, policySnapshotName string) func() error { return func() error { - bindingList := &placementv1beta1.ClusterResourceBindingList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) - listOptions := &client.ListOptions{LabelSelector: labelSelector} - if err := hubClient.List(ctx, bindingList, listOptions); err != nil { - return err + bindingList, err := listBindings(placementKey) + if err != nil { + return fmt.Errorf("failed to list bindings for placement %s: %w", placementKey, err) } - unscheduled := []placementv1beta1.ClusterResourceBinding{} + unscheduled := []placementv1beta1.BindingObj{} clusterMap := make(map[string]bool) for _, name := range clusters { clusterMap[name] = true } - for _, binding := range bindingList.Items { - if _, ok := clusterMap[binding.Spec.TargetCluster]; ok && binding.Spec.State == placementv1beta1.BindingStateUnscheduled { + for _, binding := range bindingList.GetBindingObjs() { + if _, ok := clusterMap[binding.GetBindingSpec().TargetCluster]; ok && binding.GetBindingSpec().State == placementv1beta1.BindingStateUnscheduled { unscheduled = append(unscheduled, binding) } } // TODO (rzhang): fix me, compare the annotations when we know its previous state - wantUnscheduled := []placementv1beta1.ClusterResourceBinding{} - for _, name := range clusters { - score := scoreByCluster[name] - binding := placementv1beta1.ClusterResourceBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: bindingNamePlaceholder, - Labels: map[string]string{ - placementv1beta1.PlacementTrackingLabel: crpName, + wantUnscheduled := []placementv1beta1.BindingObj{} + if placementKey.Namespace == "" { + for _, name := range clusters { + score := scoreByCluster[name] + binding := &placementv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: bindingNamePlaceholder, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: placementKey.Name, + }, + Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, }, - Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, - }, - Spec: placementv1beta1.ResourceBindingSpec{ - State: placementv1beta1.BindingStateUnscheduled, - SchedulingPolicySnapshotName: policySnapshotName, - TargetCluster: name, - ClusterDecision: placementv1beta1.ClusterDecision{ - ClusterName: name, - Selected: true, - ClusterScore: score, + Spec: placementv1beta1.ResourceBindingSpec{ + State: 
placementv1beta1.BindingStateUnscheduled, + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: name, + ClusterDecision: placementv1beta1.ClusterDecision{ + ClusterName: name, + Selected: true, + ClusterScore: score, + }, }, - }, + } + wantUnscheduled = append(wantUnscheduled, binding) + } + } else { + for _, name := range clusters { + score := scoreByCluster[name] + binding := &placementv1beta1.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: bindingNamePlaceholder, + Namespace: placementKey.Namespace, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: placementKey.Name, + }, + Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, + }, + Spec: placementv1beta1.ResourceBindingSpec{ + State: placementv1beta1.BindingStateUnscheduled, + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: name, + ClusterDecision: placementv1beta1.ClusterDecision{ + ClusterName: name, + Selected: true, + ClusterScore: score, + }, + }, + } + wantUnscheduled = append(wantUnscheduled, binding) } - wantUnscheduled = append(wantUnscheduled, binding) } if diff := cmp.Diff(unscheduled, wantUnscheduled, ignoreResourceBindingFields...); diff != "" { @@ -263,10 +344,10 @@ func unscheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, sco } // Verify that binding names are formatted correctly. - for _, binding := range bindingList.Items { - wantPrefix := fmt.Sprintf("%s-%s", crpName, binding.Spec.TargetCluster) - if !strings.HasPrefix(binding.Name, wantPrefix) { - return fmt.Errorf("binding name %s is not formatted correctly; want prefix %s", binding.Name, wantPrefix) + for _, binding := range bindingList.GetBindingObjs() { + wantPrefix := fmt.Sprintf("%s-%s", placementKey.Name, binding.GetBindingSpec().TargetCluster) + if !strings.HasPrefix(binding.GetName(), wantPrefix) { + return fmt.Errorf("binding name %s is not formatted correctly; want prefix %s", binding.GetName(), wantPrefix) } } @@ -274,7 +355,7 @@ func unscheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, sco } } -func noBindingsCreatedForClustersActual(clusters []string, crpName string) func() error { +func noBindingsCreatedForClustersActual(clusters []string, placementKey types.NamespacedName) func() error { // Build a map for clusters for quicker lookup. clusterMap := map[string]bool{} for _, name := range clusters { @@ -282,17 +363,16 @@ func noBindingsCreatedForClustersActual(clusters []string, crpName string) func( } return func() error { - bindingList := &placementv1beta1.ClusterResourceBindingList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) - listOptions := &client.ListOptions{LabelSelector: labelSelector} - if err := hubClient.List(ctx, bindingList, listOptions); err != nil { - return err + // List all bindings. 
+ bindingList, err := listBindings(placementKey) + if err != nil { + return fmt.Errorf("failed to list bindings for placement %s: %w", placementKey, err) } - bindings := bindingList.Items + bindings := bindingList.GetBindingObjs() for _, binding := range bindings { - if _, ok := clusterMap[binding.Spec.TargetCluster]; ok { - return fmt.Errorf("binding %s for cluster %s has been created unexpectedly", binding.Name, binding.Spec.TargetCluster) + if _, ok := clusterMap[binding.GetBindingSpec().TargetCluster]; ok { + return fmt.Errorf("binding %s for cluster %s has been created unexpectedly", binding.GetName(), binding.GetBindingSpec().TargetCluster) } } @@ -300,18 +380,18 @@ func noBindingsCreatedForClustersActual(clusters []string, crpName string) func( } } -func pickFixedPolicySnapshotStatusUpdatedActual(valid, invalidOrNotFound []string, policySnapshotName string) func() error { +func pickFixedPolicySnapshotStatusUpdatedActual(valid, invalidOrNotFound []string, policySnapshotKey types.NamespacedName) func() error { return func() error { - policySnapshot := &placementv1beta1.ClusterSchedulingPolicySnapshot{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: policySnapshotName}, policySnapshot); err != nil { - return err + policySnapshot, err := getSchedulingPolicySnapshot(policySnapshotKey) + if err != nil { + return fmt.Errorf("failed to get policy snapshot %s: %w", policySnapshotKey, err) } - // Verify that the observed CRP generation field is populated correctly. - wantCRPGeneration := policySnapshot.Annotations[placementv1beta1.CRPGenerationAnnotation] - observedCRPGeneration := policySnapshot.Status.ObservedCRPGeneration - if strconv.FormatInt(observedCRPGeneration, 10) != wantCRPGeneration { - return fmt.Errorf("policy snapshot observed CRP generation not match: want %s, got %d", wantCRPGeneration, observedCRPGeneration) + // Verify that the observed RP generation field is populated correctly. + wantRPGeneration := policySnapshot.GetAnnotations()[placementv1beta1.CRPGenerationAnnotation] + observedRPGeneration := policySnapshot.GetPolicySnapshotStatus().ObservedCRPGeneration + if strconv.FormatInt(observedRPGeneration, 10) != wantRPGeneration { + return fmt.Errorf("policy snapshot observed RP generation not match: want %s, got %d", wantRPGeneration, observedRPGeneration) } // Verify that cluster decisions are populated correctly. @@ -328,24 +408,24 @@ func pickFixedPolicySnapshotStatusUpdatedActual(valid, invalidOrNotFound []strin Selected: false, }) } - if diff := cmp.Diff(policySnapshot.Status.ClusterDecisions, wantClusterDecisions, ignoreClusterDecisionReasonField, cmpopts.SortSlices(lessFuncClusterDecision)); diff != "" { + if diff := cmp.Diff(policySnapshot.GetPolicySnapshotStatus().ClusterDecisions, wantClusterDecisions, ignoreClusterDecisionReasonField, cmpopts.SortSlices(lessFuncClusterDecision)); diff != "" { return fmt.Errorf("policy snapshot status cluster decisions (-got, +want): %s", diff) } // Verify that the scheduled condition is added correctly. 
- scheduledCondition := meta.FindStatusCondition(policySnapshot.Status.Conditions, string(placementv1beta1.PolicySnapshotScheduled)) + scheduledCondition := meta.FindStatusCondition(policySnapshot.GetPolicySnapshotStatus().Conditions, string(placementv1beta1.PolicySnapshotScheduled)) var wantScheduledCondition *metav1.Condition if len(invalidOrNotFound) == 0 { wantScheduledCondition = &metav1.Condition{ Type: string(placementv1beta1.PolicySnapshotScheduled), Status: metav1.ConditionTrue, - ObservedGeneration: policySnapshot.Generation, + ObservedGeneration: policySnapshot.GetGeneration(), } } else { wantScheduledCondition = &metav1.Condition{ Type: string(placementv1beta1.PolicySnapshotScheduled), Status: metav1.ConditionFalse, - ObservedGeneration: policySnapshot.Generation, + ObservedGeneration: policySnapshot.GetGeneration(), } } if diff := cmp.Diff(scheduledCondition, wantScheduledCondition, ignoreConditionTimeReasonAndMessageFields); diff != "" { @@ -356,18 +436,18 @@ func pickFixedPolicySnapshotStatusUpdatedActual(valid, invalidOrNotFound []strin } } -func pickAllPolicySnapshotStatusUpdatedActual(scored, filtered []string, policySnapshotName string) func() error { +func pickAllPolicySnapshotStatusUpdatedActual(scored, filtered []string, policySnapshotKey types.NamespacedName) func() error { return func() error { - policySnapshot := &placementv1beta1.ClusterSchedulingPolicySnapshot{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: policySnapshotName}, policySnapshot); err != nil { - return err + policySnapshot, err := getSchedulingPolicySnapshot(policySnapshotKey) + if err != nil { + return fmt.Errorf("failed to get policy snapshot %s: %w", policySnapshotKey, err) } - // Verify that the observed CRP generation field is populated correctly. - wantCRPGeneration := policySnapshot.Annotations[placementv1beta1.CRPGenerationAnnotation] - observedCRPGeneration := policySnapshot.Status.ObservedCRPGeneration - if strconv.FormatInt(observedCRPGeneration, 10) != wantCRPGeneration { - return fmt.Errorf("policy snapshot observed CRP generation not match: want %s, got %d", wantCRPGeneration, observedCRPGeneration) + // Verify that the observed RP generation field is populated correctly. + wantRPGeneration := policySnapshot.GetAnnotations()[placementv1beta1.CRPGenerationAnnotation] + observedRPGeneration := policySnapshot.GetPolicySnapshotStatus().ObservedCRPGeneration + if strconv.FormatInt(observedRPGeneration, 10) != wantRPGeneration { + return fmt.Errorf("policy snapshot observed RP generation not match: want %s, got %d", wantRPGeneration, observedRPGeneration) } // Verify that cluster decisions are populated correctly. @@ -385,16 +465,16 @@ func pickAllPolicySnapshotStatusUpdatedActual(scored, filtered []string, policyS Selected: false, }) } - if diff := cmp.Diff(policySnapshot.Status.ClusterDecisions, wantClusterDecisions, ignoreClusterDecisionReasonField, cmpopts.SortSlices(lessFuncClusterDecision)); diff != "" { + if diff := cmp.Diff(policySnapshot.GetPolicySnapshotStatus().ClusterDecisions, wantClusterDecisions, ignoreClusterDecisionReasonField, cmpopts.SortSlices(lessFuncClusterDecision)); diff != "" { return fmt.Errorf("policy snapshot status cluster decisions (-got, +want): %s", diff) } // Verify that the scheduled condition is added correctly. 
- scheduledCondition := meta.FindStatusCondition(policySnapshot.Status.Conditions, string(placementv1beta1.PolicySnapshotScheduled)) + scheduledCondition := meta.FindStatusCondition(policySnapshot.GetPolicySnapshotStatus().Conditions, string(placementv1beta1.PolicySnapshotScheduled)) wantScheduledCondition := &metav1.Condition{ Type: string(placementv1beta1.PolicySnapshotScheduled), Status: metav1.ConditionTrue, - ObservedGeneration: policySnapshot.Generation, + ObservedGeneration: policySnapshot.GetGeneration(), } if diff := cmp.Diff(scheduledCondition, wantScheduledCondition, ignoreConditionTimeReasonAndMessageFields); diff != "" { @@ -405,30 +485,28 @@ func pickAllPolicySnapshotStatusUpdatedActual(scored, filtered []string, policyS } } -func hasNScheduledOrBoundBindingsPresentActual(crpName string, clusters []string) func() error { +func hasNScheduledOrBoundBindingsPresentActual(placementKey types.NamespacedName, clusters []string) func() error { clusterMap := make(map[string]bool) for _, name := range clusters { clusterMap[name] = true } return func() error { - bindingList := &placementv1beta1.ClusterResourceBindingList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) - listOptions := &client.ListOptions{LabelSelector: labelSelector} - if err := hubClient.List(ctx, bindingList, listOptions); err != nil { - return err + bindingList, err := listBindings(placementKey) + if err != nil { + return fmt.Errorf("failed to list bindings for placement %s: %w", placementKey, err) } matchedScheduledOrBoundBindingCount := 0 - for _, binding := range bindingList.Items { + for _, binding := range bindingList.GetBindingObjs() { // A match is found iff the binding is of the scheduled or bound state, and its // target cluster is in the given list. // // We do not simply check against the state here as there exists a rare case where // the system might be in an in-between state and happen to have just the enough // number of bindings (though not the wanted ones). - _, matched := clusterMap[binding.Spec.TargetCluster] - if (binding.Spec.State == placementv1beta1.BindingStateBound || binding.Spec.State == placementv1beta1.BindingStateScheduled) && matched { + _, matched := clusterMap[binding.GetBindingSpec().TargetCluster] + if (binding.GetBindingSpec().State == placementv1beta1.BindingStateBound || binding.GetBindingSpec().State == placementv1beta1.BindingStateScheduled) && matched { matchedScheduledOrBoundBindingCount++ } } @@ -445,20 +523,20 @@ func pickNPolicySnapshotStatusUpdatedActual( numOfClusters int, picked, notPicked, filtered []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, - policySnapshotName string, + policySnapshotKey types.NamespacedName, opts []cmp.Option, ) func() error { return func() error { - policySnapshot := &placementv1beta1.ClusterSchedulingPolicySnapshot{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: policySnapshotName}, policySnapshot); err != nil { - return err + policySnapshot, err := getSchedulingPolicySnapshot(policySnapshotKey) + if err != nil { + return fmt.Errorf("failed to get policy snapshot %s: %w", policySnapshotKey, err) } - // Verify that the observed CRP generation field is populated correctly. 
- wantCRPGeneration := policySnapshot.Annotations[placementv1beta1.CRPGenerationAnnotation] - observedCRPGeneration := policySnapshot.Status.ObservedCRPGeneration - if strconv.FormatInt(observedCRPGeneration, 10) != wantCRPGeneration { - return fmt.Errorf("policy snapshot observed CRP generation not match: want %s, got %d", wantCRPGeneration, observedCRPGeneration) + // Verify that the observed RP generation field is populated correctly. + wantRPGeneration := policySnapshot.GetAnnotations()[placementv1beta1.CRPGenerationAnnotation] + observedRPGeneration := policySnapshot.GetPolicySnapshotStatus().ObservedCRPGeneration + if strconv.FormatInt(observedRPGeneration, 10) != wantRPGeneration { + return fmt.Errorf("policy snapshot observed RP generation not match: want %s, got %d", wantRPGeneration, observedRPGeneration) } // Verify that cluster decisions are populated correctly. @@ -484,24 +562,24 @@ func pickNPolicySnapshotStatusUpdatedActual( }) } if diff := cmp.Diff( - policySnapshot.Status.ClusterDecisions, wantClusterDecisions, + policySnapshot.GetPolicySnapshotStatus().ClusterDecisions, wantClusterDecisions, opts..., ); diff != "" { return fmt.Errorf("policy snapshot status cluster decisions (-got, +want): %s", diff) } // Verify that the scheduled condition is added correctly. - scheduledCondition := meta.FindStatusCondition(policySnapshot.Status.Conditions, string(placementv1beta1.PolicySnapshotScheduled)) + scheduledCondition := meta.FindStatusCondition(policySnapshot.GetPolicySnapshotStatus().Conditions, string(placementv1beta1.PolicySnapshotScheduled)) wantScheduledCondition := &metav1.Condition{ Type: string(placementv1beta1.PolicySnapshotScheduled), Status: metav1.ConditionTrue, - ObservedGeneration: policySnapshot.Generation, + ObservedGeneration: policySnapshot.GetGeneration(), } if len(picked) != numOfClusters { wantScheduledCondition = &metav1.Condition{ Type: string(placementv1beta1.PolicySnapshotScheduled), Status: metav1.ConditionFalse, - ObservedGeneration: policySnapshot.Generation, + ObservedGeneration: policySnapshot.GetGeneration(), } } diff --git a/test/scheduler/pickall_integration_test.go b/test/scheduler/pickall_integration_test.go index e1fd8d490..1d3e33ed0 100644 --- a/test/scheduler/pickall_integration_test.go +++ b/test/scheduler/pickall_integration_test.go @@ -25,6 +25,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" ) @@ -32,11 +33,12 @@ import ( var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { Context("pick all valid clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP with no scheduling policy specified, along with its associated policy snapshot. 
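Editor's note: the actuals above all funnel binding and policy-snapshot lookups through a placement key, where an empty Namespace selects the cluster-scoped types (ClusterResourcePlacement, ClusterResourceBinding, ClusterSchedulingPolicySnapshot) and a non-empty Namespace selects the namespaced ones. The suite's listBindings helper itself is not part of this hunk; the sketch below only illustrates the dispatch it presumably performs. It is simplified to return a plain slice rather than whatever list wrapper the real helper returns (the actuals rely only on its GetBindingObjs method), and ResourceBindingList is assumed to exist as the list type for namespaced ResourceBindings.

```go
package sketch

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
)

// listBindingsForKey is a simplified sketch of a key-based binding lookup:
// cluster-scoped bindings for CRPs, namespaced bindings for RPs, both selected
// by the placement tracking label.
func listBindingsForKey(ctx context.Context, c client.Client, key types.NamespacedName) ([]placementv1beta1.BindingObj, error) {
	trackingLabel := client.MatchingLabels{placementv1beta1.PlacementTrackingLabel: key.Name}
	if key.Namespace == "" {
		// Cluster-scoped placement (CRP): bindings are ClusterResourceBindings.
		list := &placementv1beta1.ClusterResourceBindingList{}
		if err := c.List(ctx, list, trackingLabel); err != nil {
			return nil, err
		}
		objs := make([]placementv1beta1.BindingObj, 0, len(list.Items))
		for i := range list.Items {
			objs = append(objs, &list.Items[i])
		}
		return objs, nil
	}
	// Namespaced placement (RP): bindings are ResourceBindings in the key's namespace.
	list := &placementv1beta1.ResourceBindingList{} // assumed list type
	if err := c.List(ctx, list, trackingLabel, client.InNamespace(key.Namespace)); err != nil {
		return nil, err
	}
	objs := make([]placementv1beta1.BindingObj, 0, len(list.Items))
	for i := range list.Items {
		objs = append(objs, &list.Items[i])
	}
	return objs, nil
}
```

Keeping the dispatch in one helper is what lets the assertions below stay identical for CRPs and RPs.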
@@ -44,37 +46,38 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all healthy clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(healthyClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(healthyClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for unhealthy clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(unhealthyClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unhealthyClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(healthyClusters, unhealthyClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(healthyClusters, unhealthyClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) // This is a serial test as adding a new member cluster may interrupt other test cases. Context("add a new healthy cluster", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) // Prepare a new cluster to avoid interrupting other concurrently running test cases. @@ -91,7 +94,7 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP with no scheduling policy specified, along with its associated policy snapshot. 
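Editor's note: the "add a new healthy cluster" context relies on fixtures (updatedHealthyClusters, updatedZeroScoreByCluster) that extend the shared baseline with the provisional cluster; their construction sits outside this hunk. A plausible sketch, run inside the suite which provides healthyClusters, zeroScoreByCluster and newUnhealthyMemberClusterName; the zero-valued ClusterScore is an assumption, the real fixtures may set explicit zero fields instead:

```go
// Copy the baseline so concurrently running specs are not affected.
updatedHealthyClusters := append([]string{}, healthyClusters...)
updatedHealthyClusters = append(updatedHealthyClusters, newUnhealthyMemberClusterName)

updatedZeroScoreByCluster := make(map[string]*placementv1beta1.ClusterScore, len(zeroScoreByCluster)+1)
for name, score := range zeroScoreByCluster {
	updatedZeroScoreByCluster[name] = score
}
updatedZeroScoreByCluster[newUnhealthyMemberClusterName] = &placementv1beta1.ClusterScore{} // assumed zero score
```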
@@ -105,14 +108,14 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { }) It("should create scheduled bindings for the newly recovered cluster", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newUnhealthyMemberClusterName) @@ -122,6 +125,7 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { // This is a serial test as adding a new member cluster may interrupt other test cases. Context("a healthy cluster becomes unhealthy", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) // Prepare a new cluster to avoid interrupting other concurrently running test cases. @@ -138,7 +142,7 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP with no scheduling policy specified, along with its associated policy snapshot. @@ -151,7 +155,7 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { markClusterAsHealthy(newUnhealthyMemberClusterName) // Verify that a binding has been created for the cluster. 
- scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") @@ -160,14 +164,14 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { }) It("should not remove binding for the cluster that just becomes unhealthy", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newUnhealthyMemberClusterName) @@ -178,11 +182,12 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { var _ = Describe("scheduling CRPs of the PickAll placement type", func() { Context("pick all valid clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. 
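Editor's note: throughout the actuals earlier in this diff, binding identity is only checked by prefix: names are derived from the placement name and the target cluster, with what appears to be a generated suffix appended, so exact-name comparison is not possible (hence bindingNamePlaceholder and the cmp ignore options). A tiny illustration with hypothetical values:

```go
// Hypothetical values; only the prefix is stable across runs.
placementKey := types.NamespacedName{Name: "crp-7"}
targetCluster := "member-1"
wantPrefix := fmt.Sprintf("%s-%s", placementKey.Name, targetCluster) // "crp-7-member-1"
fmt.Println(strings.HasPrefix("crp-7-member-1-8f2kq", wantPrefix))   // prints "true"
```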
@@ -190,36 +195,37 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all healthy clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(healthyClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(healthyClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for unhealthy clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(unhealthyClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unhealthyClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(healthyClusters, unhealthyClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(healthyClusters, unhealthyClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with specific affinities (single term, multiple selectors)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) wantTargetClusters := []string{ @@ -238,7 +244,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. 
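Editor's note: because every actual now takes a types.NamespacedName, the namespaced RP suite added at the end of this file can reuse the CRP assertions unchanged; only the key carries a namespace. A minimal illustration using the same helpers and suite constants (the values mirror the new Describe block):

```go
rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName}

// Same actuals as the CRP specs above, keyed by a namespaced name.
noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey)
Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")

finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey)
Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP")
```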
@@ -271,36 +277,37 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with specific affinities (multiple terms, single selector)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) wantTargetClusters := []string{ @@ -319,7 +326,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. 
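Editor's note: the "affinities updated" context below calls markBindingsAsBoundForClusters to move some bindings out of the Scheduled state before the policy changes, so that the bound-binding update path is exercised. The helper is not part of this hunk; the sketch below shows what it plausibly does, assuming GetBindingSpec returns a mutable pointer and that binding objects satisfy client.Object (both assumptions), and reusing the suite's hubClient, ctx and listBindings.

```go
// markBindingsAsBoundForClustersSketch is a hypothetical stand-in for the
// suite's helper: it flips scheduled bindings for the given clusters to Bound.
func markBindingsAsBoundForClustersSketch(placementKey types.NamespacedName, clusters []string) {
	targets := make(map[string]bool, len(clusters))
	for _, name := range clusters {
		targets[name] = true
	}

	bindingList, err := listBindings(placementKey)
	Expect(err).NotTo(HaveOccurred(), "Failed to list bindings for placement %s", placementKey)

	for _, binding := range bindingList.GetBindingObjs() {
		spec := binding.GetBindingSpec()
		if targets[spec.TargetCluster] && spec.State == placementv1beta1.BindingStateScheduled {
			spec.State = placementv1beta1.BindingStateBound
			Expect(hubClient.Update(ctx, binding)).To(Succeed(), "Failed to mark binding %s as bound", binding.GetName())
		}
	}
}
```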
@@ -359,36 +366,37 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("affinities updated", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, crpName, 2) @@ -419,7 +427,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -454,12 +462,12 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { createPickAllCRPWithPolicySnapshot(crpName, policySnapshotName1, policy) // Verify that bindings have been created as expected. 
- scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters1, zeroScoreByCluster, crpName, policySnapshotName1) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters1, zeroScoreByCluster, crpKey, policySnapshotName1) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") // Bind some bindings. - markBindingsAsBoundForClusters(crpName, boundClusters) + markBindingsAsBoundForClusters(crpKey, boundClusters) // Update the CRP with a new affinity. affinity := &placementv1beta1.Affinity{ @@ -495,42 +503,43 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create/update scheduled bindings for newly matched clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(scheduledClusters, zeroScoreByCluster, crpName, policySnapshotName2) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(scheduledClusters, zeroScoreByCluster, crpKey, policySnapshotName2) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should update bound bindings for newly matched clusters", func() { - boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(boundClusters, zeroScoreByCluster, crpName, policySnapshotName2) + boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(boundClusters, zeroScoreByCluster, crpKey, policySnapshotName2) Eventually(boundBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") Consistently(boundBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters2, wantIgnoredClusters2, policySnapshotName2) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters2, wantIgnoredClusters2, types.NamespacedName{Name: policySnapshotName2}) 
Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("no matching clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) wantIgnoredClusters := []string{ @@ -547,7 +556,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") policy := &placementv1beta1.PlacementPolicy{ @@ -587,25 +596,470 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual([]string{}, wantIgnoredClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual([]string{}, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) + }) + }) +}) + +var _ = Describe("scheduling RPs of the PickAll placement type", func() { + Context("pick all valid clusters", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + + BeforeAll(func() { + // Ensure that no bindings have been created so far. 
+ noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, nil) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all healthy clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(healthyClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for unhealthy clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unhealthyClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(healthyClusters, unhealthyClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("pick clusters with specific affinities (single term, multiple selectors)", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + + wantTargetClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster6WestProd, + } + wantIgnoredClusters := []string{ + memberCluster3EastCanary, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster7WestCanary, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. 
+ policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabel: "prod", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"east", "west"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all matching clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("pick clusters with specific affinities (multiple terms, single selector)", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + + wantTargetClusters := []string{ + memberCluster3EastCanary, + memberCluster6WestProd, + memberCluster7WestCanary, + } + wantIgnoredClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. 
+ noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabel: "canary", + }, + }, + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabel, + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{ + "east", + "central", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all matching clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. 
+ ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("affinities updated", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 2) + policySnapshotKey2 := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName2} + + wantTargetClusters1 := []string{ + memberCluster3EastCanary, + memberCluster7WestCanary, + } + wantTargetClusters2 := []string{ + memberCluster3EastCanary, + memberCluster6WestProd, + memberCluster7WestCanary, + } + wantIgnoredClusters2 := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + boundClusters := []string{ + memberCluster3EastCanary, + } + scheduledClusters := []string{ + memberCluster6WestProd, + memberCluster7WestCanary, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabel: "canary", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + "east", + "west", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName1, policy) + + // Verify that bindings have been created as expected. + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters1, zeroScoreByCluster, rpKey, policySnapshotName1) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + + // Bind some bindings. + markBindingsAsBoundForClusters(rpKey, boundClusters) + + // Update the RP with a new affinity. 
+ affinity := &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabel: "canary", + }, + }, + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabel, + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{ + "east", + "central", + }, + }, + }, + }, + }, + }, + }, + }, + } + updatePickAllRPWithNewAffinity(testNamespace, rpName, affinity, policySnapshotName1, policySnapshotName2) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create/update scheduled bindings for newly matched clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(scheduledClusters, zeroScoreByCluster, rpKey, policySnapshotName2) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should update bound bindings for newly matched clusters", func() { + boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(boundClusters, zeroScoreByCluster, rpKey, policySnapshotName2) + Eventually(boundBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + Consistently(boundBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters2, wantIgnoredClusters2, policySnapshotKey2) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. 
+ ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("no matching clusters", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + + wantIgnoredClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster3EastCanary, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster6WestProd, + memberCluster7WestCanary, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabel: "wonderland", + }, + }, + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabel, + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{ + "east", + "central", + "west", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual([]string{}, wantIgnoredClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) }) }) }) diff --git a/test/scheduler/pickfixed_integration_test.go b/test/scheduler/pickfixed_integration_test.go index c6aa4a6b0..b1054fe78 100644 --- a/test/scheduler/pickfixed_integration_test.go +++ b/test/scheduler/pickfixed_integration_test.go @@ -24,11 +24,13 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" ) var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { Context("with valid target clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} targetClusters := []string{ memberCluster1EastProd, @@ -40,7 +42,7 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create the CRP and its associated policy snapshot. @@ -48,29 +50,30 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for valid target clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters, nilScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters, nilScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should report status correctly", func() { - statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters, []string{}, policySnapshotName) + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters, []string{}, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") }) AfterAll(func() { - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("with both valid and invalid/non-existent target clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} targetClusters := []string{ memberCluster1EastProd, @@ -99,7 +102,7 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create the CRP and its associated policy snapshot. 
@@ -107,35 +110,36 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for valid target clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(validClusters, nilScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(validClusters, nilScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create bindings for invalid target clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(invalidClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(invalidClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Created a binding for invalid or not found cluster") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Created a binding for invalid or not found cluster") }) It("should report status correctly", func() { - statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(validClusters, invalidClusters, policySnapshotName) + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(validClusters, invalidClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update policy snapshot status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update policy snapshot status") }) AfterAll(func() { - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("policy snapshot refresh with added clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} targetClusters1 := []string{ memberCluster1EastProd, @@ -166,55 +170,56 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create the CRP and its associated policy snapshot. createPickFixedCRPWithPolicySnapshot(crpName, targetClusters1, policySnapshotName1) // Make sure that the bindings have been created. 
- scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters1, nilScoreByCluster, crpName, policySnapshotName1) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters1, nilScoreByCluster, crpKey, policySnapshotName1) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") // Mark all previously created bindings as bound. - markBindingsAsBoundForClusters(crpName, previouslyBoundClusters) + markBindingsAsBoundForClusters(crpKey, previouslyBoundClusters) // Update the CRP with new target clusters and refresh scheduling policy snapshots. updatePickFixedCRPWithNewTargetClustersAndRefreshSnapshots(crpName, targetClusters2, policySnapshotName1, policySnapshotName2) }) It("should create scheduled bindings for newly added valid target clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(newScheduledClusters, nilScoreByCluster, crpName, policySnapshotName2) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(newScheduledClusters, nilScoreByCluster, crpKey, policySnapshotName2) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should update bound bindings for previously added valid target clusters", func() { - boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(previouslyBoundClusters, nilScoreByCluster, crpName, policySnapshotName2) + boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(previouslyBoundClusters, nilScoreByCluster, crpKey, policySnapshotName2) Eventually(boundBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") Consistently(boundBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") }) It("should update scheduled bindings for previously added valid target clusters", func() { - scheduledBindingsUpdatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(previouslyScheduledClusters, nilScoreByCluster, crpName, policySnapshotName2) + scheduledBindingsUpdatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(previouslyScheduledClusters, nilScoreByCluster, crpKey, policySnapshotName2) Eventually(scheduledBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") Consistently(scheduledBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") }) It("should report status correctly", func() { - statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters2, []string{}, policySnapshotName2) + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters2, []string{}, types.NamespacedName{Name: policySnapshotName2}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to 
update policy snapshot status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update policy snapshot status") }) AfterAll(func() { - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("policy snapshot refresh with removed clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} targetClusters1 := []string{ memberCluster1EastProd, @@ -244,44 +249,312 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create the CRP and its associated policy snapshot. createPickFixedCRPWithPolicySnapshot(crpName, targetClusters1, policySnapshotName1) // Make sure that the bindings have been created. - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters1, nilScoreByCluster, crpName, policySnapshotName1) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters1, nilScoreByCluster, crpKey, policySnapshotName1) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") // Mark some previously created bindings as bound. - markBindingsAsBoundForClusters(crpName, previouslyBoundClusters) + markBindingsAsBoundForClusters(crpKey, previouslyBoundClusters) // Update the CRP with new target clusters and refresh scheduling policy snapshots. 
updatePickFixedCRPWithNewTargetClustersAndRefreshSnapshots(crpName, targetClusters2, policySnapshotName1, policySnapshotName2) }) It("should create scheduled bindings for newly added valid target clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(scheduledClusters, nilScoreByCluster, crpName, policySnapshotName2) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(scheduledClusters, nilScoreByCluster, crpKey, policySnapshotName2) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should mark bindings as unscheduled for removed target clusters", func() { - unscheduledBindingsCreatedActual := unscheduledBindingsCreatedOrUpdatedForClustersActual(unscheduledClusters, nilScoreByCluster, crpName, policySnapshotName1) + unscheduledBindingsCreatedActual := unscheduledBindingsCreatedOrUpdatedForClustersActual(unscheduledClusters, nilScoreByCluster, crpKey, policySnapshotName1) Eventually(unscheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to mark bindings as unscheduled") Consistently(unscheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to mark bindings as unscheduled") }) It("should report status correctly", func() { - statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(scheduledClusters, []string{}, policySnapshotName2) + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(scheduledClusters, []string{}, types.NamespacedName{Name: policySnapshotName2}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update policy snapshot status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update policy snapshot status") }) AfterAll(func() { - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) + }) + }) +}) + +var _ = Describe("scheduling RPs of the PickFixed placement type", func() { + Context("with valid target clusters", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + + targetClusters := []string{ + memberCluster1EastProd, + memberCluster4CentralProd, + memberCluster6WestProd, + } + + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create the RP and its associated policy snapshot. 
+ createPickFixedRPWithPolicySnapshot(testNamespace, rpName, targetClusters, policySnapshotName) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for valid target clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters, nilScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters, []string{}, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") + }) + + AfterAll(func() { + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("with both valid and invalid/non-existent target clusters", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + + targetClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster6WestProd, + memberCluster8UnhealthyEastProd, // An invalid cluster (unhealthy). + memberCluster9LeftCentralProd, // An invalid cluster (left). + memberCluster10NonExistent, // A cluster that cannot be found in the fleet. + } + validClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster6WestProd, + } + invalidClusters := []string{ + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + memberCluster10NonExistent, + } + + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create the RP and its associated policy snapshot. 
+ createPickFixedRPWithPolicySnapshot(testNamespace, rpName, targetClusters, policySnapshotName) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for valid target clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(validClusters, nilScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create bindings for invalid target clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(invalidClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Created a binding for invalid or not found cluster") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Created a binding for invalid or not found cluster") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(validClusters, invalidClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update policy snapshot status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update policy snapshot status") + }) + + AfterAll(func() { + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("policy snapshot refresh with added clusters", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + + targetClusters1 := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + } + targetClusters2 := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster6WestProd, + } + previouslyBoundClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + } + previouslyScheduledClusters := []string{ + memberCluster4CentralProd, + } + newScheduledClusters := []string{ + memberCluster5CentralProd, + memberCluster6WestProd, + } + + policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 2) + policySnapshotKey2 := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName2} + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create the RP and its associated policy snapshot. + createPickFixedRPWithPolicySnapshot(testNamespace, rpName, targetClusters1, policySnapshotName1) + + // Make sure that the bindings have been created. 
+ scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters1, nilScoreByCluster, rpKey, policySnapshotName1) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + + // Mark all previously created bindings as bound. + markBindingsAsBoundForClusters(rpKey, previouslyBoundClusters) + + // Update the RP with new target clusters and refresh scheduling policy snapshots. + updatePickFixedRPWithNewTargetClustersAndRefreshSnapshots(testNamespace, rpName, targetClusters2, policySnapshotName1, policySnapshotName2) + }) + + It("should create scheduled bindings for newly added valid target clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(newScheduledClusters, nilScoreByCluster, rpKey, policySnapshotName2) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should update bound bindings for previously added valid target clusters", func() { + boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(previouslyBoundClusters, nilScoreByCluster, rpKey, policySnapshotName2) + Eventually(boundBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + Consistently(boundBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + }) + + It("should update scheduled bindings for previously added valid target clusters", func() { + scheduledBindingsUpdatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(previouslyScheduledClusters, nilScoreByCluster, rpKey, policySnapshotName2) + Eventually(scheduledBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + Consistently(scheduledBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters2, []string{}, policySnapshotKey2) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update policy snapshot status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update policy snapshot status") + }) + + AfterAll(func() { + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("policy snapshot refresh with removed clusters", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + + targetClusters1 := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + } + targetClusters2 := []string{ + memberCluster5CentralProd, + memberCluster6WestProd, + } + previouslyBoundClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + } + 
scheduledClusters := []string{ + memberCluster5CentralProd, + memberCluster6WestProd, + } + unscheduledClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + } + + policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 2) + policySnapshotKey2 := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName2} + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create the RP and its associated policy snapshot. + createPickFixedRPWithPolicySnapshot(testNamespace, rpName, targetClusters1, policySnapshotName1) + + // Make sure that the bindings have been created. + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters1, nilScoreByCluster, rpKey, policySnapshotName1) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + + // Mark some previously created bindings as bound. + markBindingsAsBoundForClusters(rpKey, previouslyBoundClusters) + + // Update the RP with new target clusters and refresh scheduling policy snapshots. + updatePickFixedRPWithNewTargetClustersAndRefreshSnapshots(testNamespace, rpName, targetClusters2, policySnapshotName1, policySnapshotName2) + }) + + It("should create scheduled bindings for newly added valid target clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(scheduledClusters, nilScoreByCluster, rpKey, policySnapshotName2) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should mark bindings as unscheduled for removed target clusters", func() { + unscheduledBindingsCreatedActual := unscheduledBindingsCreatedOrUpdatedForClustersActual(unscheduledClusters, nilScoreByCluster, rpKey, policySnapshotName1) + Eventually(unscheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to mark bindings as unscheduled") + Consistently(unscheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to mark bindings as unscheduled") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(scheduledClusters, []string{}, policySnapshotKey2) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update policy snapshot status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update policy snapshot status") + }) + + AfterAll(func() { + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) }) }) }) diff --git a/test/scheduler/pickn_integration_test.go b/test/scheduler/pickn_integration_test.go index 15d5db175..0d96139b6 
100644 --- a/test/scheduler/pickn_integration_test.go +++ b/test/scheduler/pickn_integration_test.go @@ -45,6 +45,7 @@ var ( var _ = Describe("scheduling CRPs of the PickN placement type", func() { Context("pick N clusters with no affinities/topology spread constraints specified", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(3) // Less than the number of clusters available (7) in the fleet. @@ -70,7 +71,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -82,36 +83,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report 
status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("not enough clusters to pick", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(10) // More than the number of clusters available (7) in the fleet. @@ -135,7 +137,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -147,43 +149,44 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. 
- ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick 0 clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(0) BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -195,30 +198,31 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, []string{}) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, []string{}) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), []string{}, []string{}, []string{}, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), []string{}, []string{}, []string{}, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with required affinity", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(2) @@ -239,7 +243,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -275,36 +279,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with required affinity, multiple terms", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(4) @@ -326,7 +331,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -378,36 +383,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with preferred affinity", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(4) @@ -446,7 +452,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -483,36 +489,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with preferred affinity, multiple terms", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(4) @@ -554,7 +561,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -610,36 +617,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with required topology spread constraints", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(2) @@ -688,7 +696,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -707,36 +715,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with required topology spread constraints, multiple terms", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(2) @@ -811,7 +820,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -835,36 +844,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with preferred topology spread constraints", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(2) @@ -916,7 +926,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -935,36 +945,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with preferred topology spread constraints, multiple terms", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(2) @@ -1048,7 +1059,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -1072,36 +1083,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with mixed affinities and topology spread constraints, required only", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(2) @@ -1151,7 +1163,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -1191,36 +1203,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with mixed affinities and topology spread constraints, preferred only", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(6) @@ -1296,7 +1309,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -1331,36 +1344,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with mixed affinities and topology spread constraints, mixed", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(3) @@ -1426,7 +1440,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -1477,36 +1491,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("upscaling", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClustersBefore := int32(1) @@ -1537,7 +1552,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -1548,11 +1563,11 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { createPickNCRPWithPolicySnapshot(crpName, policySnapshotName, policy) // Verify that scheduling has been completed. - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersBefore) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersBefore) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersBefore, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersBefore, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") @@ -1574,36 +1589,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersAfter) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersAfter) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClustersAfter, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClustersAfter, 
zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("downscaling", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClustersBefore := int32(3) @@ -1628,7 +1644,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1639,11 +1655,11 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { createPickNCRPWithPolicySnapshot(crpName, policySnapshotName, policy) // Verify that scheduling has been completed. - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersBefore) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersBefore) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersBefore, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersBefore, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") @@ -1665,36 +1681,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersAfter) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersAfter) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") 
Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClustersAfter, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClustersAfter, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("affinities and topology spread constraints updated", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotNameBefore := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) policySnapshotNameAfter := fmt.Sprintf(policySnapshotNameTemplate, crpName, 2) @@ -1812,7 +1829,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1862,11 +1879,11 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { createPickNCRPWithPolicySnapshot(crpName, policySnapshotNameBefore, policy) // Verify that scheduling has been completed. 
- hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersBefore) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersBefore) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersBefore, scoreByClusterBefore, crpName, policySnapshotNameBefore) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersBefore, scoreByClusterBefore, crpKey, policySnapshotNameBefore) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") @@ -1898,31 +1915,513 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersAfter) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersAfter) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, scoreByClusterAfter, crpName, policySnapshotNameAfter) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, scoreByClusterAfter, crpKey, policySnapshotNameAfter) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClustersAfter, scoreByClusterAfter, policySnapshotNameAfter, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClustersAfter, scoreByClusterAfter, types.NamespacedName{Name: policySnapshotNameAfter}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to 
report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) + }) + }) +}) + +var _ = Describe("scheduling RPs of the PickN placement type", func() { + Context("pick N clusters with no affinities/topology spread constraints specified", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + + numOfClusters := int32(3) // Less than the number of clusters available (7) in the fleet. + + // The scheduler is designed to produce only deterministic decisions; if there are no + // comparable scores available for selected clusters, the scheduler will rank the clusters + // by their names. + wantPickedClusters := []string{ + memberCluster5CentralProd, + memberCluster6WestProd, + memberCluster7WestCanary, + } + wantNotPickedClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster3EastCanary, + memberCluster4CentralProd, + } + wantFilteredClusters := []string{ + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickN placement type, along with its associated policy snapshot. 
+			policy := &placementv1beta1.PlacementPolicy{
+				PlacementType:    placementv1beta1.PickNPlacementType,
+				NumberOfClusters: &numOfClusters,
+			}
+			createPickNRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy)
+		})
+
+		It("should add scheduler cleanup finalizer to the RP", func() {
+			finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey)
+			Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP")
+		})
+
+		It("should create N bindings", func() {
+			hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, wantPickedClusters)
+			Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings")
+			Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings")
+		})
+
+		It("should create scheduled bindings for selected clusters", func() {
+			scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, rpKey, policySnapshotName)
+			Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters")
+			Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters")
+		})
+
+		It("should report status correctly", func() {
+			crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, zeroScoreByCluster, policySnapshotKey, pickNCmpOpts)
+			Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly")
+			Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly")
+		})
+
+		AfterAll(func() {
+			// Delete the RP.
+			ensurePlacementAndAllRelatedResourcesDeletion(rpKey)
+		})
+	})
+
+	Context("not enough clusters to pick", Ordered, func() {
+		rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
+		rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName}
+		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1)
+		policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName}
+
+		numOfClusters := int32(10) // More than the number of clusters available (7) in the fleet.
+
+		// The scheduler is designed to produce only deterministic decisions; if there are no
+		// comparable scores available for selected clusters, the scheduler will rank the clusters
+		// by their names.
+		wantPickedClusters := []string{
+			memberCluster1EastProd,
+			memberCluster2EastProd,
+			memberCluster3EastCanary,
+			memberCluster4CentralProd,
+			memberCluster5CentralProd,
+			memberCluster6WestProd,
+			memberCluster7WestCanary,
+		}
+		wantFilteredClusters := []string{
+			memberCluster8UnhealthyEastProd,
+			memberCluster9LeftCentralProd,
+		}
+
+		BeforeAll(func() {
+			// Ensure that no bindings have been created so far.
+			noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey)
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+
+			// Create an RP of the PickN placement type, along with its associated policy snapshot.
+			policy := &placementv1beta1.PlacementPolicy{
+				PlacementType:    placementv1beta1.PickNPlacementType,
+				NumberOfClusters: &numOfClusters,
+			}
+			createPickNRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy)
+		})
+
+		It("should add scheduler cleanup finalizer to the RP", func() {
+			finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey)
+			Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP")
+		})
+
+		It("should create N bindings", func() {
+			hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, wantPickedClusters)
+			Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings")
+			Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings")
+		})
+
+		It("should create scheduled bindings for selected clusters", func() {
+			scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, rpKey, policySnapshotName)
+			Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters")
+			Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters")
+		})
+
+		It("should report status correctly", func() {
+			crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotKey, pickNCmpOpts)
+			Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly")
+			Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly")
+		})
+
+		AfterAll(func() {
+			// Delete the RP.
+			ensurePlacementAndAllRelatedResourcesDeletion(rpKey)
+		})
+	})
+
+	Context("pick 0 clusters", Ordered, func() {
+		rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
+		rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName}
+		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1)
+		policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName}
+
+		numOfClusters := int32(0)
+
+		BeforeAll(func() {
+			// Ensure that no bindings have been created so far.
+			noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey)
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+
+			// Create an RP of the PickN placement type, along with its associated policy snapshot.
+ policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + } + createPickNRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create N bindings", func() { + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, []string{}) + Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") + Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") + }) + + It("should report status correctly", func() { + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), []string{}, []string{}, []string{}, zeroScoreByCluster, policySnapshotKey, pickNCmpOpts) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") + Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("pick with required affinity", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + + numOfClusters := int32(2) + + wantPickedClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + } + wantFilteredClusters := []string{ + memberCluster3EastCanary, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster6WestProd, + memberCluster7WestCanary, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickN placement type, along with its associated policy snapshot. 
+ policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabel: "east", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + "prod", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickNRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create N bindings", func() { + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, wantPickedClusters) + Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") + Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") + }) + + It("should create scheduled bindings for selected clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + }) + + It("should report status correctly", func() { + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotKey, pickNCmpOpts) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") + Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("pick with required affinity, multiple terms", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + + numOfClusters := int32(4) + + // Note that the number of matching clusters is less than the desired one. 
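The required-affinity term above combines MatchLabels and MatchExpressions, which are ANDed together when the selector is evaluated against a member cluster's labels. The following is a small sketch of that evaluation using the standard apimachinery conversion; the literal region/env keys stand in for the regionLabel/envLabel constants used by the suite, and the scheduler's own filtering plugin is not shown in this diff.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Mirrors the required-affinity term used in the spec: region=east AND env in (prod).
	term := &metav1.LabelSelector{
		MatchLabels: map[string]string{"region": "east"},
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "env", Operator: metav1.LabelSelectorOpIn, Values: []string{"prod"}},
		},
	}

	selector, err := metav1.LabelSelectorAsSelector(term)
	if err != nil {
		panic(err)
	}

	// Hypothetical member cluster labels; only clusters whose labels satisfy
	// every requirement in the term are eligible for placement.
	clusterLabels := labels.Set{"region": "east", "env": "prod"}
	fmt.Println(selector.Matches(clusterLabels)) // true

	clusterLabels = labels.Set{"region": "east", "env": "canary"}
	fmt.Println(selector.Matches(clusterLabels)) // false
}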
+ wantPickedClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster7WestCanary, + } + wantFilteredClusters := []string{ + memberCluster3EastCanary, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster6WestProd, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickN placement type, along with its associated policy snapshot. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabel: "east", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + "prod", + }, + }, + }, + }, + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabel: "west", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabel, + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{ + "prod", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickNRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create N bindings", func() { + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, wantPickedClusters) + Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") + Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") + }) + + It("should create scheduled bindings for selected clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + }) + + It("should report status correctly", func() { + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotKey, pickNCmpOpts) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") + Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), 
"Failed to report status correctly") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("pick with preferred affinity", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + + numOfClusters := int32(4) + + wantPickedClusters := []string{ + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster7WestCanary, + memberCluster6WestProd, + } + wantNotPickedClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster3EastCanary, + } + wantFilteredClusters := []string{ + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + scoreByCluster := map[string]*placementv1beta1.ClusterScore{ + memberCluster1EastProd: &zeroScore, + memberCluster2EastProd: &zeroScore, + memberCluster3EastCanary: &zeroScore, + memberCluster4CentralProd: { + AffinityScore: ptr.To(int32(10)), + TopologySpreadScore: ptr.To(int32(0)), + }, + memberCluster5CentralProd: { + AffinityScore: ptr.To(int32(10)), + TopologySpreadScore: ptr.To(int32(0)), + }, + memberCluster6WestProd: &zeroScore, + memberCluster7WestCanary: &zeroScore, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickN placement type, along with its associated policy snapshot. 
+ policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []placementv1beta1.PreferredClusterSelector{ + { + Weight: 10, + Preference: placementv1beta1.ClusterSelectorTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabel: "central", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + "prod", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickNRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create N bindings", func() { + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, wantPickedClusters) + Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") + Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") + }) + + It("should create scheduled bindings for selected clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + }) + + It("should report status correctly", func() { + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotKey, pickNCmpOpts) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") + Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") + }) + + AfterAll(func() { + // Delete the RP. 
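The scoreByCluster map above expects an AffinityScore of 10 only for the two central prod clusters, because they satisfy the single preferred term with Weight 10 while every other cluster keeps the zero score. Here is a sketch of that accumulation rule (sum of the weights of the preferred terms a cluster matches), using hypothetical helper names; the real scoring plugin may combine or normalize scores differently.

package main

import "fmt"

// preferredTerm is a simplified stand-in for a PreferredClusterSelector:
// a weight plus a predicate that reports whether a cluster matches the term.
type preferredTerm struct {
	weight  int32
	matches func(clusterLabels map[string]string) bool
}

// affinityScore sums the weights of all preferred terms a cluster satisfies.
// This mirrors what the test expectations encode (weight 10 -> score 10 for
// matching clusters, 0 for everyone else).
func affinityScore(clusterLabels map[string]string, terms []preferredTerm) int32 {
	var score int32
	for _, t := range terms {
		if t.matches(clusterLabels) {
			score += t.weight
		}
	}
	return score
}

func main() {
	centralProd := func(l map[string]string) bool {
		return l["region"] == "central" && l["env"] == "prod"
	}
	terms := []preferredTerm{{weight: 10, matches: centralProd}}

	fmt.Println(affinityScore(map[string]string{"region": "central", "env": "prod"}, terms)) // 10
	fmt.Println(affinityScore(map[string]string{"region": "west", "env": "prod"}, terms))    // 0
}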
+ ensurePlacementAndAllRelatedResourcesDeletion(rpKey) }) }) }) diff --git a/test/scheduler/property_based_scheduling_integration_test.go b/test/scheduler/property_based_scheduling_integration_test.go index 897f78d67..9c7ff2600 100644 --- a/test/scheduler/property_based_scheduling_integration_test.go +++ b/test/scheduler/property_based_scheduling_integration_test.go @@ -42,6 +42,7 @@ const ( var _ = Describe("scheduling CRPs of the PickAll placement type using cluster properties", func() { Context("pick clusters with specific properties (single term, multiple expressions)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) wantTargetClusters := []string{ @@ -60,7 +61,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -113,36 +114,37 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, 
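The recurring change in these test files is that helpers now accept a types.NamespacedName key instead of a bare name, so one helper signature serves both cluster-scoped CRPs (Name only) and namespaced RPs (Namespace plus Name). A short sketch of why that works, using core/v1 types as stand-ins for the fleet placement CRDs:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	// A cluster-scoped placement key carries only a Name, while a namespaced
	// placement key carries Namespace and Name; both are the same
	// types.NamespacedName value, which is also controller-runtime's
	// client.ObjectKey, so the same key type works for either scope.
	crpStyleKey := types.NamespacedName{Name: "example-crp"}
	rpStyleKey := types.NamespacedName{Namespace: "example-ns", Name: "example-rp"}

	var key client.ObjectKey = rpStyleKey // compiles: ObjectKey is an alias for NamespacedName
	fmt.Println(crpStyleKey.String(), key.String())

	// ObjectKeyFromObject derives the same key from any client.Object; a
	// core/v1 ConfigMap stands in here for the placement CRDs.
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "example-ns", Name: "example-rp"}}
	fmt.Println(client.ObjectKeyFromObject(cm) == rpStyleKey) // true
}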
consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with specific properties (multiple terms, single expression)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) wantTargetClusters := []string{ @@ -161,7 +163,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -232,36 +234,37 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. 
- ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with both label and property selectors (single term)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) wantTargetClusters := []string{ @@ -280,7 +283,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -317,36 +320,37 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. 
- ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with both label and property selectors (multiple terms)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) wantTargetClusters := []string{ @@ -365,7 +369,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -412,36 +416,37 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. 
- ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("property selector updated", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, crpName, 2) @@ -498,7 +503,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -537,30 +542,30 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantScheduledClusters1, zeroScoreByCluster, crpName, policySnapshotName1) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantScheduledClusters1, zeroScoreByCluster, crpKey, policySnapshotName1) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters1, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters1, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantScheduledClusters1, wantIgnoredClusters1, policySnapshotName1) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantScheduledClusters1, wantIgnoredClusters1, types.NamespacedName{Name: policySnapshotName1}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) It("can mark some bindings as bound", func() { - markBindingsAsBoundForClusters(crpName, wantBoundClusters1) + markBindingsAsBoundForClusters(crpKey, wantBoundClusters1) }) It("can update the scheduling policy with a new property selector", func() { @@ -596,43 +601,44 @@ 
var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should create/update scheduled bindings for newly matched clusters", func() { - scheduledBindingsCreatedOrUpdatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantScheduledClusters2, zeroScoreByCluster, crpName, policySnapshotName2) + scheduledBindingsCreatedOrUpdatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantScheduledClusters2, zeroScoreByCluster, crpKey, policySnapshotName2) Eventually(scheduledBindingsCreatedOrUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create/update the expected set of bindings") Consistently(scheduledBindingsCreatedOrUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create/update the expected set of bindings") }) It("should update bound bindings for newly matched clusters", func() { - boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(wantBoundClusters2, zeroScoreByCluster, crpName, policySnapshotName2) + boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(wantBoundClusters2, zeroScoreByCluster, crpKey, policySnapshotName2) Eventually(boundBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") Consistently(boundBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should mark bindings as unscheduled for clusters that were unselected", func() { - unscheduledBindingsUpdatedActual := unscheduledBindingsCreatedOrUpdatedForClustersActual(wantUnscheduledClusters2, zeroScoreByCluster, crpName, policySnapshotName1) + unscheduledBindingsUpdatedActual := unscheduledBindingsCreatedOrUpdatedForClustersActual(wantUnscheduledClusters2, zeroScoreByCluster, crpKey, policySnapshotName1) Eventually(unscheduledBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") Consistently(unscheduledBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantUnselectedClusters, policySnapshotName2) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantUnselectedClusters, types.NamespacedName{Name: policySnapshotName2}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. 
- ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("no matching clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) wantIgnoredClusters := []string{ @@ -649,7 +655,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -681,25 +687,25 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual([]string{}, wantIgnoredClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual([]string{}, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) @@ -707,6 +713,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr // interfere with other specs if run in parallel. Context("cluster properties refreshed", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) // wantTargetClusters1 and wantIgnoredClusters1 are the picked and unpicked clusters @@ -743,7 +750,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. 
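The *Actual helpers used throughout these specs (noBindingsCreatedForPlacementActual, scheduledBindingsCreatedOrUpdatedForClustersActual, and so on) follow one Gomega pattern: a factory captures the inputs and returns a func() error, which Eventually and Consistently then poll with the Succeed() matcher. The sketch below reproduces only the shape of that pattern with a hypothetical check; the helpers' bodies are not part of this diff.

package main

import (
	"fmt"
	"time"

	. "github.com/onsi/gomega"
)

// bindingsForPlacement is a hypothetical lookup standing in for a List call
// against the test cluster; it returns the bindings currently owned by the
// placement identified by key.
func bindingsForPlacement(key string) []string {
	return nil
}

// noBindingsCreatedActual mirrors the factory shape used by the specs: it
// captures the placement key and returns a closure that reports an error
// while the desired state does not hold.
func noBindingsCreatedActual(key string) func() error {
	return func() error {
		if got := bindingsForPlacement(key); len(got) != 0 {
			return fmt.Errorf("placement %s unexpectedly has %d bindings", key, len(got))
		}
		return nil
	}
}

func main() {
	g := NewGomega(func(message string, _ ...int) { panic(message) })

	actual := noBindingsCreatedActual("example-ns/example-rp")
	// Eventually retries until the closure returns nil; Consistently requires
	// it to keep returning nil for the whole duration.
	g.Eventually(actual, 2*time.Second, 100*time.Millisecond).Should(Succeed())
	g.Consistently(actual, 500*time.Millisecond, 100*time.Millisecond).Should(Succeed())
}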
- noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -775,24 +782,24 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters1, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters1, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters1, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters1, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters1, wantIgnoredClusters1, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters1, wantIgnoredClusters1, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) @@ -832,26 +839,26 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should create scheduled bindings for newly matched clusters while retaining old ones", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters2, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters2, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", 
func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters2, wantIgnoredClusters2, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters2, wantIgnoredClusters2, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Reset the cluster properties. for idx := range wantTargetClusters2 { @@ -864,6 +871,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr var _ = Describe("scheduling CRPs of the PickN placement type using cluster properties", func() { Context("pick clusters with specific properties (single sorter, ascending)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numberOfClusters := 3 @@ -915,7 +923,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -942,36 +950,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with specific properties (single sorter, descending)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numberOfClusters := 3 @@ -1029,7 +1038,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. 
@@ -1056,31 +1065,31 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) @@ -1088,6 +1097,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop // interfere with other specs if run in parallel. Context("pick clusters with specific properties (single sorter, same property value across the board)", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numberOfClusters := 3 @@ -1135,7 +1145,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop } // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1162,31 +1172,31 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Reset the cluster properties. 
for clusterName := range propertiesByCluster { @@ -1197,6 +1207,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop Context("pick clusters with specific properties (single sorter, specified property not available across the board)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numberOfClusters := 3 @@ -1224,7 +1235,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1251,31 +1262,31 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. 
- ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Reset the cluster properties. for clusterName := range propertiesByCluster { @@ -1286,6 +1297,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop Context("pick clusters with specific properties (multiple sorters)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numberOfClusters := 3 @@ -1343,7 +1355,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1388,36 +1400,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to 
update status") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with both label selector and property sorter (single preferred term)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numberOfClusters := 4 @@ -1460,7 +1473,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -1492,36 +1505,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. 
- ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with both label selectors and property sorters (multiple preferred terms)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numberOfClusters := 4 @@ -1570,7 +1584,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -1616,31 +1630,635 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. 
- ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) + }) + }) +}) + +var _ = Describe("scheduling RPs of the PickAll placement type using cluster properties", func() { + Context("pick clusters with specific properties (single term, multiple expressions)", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + + wantTargetClusters := []string{ + memberCluster3EastCanary, + } + wantIgnoredClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster6WestProd, + memberCluster7WestCanary, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "4", + }, + }, + { + Name: energyEfficiencyRatingPropertyName, + Operator: placementv1beta1.PropertySelectorLessThan, + Values: []string{ + "45", + }, + }, + { + Name: propertyprovider.AllocatableCPUCapacityProperty, + Operator: placementv1beta1.PropertySelectorNotEqualTo, + Values: []string{ + "14", + }, + }, + { + Name: propertyprovider.AvailableMemoryCapacityProperty, + Operator: placementv1beta1.PropertySelectorGreaterThan, + Values: []string{ + "4Gi", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all matching clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := 
noBindingsCreatedForClustersActual(wantIgnoredClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("pick clusters with specific properties (multiple terms, single expression)", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + + wantTargetClusters := []string{ + memberCluster1EastProd, + memberCluster3EastCanary, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster7WestCanary, + } + wantIgnoredClusters := []string{ + memberCluster2EastProd, + memberCluster6WestProd, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. 
+ policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "8", + }, + }, + }, + }, + }, + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: energyEfficiencyRatingPropertyName, + Operator: placementv1beta1.PropertySelectorGreaterThan, + Values: []string{ + "99", + }, + }, + }, + }, + }, + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.TotalCPUCapacityProperty, + Operator: placementv1beta1.PropertySelectorEqualTo, + Values: []string{ + "12", + }, + }, + }, + }, + }, + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.TotalMemoryCapacityProperty, + Operator: placementv1beta1.PropertySelectorLessThanOrEqualTo, + Values: []string{ + "4Gi", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all matching clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. 
+			ensurePlacementAndAllRelatedResourcesDeletion(rpKey)
+		})
+	})
+
+	Context("pick clusters with both label and property selectors (single term)", Ordered, func() {
+		rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
+		rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName}
+		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1)
+		policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName}
+
+		wantTargetClusters := []string{
+			memberCluster2EastProd,
+			memberCluster3EastCanary,
+		}
+		wantIgnoredClusters := []string{
+			memberCluster1EastProd,
+			memberCluster4CentralProd,
+			memberCluster5CentralProd,
+			memberCluster6WestProd,
+			memberCluster7WestCanary,
+			memberCluster8UnhealthyEastProd,
+			memberCluster9LeftCentralProd,
+		}
+
+		BeforeAll(func() {
+			// Ensure that no bindings have been created so far.
+			noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey)
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+
+			// Create a RP of the PickAll placement type, along with its associated policy snapshot.
+			policy := &placementv1beta1.PlacementPolicy{
+				PlacementType: placementv1beta1.PickAllPlacementType,
+				Affinity: &placementv1beta1.Affinity{
+					ClusterAffinity: &placementv1beta1.ClusterAffinity{
+						RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{
+							ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{
+								{
+									LabelSelector: &metav1.LabelSelector{
+										MatchLabels: map[string]string{
+											regionLabel: "east",
+										},
+									},
+									PropertySelector: &placementv1beta1.PropertySelector{
+										MatchExpressions: []placementv1beta1.PropertySelectorRequirement{
+											{
+												Name:     propertyprovider.NodeCountProperty,
+												Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo,
+												Values: []string{
+													"4",
+												},
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			}
+			createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy)
+		})
+
+		It("should add scheduler cleanup finalizer to the RP", func() {
+			finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey)
+			Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP")
+		})
+
+		It("should create scheduled bindings for all matching clusters", func() {
+			scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, rpKey, policySnapshotName)
+			Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+			Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+		})
+
+		It("should not create any binding for non-matching clusters", func() {
+			noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, rpKey)
+			Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+		})
+
+		It("should report status correctly", func() {
+			statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotKey)
+
Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("pick clusters with both label and property selectors (multiple terms)", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + + wantTargetClusters := []string{ + memberCluster5CentralProd, + memberCluster6WestProd, + } + wantIgnoredClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster3EastCanary, + memberCluster4CentralProd, + memberCluster7WestCanary, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabel: "prod", + regionLabel: "west", + }, + }, + }, + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: energyEfficiencyRatingPropertyName, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "40", + }, + }, + { + Name: propertyprovider.TotalCPUCapacityProperty, + Operator: placementv1beta1.PropertySelectorGreaterThan, + Values: []string{ + "12", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all matching clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, rpKey) + 
Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("property selector updated", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey1 := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName1} + policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 2) + policySnapshotKey2 := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName2} + + // wantScheduledClusters1, wantIgnoredClusters1, and wantBoundClusters1 are + // the clusters picked (bound) and unpicked respectively with the original + // property selector (before the property selector update). + wantScheduledClusters1 := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster3EastCanary, + memberCluster4CentralProd, + memberCluster6WestProd, + } + wantIgnoredClusters1 := []string{ + memberCluster5CentralProd, + memberCluster7WestCanary, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + wantBoundClusters1 := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + } + + // wantScheduledClusters2, wantIgnoredClusters2, and wantBoundClusters2 are + // the clusters picked (bound) and unpicked respectively with the new + // property selector (after the property selector update). + wantScheduledClusters2 := []string{ + memberCluster3EastCanary, + memberCluster5CentralProd, + memberCluster7WestCanary, + } + wantBoundClusters2 := []string{ + memberCluster2EastProd, + } + wantUnscheduledClusters2 := []string{ + memberCluster1EastProd, + memberCluster4CentralProd, + memberCluster6WestProd, + } + wantIgnoredClusters2 := []string{ + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + // wantTargetClusters and wantUnselectedClusters are the clusters picked + // and unpicked respectively after the property selector update. + wantTargetClusters := []string{} + wantTargetClusters = append(wantTargetClusters, wantScheduledClusters2...) + wantTargetClusters = append(wantTargetClusters, wantBoundClusters2...) + wantUnselectedClusters := []string{} + wantUnselectedClusters = append(wantUnselectedClusters, wantUnscheduledClusters2...) + wantUnselectedClusters = append(wantUnselectedClusters, wantIgnoredClusters2...) + + BeforeAll(func() { + // Ensure that no bindings have been created so far. 
+ noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorLessThanOrEqualTo, + Values: []string{ + "6", + }, + }, + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "2", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName1, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all matching clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantScheduledClusters1, zeroScoreByCluster, rpKey, policySnapshotName1) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters1, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantScheduledClusters1, wantIgnoredClusters1, policySnapshotKey1) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + It("can mark some bindings as bound", func() { + markBindingsAsBoundForClusters(rpKey, wantBoundClusters1) + }) + + It("can update the scheduling policy with a new property selector", func() { + affinity := &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: 
propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorLessThanOrEqualTo, + Values: []string{ + "8", + }, + }, + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "4", + }, + }, + }, + }, + }, + }, + }, + }, + } + updatePickAllRPWithNewAffinity(testNamespace, rpName, affinity, policySnapshotName1, policySnapshotName2) + }) + + It("should create/update scheduled bindings for newly matched clusters", func() { + scheduledBindingsCreatedOrUpdatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantScheduledClusters2, zeroScoreByCluster, rpKey, policySnapshotName2) + Eventually(scheduledBindingsCreatedOrUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create/update the expected set of bindings") + Consistently(scheduledBindingsCreatedOrUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create/update the expected set of bindings") + }) + + It("should update bound bindings for newly matched clusters", func() { + boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(wantBoundClusters2, zeroScoreByCluster, rpKey, policySnapshotName2) + Eventually(boundBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + Consistently(boundBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should mark bindings as unscheduled for clusters that were unselected", func() { + unscheduledBindingsUpdatedActual := unscheduledBindingsCreatedOrUpdatedForClustersActual(wantUnscheduledClusters2, zeroScoreByCluster, rpKey, policySnapshotName1) + Eventually(unscheduledBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + Consistently(unscheduledBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantUnselectedClusters, policySnapshotKey2) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. 
+ ensurePlacementAndAllRelatedResourcesDeletion(rpKey) }) }) }) diff --git a/test/scheduler/suite_test.go b/test/scheduler/suite_test.go index 39f995c97..829dfaef9 100644 --- a/test/scheduler/suite_test.go +++ b/test/scheduler/suite_test.go @@ -49,10 +49,10 @@ import ( "go.goms.io/fleet/pkg/scheduler" "go.goms.io/fleet/pkg/scheduler/clustereligibilitychecker" "go.goms.io/fleet/pkg/scheduler/queue" - "go.goms.io/fleet/pkg/scheduler/watchers/clusterresourcebinding" - "go.goms.io/fleet/pkg/scheduler/watchers/clusterresourceplacement" - "go.goms.io/fleet/pkg/scheduler/watchers/clusterschedulingpolicysnapshot" + "go.goms.io/fleet/pkg/scheduler/watchers/binding" "go.goms.io/fleet/pkg/scheduler/watchers/membercluster" + "go.goms.io/fleet/pkg/scheduler/watchers/placement" + "go.goms.io/fleet/pkg/scheduler/watchers/schedulingpolicysnapshot" ) const ( @@ -524,6 +524,14 @@ func setupResources() { for clusterName := range propertiesByCluster { resetClusterPropertiesFor(clusterName) } + + // Create test namespace + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + }, + } + Expect(hubClient.Create(ctx, namespace)).Should(Succeed()) } func beforeSuiteForProcess1() []byte { @@ -575,35 +583,57 @@ func beforeSuiteForProcess1() []byte { ) // Register the watchers. - crpReconciler := clusterresourceplacement.Reconciler{ + crpReconciler := placement.Reconciler{ Client: hubClient, SchedulerWorkQueue: schedulerWorkQueue, } err = crpReconciler.SetupWithManagerForClusterResourcePlacement(ctrlMgr) Expect(err).NotTo(HaveOccurred(), "Failed to set up CRP watcher with controller manager") - policySnapshotWatcher := clusterschedulingpolicysnapshot.Reconciler{ + rpReconciler := placement.Reconciler{ + Client: hubClient, + SchedulerWorkQueue: schedulerWorkQueue, + } + err = rpReconciler.SetupWithManagerForResourcePlacement(ctrlMgr) + Expect(err).NotTo(HaveOccurred(), "Failed to set up RP watcher with controller manager") + + clusterPolicySnapshotWatcher := schedulingpolicysnapshot.Reconciler{ Client: hubClient, SchedulerWorkQueue: schedulerWorkQueue, } - err = policySnapshotWatcher.SetupWithManagerForClusterSchedulingPolicySnapshot(ctrlMgr) + err = clusterPolicySnapshotWatcher.SetupWithManagerForClusterSchedulingPolicySnapshot(ctrlMgr) Expect(err).NotTo(HaveOccurred(), "Failed to set up cluster policy snapshot watcher with controller manager") + policySnapshotWatcher := schedulingpolicysnapshot.Reconciler{ + Client: hubClient, + SchedulerWorkQueue: schedulerWorkQueue, + } + err = policySnapshotWatcher.SetupWithManagerForSchedulingPolicySnapshot(ctrlMgr) + Expect(err).NotTo(HaveOccurred(), "Failed to set up policy snapshot watcher with controller manager") + memberClusterWatcher := membercluster.Reconciler{ Client: hubClient, SchedulerWorkQueue: schedulerWorkQueue, ClusterEligibilityChecker: clusterEligibilityChecker, + EnableResourcePlacement: true, } err = memberClusterWatcher.SetupWithManager(ctrlMgr) Expect(err).NotTo(HaveOccurred(), "Failed to set up member cluster watcher with controller manager") - clusterResourceBindingWatcher := clusterresourcebinding.Reconciler{ + clusterResourceBindingWatcher := binding.Reconciler{ Client: hubClient, SchedulerWorkQueue: schedulerWorkQueue, } err = clusterResourceBindingWatcher.SetupWithManagerForClusterResourceBinding(ctrlMgr) Expect(err).NotTo(HaveOccurred(), "Failed to set up cluster resource binding watcher with controller manager") + resourceBindingWatcher := binding.Reconciler{ + Client: hubClient, + SchedulerWorkQueue: 
schedulerWorkQueue, + } + err = resourceBindingWatcher.SetupWithManagerForResourceBinding(ctrlMgr) + Expect(err).NotTo(HaveOccurred(), "Failed to set up resource binding watcher with controller manager") + // Set up the scheduler. fw := buildSchedulerFramework(ctrlMgr, clusterEligibilityChecker) sched := scheduler.NewScheduler(defaultSchedulerName, fw, schedulerWorkQueue, ctrlMgr, 3) diff --git a/test/scheduler/tainttoleration_integration_test.go b/test/scheduler/tainttoleration_integration_test.go index 7af6a7ba9..13eb26b66 100644 --- a/test/scheduler/tainttoleration_integration_test.go +++ b/test/scheduler/tainttoleration_integration_test.go @@ -46,13 +46,14 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // This is a serial test as adding taints can affect other tests Context("pickFixed, valid target clusters with taints", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) targetClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster6WestProd} taintClusters := targetClusters BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Add taints to some member clusters 1, 4, 6 from all regions. @@ -63,18 +64,18 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for valid target clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters, nilScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters, nilScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should report status correctly", func() { - statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters, []string{}, policySnapshotName) + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters, []string{}, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") }) @@ -83,13 +84,14 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // Remove taints removeTaintsFromMemberClusters(taintClusters) // 
Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) // This is a serial test as adding taints can affect other tests. Context("pick all valid cluster with no taints, ignore valid cluster with taints, CRP with no matching toleration", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) taintClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary} selectedClusters := []string{memberCluster2EastProd, memberCluster3EastCanary, memberCluster5CentralProd, memberCluster6WestProd} @@ -97,7 +99,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Add taints to some member clusters 1, 4, 7 from all regions. @@ -108,24 +110,24 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all healthy clusters with no taints", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for unhealthy clusters, healthy cluster with taints", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), 
"Failed to update status") }) @@ -134,13 +136,14 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // Remove taints removeTaintsFromMemberClusters(taintClusters) // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) // This is a serial test as adding taints can affect other tests. Context("pick all valid cluster with no taints, ignore valid cluster with taints, then remove taints after which CRP selects all clusters", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) taintClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary} selectedClusters1 := []string{memberCluster2EastProd, memberCluster3EastCanary, memberCluster5CentralProd, memberCluster6WestProd} @@ -150,7 +153,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Add taints to some member clusters 1, 4, 7 from all regions. @@ -161,24 +164,24 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all healthy clusters with no taints", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters1, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters1, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for unhealthy clusters, healthy cluster with taints", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters1, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters1, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters1, unSelectedClusters1, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters1, unSelectedClusters1, types.NamespacedName{Name: 
policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) @@ -189,32 +192,33 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should create scheduled bindings for all healthy clusters with no taints", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters2, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters2, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for unhealthy clusters, healthy cluster with taints", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters2, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters2, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters2, unSelectedClusters2, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters2, unSelectedClusters2, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) // This is a serial test as adding taints, tolerations can affect other tests. Context("pick all valid cluster with tolerated taints, ignore valid clusters with taints, CRP has some matching tolerations on creation", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) taintClusters := []string{memberCluster1EastProd, memberCluster2EastProd, memberCluster6WestProd} tolerateClusters := []string{memberCluster1EastProd, memberCluster2EastProd} @@ -223,7 +227,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Add taints to some member clusters 1, 2, 6 from all regions. 
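The hunks above replace bare CRP-name arguments with placement keys of type types.NamespacedName, so the same scheduler-test helpers can address both cluster-scoped ClusterResourcePlacements and namespaced ResourcePlacements. A minimal sketch of the two key shapes, reusing identifiers already defined in this test suite (crpName, rpName, testNamespace); the snippet is illustrative only and not part of the patch:

// Illustrative only: a cluster-scoped placement (CRP) is keyed by name alone,
// while a namespaced placement (RP) also carries the namespace it lives in.
crpKey := types.NamespacedName{Name: crpName}
rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName}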
@@ -261,24 +265,24 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for clusters with tolerated taints", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for clusters with untolerated taints", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) @@ -287,13 +291,14 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // Remove taints removeTaintsFromMemberClusters(taintClusters) // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) // This is a serial test as adding taints, tolerations can affect other tests. Context("pickAll valid cluster without taints, add a taint to a cluster that's already picked", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) selectedClusters := healthyClusters unSelectedClusters := []string{memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} @@ -301,7 +306,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") policy := &placementv1beta1.PlacementPolicy{ @@ -312,24 +317,24 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for valid clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for valid clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) @@ -340,19 +345,19 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should create scheduled bindings for valid clusters without taints, valid clusters with taint", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for valid clusters without taints, valid clusters with taint", func() { - noBindingsCreatedActual := 
noBindingsCreatedForClustersActual(unSelectedClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) @@ -361,13 +366,14 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // Remove taints removeTaintsFromMemberClusters(taintClusters) // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) // This is a serial test as adding taints, tolerations can affect other tests. Context("pick N clusters with affinity specified, ignore valid clusters with taints, CRP has some matching tolerations after update", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) policySnapshotNameAfter := fmt.Sprintf(policySnapshotNameTemplate, crpName, 2) numOfClusters := int32(2) // Less than the number of clusters available (7) in the fleet. @@ -382,7 +388,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Add taints to some member clusters 1, 2. 
@@ -421,24 +427,24 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, []string{}) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, []string{}) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual([]string{}, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual([]string{}, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(2, []string{}, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotName, taintTolerationCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(2, []string{}, []string{}, wantFilteredClusters, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, taintTolerationCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) @@ -449,19 +455,19 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersAfter) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersAfter) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual([]string{}, zeroScoreByCluster, crpName, policySnapshotNameAfter) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual([]string{}, zeroScoreByCluster, crpKey, policySnapshotNameAfter) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled 
bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(2, wantPickedClustersAfter, []string{}, wantFilteredClustersAfter, zeroScoreByCluster, policySnapshotNameAfter, taintTolerationCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(2, wantPickedClustersAfter, []string{}, wantFilteredClustersAfter, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotNameAfter}, taintTolerationCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) @@ -470,20 +476,21 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // Remove taints removeTaintsFromMemberClusters(taintClusters) // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) // This is a serial test as adding a new member cluster may interrupt other test cases. Context("pickAll, add a new healthy cluster with taint", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) // Prepare a new cluster to avoid interrupting other concurrently running test cases. newUnhealthyMemberClusterName := fmt.Sprintf(provisionalClusterNameTemplate, GinkgoParallelProcess()) BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP with no scheduling policy specified, along with its associated policy snapshot, no tolerations specified. @@ -497,14 +504,14 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should create scheduled bindings for existing clusters, and exclude new cluster with taint", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(healthyClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(healthyClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newUnhealthyMemberClusterName) }) @@ -513,6 +520,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // This is a serial test as adding a new member cluster may interrupt other test cases. 
Context("pickAll, add a new healthy cluster with taint and matching toleration", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) // Prepare a new cluster to avoid interrupting other concurrently running test cases. newUnhealthyMemberClusterName := fmt.Sprintf(provisionalClusterNameTemplate, GinkgoParallelProcess()) @@ -526,7 +534,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP with no scheduling policy specified, along with its associated policy snapshot, and toleration for new cluster. @@ -543,14 +551,14 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should create scheduled bindings for the newly recovered cluster with tolerated taint", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newUnhealthyMemberClusterName) }) @@ -559,6 +567,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // This is a serial test as adding a new member cluster may interrupt other test cases. Context("pickN with required topology spread constraints, add new cluster with taint, upscaling doesn't pick new cluster", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) // Prepare a new cluster to avoid interrupting other concurrently running test cases. newClusterName := fmt.Sprintf(provisionalClusterNameTemplate, GinkgoParallelProcess()) @@ -619,7 +628,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot, no tolerations specified. 
@@ -638,24 +647,24 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, taintTolerationCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, taintTolerationCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) @@ -696,26 +705,26 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersAfter) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersAfter) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, scoreByClusterAfter, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, scoreByClusterAfter, crpKey, policySnapshotName) 
Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClusters, scoreByClusterAfter, policySnapshotName, taintTolerationCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClusters, scoreByClusterAfter, types.NamespacedName{Name: policySnapshotName}, taintTolerationCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newClusterName) }) @@ -724,6 +733,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // This is a serial test as adding a new member cluster may interrupt other test cases. Context("pickN with required topology spread constraints, add new cluster with taint, upscaling picks new cluster with tolerated taint", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) // Prepare a new cluster to avoid interrupting other concurrently running test cases. newClusterName := fmt.Sprintf(provisionalClusterNameTemplate, GinkgoParallelProcess()) @@ -784,7 +794,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
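The policy-snapshot status helpers in these hunks likewise now take a types.NamespacedName instead of a snapshot name string. Because controller-runtime's client.ObjectKey is an alias of types.NamespacedName, one key type serves both cluster-scoped snapshots (Namespace left empty) and the namespaced SchedulingPolicySnapshot objects the RP tests create. A hedged sketch of the lookup pattern, reusing ctx and hubClient from the suite; the snippet is illustrative only:

// Illustrative only: the same key shape flows straight into a
// controller-runtime Get; cluster-scoped types simply omit the Namespace.
snapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName}
snapshot := &placementv1beta1.SchedulingPolicySnapshot{}
Expect(hubClient.Get(ctx, snapshotKey, snapshot)).To(Succeed(), "Failed to get policy snapshot")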
@@ -806,24 +816,24 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, taintTolerationCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, taintTolerationCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) @@ -864,28 +874,474 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersAfter) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersAfter) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, scoreByClusterAfter, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, scoreByClusterAfter, crpKey, policySnapshotName) 
Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClusters, scoreByClusterAfter, policySnapshotName, taintTolerationCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClusters, scoreByClusterAfter, types.NamespacedName{Name: policySnapshotName}, taintTolerationCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newClusterName) }) }) }) + +var _ = Describe("scheduling RPs on member clusters with taints & tolerations", func() { + // This is a serial test as adding taints can affect other tests + Context("pickFixed, valid target clusters with taints", Serial, Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + targetClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster6WestProd} + taintClusters := targetClusters + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Add taints to some member clusters 1, 4, 6 from all regions. + addTaintsToMemberClusters(taintClusters, buildTaints(taintClusters)) + + // Create the RP and its associated policy snapshot. 
+ createPickFixedRPWithPolicySnapshot(testNamespace, rpName, targetClusters, policySnapshotName) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for valid target clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters, nilScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters, []string{}, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") + }) + + AfterAll(func() { + // Remove taints + removeTaintsFromMemberClusters(taintClusters) + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + // This is a serial test as adding taints can affect other tests. + Context("pick all valid cluster with no taints, ignore valid cluster with taints, RP with no matching toleration", Serial, Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + taintClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary} + selectedClusters := []string{memberCluster2EastProd, memberCluster3EastCanary, memberCluster5CentralProd, memberCluster6WestProd} + unSelectedClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary, memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Add taints to some member clusters 1, 4, 7 from all regions. + addTaintsToMemberClusters(taintClusters, buildTaints(taintClusters)) + + // Create a RP with no scheduling policy specified, along with its associated policy snapshot, with no tolerations specified. 
+ createNilSchedulingPolicyRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, nil) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all healthy clusters with no taints", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for unhealthy clusters, healthy cluster with taints", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Remove taints + removeTaintsFromMemberClusters(taintClusters) + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + // This is a serial test as adding taints can affect other tests. + Context("pick all valid cluster with no taints, ignore valid cluster with taints, then remove taints after which RP selects all clusters", Serial, Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + taintClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary} + selectedClusters1 := []string{memberCluster2EastProd, memberCluster3EastCanary, memberCluster5CentralProd, memberCluster6WestProd} + unSelectedClusters1 := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary, memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} + selectedClusters2 := []string{memberCluster1EastProd, memberCluster2EastProd, memberCluster3EastCanary, memberCluster4CentralProd, memberCluster5CentralProd, memberCluster6WestProd, memberCluster7WestCanary} + unSelectedClusters2 := []string{memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} + + BeforeAll(func() { + // Ensure that no bindings have been created so far. 
+ noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Add taints to some member clusters 1, 4, 7 from all regions. + addTaintsToMemberClusters(taintClusters, buildTaints(taintClusters)) + + // Create a RP with no scheduling policy specified, along with its associated policy snapshot, with no tolerations specified. + createNilSchedulingPolicyRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, nil) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all healthy clusters with no taints", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters1, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for unhealthy clusters, healthy cluster with taints", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters1, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters1, unSelectedClusters1, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + It("remove taints from member clusters", func() { + // Remove taints + removeTaintsFromMemberClusters(taintClusters) + }) + + It("should create scheduled bindings for all healthy clusters with no taints", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters2, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for unhealthy clusters, healthy cluster with taints", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters2, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should 
report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters2, unSelectedClusters2, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + // This is a serial test as adding taints, tolerations can affect other tests. + Context("pick all valid cluster with tolerated taints, ignore valid clusters with taints, RP has some matching tolerations on creation", Serial, Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + taintClusters := []string{memberCluster1EastProd, memberCluster2EastProd, memberCluster6WestProd} + tolerateClusters := []string{memberCluster1EastProd, memberCluster2EastProd} + selectedClusters := tolerateClusters + unSelectedClusters := []string{memberCluster3EastCanary, memberCluster4CentralProd, memberCluster5CentralProd, memberCluster6WestProd, memberCluster7WestCanary, memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Add taints to some member clusters 1, 2, 6 from all regions. + addTaintsToMemberClusters(taintClusters, buildTaints(taintClusters)) + + // Create a RP with affinity, tolerations for clusters 1,2. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabel: "prod", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"east", "west"}, + }, + }, + }, + }, + }, + }, + }, + }, + Tolerations: buildTolerations(tolerateClusters), + } + // Create RP . 
+ createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for clusters with tolerated taints", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for clusters with untolerated taints", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Remove taints + removeTaintsFromMemberClusters(taintClusters) + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + // This is a serial test as adding taints, tolerations can affect other tests. + Context("pickAll valid cluster without taints, add a taint to a cluster that's already picked", Serial, Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + selectedClusters := healthyClusters + unSelectedClusters := []string{memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} + taintClusters := []string{memberCluster1EastProd, memberCluster2EastProd} + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + } + // Create RP with PickAll, no tolerations specified. 
+ createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for valid clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for valid clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + It("add taint to existing clusters", func() { + // Add taints to some member clusters 1, 2. 
+ addTaintsToMemberClusters(taintClusters, buildTaints(taintClusters)) + }) + + It("should create scheduled bindings for valid clusters without taints, valid clusters with taint", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for valid clusters without taints, valid clusters with taint", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Remove taints + removeTaintsFromMemberClusters(taintClusters) + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + // This is a serial test as adding taints, tolerations can affect other tests. + Context("pick N clusters with affinity specified, ignore valid clusters with taints, RP has some matching tolerations after update", Serial, Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} + policySnapshotNameAfter := fmt.Sprintf(policySnapshotNameTemplate, rpName, 2) + policySnapshotNameAfterKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotNameAfter} + numOfClusters := int32(2) // Less than the number of clusters available (7) in the fleet. + taintClusters := []string{memberCluster1EastProd, memberCluster2EastProd} + tolerateClusters := taintClusters + // The scheduler is designed to produce only deterministic decisions; if there are no + // comparable scores available for selected clusters, the scheduler will rank the clusters + // by their names. + wantFilteredClusters := []string{memberCluster1EastProd, memberCluster2EastProd, memberCluster3EastCanary, memberCluster4CentralProd, memberCluster5CentralProd, memberCluster6WestProd, memberCluster7WestCanary, memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} + wantPickedClustersAfter := taintClusters + wantFilteredClustersAfter := []string{memberCluster3EastCanary, memberCluster4CentralProd, memberCluster5CentralProd, memberCluster6WestProd, memberCluster7WestCanary, memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} + + BeforeAll(func() { + // Ensure that no bindings have been created so far. 
+ noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Add taints to some member clusters 1, 2. + addTaintsToMemberClusters(taintClusters, buildTaints(taintClusters)) + + // Create a RP of the PickN placement type, along with its associated policy snapshot, no tolerations specified. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabel: "east", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + "prod", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickNRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create N bindings", func() { + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, []string{}) + Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") + Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") + }) + + It("should create scheduled bindings for selected clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual([]string{}, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + }) + + It("should report status correctly", func() { + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(2, []string{}, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotKey, taintTolerationCmpOpts) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") + Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") + }) + + It("update RP with new tolerations", func() { + // Update RP with tolerations for clusters 1,2. 
+ updatePickNRPWithTolerations(testNamespace, rpName, buildTolerations(tolerateClusters), policySnapshotName, policySnapshotNameAfter) + }) + + It("should create N bindings", func() { + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, wantPickedClustersAfter) + Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") + Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") + }) + + It("should create scheduled bindings for selected clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual([]string{}, zeroScoreByCluster, rpKey, policySnapshotNameAfter) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + }) + + It("should report status correctly", func() { + rpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(2, wantPickedClustersAfter, []string{}, wantFilteredClustersAfter, zeroScoreByCluster, policySnapshotNameAfterKey, taintTolerationCmpOpts) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") + Consistently(rpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") + }) + + AfterAll(func() { + // Remove taints + removeTaintsFromMemberClusters(taintClusters) + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) +}) diff --git a/test/scheduler/utils_test.go b/test/scheduler/utils_test.go index 3bafcb9a5..439cd37f1 100644 --- a/test/scheduler/utils_test.go +++ b/test/scheduler/utils_test.go @@ -54,12 +54,15 @@ import ( const ( crpNameTemplate = "crp-%d" + rpNameTemplate = "rp-%d" policySnapshotNameTemplate = "%s-policy-snapshot-%d" provisionalClusterNameTemplate = "provisional-cluster-%d" policyHash = "policy-hash" bindingNamePlaceholder = "binding" + + testNamespace = "test-namespace" ) var ( @@ -67,7 +70,7 @@ var ( // by any controller (the scheduler cares only about policy snapshots and manipulates // bindings accordingly), it is safe for all suites to select the same set of resources // (which is not even provisioned in the environment). 
-	defaultResourceSelectors = []placementv1beta1.ClusterResourceSelector{
+	defaultResourceSelectors = []placementv1beta1.ResourceSelectorTerm{
 		{
 			Group: "core",
 			Kind: "Namespace",
@@ -95,8 +98,8 @@ var (
 )
 
 var (
-	lessFuncBinding = func(binding1, binding2 placementv1beta1.ClusterResourceBinding) bool {
-		return binding1.Spec.TargetCluster < binding2.Spec.TargetCluster
+	lessFuncBinding = func(binding1, binding2 placementv1beta1.BindingObj) bool {
+		return binding1.GetBindingSpec().TargetCluster < binding2.GetBindingSpec().TargetCluster
 	}
 	lessFuncClusterDecision = func(decision1, decision2 placementv1beta1.ClusterDecision) bool {
 		return decision1.ClusterName < decision2.ClusterName
@@ -109,10 +112,12 @@ var (
 	ignoreObjectMetaNameField = cmpopts.IgnoreFields(metav1.ObjectMeta{}, "Name")
 	ignoreObjectMetaAnnotationField = cmpopts.IgnoreFields(metav1.ObjectMeta{}, "Annotations")
 	ignoreObjectMetaAutoGeneratedFields = cmpopts.IgnoreFields(metav1.ObjectMeta{}, "UID", "CreationTimestamp", "ResourceVersion", "Generation", "ManagedFields")
-	ignoreResourceBindingTypeMetaField = cmpopts.IgnoreFields(placementv1beta1.ClusterResourceBinding{}, "TypeMeta")
+	ignoreClusterResourceBindingTypeMetaField = cmpopts.IgnoreFields(placementv1beta1.ClusterResourceBinding{}, "TypeMeta")
+	ignoreResourceBindingTypeMetaField = cmpopts.IgnoreFields(placementv1beta1.ResourceBinding{}, "TypeMeta")
 	ignoreConditionTimeReasonAndMessageFields = cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "Reason", "Message")
 
 	ignoreResourceBindingFields = []cmp.Option{
+		ignoreClusterResourceBindingTypeMetaField,
 		ignoreResourceBindingTypeMetaField,
 		ignoreObjectMetaNameField,
 		ignoreObjectMetaAnnotationField,
@@ -313,6 +318,49 @@ func createPickFixedCRPWithPolicySnapshot(crpName string, targetClusters []strin
 	Expect(hubClient.Create(ctx, policySnapshot)).To(Succeed(), "Failed to create policy snapshot")
 }
 
+func createPickFixedRPWithPolicySnapshot(namespace, rpName string, targetClusters []string, policySnapshotName string) {
+	policy := &placementv1beta1.PlacementPolicy{
+		PlacementType: placementv1beta1.PickFixedPlacementType,
+		ClusterNames: targetClusters,
+	}
+
+	// Create the RP.
+	rp := &placementv1beta1.ResourcePlacement{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: rpName,
+			Namespace: namespace,
+			Finalizers: []string{customDeletionBlockerFinalizer},
+		},
+		Spec: placementv1beta1.PlacementSpec{
+			ResourceSelectors: defaultResourceSelectors,
+			Policy: policy,
+		},
+	}
+	Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP")
+
+	rpGeneration := rp.Generation
+
+	// Create the associated policy snapshot.
+	policySnapshot := &placementv1beta1.SchedulingPolicySnapshot{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: policySnapshotName,
+			Namespace: namespace,
+			Labels: map[string]string{
+				placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true),
+				placementv1beta1.PlacementTrackingLabel: rpName,
+			},
+			Annotations: map[string]string{
+				placementv1beta1.CRPGenerationAnnotation: strconv.FormatInt(rpGeneration, 10),
+			},
+		},
+		Spec: placementv1beta1.SchedulingPolicySnapshotSpec{
+			Policy: policy,
+			PolicyHash: []byte(policyHash),
+		},
+	}
+	Expect(hubClient.Create(ctx, policySnapshot)).To(Succeed(), "Failed to create policy snapshot")
+}
+
 func createNilSchedulingPolicyCRPWithPolicySnapshot(crpName string, policySnapshotName string, policy *placementv1beta1.PlacementPolicy) {
 	// Create a CRP with no scheduling policy specified. 
crp := placementv1beta1.ClusterResourcePlacement{ @@ -349,6 +397,44 @@ func createNilSchedulingPolicyCRPWithPolicySnapshot(crpName string, policySnapsh Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot") } +func createNilSchedulingPolicyRPWithPolicySnapshot(namespace, rpName string, policySnapshotName string, policy *placementv1beta1.PlacementPolicy) { + // Create a RP with no scheduling policy specified. + rp := placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: namespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: defaultResourceSelectors, + Policy: policy, + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed(), "Failed to create RP") + + rpGeneration := rp.Generation + + // Create the associated policy snapshot. + policySnapshot := &placementv1beta1.SchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: policySnapshotName, + Namespace: namespace, + Labels: map[string]string{ + placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + placementv1beta1.PlacementTrackingLabel: rpName, + }, + Annotations: map[string]string{ + placementv1beta1.CRPGenerationAnnotation: strconv.FormatInt(rpGeneration, 10), + }, + }, + Spec: placementv1beta1.SchedulingPolicySnapshotSpec{ + Policy: policy, + PolicyHash: []byte(policyHash), + }, + } + Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot") +} + func updatePickFixedCRPWithNewTargetClustersAndRefreshSnapshots(crpName string, targetClusters []string, oldPolicySnapshotName, newPolicySnapshotName string) { // Update the CRP. crp := &placementv1beta1.ClusterResourcePlacement{} @@ -387,76 +473,140 @@ func updatePickFixedCRPWithNewTargetClustersAndRefreshSnapshots(crpName string, Expect(hubClient.Create(ctx, policySnapshot)).To(Succeed(), "Failed to create policy snapshot") } -func markBindingsAsBoundForClusters(crpName string, boundClusters []string) { - bindingList := &placementv1beta1.ClusterResourceBindingList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) - listOptions := &client.ListOptions{LabelSelector: labelSelector} - Expect(hubClient.List(ctx, bindingList, listOptions)).To(Succeed(), "Failed to list bindings") +func updatePickFixedRPWithNewTargetClustersAndRefreshSnapshots(namespace, rpName string, targetClusters []string, oldPolicySnapshotName, newPolicySnapshotName string) { + // Update the RP. + rp := &placementv1beta1.ResourcePlacement{} + Expect(hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: namespace}, rp)).To(Succeed(), "Failed to get RP") + + policy := rp.Spec.Policy.DeepCopy() + policy.ClusterNames = targetClusters + rp.Spec.Policy = policy + Expect(hubClient.Update(ctx, rp)).To(Succeed(), "Failed to update RP") + + rpGeneration := rp.Generation + + // Mark the old policy snapshot as inactive. + policySnapshot := &placementv1beta1.SchedulingPolicySnapshot{} + Expect(hubClient.Get(ctx, types.NamespacedName{Name: oldPolicySnapshotName, Namespace: namespace}, policySnapshot)).To(Succeed(), "Failed to get policy snapshot") + policySnapshot.Labels[placementv1beta1.IsLatestSnapshotLabel] = strconv.FormatBool(false) + Expect(hubClient.Update(ctx, policySnapshot)).To(Succeed(), "Failed to update policy snapshot") + + // Create a new policy snapshot. 
+ policySnapshot = &placementv1beta1.SchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: newPolicySnapshotName, + Namespace: namespace, + Labels: map[string]string{ + placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + placementv1beta1.PlacementTrackingLabel: rpName, + }, + Annotations: map[string]string{ + placementv1beta1.CRPGenerationAnnotation: strconv.FormatInt(rpGeneration, 10), + }, + }, + Spec: placementv1beta1.SchedulingPolicySnapshotSpec{ + Policy: policy, + PolicyHash: []byte(policyHash), + }, + } + Expect(hubClient.Create(ctx, policySnapshot)).To(Succeed(), "Failed to create policy snapshot") +} + +func markBindingsAsBoundForClusters(placementKey types.NamespacedName, boundClusters []string) { + bindingList, err := listBindings(placementKey) + Expect(err).ToNot(HaveOccurred(), "Failed to list bindings") + boundClusterMap := make(map[string]bool) for _, cluster := range boundClusters { boundClusterMap[cluster] = true } - for idx := range bindingList.Items { - binding := bindingList.Items[idx] - if _, ok := boundClusterMap[binding.Spec.TargetCluster]; ok && binding.Spec.State == placementv1beta1.BindingStateScheduled { - binding.Spec.State = placementv1beta1.BindingStateBound - Expect(hubClient.Update(ctx, &binding)).To(Succeed(), "Failed to update binding") + + for _, bindingObj := range bindingList.GetBindingObjs() { + if _, ok := boundClusterMap[bindingObj.GetBindingSpec().TargetCluster]; ok && bindingObj.GetBindingSpec().State == placementv1beta1.BindingStateScheduled { + bindingObj.GetBindingSpec().State = placementv1beta1.BindingStateBound + Expect(hubClient.Update(ctx, bindingObj)).To(Succeed(), "Failed to update binding") } } } -func ensureCRPAndAllRelatedResourcesDeletion(crpName string) { - // Delete the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - }, +func ensurePlacementAndAllRelatedResourcesDeletion(placementKey types.NamespacedName) { + namespace, placementName := placementKey.Namespace, placementKey.Name + // Delete the placement. + var placement placementv1beta1.PlacementObj + if namespace == "" { + // Delete CRP. + placement = &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: placementName, + }, + } + } else { + // Delete RP. + placement = &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: placementName, + Namespace: namespace, + }, + } } - Expect(hubClient.Delete(ctx, crp)).To(Succeed(), "Failed to delete CRP") + Expect(hubClient.Delete(ctx, placement)).To(Succeed(), "Failed to delete placement") // Ensure that all the bindings are deleted. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(placementKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to clear all bindings") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to clear all bindings") // Ensure that the scheduler finalizer is removed. 
- finalizerRemovedActual := crpSchedulerFinalizerRemovedActual(crpName) - Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove scheduler cleanup finalizer from CRP") + finalizerRemovedActual := placementSchedulerFinalizerRemovedActual(placementKey) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove scheduler cleanup finalizer from placement") - // Remove all the other finalizers from the CRP. + // Remove all the other finalizers from the placement. Eventually(func() error { - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + if err := hubClient.Get(ctx, placementKey, placement); err != nil { return err } - crp.Finalizers = []string{} - return hubClient.Update(ctx, crp) - }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove all finalizers from CRP") + placement.SetFinalizers([]string{}) + return hubClient.Update(ctx, placement) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove all finalizers from placement") - // Ensure that the CRP is deleted. + // Ensure that the placement is deleted. Eventually(func() error { - err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, &placementv1beta1.ClusterResourcePlacement{}) + err := hubClient.Get(ctx, placementKey, placement) if errors.IsNotFound(err) { return nil } return err - }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to delete CRP") + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to delete placement") // List all policy snapshots. - policySnapshotList := &placementv1beta1.ClusterSchedulingPolicySnapshotList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) + var policySnapshotList placementv1beta1.PolicySnapshotList + labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: placementName}) listOptions := &client.ListOptions{LabelSelector: labelSelector} + + if namespace == "" { + // List CSPS. + policySnapshotList = &placementv1beta1.ClusterSchedulingPolicySnapshotList{} + } else { + // List SPS. + policySnapshotList = &placementv1beta1.SchedulingPolicySnapshotList{} + listOptions.Namespace = namespace + } Expect(hubClient.List(ctx, policySnapshotList, listOptions)).To(Succeed(), "Failed to list policy snapshots") // Delete all policy snapshots and ensure their deletion. 
- for idx := range policySnapshotList.Items { - policySnapshot := policySnapshotList.Items[idx] - Expect(hubClient.Delete(ctx, &policySnapshot)).To(Succeed(), "Failed to delete policy snapshot") + for _, policySnapshot := range policySnapshotList.GetPolicySnapshotObjs() { + Expect(hubClient.Delete(ctx, policySnapshot)).To(Succeed(), "Failed to delete policy snapshot") Eventually(func() error { - err := hubClient.Get(ctx, types.NamespacedName{Name: policySnapshot.Name}, &placementv1beta1.ClusterSchedulingPolicySnapshot{}) + var ps placementv1beta1.PolicySnapshotObj + if namespace == "" { + ps = &placementv1beta1.ClusterSchedulingPolicySnapshot{} + } else { + ps = &placementv1beta1.SchedulingPolicySnapshot{} + } + err := hubClient.Get(ctx, types.NamespacedName{Name: policySnapshot.GetName(), Namespace: policySnapshot.GetNamespace()}, ps) if errors.IsNotFound(err) { return nil } @@ -522,6 +672,44 @@ func createPickAllCRPWithPolicySnapshot(crpName string, policySnapshotName strin Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot") } +func createPickAllRPWithPolicySnapshot(namespace, rpName, policySnapshotName string, policy *placementv1beta1.PlacementPolicy) { + // Create a RP of the PickAll placement type. + rp := placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: namespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: defaultResourceSelectors, + Policy: policy, + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed(), "Failed to create RP") + + rpGeneration := rp.Generation + + // Create the associated policy snapshot. + policySnapshot := &placementv1beta1.SchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: policySnapshotName, + Namespace: namespace, + Labels: map[string]string{ + placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + placementv1beta1.PlacementTrackingLabel: rpName, + }, + Annotations: map[string]string{ + placementv1beta1.CRPGenerationAnnotation: strconv.FormatInt(rpGeneration, 10), + }, + }, + Spec: placementv1beta1.SchedulingPolicySnapshotSpec{ + Policy: policy, + PolicyHash: []byte(policyHash), + }, + } + Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot") +} + func updatePickAllCRPWithNewAffinity(crpName string, affinity *placementv1beta1.Affinity, oldPolicySnapshotName, newPolicySnapshotName string) { // Update the CRP. crp := &placementv1beta1.ClusterResourcePlacement{} @@ -560,6 +748,45 @@ func updatePickAllCRPWithNewAffinity(crpName string, affinity *placementv1beta1. Expect(hubClient.Create(ctx, policySnapshot)).To(Succeed(), "Failed to create policy snapshot") } +func updatePickAllRPWithNewAffinity(namespace, rpName string, affinity *placementv1beta1.Affinity, oldPolicySnapshotName, newPolicySnapshotName string) { + // Update the RP. + rp := &placementv1beta1.ResourcePlacement{} + Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: rpName}, rp)).To(Succeed(), "Failed to get RP") + + policy := rp.Spec.Policy.DeepCopy() + policy.Affinity = affinity + rp.Spec.Policy = policy + Expect(hubClient.Update(ctx, rp)).To(Succeed(), "Failed to update RP") + + rpGeneration := rp.Generation + + // Mark the old policy snapshot as inactive. 
+	policySnapshot := &placementv1beta1.SchedulingPolicySnapshot{}
+	Expect(hubClient.Get(ctx, types.NamespacedName{Name: oldPolicySnapshotName, Namespace: namespace}, policySnapshot)).To(Succeed(), "Failed to get policy snapshot")
+	policySnapshot.Labels[placementv1beta1.IsLatestSnapshotLabel] = strconv.FormatBool(false)
+	Expect(hubClient.Update(ctx, policySnapshot)).To(Succeed(), "Failed to update policy snapshot")
+
+	// Create a new policy snapshot.
+	policySnapshot = &placementv1beta1.SchedulingPolicySnapshot{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: newPolicySnapshotName,
+			Namespace: namespace,
+			Labels: map[string]string{
+				placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true),
+				placementv1beta1.PlacementTrackingLabel: rpName,
+			},
+			Annotations: map[string]string{
+				placementv1beta1.CRPGenerationAnnotation: strconv.FormatInt(rpGeneration, 10),
+			},
+		},
+		Spec: placementv1beta1.SchedulingPolicySnapshotSpec{
+			Policy: policy,
+			PolicyHash: []byte(policyHash),
+		},
+	}
+	Expect(hubClient.Create(ctx, policySnapshot)).To(Succeed(), "Failed to create policy snapshot")
+}
+
 func createPickNCRPWithPolicySnapshot(crpName string, policySnapshotName string, policy *placementv1beta1.PlacementPolicy) {
 	// Create a CRP of the PickN placement type.
 	crp := placementv1beta1.ClusterResourcePlacement{
@@ -597,6 +824,45 @@ func createPickNCRPWithPolicySnapshot(crpName string, policySnapshotName string,
 	Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot")
 }
 
+func createPickNRPWithPolicySnapshot(namespace, rpName string, policySnapshotName string, policy *placementv1beta1.PlacementPolicy) {
+	// Create a RP of the PickN placement type.
+	rp := placementv1beta1.ResourcePlacement{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: rpName,
+			Namespace: namespace,
+			Finalizers: []string{customDeletionBlockerFinalizer},
+		},
+		Spec: placementv1beta1.PlacementSpec{
+			ResourceSelectors: defaultResourceSelectors,
+			Policy: policy,
+		},
+	}
+	Expect(hubClient.Create(ctx, &rp)).Should(Succeed(), "Failed to create RP")
+
+	rpGeneration := rp.Generation
+
+	// Create the associated policy snapshot. 
+	policySnapshot := &placementv1beta1.SchedulingPolicySnapshot{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: policySnapshotName,
+			Namespace: namespace,
+			Labels: map[string]string{
+				placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true),
+				placementv1beta1.PlacementTrackingLabel: rpName,
+			},
+			Annotations: map[string]string{
+				placementv1beta1.CRPGenerationAnnotation: strconv.FormatInt(rpGeneration, 10),
+				placementv1beta1.NumberOfClustersAnnotation: strconv.FormatInt(int64(*policy.NumberOfClusters), 10),
+			},
+		},
+		Spec: placementv1beta1.SchedulingPolicySnapshotSpec{
+			Policy: policy,
+			PolicyHash: []byte(policyHash),
+		},
+	}
+	Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot")
+}
+
 func updatePickNCRPWithNewAffinityAndTopologySpreadConstraints(
 	crpName string,
 	affinity *placementv1beta1.Affinity,
@@ -681,6 +947,46 @@ func updatePickNCRPWithTolerations(crpName string, tolerations []placementv1beta
 	Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot")
 }
 
+func updatePickNRPWithTolerations(namespace, rpName string, tolerations []placementv1beta1.Toleration, oldPolicySnapshotName, newPolicySnapshotName string) {
+	rp := &placementv1beta1.ResourcePlacement{}
+	Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: rpName}, rp)).To(Succeed(), "Failed to get resource placement")
+
+	policy := rp.Spec.Policy.DeepCopy()
+	policy.Tolerations = tolerations
+	numOfClusters := policy.NumberOfClusters
+	rp.Spec.Policy = policy
+	Expect(hubClient.Update(ctx, rp)).To(Succeed(), "Failed to update resource placement")
+
+	rpGeneration := rp.Generation
+
+	// Mark the old policy snapshot as inactive.
+	policySnapshot := &placementv1beta1.SchedulingPolicySnapshot{}
+	Expect(hubClient.Get(ctx, types.NamespacedName{Name: oldPolicySnapshotName, Namespace: namespace}, policySnapshot)).To(Succeed(), "Failed to get policy snapshot")
+	policySnapshot.Labels[placementv1beta1.IsLatestSnapshotLabel] = strconv.FormatBool(false)
+	Expect(hubClient.Update(ctx, policySnapshot)).To(Succeed(), "Failed to update policy snapshot")
+
+	// Create the associated policy snapshot. 
+ policySnapshot = &placementv1beta1.SchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: newPolicySnapshotName, + Namespace: namespace, + Labels: map[string]string{ + placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + placementv1beta1.PlacementTrackingLabel: rpName, + }, + Annotations: map[string]string{ + placementv1beta1.CRPGenerationAnnotation: strconv.FormatInt(rpGeneration, 10), + placementv1beta1.NumberOfClustersAnnotation: strconv.FormatInt(int64(*numOfClusters), 10), + }, + }, + Spec: placementv1beta1.SchedulingPolicySnapshotSpec{ + Policy: policy, + PolicyHash: []byte(policyHash), + }, + } + Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot") +} + func buildTaints(memberClusterNames []string) []clusterv1beta1.Taint { var labels map[string]string taints := make([]clusterv1beta1.Taint, len(memberClusterNames)) @@ -768,3 +1073,39 @@ func resetClusterPropertiesFor(clusterName string) { return nil }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to reset cluster properties") } + +func listBindings(placementKey types.NamespacedName) (placementv1beta1.BindingObjList, error) { + var bindingList placementv1beta1.BindingObjList + labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: placementKey.Name}) + listOptions := &client.ListOptions{LabelSelector: labelSelector} + + if placementKey.Namespace == "" { + // List ClusterResourceBindings. + bindingList = &placementv1beta1.ClusterResourceBindingList{} + } else { + // List ResourceBindings. + bindingList = &placementv1beta1.ResourceBindingList{} + listOptions.Namespace = placementKey.Namespace + } + + if err := hubClient.List(ctx, bindingList, listOptions); err != nil { + return nil, err + } + return bindingList, nil +} + +func getSchedulingPolicySnapshot(policySnapshotKey types.NamespacedName) (placementv1beta1.PolicySnapshotObj, error) { + // Get the policy snapshot. + var policySnapshot placementv1beta1.PolicySnapshotObj + if policySnapshotKey.Namespace == "" { + // Get ClusterSchedulingPolicySnapshot. + policySnapshot = &placementv1beta1.ClusterSchedulingPolicySnapshot{} + } else { + // Get SchedulingPolicySnapshot. + policySnapshot = &placementv1beta1.SchedulingPolicySnapshot{} + } + if err := hubClient.Get(ctx, types.NamespacedName{Name: policySnapshotKey.Name, Namespace: policySnapshotKey.Namespace}, policySnapshot); err != nil { + return nil, err + } + return policySnapshot, nil +} diff --git a/test/upgrade/before/actuals_test.go b/test/upgrade/before/actuals_test.go index 4f71cab7b..64014eb73 100644 --- a/test/upgrade/before/actuals_test.go +++ b/test/upgrade/before/actuals_test.go @@ -428,7 +428,7 @@ func crpWithOneFailedAvailabilityCheckStatusUpdatedActual( Status: metav1.ConditionFalse, // The new and old applier uses the same reason string to make things // a bit easier. - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(workapplier.AvailabilityResultTypeNotYetAvailable), ObservedGeneration: wantFailedResourceObservedGeneration, }, }, @@ -486,7 +486,7 @@ func crpWithOneFailedApplyOpStatusUpdatedActual( Status: metav1.ConditionFalse, // The new and old applier uses the same reason string to make things // a bit easier. 
- Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToApply), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToApply), ObservedGeneration: wantFailedResourceObservedGeneration, }, }, @@ -578,7 +578,7 @@ func crpWithStuckRolloutDueToOneFailedAvailabilityCheckStatusUpdatedActual( Status: metav1.ConditionFalse, // The new and old applier uses the same reason string to make things // a bit easier. - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(workapplier.AvailabilityResultTypeNotYetAvailable), ObservedGeneration: failedResourceObservedGeneration, }, }, @@ -694,7 +694,7 @@ func crpWithStuckRolloutDueToOneFailedApplyOpStatusUpdatedActual( Status: metav1.ConditionFalse, // The new and old applier uses the same reason string to make things // a bit easier. - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToApply), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToApply), ObservedGeneration: failedResourceObservedGeneration, }, }, diff --git a/test/upgrade/before/resources_test.go b/test/upgrade/before/resources_test.go index 5431d43c5..66ecedeb0 100644 --- a/test/upgrade/before/resources_test.go +++ b/test/upgrade/before/resources_test.go @@ -27,8 +27,8 @@ const ( workNamespaceLabelName = "target-test-spec" ) -func workResourceSelector(workNamespaceName string) []placementv1beta1.ClusterResourceSelector { - return []placementv1beta1.ClusterResourceSelector{ +func workResourceSelector(workNamespaceName string) []placementv1beta1.ResourceSelectorTerm { + return []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", diff --git a/test/utils/informer/manager.go b/test/utils/informer/manager.go index 6cd6d016e..b2a0d85f7 100644 --- a/test/utils/informer/manager.go +++ b/test/utils/informer/manager.go @@ -18,6 +18,10 @@ limitations under the License. package informer import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "k8s.io/client-go/tools/cache" @@ -25,6 +29,88 @@ import ( "go.goms.io/fleet/pkg/utils/informer" ) +// FakeLister is a simple fake lister for testing. +type FakeLister struct { + Objects []runtime.Object + Err error +} + +func (f *FakeLister) List(selector labels.Selector) ([]runtime.Object, error) { + if f.Err != nil { + return nil, f.Err + } + + if selector == nil { + return f.Objects, nil + } + + var filtered []runtime.Object + for _, obj := range f.Objects { + if uObj, ok := obj.(*unstructured.Unstructured); ok { + if selector.Matches(labels.Set(uObj.GetLabels())) { + filtered = append(filtered, obj) + } + } + } + return filtered, nil +} + +func (f *FakeLister) Get(name string) (runtime.Object, error) { + if f.Err != nil { + return nil, f.Err + } + for _, obj := range f.Objects { + if obj.(*unstructured.Unstructured).GetName() == name { + return obj, nil + } + } + return nil, apierrors.NewNotFound(schema.GroupResource{Resource: "test"}, name) +} + +func (f *FakeLister) ByNamespace(namespace string) cache.GenericNamespaceLister { + return &FakeNamespaceLister{Objects: f.Objects, Namespace: namespace, Err: f.Err} +} + +// FakeNamespaceLister implements cache.GenericNamespaceLister. 
+type FakeNamespaceLister struct { + Objects []runtime.Object + Namespace string + Err error +} + +func (f *FakeNamespaceLister) List(selector labels.Selector) ([]runtime.Object, error) { + if f.Err != nil { + return nil, f.Err + } + + var filtered []runtime.Object + for _, obj := range f.Objects { + if uObj, ok := obj.(*unstructured.Unstructured); ok { + // Filter by namespace first + if uObj.GetNamespace() != f.Namespace { + continue + } + // Then filter by label selector if provided + if selector == nil || selector.Matches(labels.Set(uObj.GetLabels())) { + filtered = append(filtered, obj) + } + } + } + return filtered, nil +} + +func (f *FakeNamespaceLister) Get(name string) (runtime.Object, error) { + if f.Err != nil { + return nil, f.Err + } + for _, obj := range f.Objects { + if uObj := obj.(*unstructured.Unstructured); uObj.GetName() == name && uObj.GetNamespace() == f.Namespace { + return obj, nil + } + } + return nil, apierrors.NewNotFound(schema.GroupResource{Resource: "test"}, name) +} + // FakeManager is a fake informer manager. type FakeManager struct { // APIResources map collects all the api resources we watch. @@ -35,6 +121,10 @@ type FakeManager struct { // If false, the map stores all the namespace scoped resource. If the resource is not in the map, it will be treated // as the cluster scoped resource. IsClusterScopedResource bool + // Listers provides fake listers for testing. + Listers map[schema.GroupVersionResource]*FakeLister + // NamespaceScopedResources is the list of namespace-scoped resources for testing. + NamespaceScopedResources []schema.GroupVersionResource } func (m *FakeManager) AddDynamicResources(_ []informer.APIResourceMeta, _ cache.ResourceEventHandler, _ bool) { @@ -44,7 +134,7 @@ func (m *FakeManager) AddStaticResource(_ informer.APIResourceMeta, _ cache.Reso } func (m *FakeManager) IsInformerSynced(_ schema.GroupVersionResource) bool { - return false + return true } func (m *FakeManager) Start() { @@ -53,12 +143,15 @@ func (m *FakeManager) Start() { func (m *FakeManager) Stop() { } -func (m *FakeManager) Lister(_ schema.GroupVersionResource) cache.GenericLister { - return nil +func (m *FakeManager) Lister(gvr schema.GroupVersionResource) cache.GenericLister { + if lister, exists := m.Listers[gvr]; exists { + return lister + } + return &FakeLister{Objects: []runtime.Object{}} } func (m *FakeManager) GetNameSpaceScopedResources() []schema.GroupVersionResource { - return nil + return m.NamespaceScopedResources } func (m *FakeManager) IsClusterScopedResources(gvk schema.GroupVersionKind) bool {
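Below is a minimal, hypothetical sketch (not part of the diff above) of how the new FakeManager listers could be exercised from a unit test. The import path `go.goms.io/fleet/test/utils/informer`, the test package name, the GVR, and the object names are assumptions made for illustration; only the `FakeManager`, `FakeLister`, and `FakeNamespaceLister` fields shown in this diff are relied on.

```go
package example_test

import (
	"testing"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"

	// Assumed import path, derived from the file location test/utils/informer/manager.go.
	testinformer "go.goms.io/fleet/test/utils/informer"
)

// TestFakeListerSketch wires a FakeManager with a canned unstructured object and
// lists it back through the Lister/ByNamespace path, i.e. the same surface that
// production code reaches through the informer manager interface.
func TestFakeListerSketch(t *testing.T) {
	gvr := schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}

	obj := &unstructured.Unstructured{}
	obj.SetName("demo-config")
	obj.SetNamespace("test-namespace")
	obj.SetLabels(map[string]string{"app": "demo"})

	mgr := &testinformer.FakeManager{
		Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{
			gvr: {Objects: []runtime.Object{obj}},
		},
		NamespaceScopedResources: []schema.GroupVersionResource{gvr},
	}

	// ByNamespace returns the FakeNamespaceLister, which filters first by namespace
	// and then by the label selector.
	objs, err := mgr.Lister(gvr).ByNamespace("test-namespace").List(labels.SelectorFromSet(labels.Set{"app": "demo"}))
	if err != nil || len(objs) != 1 {
		t.Fatalf("got %d objects, err = %v; want exactly 1 object and no error", len(objs), err)
	}
}
```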