From 9e0ca9e85ba3d5c3beecb1fa5615591ff673c03c Mon Sep 17 00:00:00 2001 From: Zhiying Lin <54013513+zhiying-lin@users.noreply.github.com> Date: Tue, 12 Aug 2025 10:04:52 +0800 Subject: [PATCH 01/38] test: update unit test to use the test util fake informer (#180) Signed-off-by: Zhiying Lin --- CLAUDE.md | 3 + .../resourcechange_controller_test.go | 313 ++++++------------ test/utils/informer/manager.go | 70 +++- 3 files changed, 176 insertions(+), 210 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 7833c61ec..8d65cd224 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -181,6 +181,9 @@ cmd/memberagent/ # Member agent main and setup ### Test Coding Style - Use `want` or `wanted` instead of `expect` or `expected` when creating the desired state +- Comments that are complete sentences should be capitalized and punctuated like standard English sentences. (As an exception, it is okay to begin a sentence with an uncapitalized identifier name if it is otherwise clear. Such cases are probably best done only at the beginning of a paragraph.) +- Comments that are sentence fragments have no such requirements for punctuation or capitalization. +- Documentation comments should always be complete sentences, and as such should always be capitalized and punctuated. Simple end-of-line comments (especially for struct fields) can be simple phrases that assume the field name is the subject. ## Key Patterns diff --git a/pkg/controllers/resourcechange/resourcechange_controller_test.go b/pkg/controllers/resourcechange/resourcechange_controller_test.go index b1681e205..a5c9abd5a 100644 --- a/pkg/controllers/resourcechange/resourcechange_controller_test.go +++ b/pkg/controllers/resourcechange/resourcechange_controller_test.go @@ -26,15 +26,11 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" appv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/tools/cache" ctrl "sigs.k8s.io/controller-runtime" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" @@ -43,6 +39,7 @@ import ( "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" "github.com/kubefleet-dev/kubefleet/pkg/utils/informer" "github.com/kubefleet-dev/kubefleet/pkg/utils/keys" + testinformer "github.com/kubefleet-dev/kubefleet/test/utils/informer" ) var _ controller.Controller = &fakeController{} @@ -71,104 +68,6 @@ func (w *fakeController) Enqueue(obj interface{}) { w.QueueObj = append(w.QueueObj, obj.(string)) } -// fakeLister is a simple fake lister for testing -type fakeLister struct { - objects []runtime.Object - err error -} - -func (f *fakeLister) List(_ labels.Selector) ([]runtime.Object, error) { - if f.err != nil { - return nil, f.err - } - return f.objects, nil -} - -func (f *fakeLister) Get(name string) (runtime.Object, error) { - if f.err != nil { - return nil, f.err - } - for _, obj := range f.objects { - if obj.(*unstructured.Unstructured).GetName() == name { - return obj, nil - } - } - return nil, apierrors.NewNotFound(schema.GroupResource{Resource: "test"}, name) -} - -func (f *fakeLister) ByNamespace(namespace string) cache.GenericNamespaceLister { - return &fakeNamespaceLister{objects: f.objects, namespace: namespace, err: f.err} -} - -// fakeNamespaceLister implements 
cache.GenericNamespaceLister -type fakeNamespaceLister struct { - objects []runtime.Object - namespace string - err error -} - -func (f *fakeNamespaceLister) List(_ labels.Selector) ([]runtime.Object, error) { - if f.err != nil { - return nil, f.err - } - return f.objects, nil -} - -func (f *fakeNamespaceLister) Get(name string) (runtime.Object, error) { - if f.err != nil { - return nil, f.err - } - for _, obj := range f.objects { - if obj.(*unstructured.Unstructured).GetName() == name { - return obj, nil - } - } - return nil, apierrors.NewNotFound(schema.GroupResource{Resource: "test"}, name) -} - -// fakeInformerManager is a test-specific informer manager -type fakeInformerManager struct { - listers map[schema.GroupVersionResource]*fakeLister -} - -func (f *fakeInformerManager) AddDynamicResources(_ []informer.APIResourceMeta, _ cache.ResourceEventHandler, _ bool) { -} - -func (f *fakeInformerManager) AddStaticResource(_ informer.APIResourceMeta, _ cache.ResourceEventHandler) { -} - -func (f *fakeInformerManager) IsInformerSynced(_ schema.GroupVersionResource) bool { - return true -} - -func (f *fakeInformerManager) Start() { -} - -func (f *fakeInformerManager) Stop() { -} - -func (f *fakeInformerManager) Lister(gvr schema.GroupVersionResource) cache.GenericLister { - if lister, exists := f.listers[gvr]; exists { - return lister - } - return &fakeLister{objects: []runtime.Object{}} -} - -func (f *fakeInformerManager) GetNameSpaceScopedResources() []schema.GroupVersionResource { - return nil -} - -func (f *fakeInformerManager) IsClusterScopedResources(_ schema.GroupVersionKind) bool { - return true -} - -func (f *fakeInformerManager) WaitForCacheSync() { -} - -func (f *fakeInformerManager) GetClient() dynamic.Interface { - return nil -} - func TestFindPlacementsSelectedDeletedResV1Alpha1(t *testing.T) { deletedRes := fleetv1alpha1.ResourceIdentifier{ Group: "abc", @@ -1576,22 +1475,22 @@ func TestHandleUpdatedResource(t *testing.T) { }, } - defaultFakeInformerManager := &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + defaultFakeInformerManager := &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ {Group: "placement.kubernetes-fleet.io", Version: "v1beta1", Resource: "clusterresourceplacements"}: { - objects: func() []runtime.Object { + Objects: func() []runtime.Object { uMap, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(testCRP) return []runtime.Object{&unstructured.Unstructured{Object: uMap}} }(), }, {Group: "placement.kubernetes-fleet.io", Version: "v1beta1", Resource: "resourceplacements"}: { - objects: func() []runtime.Object { + Objects: func() []runtime.Object { uMap, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(testRP) return []runtime.Object{&unstructured.Unstructured{Object: uMap}} }(), }, {Group: "", Version: "v1", Resource: "namespaces"}: { - objects: func() []runtime.Object { + Objects: func() []runtime.Object { uObj, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(testNamespace) return []runtime.Object{&unstructured.Unstructured{Object: uObj}} }(), @@ -1684,16 +1583,16 @@ func TestHandleUpdatedResource(t *testing.T) { }, clusterObj: testDeployment, isClusterScoped: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ {Group: "placement.kubernetes-fleet.io", Version: "v1beta1", Resource: 
"clusterresourceplacements"}: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, {Group: "placement.kubernetes-fleet.io", Version: "v1beta1", Resource: "resourceplacements"}: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, {Group: "", Version: "v1", Resource: "namespaces"}: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, }, }, @@ -1714,19 +1613,19 @@ func TestHandleUpdatedResource(t *testing.T) { }, clusterObj: testDeployment, isClusterScoped: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ {Group: "placement.kubernetes-fleet.io", Version: "v1beta1", Resource: "clusterresourceplacements"}: { - objects: func() []runtime.Object { + Objects: func() []runtime.Object { uMap, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(testCRP) return []runtime.Object{&unstructured.Unstructured{Object: uMap}} }(), }, {Group: "placement.kubernetes-fleet.io", Version: "v1beta1", Resource: "resourceplacements"}: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, {Group: "", Version: "v1", Resource: "namespaces"}: { - objects: func() []runtime.Object { + Objects: func() []runtime.Object { uObj, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(testNamespace) return []runtime.Object{&unstructured.Unstructured{Object: uObj}} }(), @@ -1842,10 +1741,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, }, }, @@ -1862,10 +1761,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, }, }, @@ -1883,10 +1782,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, }, }, @@ -1904,10 +1803,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, }, }, @@ -1924,11 +1823,11 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{}, - err: 
errors.New("test lister error"), + Objects: []runtime.Object{}, + Err: errors.New("test lister error"), }, }, }, @@ -1947,11 +1846,11 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ResourcePlacementGVR: { - objects: []runtime.Object{}, - err: errors.New("test lister error"), + Objects: []runtime.Object{}, + Err: errors.New("test lister error"), }, }, }, @@ -1969,10 +1868,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, }, }, @@ -1990,10 +1889,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, }, }, @@ -2010,10 +1909,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: func() []runtime.Object { + Objects: func() []runtime.Object { // Create multiple CRPs that have selected this namespace testCRP1 := &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ @@ -2089,10 +1988,10 @@ func TestTriggerAffectedPlacementsForDeletedRes(t *testing.T) { }, }, isClusterScope: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ResourcePlacementGVR: { - objects: func() []runtime.Object { + Objects: func() []runtime.Object { // Create multiple ResourcePlacements that have selected this deployment testRP1 := &placementv1beta1.ResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ @@ -2275,13 +2174,13 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { }, }, resource: testNamespace, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, }, }, @@ -2300,13 +2199,13 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { }, }, resource: createDeploymentUnstructured(createTestDeployment()), - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ 
utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, }, }, @@ -2365,13 +2264,13 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { uMap, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(nonMatchingRP) rpObjects := []runtime.Object{&unstructured.Unstructured{Object: uMap}} - return &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + return &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, }, } @@ -2404,13 +2303,13 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { } return createNamespaceUnstructured(otherResource) }(), - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, utils.ResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, }, }, @@ -2451,13 +2350,13 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { } uMap1, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(testClusterResourcePlacement) uMap2, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(testCRP2) - return &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + return &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{&unstructured.Unstructured{Object: uMap1}, &unstructured.Unstructured{Object: uMap2}}, + Objects: []runtime.Object{&unstructured.Unstructured{Object: uMap1}, &unstructured.Unstructured{Object: uMap2}}, }, utils.ResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, }, } @@ -2478,13 +2377,13 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { }, }, resource: testNamespace, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, }, }, @@ -2528,13 +2427,13 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { uMap, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(crpWithNamespaceOnlySelector) crpObjects := []runtime.Object{&unstructured.Unstructured{Object: uMap}} - return &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + return &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, utils.ResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, }, } @@ -2643,10 +2542,10 @@ func TestHandleDeletedResource(t *testing.T) { }, }, isClusterScoped: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ 
+ Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, }, }, @@ -2665,10 +2564,10 @@ func TestHandleDeletedResource(t *testing.T) { }, }, isClusterScoped: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, }, }, @@ -2688,16 +2587,16 @@ func TestHandleDeletedResource(t *testing.T) { }, }, isClusterScoped: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, utils.NamespaceGVR: { - objects: []runtime.Object{testNamespace}, + Objects: []runtime.Object{testNamespace}, }, }, }, @@ -2717,14 +2616,14 @@ func TestHandleDeletedResource(t *testing.T) { }, }, isClusterScoped: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{}, - err: errors.New("CRP lister error"), + Objects: []runtime.Object{}, + Err: errors.New("CRP lister error"), }, utils.NamespaceGVR: { - objects: []runtime.Object{testNamespace}, + Objects: []runtime.Object{testNamespace}, }, }, }, @@ -2744,14 +2643,14 @@ func TestHandleDeletedResource(t *testing.T) { }, }, isClusterScoped: false, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{}, + Objects: []runtime.Object{}, }, utils.ResourcePlacementGVR: { - objects: []runtime.Object{}, - err: errors.New("RP lister error"), + Objects: []runtime.Object{}, + Err: errors.New("RP lister error"), }, }, }, @@ -2770,11 +2669,11 @@ func TestHandleDeletedResource(t *testing.T) { }, }, isClusterScoped: true, - informerManager: &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + informerManager: &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: []runtime.Object{}, - err: errors.New("CRP lister error"), + Objects: []runtime.Object{}, + Err: errors.New("CRP lister error"), }, }, }, @@ -2813,16 +2712,16 @@ func TestHandleDeletedResource(t *testing.T) { } uMap, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(crpWithNamespaceOnlySelector) crpObjects := []runtime.Object{&unstructured.Unstructured{Object: uMap}} - return &fakeInformerManager{ - listers: map[schema.GroupVersionResource]*fakeLister{ + return &testinformer.FakeManager{ + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ utils.ClusterResourcePlacementGVR: { - objects: crpObjects, + Objects: crpObjects, }, utils.ResourcePlacementGVR: { - objects: rpObjects, + Objects: rpObjects, }, utils.NamespaceGVR: { - objects: []runtime.Object{testNamespace}, + Objects: 
[]runtime.Object{testNamespace}, }, }, } diff --git a/test/utils/informer/manager.go b/test/utils/informer/manager.go index 55b49eb70..301357c78 100644 --- a/test/utils/informer/manager.go +++ b/test/utils/informer/manager.go @@ -18,6 +18,10 @@ limitations under the License. package informer import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "k8s.io/client-go/tools/cache" @@ -25,6 +29,61 @@ import ( "github.com/kubefleet-dev/kubefleet/pkg/utils/informer" ) +// FakeLister is a simple fake lister for testing. +type FakeLister struct { + Objects []runtime.Object + Err error +} + +func (f *FakeLister) List(_ labels.Selector) ([]runtime.Object, error) { + if f.Err != nil { + return nil, f.Err + } + return f.Objects, nil +} + +func (f *FakeLister) Get(name string) (runtime.Object, error) { + if f.Err != nil { + return nil, f.Err + } + for _, obj := range f.Objects { + if obj.(*unstructured.Unstructured).GetName() == name { + return obj, nil + } + } + return nil, apierrors.NewNotFound(schema.GroupResource{Resource: "test"}, name) +} + +func (f *FakeLister) ByNamespace(namespace string) cache.GenericNamespaceLister { + return &FakeNamespaceLister{Objects: f.Objects, Namespace: namespace, Err: f.Err} +} + +// FakeNamespaceLister implements cache.GenericNamespaceLister. +type FakeNamespaceLister struct { + Objects []runtime.Object + Namespace string + Err error +} + +func (f *FakeNamespaceLister) List(_ labels.Selector) ([]runtime.Object, error) { + if f.Err != nil { + return nil, f.Err + } + return f.Objects, nil +} + +func (f *FakeNamespaceLister) Get(name string) (runtime.Object, error) { + if f.Err != nil { + return nil, f.Err + } + for _, obj := range f.Objects { + if obj.(*unstructured.Unstructured).GetName() == name { + return obj, nil + } + } + return nil, apierrors.NewNotFound(schema.GroupResource{Resource: "test"}, name) +} + // FakeManager is a fake informer manager. type FakeManager struct { // APIResources map collects all the api resources we watch. @@ -35,6 +94,8 @@ type FakeManager struct { // If false, the map stores all the namespace scoped resource. If the resource is not in the map, it will be treated // as the cluster scoped resource. IsClusterScopedResource bool + // Listers provides fake listers for testing. 
+ Listers map[schema.GroupVersionResource]*FakeLister } func (m *FakeManager) AddDynamicResources(_ []informer.APIResourceMeta, _ cache.ResourceEventHandler, _ bool) { @@ -44,7 +105,7 @@ func (m *FakeManager) AddStaticResource(_ informer.APIResourceMeta, _ cache.Reso } func (m *FakeManager) IsInformerSynced(_ schema.GroupVersionResource) bool { - return false + return true } func (m *FakeManager) Start() { @@ -53,8 +114,11 @@ func (m *FakeManager) Start() { func (m *FakeManager) Stop() { } -func (m *FakeManager) Lister(_ schema.GroupVersionResource) cache.GenericLister { - return nil +func (m *FakeManager) Lister(gvr schema.GroupVersionResource) cache.GenericLister { + if lister, exists := m.Listers[gvr]; exists { + return lister + } + return &FakeLister{Objects: []runtime.Object{}} } func (m *FakeManager) GetNameSpaceScopedResources() []schema.GroupVersionResource { From 8ea90d2dcbce9cf604027d0c5dd69d9d0cf04e53 Mon Sep 17 00:00:00 2001 From: Wantong Date: Mon, 11 Aug 2025 23:45:14 -0700 Subject: [PATCH 02/38] feat: enable scheduler for RP (#171) --- .../placement/v1beta1/policysnapshot_types.go | 2 +- ...es-fleet.io_clusterresourceplacements.yaml | 2 +- ...t.io_clusterresourceplacementstatuses.yaml | 2 +- ...t.io_clusterschedulingpolicysnapshots.yaml | 2 +- ...ubernetes-fleet.io_resourceplacements.yaml | 2 +- ...es-fleet.io_schedulingpolicysnapshots.yaml | 2 +- pkg/scheduler/framework/framework.go | 10 +- pkg/scheduler/framework/frameworkutils.go | 6 +- pkg/scheduler/queue/queue.go | 50 +- pkg/scheduler/scheduler.go | 31 +- pkg/scheduler/scheduler_test.go | 91 +- pkg/scheduler/watchers/membercluster/utils.go | 80 +- .../watchers/membercluster/utils_test.go | 823 ++++++++++++++++-- .../watchers/membercluster/watcher.go | 32 +- pkg/utils/controller/placement_resolver.go | 6 +- test/scheduler/actuals_test.go | 450 ++++++---- test/scheduler/pickall_integration_test.go | 490 ++++++++++- test/scheduler/pickfixed_integration_test.go | 289 +++++- test/scheduler/pickn_integration_test.go | 585 +++++++++++-- ...perty_based_scheduling_integration_test.go | 689 ++++++++++++++- test/scheduler/suite_test.go | 33 +- .../tainttoleration_integration_test.go | 503 ++++++++++- test/scheduler/utils_test.go | 430 ++++++++- 23 files changed, 4073 insertions(+), 537 deletions(-) diff --git a/apis/placement/v1beta1/policysnapshot_types.go b/apis/placement/v1beta1/policysnapshot_types.go index 767e3da29..0d33112c7 100644 --- a/apis/placement/v1beta1/policysnapshot_types.go +++ b/apis/placement/v1beta1/policysnapshot_types.go @@ -153,7 +153,7 @@ type SchedulingPolicySnapshotStatus struct { // +patchMergeKey=type // +patchStrategy=merge - // ObservedCRPGeneration is the generation of the CRP which the scheduler uses to perform + // ObservedCRPGeneration is the generation of the resource placement which the scheduler uses to perform // the scheduling cycle and prepare the scheduling status. // +required ObservedCRPGeneration int64 `json:"observedCRPGeneration"` diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml index 1d1c20c92..b2379cdf9 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml @@ -2105,7 +2105,7 @@ spec: type: string placementStatuses: description: |- - PlacementStatuses contains a list of placement status on the clusters that are selected by PlacementPolicy. 
+ PerClusterPlacementStatuses contains a list of placement status on the clusters that are selected by PlacementPolicy. Each selected cluster according to the observed resource placement is guaranteed to have a corresponding placementStatuses. In the pickN case, there are N placement statuses where N = NumberOfClusters; Or in the pickFixed case, there are N placement statuses where N = ClusterNames. diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml index 20ed0c16a..f881f466c 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml @@ -488,7 +488,7 @@ spec: type: string placementStatuses: description: |- - PlacementStatuses contains a list of placement status on the clusters that are selected by PlacementPolicy. + PerClusterPlacementStatuses contains a list of placement status on the clusters that are selected by PlacementPolicy. Each selected cluster according to the observed resource placement is guaranteed to have a corresponding placementStatuses. In the pickN case, there are N placement statuses where N = NumberOfClusters; Or in the pickFixed case, there are N placement statuses where N = ClusterNames. diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterschedulingpolicysnapshots.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterschedulingpolicysnapshots.yaml index fe9057f48..ffa988280 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterschedulingpolicysnapshots.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterschedulingpolicysnapshots.yaml @@ -1187,7 +1187,7 @@ spec: x-kubernetes-list-type: map observedCRPGeneration: description: |- - ObservedCRPGeneration is the generation of the CRP which the scheduler uses to perform + ObservedCRPGeneration is the generation of the resource placement which the scheduler uses to perform the scheduling cycle and prepare the scheduling status. format: int64 type: integer diff --git a/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml b/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml index 07f092ef7..602567f8f 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml @@ -1036,7 +1036,7 @@ spec: type: string placementStatuses: description: |- - PlacementStatuses contains a list of placement status on the clusters that are selected by PlacementPolicy. + PerClusterPlacementStatuses contains a list of placement status on the clusters that are selected by PlacementPolicy. Each selected cluster according to the observed resource placement is guaranteed to have a corresponding placementStatuses. In the pickN case, there are N placement statuses where N = NumberOfClusters; Or in the pickFixed case, there are N placement statuses where N = ClusterNames. 
diff --git a/config/crd/bases/placement.kubernetes-fleet.io_schedulingpolicysnapshots.yaml b/config/crd/bases/placement.kubernetes-fleet.io_schedulingpolicysnapshots.yaml index 4406774f9..b0b6c5ba1 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_schedulingpolicysnapshots.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_schedulingpolicysnapshots.yaml @@ -571,7 +571,7 @@ spec: x-kubernetes-list-type: map observedCRPGeneration: description: |- - ObservedCRPGeneration is the generation of the CRP which the scheduler uses to perform + ObservedCRPGeneration is the generation of the resource placement which the scheduler uses to perform the scheduling cycle and prepare the scheduling status. format: int64 type: integer diff --git a/pkg/scheduler/framework/framework.go b/pkg/scheduler/framework/framework.go index 1126446fe..847269fd4 100644 --- a/pkg/scheduler/framework/framework.go +++ b/pkg/scheduler/framework/framework.go @@ -93,7 +93,7 @@ type Handle interface { type Framework interface { Handle - // RunSchedulingCycleFor performs scheduling for a cluster resource placement, specifically + // RunSchedulingCycleFor performs scheduling for a resource placement, specifically // its associated latest scheduling policy snapshot. RunSchedulingCycleFor(ctx context.Context, placementKey queue.PlacementKey, policy placementv1beta1.PolicySnapshotObj) (result ctrl.Result, err error) } @@ -242,7 +242,7 @@ func (f *framework) ClusterEligibilityChecker() *clustereligibilitychecker.Clust return f.clusterEligibilityChecker } -// RunSchedulingCycleFor performs scheduling for a cluster resource placement +// RunSchedulingCycleFor performs scheduling for a resource placement // (more specifically, its associated scheduling policy snapshot). func (f *framework) RunSchedulingCycleFor(ctx context.Context, placementKey queue.PlacementKey, policy placementv1beta1.PolicySnapshotObj) (result ctrl.Result, err error) { startTime := time.Now() @@ -313,7 +313,7 @@ func (f *framework) RunSchedulingCycleFor(ctx context.Context, placementKey queu // result so that we won't have a ever increasing chain of flip flop bindings. bound, scheduled, obsolete, unscheduled, dangling, deleting := classifyBindings(policy, bindings, clusters) - // Remove scheduler CRB cleanup finalizer on all deleting bindings. + // Remove scheduler binding cleanup finalizer on all deleting bindings. if err := f.updateBindings(ctx, deleting, removeFinalizerAndUpdate); err != nil { klog.ErrorS(err, "Failed to remove finalizers from deleting bindings", "policySnapshot", policyRef) return ctrl.Result{}, err @@ -379,7 +379,7 @@ var markUnscheduledForAndUpdate = func(ctx context.Context, hubClient client.Cli return err } -// removeFinalizerAndUpdate removes scheduler CRB cleanup finalizer from binding and updates it. +// removeFinalizerAndUpdate removes scheduler binding cleanup finalizer from binding and updates it. var removeFinalizerAndUpdate = func(ctx context.Context, hubClient client.Client, binding placementv1beta1.BindingObj) error { controllerutil.RemoveFinalizer(binding, placementv1beta1.SchedulerBindingCleanupFinalizer) err := hubClient.Update(ctx, binding, &client.UpdateOptions{}) @@ -430,7 +430,7 @@ func (f *framework) runSchedulingCycleForPickAllPlacementType( // The scheduler always needs to take action when processing scheduling policies of the PickAll // placement type; enter the actual scheduling stages right away. 
- klog.V(2).InfoS("Scheduling is always needed for CRPs of the PickAll placement type; entering scheduling stages", "policySnapshot", policyRef) + klog.V(2).InfoS("Scheduling is always needed for placements of the PickAll placement type; entering scheduling stages", "policySnapshot", policyRef) // Run all plugins needed. // diff --git a/pkg/scheduler/framework/frameworkutils.go b/pkg/scheduler/framework/frameworkutils.go index c88458a79..4fbc69ecc 100644 --- a/pkg/scheduler/framework/frameworkutils.go +++ b/pkg/scheduler/framework/frameworkutils.go @@ -67,7 +67,7 @@ func classifyBindings(policy placementv1beta1.PolicySnapshotObj, bindings []plac switch { case !binding.GetDeletionTimestamp().IsZero(): - // we need remove scheduler CRB cleanup finalizer from deleting bindings. + // we need remove scheduler binding cleanup finalizer from deleting bindings. deleting = append(deleting, binding) case bindingSpec.State == placementv1beta1.BindingStateUnscheduled: // we need to remember those bindings so that we will not create another one. @@ -503,7 +503,7 @@ func equalDecisions(current, desired []placementv1beta1.ClusterDecision) bool { // many scheduled or bound bindings it should remove. func shouldDownscale(policy placementv1beta1.PolicySnapshotObj, desired, present, obsolete int) (act bool, count int) { if policy.GetPolicySnapshotSpec().Policy.PlacementType == placementv1beta1.PickNPlacementType && desired <= present { - // Downscale only applies to CRPs of the Pick N placement type; and it only applies when the number of + // Downscale only applies to placements of the Pick N placement type; and it only applies when the number of // clusters requested by the user is less than the number of currently bound + scheduled bindings combined; // or there are the right number of bound + scheduled bindings, yet some obsolete bindings still linger // in the system. @@ -531,7 +531,7 @@ func sortByClusterScoreAndName(bindings []placementv1beta1.BindingObj) (sorted [ switch { case scoreA == nil && scoreB == nil: // Both bindings have no assigned cluster scores; normally this will never happen, - // as for CRPs of the PickN type, the scheduler will always assign cluster scores + // as for placements of the PickN type, the scheduler will always assign cluster scores // to bindings. // // In this case, compare their target cluster names instead. diff --git a/pkg/scheduler/queue/queue.go b/pkg/scheduler/queue/queue.go index cee4da8ca..552fd64fc 100644 --- a/pkg/scheduler/queue/queue.go +++ b/pkg/scheduler/queue/queue.go @@ -38,12 +38,12 @@ type PlacementSchedulingQueueWriter interface { // Add adds a PlacementKey to the work queue. // // Note that this bypasses the rate limiter. - Add(crpKey PlacementKey) + Add(placementKey PlacementKey) // AddRateLimited adds a PlacementKey to the work queue after the rate limiter (if any) // says that it is OK. - AddRateLimited(crpKey PlacementKey) + AddRateLimited(placementKey PlacementKey) // AddAfter adds a PlacementKey to the work queue after a set duration. - AddAfter(crpKey PlacementKey, duration time.Duration) + AddAfter(placementKey PlacementKey, duration time.Duration) } // PlacementSchedulingQueue is an interface which queues PlacementKeys for the scheduler @@ -61,9 +61,9 @@ type PlacementSchedulingQueue interface { // NextPlacementKey returns the next-in-line PlacementKey for the scheduler to consume. NextPlacementKey() (key PlacementKey, closed bool) // Done marks a PlacementKey as done. 
- Done(crpKey PlacementKey) + Done(placementKey PlacementKey) // Forget untracks a PlacementKey from rate limiter(s) (if any) set up with the queue. - Forget(crpKey PlacementKey) + Forget(placementKey PlacementKey) } // simplePlacementSchedulingQueue is a simple implementation of @@ -127,53 +127,53 @@ func (sq *simplePlacementSchedulingQueue) CloseWithDrain() { sq.active.ShutDownWithDrain() } -// NextPlacementKey returns the next ClusterResourcePlacementKey in the work queue for -// the scheduler to process. +// NextPlacementKey returns the next PlacementKey (either clusterResourcePlacementKey or resourcePlacementKey) +// in the work queue for the scheduler to process. // // Note that for now the queue simply wraps a work queue, and consider its state (whether it // is shut down or not) as its own closedness. In the future, when more queues are added, the // queue implementation must manage its own state. func (sq *simplePlacementSchedulingQueue) NextPlacementKey() (key PlacementKey, closed bool) { // This will block on a condition variable if the queue is empty. - crpKey, shutdown := sq.active.Get() + placementKey, shutdown := sq.active.Get() if shutdown { return "", true } - return crpKey.(PlacementKey), false + return placementKey.(PlacementKey), false } -// Done marks a ClusterResourcePlacementKey as done. -func (sq *simplePlacementSchedulingQueue) Done(crpKey PlacementKey) { - sq.active.Done(crpKey) +// Done marks a PlacementKey as done. +func (sq *simplePlacementSchedulingQueue) Done(placementKey PlacementKey) { + sq.active.Done(placementKey) } -// Add adds a ClusterResourcePlacementKey to the work queue. +// Add adds a PlacementKey to the work queue. // // Note that this bypasses the rate limiter (if any). -func (sq *simplePlacementSchedulingQueue) Add(crpKey PlacementKey) { - sq.active.Add(crpKey) +func (sq *simplePlacementSchedulingQueue) Add(placementKey PlacementKey) { + sq.active.Add(placementKey) } -// AddRateLimited adds a ClusterResourcePlacementKey to the work queue after the rate limiter (if any) +// AddRateLimited adds a PlacementKey to the work queue after the rate limiter (if any) // says that it is OK. -func (sq *simplePlacementSchedulingQueue) AddRateLimited(crpKey PlacementKey) { - sq.active.AddRateLimited(crpKey) +func (sq *simplePlacementSchedulingQueue) AddRateLimited(placementKey PlacementKey) { + sq.active.AddRateLimited(placementKey) } -// AddAfter adds a ClusterResourcePlacementKey to the work queue after a set duration. +// AddAfter adds a PlacementKey to the work queue after a set duration. // // Note that this bypasses the rate limiter (if any) -func (sq *simplePlacementSchedulingQueue) AddAfter(crpKey PlacementKey, duration time.Duration) { - sq.active.AddAfter(crpKey, duration) +func (sq *simplePlacementSchedulingQueue) AddAfter(placementKey PlacementKey, duration time.Duration) { + sq.active.AddAfter(placementKey, duration) } -// Forget untracks a ClusterResourcePlacementKey from rate limiter(s) (if any) set up with the queue. -func (sq *simplePlacementSchedulingQueue) Forget(crpKey PlacementKey) { - sq.active.Forget(crpKey) +// Forget untracks a PlacementKey from rate limiter(s) (if any) set up with the queue. +func (sq *simplePlacementSchedulingQueue) Forget(placementKey PlacementKey) { + sq.active.Forget(placementKey) } // NewSimplePlacementSchedulingQueue returns a -// simpleClusterResourcePlacementSchedulingQueue. +// simplePlacementSchedulingQueue. 
func NewSimplePlacementSchedulingQueue(opts ...Option) PlacementSchedulingQueue { options := defaultSimplePlacementSchedulingQueueOptions for _, opt := range opts { diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 84838f72d..5ee068311 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -124,14 +124,13 @@ func (s *Scheduler) scheduleOnce(ctx context.Context, worker int) { defer metrics.SchedulerActiveWorkers.WithLabelValues().Add(-1) startTime := time.Now() - placementRef := klog.KRef("", string(placementKey)) - klog.V(2).InfoS("Schedule once started", "placement", placementRef, "worker", worker) + klog.V(2).InfoS("Schedule once started", "placement", placementKey, "worker", worker) defer func() { // Note that the time spent on pulling keys from the work queue (and the time spent on waiting // for a key to arrive) is not counted here, as we cannot reliably distinguish between // system processing latencies and actual duration of placement absence. latency := time.Since(startTime).Milliseconds() - klog.V(2).InfoS("Schedule once completed", "placement", placementRef, "latency", latency, "worker", worker) + klog.V(2).InfoS("Schedule once completed", "placement", placementKey, "latency", latency, "worker", worker) }() // Retrieve the placement object (either ClusterResourcePlacement or ResourcePlacement). @@ -143,18 +142,18 @@ func (s *Scheduler) scheduleOnce(ctx context.Context, worker int) { // has been marked for deletion but does not have the scheduler cleanup finalizer to // the work queue. Such placements needs no further processing any way though, as the absence // of the cleanup finalizer implies that bindings derived from the placement are no longer present. - klog.ErrorS(err, "placement is already deleted", "placement", placementRef) + klog.ErrorS(err, "placement is already deleted", "placement", placementKey) return } if errors.Is(err, controller.ErrUnexpectedBehavior) { // The placement is in an unexpected state; this is a scheduler-side error, and // Note that this is a scheduler-side error, so it does not return an error to the caller. // Raise an alert for it. - klog.ErrorS(err, "Placement is in an unexpected state", "placement", placementRef) + klog.ErrorS(err, "Placement is in an unexpected state", "placement", placementKey) return } // Wrap the error for metrics; this method does not return an error. - klog.ErrorS(controller.NewAPIServerError(true, err), "Failed to get placement", "placement", placementRef) + klog.ErrorS(controller.NewAPIServerError(true, err), "Failed to get placement", "placement", placementKey) // Requeue for later processing. s.queue.AddRateLimited(placementKey) @@ -163,10 +162,10 @@ func (s *Scheduler) scheduleOnce(ctx context.Context, worker int) { // Check if the placement has been marked for deletion, and if it has the scheduler cleanup finalizer. if placement.GetDeletionTimestamp() != nil { - // Use SchedulerCRPCleanupFinalizer consistently for all placement types + // Use SchedulerCleanupFinalizer consistently for all placement types if controllerutil.ContainsFinalizer(placement, fleetv1beta1.SchedulerCleanupFinalizer) { if err := s.cleanUpAllBindingsFor(ctx, placement); err != nil { - klog.ErrorS(err, "Failed to clean up all bindings for placement", "placement", placementRef) + klog.ErrorS(err, "Failed to clean up all bindings for placement", "placement", placementKey) if errors.Is(err, controller.ErrUnexpectedBehavior) { // The placement is in an unexpected state, don't requeue it. 
return @@ -189,7 +188,7 @@ func (s *Scheduler) scheduleOnce(ctx context.Context, worker int) { // Verify that it has an active policy snapshot. latestPolicySnapshot, err := s.lookupLatestPolicySnapshot(ctx, placement) if err != nil { - klog.ErrorS(err, "Failed to lookup latest policy snapshot", "placement", placementRef) + klog.ErrorS(err, "Failed to lookup latest policy snapshot", "placement", placementKey) // No requeue is needed; the scheduler will be triggered again when an active policy // snapshot is created. @@ -200,7 +199,7 @@ func (s *Scheduler) scheduleOnce(ctx context.Context, worker int) { // Add the scheduler cleanup finalizer to the placement (if it does not have one yet). if err := s.addSchedulerCleanUpFinalizer(ctx, placement); err != nil { - klog.ErrorS(err, "Failed to add scheduler cleanup finalizer", "placement", placementRef) + klog.ErrorS(err, "Failed to add scheduler cleanup finalizer", "placement", placementKey) // Requeue for later processing. s.queue.AddRateLimited(placementKey) return @@ -211,18 +210,18 @@ func (s *Scheduler) scheduleOnce(ctx context.Context, worker int) { // Note that the scheduler will enter this cycle as long as the placement is active and an active // policy snapshot has been produced. cycleStartTime := time.Now() - res, err := s.framework.RunSchedulingCycleFor(ctx, controller.GetObjectKeyFromObj(placement), latestPolicySnapshot) + res, err := s.framework.RunSchedulingCycleFor(ctx, placementKey, latestPolicySnapshot) if err != nil { if errors.Is(err, controller.ErrUnexpectedBehavior) { // The placement is in an unexpected state; this is a scheduler-side error, and // Note that this is a scheduler-side error, so it does not return an error to the caller. // Raise an alert for it. - klog.ErrorS(err, "Placement is in an unexpected state", "placement", placementRef) + klog.ErrorS(err, "Placement is in an unexpected state", "placement", placementKey) observeSchedulingCycleMetrics(cycleStartTime, true, false) return } // Requeue for later processing. - klog.ErrorS(err, "Failed to run scheduling cycle", "placement", placementRef) + klog.ErrorS(err, "Failed to run scheduling cycle", "placement", placementKey) s.queue.AddRateLimited(placementKey) observeSchedulingCycleMetrics(cycleStartTime, true, true) return @@ -310,7 +309,7 @@ func (s *Scheduler) cleanUpAllBindingsFor(ctx context.Context, placement fleetv1 return err } - // Remove scheduler CRB cleanup finalizer from deleting bindings. + // Remove scheduler binding cleanup finalizer from deleting bindings. // // Note that once a placement has been marked for deletion, it will no longer enter the scheduling cycle, // so any cleanup finalizer has to be removed here. @@ -335,7 +334,7 @@ func (s *Scheduler) cleanUpAllBindingsFor(ctx context.Context, placement fleetv1 } // All bindings have been deleted; remove the scheduler cleanup finalizer from the placement. - // Use SchedulerCRPCleanupFinalizer consistently for all placement types. + // Use SchedulerCleanupFinalizer consistently for all placement types. 
controllerutil.RemoveFinalizer(placement, fleetv1beta1.SchedulerCleanupFinalizer) if err := s.client.Update(ctx, placement); err != nil { klog.ErrorS(err, "Failed to remove scheduler cleanup finalizer from placement", "placement", placementRef) @@ -376,7 +375,7 @@ func (s *Scheduler) lookupLatestPolicySnapshot(ctx context.Context, placement fl case len(policySnapshots) == 0: // There is no latest policy snapshot associated with the placement; it could happen when // * the placement is newly created; or - // * the new policy snapshots is in the middle of being replaced. + // * the new policy snapshot is in the middle of being replaced. // // Either way, it is out of the scheduler's scope to handle such a case; the scheduler will // be triggered again if the situation is corrected. diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index 8548c62bd..6b1a28313 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -69,7 +69,7 @@ func TestAddSchedulerCleanUpFinalizer(t *testing.T) { wantFinalizers []string }{ { - name: "cluster-scoped placement should add CRP finalizer", + name: "cluster-scoped placement should add scheduler cleanup finalizer", placement: func() fleetv1beta1.PlacementObj { return &fleetv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ @@ -80,7 +80,7 @@ func TestAddSchedulerCleanUpFinalizer(t *testing.T) { wantFinalizers: []string{fleetv1beta1.SchedulerCleanupFinalizer}, }, { - name: "namespaced placement should also add CRP finalizer", + name: "namespaced placement should also add scheduler cleanup finalizer", placement: func() fleetv1beta1.PlacementObj { return &fleetv1beta1.ResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ @@ -175,7 +175,7 @@ func TestCleanUpAllBindingsFor(t *testing.T) { wantRemainingBindings: []fleetv1beta1.BindingObj{}, }, { - name: "cluster-scoped placement cleanup without bindings but have CRP cleanup finalizer", + name: "cluster-scoped placement cleanup without bindings but have placement cleanup finalizer", placement: func() fleetv1beta1.PlacementObj { return &fleetv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ @@ -328,7 +328,7 @@ func TestCleanUpAllBindingsFor(t *testing.T) { cmpopts.SortSlices(func(b1, b2 fleetv1beta1.BindingObj) bool { return b1.GetName() < b2.GetName() })); diff != "" { - t.Errorf("Remaining bindings diff (+ got, - want): %s", diff) + t.Errorf("Remaining bindings diff (-got, +want): %s", diff) } }) } @@ -469,6 +469,89 @@ func TestLookupLatestPolicySnapshot(t *testing.T) { }, }, }, + { + name: "cluster-scoped placement should not select namespaced policy snapshot with same placement label", + placement: func() fleetv1beta1.PlacementObj { + return &fleetv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + Finalizers: []string{fleetv1beta1.SchedulerCleanupFinalizer}, + }, + } + }, + policySnapshots: []fleetv1beta1.PolicySnapshotObj{ + &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: policySnapshotName, + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: crpName, + fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + }, + }, + }, + &fleetv1beta1.SchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespaced-policy-snapshot", + Namespace: "test-namespace", + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: crpName, + fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + }, + }, + }, + }, + 
wantPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: policySnapshotName, + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: crpName, + fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + }, + }, + }, + }, + { + name: "namespaced placement should not select cluster-scoped policy snapshot with same placement label", + placement: func() fleetv1beta1.PlacementObj { + return &fleetv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rp", + Namespace: "test-namespace", + }, + } + }, + policySnapshots: []fleetv1beta1.PolicySnapshotObj{ + &fleetv1beta1.SchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: policySnapshotName, + Namespace: "test-namespace", + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: "test-rp", + fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + }, + }, + }, + &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-policy-snapshot", + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: "test-rp", + fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + }, + }, + }, + }, + wantPolicySnapshot: &fleetv1beta1.SchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: policySnapshotName, + Namespace: "test-namespace", + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: "test-rp", + fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + }, + }, + }, + }, } for _, tc := range testCases { diff --git a/pkg/scheduler/watchers/membercluster/utils.go b/pkg/scheduler/watchers/membercluster/utils.go index 60335cd37..d7056b65a 100644 --- a/pkg/scheduler/watchers/membercluster/utils.go +++ b/pkg/scheduler/watchers/membercluster/utils.go @@ -18,56 +18,82 @@ package membercluster import ( "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" ) -// isCRPFullyScheduled returns whether a CRP is fully scheduled. -func isCRPFullyScheduled(crp *fleetv1beta1.ClusterResourcePlacement) bool { - // Check the scheduled condition on the CRP to determine if it is fully scheduled. +// isPlacementFullyScheduled returns whether a placement is fully scheduled. +func isPlacementFullyScheduled(placement fleetv1beta1.PlacementObj) bool { + // Check the scheduled condition on the placement to determine if it is fully scheduled. // // Here the controller checks the status rather than listing all the bindings and verify - // if the count matches with the CRP spec as the former approach has less overhead and + // if the count matches with the placement spec as the former approach has less overhead and // (more importantly) avoids leaking scheduler-specific logic into this controller. The - // trade-off is that the controller may consider some fully scheduled CRPs as not fully - // scheduled, if the CRP-side controller(s) cannot update the CRP status in a timely + // trade-off is that the controller may consider some fully scheduled placements as not fully + // scheduled, if the placement-side controller(s) cannot update the placement status in a timely // manner. - scheduledCondition := meta.FindStatusCondition(crp.Status.Conditions, string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType)) - // Check if the CRP is fully scheduled, or its scheduled condition is out of date. 
- return condition.IsConditionStatusTrue(scheduledCondition, crp.Generation) + var scheduledCondition *metav1.Condition + if placement.GetNamespace() == "" { + // Find CRP scheduled condition. + scheduledCondition = meta.FindStatusCondition(placement.GetPlacementStatus().Conditions, string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType)) + } else { + // Find RP scheduled condition. + scheduledCondition = meta.FindStatusCondition(placement.GetPlacementStatus().Conditions, string(fleetv1beta1.ResourcePlacementScheduledConditionType)) + } + // Check if the placement is fully scheduled, or its scheduled condition is out of date. + return condition.IsConditionStatusTrue(scheduledCondition, placement.GetGeneration()) } -// classifyCRPs returns a list of CRPs that are affected by cluster side changes in case 1a) and +// classifyPlacements returns a list of placements that are affected by cluster side changes in case 1a) and // 1b). -func classifyCRPs(crps []fleetv1beta1.ClusterResourcePlacement) (toProcess []fleetv1beta1.ClusterResourcePlacement) { +func classifyPlacements(placements []fleetv1beta1.PlacementObj) (toProcess []fleetv1beta1.PlacementObj) { // Pre-allocate array. - toProcess = make([]fleetv1beta1.ClusterResourcePlacement, 0, len(crps)) + toProcess = make([]fleetv1beta1.PlacementObj, 0, len(placements)) - for idx := range crps { - crp := crps[idx] + for idx := range placements { + placement := placements[idx] switch { - case crp.Spec.Policy == nil: - // CRPs with no placement policy specified are considered to be of the PickAll placement + case placement.GetPlacementSpec().Policy == nil: + // Placements with no placement policy specified are considered to be of the PickAll placement // type and are affected by cluster side changes in case 1a) and 1b). - toProcess = append(toProcess, crp) - case crp.Spec.Policy.PlacementType == fleetv1beta1.PickFixedPlacementType: - if !isCRPFullyScheduled(&crp) { - // Any CRP with an non-empty list of target cluster names can be affected by cluster + toProcess = append(toProcess, placement) + case placement.GetPlacementSpec().Policy.PlacementType == fleetv1beta1.PickFixedPlacementType: + if !isPlacementFullyScheduled(placement) { + // Any Placement with an non-empty list of target cluster names can be affected by cluster // side changes in case 1b), if it is not yet fully scheduled. - toProcess = append(toProcess, crp) + toProcess = append(toProcess, placement) } - case crp.Spec.Policy.PlacementType == fleetv1beta1.PickAllPlacementType: - // CRPs of the PickAll placement type are affected by cluster side changes in case 1a) + case placement.GetPlacementSpec().Policy.PlacementType == fleetv1beta1.PickAllPlacementType: + // Placements of the PickAll placement type are affected by cluster side changes in case 1a) // and 1b). - toProcess = append(toProcess, crp) - case !isCRPFullyScheduled(&crp): - // CRPs of the PickN placement type, which have not been fully scheduled, are affected + toProcess = append(toProcess, placement) + case !isPlacementFullyScheduled(placement): + // Placements of the PickN placement type, which have not been fully scheduled, are affected // by cluster side changes in case 1a) and 1b) listed in the Reconcile func. - toProcess = append(toProcess, crp) + toProcess = append(toProcess, placement) } } return toProcess } + +// convertCRPArrayToPlacementObjs converts a slice of ClusterResourcePlacement items to PlacementObj array. 
+func convertCRPArrayToPlacementObjs(crps []fleetv1beta1.ClusterResourcePlacement) []fleetv1beta1.PlacementObj { + placements := make([]fleetv1beta1.PlacementObj, len(crps)) + for i := range crps { + placements[i] = &crps[i] + } + return placements +} + +// convertRPArrayToPlacementObjs converts a slice of ResourcePlacement items to PlacementObj array. +func convertRPArrayToPlacementObjs(rps []fleetv1beta1.ResourcePlacement) []fleetv1beta1.PlacementObj { + placements := make([]fleetv1beta1.PlacementObj, len(rps)) + for i := range rps { + placements[i] = &rps[i] + } + return placements +} diff --git a/pkg/scheduler/watchers/membercluster/utils_test.go b/pkg/scheduler/watchers/membercluster/utils_test.go index 1e297ee7f..6c11cb3e7 100644 --- a/pkg/scheduler/watchers/membercluster/utils_test.go +++ b/pkg/scheduler/watchers/membercluster/utils_test.go @@ -26,31 +26,39 @@ import ( ) const ( - crpName = "test-crp" - clusterName1 = "bravelion" - clusterName2 = "jumpingcat" - crpName1 = "crp-1" - crpName2 = "crp-2" - crpName3 = "crp-3" - crpName4 = "crp-4" - crpName5 = "crp-5" - crpName6 = "crp-6" + crpName = "test-crp" + rpName = "test-rp" + clusterName1 = "bravelion" + clusterName2 = "jumpingcat" + crpName1 = "crp-1" + crpName2 = "crp-2" + crpName3 = "crp-3" + crpName4 = "crp-4" + crpName5 = "crp-5" + crpName6 = "crp-6" + rpName1 = "rp-1" + rpName2 = "rp-2" + rpName3 = "rp-3" + rpName4 = "rp-4" + rpName5 = "rp-5" + rpName6 = "rp-6" + testNamespace = "test-namespace" ) var ( numOfClusters = int32(10) ) -// TestIsCRPFullyScheduled tests the isCRPFullyScheduled function. -func TestIsPickNCRPFullyScheduled(t *testing.T) { +// TestIsPlacementFullyScheduled tests the isPlacementFullyScheduled function. +func TestIsPlacementFullyScheduled(t *testing.T) { testCases := []struct { - name string - crp *placementv1beta1.ClusterResourcePlacement - want bool + name string + placement placementv1beta1.PlacementObj + want bool }{ { - name: "no scheduled condition", - crp: &placementv1beta1.ClusterResourcePlacement{ + name: "no scheduled condition in CRP", + placement: &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -66,8 +74,8 @@ func TestIsPickNCRPFullyScheduled(t *testing.T) { }, }, { - name: "scheduled condition is false", - crp: &placementv1beta1.ClusterResourcePlacement{ + name: "scheduled condition is false in CRP", + placement: &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -88,8 +96,8 @@ func TestIsPickNCRPFullyScheduled(t *testing.T) { }, }, { - name: "scheduled condition is true, observed generation is out of date", - crp: &placementv1beta1.ClusterResourcePlacement{ + name: "scheduled condition is true, observed generation is out of date in CRP", + placement: &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, Generation: 1, @@ -112,8 +120,8 @@ func TestIsPickNCRPFullyScheduled(t *testing.T) { }, }, { - name: "fully scheduled", - crp: &placementv1beta1.ClusterResourcePlacement{ + name: "resourcePlacementScheduled condition is true in CRP (should not happen)", + placement: &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, Generation: 1, @@ -124,6 +132,122 @@ func TestIsPickNCRPFullyScheduled(t *testing.T) { NumberOfClusters: &numOfClusters, }, }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, 
+ ObservedGeneration: 1, + }, + }, + }, + }, + }, + { + name: "fully scheduled CRP", + placement: &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + want: true, + }, + { + name: "no scheduled condition in RP", + placement: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{}, + }, + }, + }, + { + name: "scheduled condition is false in RP", + placement: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionFalse, + }, + }, + }, + }, + }, + { + name: "scheduled condition is true, observed generation is out of date in RP", + placement: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 0, + }, + }, + }, + }, + }, + { + name: "clusterResourcePlacementScheduled condition is true in RP (should not happen)", + placement: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, Status: placementv1beta1.PlacementStatus{ Conditions: []metav1.Condition{ { @@ -134,38 +258,63 @@ func TestIsPickNCRPFullyScheduled(t *testing.T) { }, }, }, + }, + { + name: "fully scheduled RP", + placement: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, want: true, 
}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - scheduled := isCRPFullyScheduled(tc.crp) + scheduled := isPlacementFullyScheduled(tc.placement) if scheduled != tc.want { - t.Errorf("isPickNCRPFullyScheduled() = %v, want %v", scheduled, tc.want) + t.Errorf("isPlacementFullyScheduled() = %v, want %v", scheduled, tc.want) } }) } } -// TestClassifyCRPs tests the classifyCRPs function. -func TestClassifyCRPs(t *testing.T) { +// TestClassifyPlacements tests the classifyPlacements function. +func TestClassifyPlacements(t *testing.T) { testCases := []struct { - name string - crps []placementv1beta1.ClusterResourcePlacement - want []placementv1beta1.ClusterResourcePlacement + name string + placements []placementv1beta1.PlacementObj + want []placementv1beta1.PlacementObj }{ { name: "single crp, no policy", - crps: []placementv1beta1.ClusterResourcePlacement{ - { + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, }, }, - want: []placementv1beta1.ClusterResourcePlacement{ - { + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -174,8 +323,8 @@ func TestClassifyCRPs(t *testing.T) { }, { name: "single crp, fixed list of clusters, not fully scheduled", - crps: []placementv1beta1.ClusterResourcePlacement{ - { + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -186,8 +335,8 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - want: []placementv1beta1.ClusterResourcePlacement{ - { + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -201,8 +350,8 @@ func TestClassifyCRPs(t *testing.T) { }, { name: "single crp, fixed list of clusters, fully scheduled", - crps: []placementv1beta1.ClusterResourcePlacement{ - { + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, Generation: 1, @@ -223,12 +372,12 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - want: []placementv1beta1.ClusterResourcePlacement{}, + want: []placementv1beta1.PlacementObj{}, }, { name: "single crp, pick all placement type", - crps: []placementv1beta1.ClusterResourcePlacement{ - { + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -239,8 +388,8 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - want: []placementv1beta1.ClusterResourcePlacement{ - { + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -254,8 +403,8 @@ func TestClassifyCRPs(t *testing.T) { }, { name: "single crp, pick N placement type, not fully scheduled", - crps: []placementv1beta1.ClusterResourcePlacement{ - { + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -267,8 +416,8 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - want: []placementv1beta1.ClusterResourcePlacement{ - { + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, @@ -283,8 +432,8 @@ func TestClassifyCRPs(t *testing.T) { }, { name: "single crp, pick N placement type, fully scheduled", - crps: 
[]placementv1beta1.ClusterResourcePlacement{ - { + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, Generation: 1, @@ -306,12 +455,12 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - want: []placementv1beta1.ClusterResourcePlacement{}, + want: []placementv1beta1.PlacementObj{}, }, { - name: "mixed", - crps: []placementv1beta1.ClusterResourcePlacement{ - { + name: "mixed crps", + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName2, }, @@ -321,12 +470,12 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - { + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName1, }, }, - { + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName5, Generation: 1, @@ -347,7 +496,7 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - { + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName4, }, @@ -357,7 +506,7 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - { + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName3, }, @@ -369,8 +518,8 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - want: []placementv1beta1.ClusterResourcePlacement{ - { + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName2, }, @@ -380,12 +529,12 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - { + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName1, }, }, - { + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName4, }, @@ -395,7 +544,7 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - { + &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName3, }, @@ -408,13 +557,547 @@ func TestClassifyCRPs(t *testing.T) { }, }, }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - toProcess := classifyCRPs(tc.crps) - if diff := cmp.Diff(toProcess, tc.want); diff != "" { - t.Errorf("classifyCRPs() toProcess (-got, +want): %s", diff) + { + name: "single rp, no policy", + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + }, + }, + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + }, + }, + }, + { + name: "single rp, fixed list of clusters, not fully scheduled", + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1}, + }, + }, + }, + }, + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1}, + }, + }, + }, + }, + }, + { + name: "single rp, fixed list of clusters, fully scheduled", + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + 
Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1}, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + }, + want: []placementv1beta1.PlacementObj{}, + }, + { + name: "single rp, pick all placement type, fully scheduled", + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + }, + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + }, + }, + { + name: "single rp, pick N placement type, not fully scheduled", + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + }, + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + }, + }, + { + name: "single rp, pick N placement type, fully scheduled", + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + }, + want: []placementv1beta1.PlacementObj{}, + }, + { + name: "mixed rps", + placements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName2, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1}, + }, + }, + }, + &placementv1beta1.ResourcePlacement{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: rpName1, + Namespace: testNamespace, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName5, + Namespace: testNamespace, + Generation: 1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + Status: placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName4, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName3, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + }, + want: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName2, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1}, + }, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName1, + Namespace: testNamespace, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName4, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName3, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + toProcess := classifyPlacements(tc.placements) + if diff := cmp.Diff(toProcess, tc.want); diff != "" { + t.Errorf("classifyPlacements() toProcess (-got, +want): %s", diff) + } + }) + } +} + +// TestConvertCRPArrayToPlacementObjs tests the convertCRPArrayToPlacementObjs function. 
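+// The table-driven cases below cover empty, single-item, and multi-item CRP slices.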
+func TestConvertCRPArrayToPlacementObjs(t *testing.T) { + testCases := []struct { + name string + crps []placementv1beta1.ClusterResourcePlacement + wantPlacements []placementv1beta1.PlacementObj + }{ + { + name: "empty array", + crps: []placementv1beta1.ClusterResourcePlacement{}, + wantPlacements: []placementv1beta1.PlacementObj{}, + }, + { + name: "single crp", + crps: []placementv1beta1.ClusterResourcePlacement{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + }, + wantPlacements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + }, + }, + { + name: "multiple crps", + crps: []placementv1beta1.ClusterResourcePlacement{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName2, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName3, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1, clusterName2}, + }, + }, + }, + }, + wantPlacements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName2, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName3, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1, clusterName2}, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + placements := convertCRPArrayToPlacementObjs(tc.crps) + + if diff := cmp.Diff(placements, tc.wantPlacements); diff != "" { + t.Errorf("ConvertCRPArrayToPlacementObjs() diff (-got +want):\n%s", diff) + } + }) + } +} + +// TestConvertRPArrayToPlacementObjs tests the convertRPArrayToPlacementObjs function. 
+func TestConvertRPArrayToPlacementObjs(t *testing.T) { + testCases := []struct { + name string + rps []placementv1beta1.ResourcePlacement + wantPlacements []placementv1beta1.PlacementObj + }{ + { + name: "empty array", + rps: []placementv1beta1.ResourcePlacement{}, + wantPlacements: []placementv1beta1.PlacementObj{}, + }, + { + name: "single rp", + rps: []placementv1beta1.ResourcePlacement{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + }, + wantPlacements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + }, + }, + { + name: "multiple rps", + rps: []placementv1beta1.ResourcePlacement{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName2, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: crpName3, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1, clusterName2}, + }, + }, + }, + }, + wantPlacements: []placementv1beta1.PlacementObj{ + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName2, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + }, + }, + }, + &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName3, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + ClusterNames: []string{clusterName1, clusterName2}, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + placements := convertRPArrayToPlacementObjs(tc.rps) + + if diff := cmp.Diff(placements, tc.wantPlacements); diff != "" { + t.Errorf("ConvertRPArrayToPlacementObjs() diff (-got +want):\n%s", diff) } }) } diff --git a/pkg/scheduler/watchers/membercluster/watcher.go b/pkg/scheduler/watchers/membercluster/watcher.go index b401601c7..01d3742bb 100644 --- a/pkg/scheduler/watchers/membercluster/watcher.go +++ b/pkg/scheduler/watchers/membercluster/watcher.go @@ -132,35 +132,41 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // Do nothing if there is no error returned. } - // List all CRPs. + // List all placements. // - // Note that this controller reads CRPs from the same cache as the scheduler. + // Note that this controller reads placements from the same cache as the scheduler. 
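+	//
+	// Both cluster-scoped ClusterResourcePlacements and namespaced ResourcePlacements are listed,
+	// and are then handled below through the shared PlacementObj interface.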
 	crpList := &placementv1beta1.ClusterResourcePlacementList{}
 	if err := r.Client.List(ctx, crpList); err != nil {
 		klog.ErrorS(err, "Failed to list CRPs", "memberCluster", memberClusterRef)
 		return ctrl.Result{}, controller.NewAPIServerError(true, err)
 	}
+	rpList := &placementv1beta1.ResourcePlacementList{}
+	// An empty namespace is provided to list RPs across all namespaces.
+	if err := r.Client.List(ctx, rpList, client.InNamespace("")); err != nil {
+		klog.ErrorS(err, "Failed to list RPs", "memberCluster", memberClusterRef)
+		return ctrl.Result{}, controller.NewAPIServerError(true, err)
+	}
 
-	crps := crpList.Items
+	placements := append(convertCRPArrayToPlacementObjs(crpList.Items), convertRPArrayToPlacementObjs(rpList.Items)...)
 	if !isMemberClusterMissing && memberCluster.GetDeletionTimestamp().IsZero() {
 		// If the member cluster is set to the left state, the scheduler needs to process all
-		// CRPs (case 2c)); otherwise, only CRPs of the PickAll type + CRPs of the PickN type,
+		// placements (case 2c)); otherwise, only placements of the PickAll type + placements of the PickN type,
 		// which have not been fully scheduled, need to be processed (case 1a) and 1b)).
-		crps = classifyCRPs(crpList.Items)
+		placements = classifyPlacements(placements)
 	}
 
-	// Enqueue the CRPs.
+	// Enqueue the placements.
 	//
-	// Note that all the CRPs in the system are enqueued; technically speaking, for situation
-	// 1a), 1b) and 1c), PickN CRPs that have been fully scheduled needs no further processing, however,
+	// Note that all the placements in the system are enqueued; technically speaking, for situation
+	// 1a), 1b) and 1c), PickN placements that have been fully scheduled need no further processing; however,
 	// for simplicity reasons, this controller will not distinguish between the cases.
-	for idx := range crps {
-		crp := &crps[idx]
+	for idx := range placements {
+		placement := placements[idx]
 		klog.V(2).InfoS(
-			"Enqueueing CRP for scheduler processing",
+			"Enqueueing placement for scheduler processing",
 			"memberCluster", memberClusterRef,
-			"clusterResourcePlacement", klog.KObj(crp))
-		r.SchedulerWorkQueue.Add(queue.PlacementKey(crp.Name))
+			"placement", klog.KObj(placement))
+		r.SchedulerWorkQueue.Add(controller.GetObjectKeyFromObj(placement))
 	}
 
 	// The reconciliation loop completes.
diff --git a/pkg/utils/controller/placement_resolver.go b/pkg/utils/controller/placement_resolver.go
index f6db05477..c789ae6e6 100644
--- a/pkg/utils/controller/placement_resolver.go
+++ b/pkg/utils/controller/placement_resolver.go
@@ -96,7 +96,11 @@ func GetObjectKeyFromNamespaceName(namespace, name string) string {
 // ExtractNamespaceNameFromKey resolves a PlacementKey to a (namespace, name) tuple of the placement object.
 func ExtractNamespaceNameFromKey(key queue.PlacementKey) (string, string, error) {
-	keyStr := string(key)
+	return ExtractNamespaceNameFromKeyStr(string(key))
+}
+
+// ExtractNamespaceNameFromKeyStr resolves a PlacementKey string to a (namespace, name) tuple of the placement object.
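+//
+// For illustration, a key that contains the namespace separator (for example, one written by
+// GetObjectKeyFromNamespaceName for a namespaced ResourcePlacement) resolves to that namespace and name,
+// while a key without the separator resolves to an empty namespace with the key itself as the name.
+// A minimal sketch of the intended round-trip, assuming GetObjectKeyFromNamespaceName joins the two
+// parts with this package's namespaceSeparator:
+//
+//	key := GetObjectKeyFromNamespaceName("test-namespace", "test-rp")
+//	ns, name, err := ExtractNamespaceNameFromKeyStr(key) // expected: "test-namespace", "test-rp", nil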
+func ExtractNamespaceNameFromKeyStr(keyStr string) (string, string, error) { // Check if the key contains a namespace separator if strings.Contains(keyStr, namespaceSeparator) { // This is a namespaced ResourcePlacement diff --git a/test/scheduler/actuals_test.go b/test/scheduler/actuals_test.go index 37210c24f..bb7db1c69 100644 --- a/test/scheduler/actuals_test.go +++ b/test/scheduler/actuals_test.go @@ -25,28 +25,24 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" ) // This file features common actuals (and utilities for generating actuals) in the test suites. -func noBindingsCreatedForCRPActual(crpName string) func() error { +func noBindingsCreatedForPlacementActual(placementKey string) func() error { return func() error { - // List all bindings associated with the given CRP. - bindingList := &placementv1beta1.ClusterResourceBindingList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) - listOptions := &client.ListOptions{LabelSelector: labelSelector} - if err := hubClient.List(ctx, bindingList, listOptions); err != nil { - return err + bindingList, err := listBindings(placementKey) + if err != nil { + return fmt.Errorf("failed to list bindings for placement %s: %w", placementKey, err) } // Check that the returned list is empty. - if bindingCount := len(bindingList.Items); bindingCount != 0 { + if bindingCount := len(bindingList.GetBindingObjs()); bindingCount != 0 { return fmt.Errorf("%d bindings have been created unexpectedly", bindingCount) } @@ -54,16 +50,28 @@ func noBindingsCreatedForCRPActual(crpName string) func() error { } } -func crpSchedulerFinalizerAddedActual(crpName string) func() error { +func placementSchedulerFinalizerAddedActual(placementKey string) func() error { return func() error { - // Retrieve the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + namespace, placementName, err := controller.ExtractNamespaceNameFromKeyStr(placementKey) + if err != nil { + return fmt.Errorf("failed to extract namespace and name from placement key %s: %w", placementKey, err) + } + + // Retrieve the placement. + var placement placementv1beta1.PlacementObj + if namespace == "" { + // Retrieve CRP. + placement = &placementv1beta1.ClusterResourcePlacement{} + } else { + // Retrieve RP. + placement = &placementv1beta1.ResourcePlacement{} + } + if err := hubClient.Get(ctx, types.NamespacedName{Name: placementName, Namespace: namespace}, placement); err != nil { return err } // Check that the scheduler finalizer has been added. 
- if !controllerutil.ContainsFinalizer(crp, placementv1beta1.SchedulerCleanupFinalizer) { + if !controllerutil.ContainsFinalizer(placement, placementv1beta1.SchedulerCleanupFinalizer) { return fmt.Errorf("scheduler cleanup finalizer has not been added") } @@ -71,16 +79,28 @@ func crpSchedulerFinalizerAddedActual(crpName string) func() error { } } -func crpSchedulerFinalizerRemovedActual(crpName string) func() error { +func placementSchedulerFinalizerRemovedActual(placementKey string) func() error { return func() error { - // Retrieve the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + namespace, placementName, err := controller.ExtractNamespaceNameFromKeyStr(placementKey) + if err != nil { + return fmt.Errorf("failed to extract namespace and name from placement key %s: %w", placementKey, err) + } + + // Retrieve the placement. + var placement placementv1beta1.PlacementObj + if namespace == "" { + // Retrieve CRP. + placement = &placementv1beta1.ClusterResourcePlacement{} + } else { + // Retrieve RP. + placement = &placementv1beta1.ResourcePlacement{} + } + if err := hubClient.Get(ctx, types.NamespacedName{Name: placementName, Namespace: namespace}, placement); err != nil { return err } - // Check that the scheduler finalizer has been added. - if controllerutil.ContainsFinalizer(crp, placementv1beta1.SchedulerCleanupFinalizer) { + // Check that the scheduler finalizer has been removed. + if controllerutil.ContainsFinalizer(placement, placementv1beta1.SchedulerCleanupFinalizer) { return fmt.Errorf("scheduler cleanup finalizer is still present") } @@ -88,50 +108,77 @@ func crpSchedulerFinalizerRemovedActual(crpName string) func() error { } } -func scheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, crpName, policySnapshotName string) func() error { +func scheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, placementKey, policySnapshotName string) func() error { return func() error { - // List all bindings. - bindingList := &placementv1beta1.ClusterResourceBindingList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) - listOptions := &client.ListOptions{LabelSelector: labelSelector} - if err := hubClient.List(ctx, bindingList, listOptions); err != nil { - return err + namespace, placementName, err := controller.ExtractNamespaceNameFromKeyStr(placementKey) + if err != nil { + return fmt.Errorf("failed to extract namespace and name from placement key %s: %w", placementKey, err) } + bindingList, err := listBindings(placementKey) + if err != nil { + return fmt.Errorf("failed to list bindings for placement %s: %w", placementKey, err) + } // Find all the scheduled bindings. 
- scheduled := []placementv1beta1.ClusterResourceBinding{} + scheduled := []placementv1beta1.BindingObj{} clusterMap := make(map[string]bool) for _, name := range clusters { clusterMap[name] = true } - for _, binding := range bindingList.Items { - if _, ok := clusterMap[binding.Spec.TargetCluster]; ok && binding.Spec.State == placementv1beta1.BindingStateScheduled { + for _, binding := range bindingList.GetBindingObjs() { + if _, ok := clusterMap[binding.GetBindingSpec().TargetCluster]; ok && binding.GetBindingSpec().State == placementv1beta1.BindingStateScheduled { scheduled = append(scheduled, binding) } } // Verify that scheduled bindings are created as expected. - wantScheduled := []placementv1beta1.ClusterResourceBinding{} + wantScheduled := []placementv1beta1.BindingObj{} for _, name := range clusters { score := scoreByCluster[name] - binding := placementv1beta1.ClusterResourceBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: bindingNamePlaceholder, - Labels: map[string]string{ - placementv1beta1.PlacementTrackingLabel: crpName, + var binding placementv1beta1.BindingObj + if namespace == "" { + // Create CRB. + binding = &placementv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: bindingNamePlaceholder, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: placementName, + }, + Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, + }, + Spec: placementv1beta1.ResourceBindingSpec{ + State: placementv1beta1.BindingStateScheduled, + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: name, + ClusterDecision: placementv1beta1.ClusterDecision{ + ClusterName: name, + Selected: true, + ClusterScore: score, + }, + }, + } + } else { + // Create RB. + binding = &placementv1beta1.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: bindingNamePlaceholder, + Namespace: namespace, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: placementName, + }, + Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, }, - Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, - }, - Spec: placementv1beta1.ResourceBindingSpec{ - State: placementv1beta1.BindingStateScheduled, - SchedulingPolicySnapshotName: policySnapshotName, - TargetCluster: name, - ClusterDecision: placementv1beta1.ClusterDecision{ - ClusterName: name, - Selected: true, - ClusterScore: score, + Spec: placementv1beta1.ResourceBindingSpec{ + State: placementv1beta1.BindingStateScheduled, + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: name, + ClusterDecision: placementv1beta1.ClusterDecision{ + ClusterName: name, + Selected: true, + ClusterScore: score, + }, }, - }, + } } wantScheduled = append(wantScheduled, binding) } @@ -141,10 +188,10 @@ func scheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, score } // Verify that binding names are formatted correctly. 
- for _, binding := range bindingList.Items { - wantPrefix := fmt.Sprintf("%s-%s", crpName, binding.Spec.TargetCluster) - if !strings.HasPrefix(binding.Name, wantPrefix) { - return fmt.Errorf("binding name %s is not formatted correctly; want prefix %s", binding.Name, wantPrefix) + for _, binding := range bindingList.GetBindingObjs() { + wantPrefix := fmt.Sprintf("%s-%s", placementName, binding.GetBindingSpec().TargetCluster) + if !strings.HasPrefix(binding.GetName(), wantPrefix) { + return fmt.Errorf("binding name %s is not formatted correctly; want prefix %s", binding.GetName(), wantPrefix) } } @@ -152,49 +199,78 @@ func scheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, score } } -func boundBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, crpName, policySnapshotName string) func() error { +func boundBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, placementKey, policySnapshotName string) func() error { return func() error { - bindingList := &placementv1beta1.ClusterResourceBindingList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) - listOptions := &client.ListOptions{LabelSelector: labelSelector} - if err := hubClient.List(ctx, bindingList, listOptions); err != nil { - return err + namespace, placementName, err := controller.ExtractNamespaceNameFromKeyStr(placementKey) + if err != nil { + return fmt.Errorf("failed to extract namespace and name from placement key %s: %w", placementKey, err) + } + bindingList, err := listBindings(placementKey) + if err != nil { + return fmt.Errorf("failed to list bindings for placement %s: %w", placementKey, err) } - bound := []placementv1beta1.ClusterResourceBinding{} + bound := []placementv1beta1.BindingObj{} clusterMap := make(map[string]bool) for _, name := range clusters { clusterMap[name] = true } - for _, binding := range bindingList.Items { - if _, ok := clusterMap[binding.Spec.TargetCluster]; ok && binding.Spec.State == placementv1beta1.BindingStateBound { + for _, binding := range bindingList.GetBindingObjs() { + if _, ok := clusterMap[binding.GetBindingSpec().TargetCluster]; ok && binding.GetBindingSpec().State == placementv1beta1.BindingStateBound { bound = append(bound, binding) } } - wantBound := []placementv1beta1.ClusterResourceBinding{} - for _, name := range clusters { - score := scoreByCluster[name] - binding := placementv1beta1.ClusterResourceBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: bindingNamePlaceholder, - Labels: map[string]string{ - placementv1beta1.PlacementTrackingLabel: crpName, + wantBound := []placementv1beta1.BindingObj{} + if namespace == "" { + for _, name := range clusters { + score := scoreByCluster[name] + binding := &placementv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: bindingNamePlaceholder, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: placementName, + }, + Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, }, - Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, - }, - Spec: placementv1beta1.ResourceBindingSpec{ - State: placementv1beta1.BindingStateBound, - SchedulingPolicySnapshotName: policySnapshotName, - TargetCluster: name, - ClusterDecision: placementv1beta1.ClusterDecision{ - ClusterName: name, - Selected: true, - ClusterScore: score, + Spec: placementv1beta1.ResourceBindingSpec{ + State: 
placementv1beta1.BindingStateBound, + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: name, + ClusterDecision: placementv1beta1.ClusterDecision{ + ClusterName: name, + Selected: true, + ClusterScore: score, + }, }, - }, + } + wantBound = append(wantBound, binding) + } + } else { + for _, name := range clusters { + score := scoreByCluster[name] + binding := &placementv1beta1.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: bindingNamePlaceholder, + Namespace: namespace, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: placementName, + }, + Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, + }, + Spec: placementv1beta1.ResourceBindingSpec{ + State: placementv1beta1.BindingStateBound, + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: name, + ClusterDecision: placementv1beta1.ClusterDecision{ + ClusterName: name, + Selected: true, + ClusterScore: score, + }, + }, + } + wantBound = append(wantBound, binding) } - wantBound = append(wantBound, binding) } if diff := cmp.Diff(bound, wantBound, ignoreResourceBindingFields...); diff != "" { @@ -202,10 +278,10 @@ func boundBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCl } // Verify that binding names are formatted correctly. - for _, binding := range bindingList.Items { - wantPrefix := fmt.Sprintf("%s-%s", crpName, binding.Spec.TargetCluster) - if !strings.HasPrefix(binding.Name, wantPrefix) { - return fmt.Errorf("binding name %s is not formatted correctly; want prefix %s", binding.Name, wantPrefix) + for _, binding := range bindingList.GetBindingObjs() { + wantPrefix := fmt.Sprintf("%s-%s", placementName, binding.GetBindingSpec().TargetCluster) + if !strings.HasPrefix(binding.GetName(), wantPrefix) { + return fmt.Errorf("binding name %s is not formatted correctly; want prefix %s", binding.GetName(), wantPrefix) } } @@ -213,49 +289,78 @@ func boundBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCl } } -func unscheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, crpName string, policySnapshotName string) func() error { +func unscheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, placementKey string, policySnapshotName string) func() error { return func() error { - bindingList := &placementv1beta1.ClusterResourceBindingList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) - listOptions := &client.ListOptions{LabelSelector: labelSelector} - if err := hubClient.List(ctx, bindingList, listOptions); err != nil { - return err + namespace, placementName, err := controller.ExtractNamespaceNameFromKeyStr(placementKey) + if err != nil { + return fmt.Errorf("failed to extract namespace and name from placement key %s: %w", placementKey, err) + } + bindingList, err := listBindings(placementKey) + if err != nil { + return fmt.Errorf("failed to list bindings for placement %s: %w", placementKey, err) } - unscheduled := []placementv1beta1.ClusterResourceBinding{} + unscheduled := []placementv1beta1.BindingObj{} clusterMap := make(map[string]bool) for _, name := range clusters { clusterMap[name] = true } - for _, binding := range bindingList.Items { - if _, ok := clusterMap[binding.Spec.TargetCluster]; ok && binding.Spec.State == placementv1beta1.BindingStateUnscheduled { + for _, binding := range bindingList.GetBindingObjs() { + if 
_, ok := clusterMap[binding.GetBindingSpec().TargetCluster]; ok && binding.GetBindingSpec().State == placementv1beta1.BindingStateUnscheduled { unscheduled = append(unscheduled, binding) } } // TODO (rzhang): fix me, compare the annotations when we know its previous state - wantUnscheduled := []placementv1beta1.ClusterResourceBinding{} - for _, name := range clusters { - score := scoreByCluster[name] - binding := placementv1beta1.ClusterResourceBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: bindingNamePlaceholder, - Labels: map[string]string{ - placementv1beta1.PlacementTrackingLabel: crpName, + wantUnscheduled := []placementv1beta1.BindingObj{} + if namespace == "" { + for _, name := range clusters { + score := scoreByCluster[name] + binding := &placementv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: bindingNamePlaceholder, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: placementName, + }, + Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, + }, + Spec: placementv1beta1.ResourceBindingSpec{ + State: placementv1beta1.BindingStateUnscheduled, + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: name, + ClusterDecision: placementv1beta1.ClusterDecision{ + ClusterName: name, + Selected: true, + ClusterScore: score, + }, }, - Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, - }, - Spec: placementv1beta1.ResourceBindingSpec{ - State: placementv1beta1.BindingStateUnscheduled, - SchedulingPolicySnapshotName: policySnapshotName, - TargetCluster: name, - ClusterDecision: placementv1beta1.ClusterDecision{ - ClusterName: name, - Selected: true, - ClusterScore: score, + } + wantUnscheduled = append(wantUnscheduled, binding) + } + } else { + for _, name := range clusters { + score := scoreByCluster[name] + binding := &placementv1beta1.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: bindingNamePlaceholder, + Namespace: namespace, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: placementName, + }, + Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, + }, + Spec: placementv1beta1.ResourceBindingSpec{ + State: placementv1beta1.BindingStateUnscheduled, + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: name, + ClusterDecision: placementv1beta1.ClusterDecision{ + ClusterName: name, + Selected: true, + ClusterScore: score, + }, }, - }, + } + wantUnscheduled = append(wantUnscheduled, binding) } - wantUnscheduled = append(wantUnscheduled, binding) } if diff := cmp.Diff(unscheduled, wantUnscheduled, ignoreResourceBindingFields...); diff != "" { @@ -263,10 +368,10 @@ func unscheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, sco } // Verify that binding names are formatted correctly. 
- for _, binding := range bindingList.Items { - wantPrefix := fmt.Sprintf("%s-%s", crpName, binding.Spec.TargetCluster) - if !strings.HasPrefix(binding.Name, wantPrefix) { - return fmt.Errorf("binding name %s is not formatted correctly; want prefix %s", binding.Name, wantPrefix) + for _, binding := range bindingList.GetBindingObjs() { + wantPrefix := fmt.Sprintf("%s-%s", placementName, binding.GetBindingSpec().TargetCluster) + if !strings.HasPrefix(binding.GetName(), wantPrefix) { + return fmt.Errorf("binding name %s is not formatted correctly; want prefix %s", binding.GetName(), wantPrefix) } } @@ -274,7 +379,7 @@ func unscheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, sco } } -func noBindingsCreatedForClustersActual(clusters []string, crpName string) func() error { +func noBindingsCreatedForClustersActual(clusters []string, placementKey string) func() error { // Build a map for clusters for quicker lookup. clusterMap := map[string]bool{} for _, name := range clusters { @@ -282,17 +387,16 @@ func noBindingsCreatedForClustersActual(clusters []string, crpName string) func( } return func() error { - bindingList := &placementv1beta1.ClusterResourceBindingList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) - listOptions := &client.ListOptions{LabelSelector: labelSelector} - if err := hubClient.List(ctx, bindingList, listOptions); err != nil { - return err + // List all bindings. + bindingList, err := listBindings(placementKey) + if err != nil { + return fmt.Errorf("failed to list bindings for placement %s: %w", placementKey, err) } - bindings := bindingList.Items + bindings := bindingList.GetBindingObjs() for _, binding := range bindings { - if _, ok := clusterMap[binding.Spec.TargetCluster]; ok { - return fmt.Errorf("binding %s for cluster %s has been created unexpectedly", binding.Name, binding.Spec.TargetCluster) + if _, ok := clusterMap[binding.GetBindingSpec().TargetCluster]; ok { + return fmt.Errorf("binding %s for cluster %s has been created unexpectedly", binding.GetName(), binding.GetBindingSpec().TargetCluster) } } @@ -300,18 +404,18 @@ func noBindingsCreatedForClustersActual(clusters []string, crpName string) func( } } -func pickFixedPolicySnapshotStatusUpdatedActual(valid, invalidOrNotFound []string, policySnapshotName string) func() error { +func pickFixedPolicySnapshotStatusUpdatedActual(valid, invalidOrNotFound []string, policySnapshotKey string) func() error { return func() error { - policySnapshot := &placementv1beta1.ClusterSchedulingPolicySnapshot{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: policySnapshotName}, policySnapshot); err != nil { - return err + policySnapshot, err := getSchedulingPolicySnapshot(policySnapshotKey) + if err != nil { + return fmt.Errorf("failed to get policy snapshot %s: %w", policySnapshotKey, err) } - // Verify that the observed CRP generation field is populated correctly. - wantCRPGeneration := policySnapshot.Annotations[placementv1beta1.CRPGenerationAnnotation] - observedCRPGeneration := policySnapshot.Status.ObservedCRPGeneration - if strconv.FormatInt(observedCRPGeneration, 10) != wantCRPGeneration { - return fmt.Errorf("policy snapshot observed CRP generation not match: want %s, got %d", wantCRPGeneration, observedCRPGeneration) + // Verify that the observed RP generation field is populated correctly. 
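+		// The same CRPGenerationAnnotation and ObservedCRPGeneration lookups are applied to both
+		// cluster-scoped and namespaced policy snapshots here.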
+ wantRPGeneration := policySnapshot.GetAnnotations()[placementv1beta1.CRPGenerationAnnotation] + observedRPGeneration := policySnapshot.GetPolicySnapshotStatus().ObservedCRPGeneration + if strconv.FormatInt(observedRPGeneration, 10) != wantRPGeneration { + return fmt.Errorf("policy snapshot observed RP generation not match: want %s, got %d", wantRPGeneration, observedRPGeneration) } // Verify that cluster decisions are populated correctly. @@ -328,24 +432,24 @@ func pickFixedPolicySnapshotStatusUpdatedActual(valid, invalidOrNotFound []strin Selected: false, }) } - if diff := cmp.Diff(policySnapshot.Status.ClusterDecisions, wantClusterDecisions, ignoreClusterDecisionReasonField, cmpopts.SortSlices(lessFuncClusterDecision)); diff != "" { + if diff := cmp.Diff(policySnapshot.GetPolicySnapshotStatus().ClusterDecisions, wantClusterDecisions, ignoreClusterDecisionReasonField, cmpopts.SortSlices(lessFuncClusterDecision)); diff != "" { return fmt.Errorf("policy snapshot status cluster decisions (-got, +want): %s", diff) } // Verify that the scheduled condition is added correctly. - scheduledCondition := meta.FindStatusCondition(policySnapshot.Status.Conditions, string(placementv1beta1.PolicySnapshotScheduled)) + scheduledCondition := meta.FindStatusCondition(policySnapshot.GetPolicySnapshotStatus().Conditions, string(placementv1beta1.PolicySnapshotScheduled)) var wantScheduledCondition *metav1.Condition if len(invalidOrNotFound) == 0 { wantScheduledCondition = &metav1.Condition{ Type: string(placementv1beta1.PolicySnapshotScheduled), Status: metav1.ConditionTrue, - ObservedGeneration: policySnapshot.Generation, + ObservedGeneration: policySnapshot.GetGeneration(), } } else { wantScheduledCondition = &metav1.Condition{ Type: string(placementv1beta1.PolicySnapshotScheduled), Status: metav1.ConditionFalse, - ObservedGeneration: policySnapshot.Generation, + ObservedGeneration: policySnapshot.GetGeneration(), } } if diff := cmp.Diff(scheduledCondition, wantScheduledCondition, ignoreConditionTimeReasonAndMessageFields); diff != "" { @@ -356,18 +460,18 @@ func pickFixedPolicySnapshotStatusUpdatedActual(valid, invalidOrNotFound []strin } } -func pickAllPolicySnapshotStatusUpdatedActual(scored, filtered []string, policySnapshotName string) func() error { +func pickAllPolicySnapshotStatusUpdatedActual(scored, filtered []string, policySnapshotKey string) func() error { return func() error { - policySnapshot := &placementv1beta1.ClusterSchedulingPolicySnapshot{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: policySnapshotName}, policySnapshot); err != nil { - return err + policySnapshot, err := getSchedulingPolicySnapshot(policySnapshotKey) + if err != nil { + return fmt.Errorf("failed to get policy snapshot %s: %w", policySnapshotKey, err) } - // Verify that the observed CRP generation field is populated correctly. - wantCRPGeneration := policySnapshot.Annotations[placementv1beta1.CRPGenerationAnnotation] - observedCRPGeneration := policySnapshot.Status.ObservedCRPGeneration - if strconv.FormatInt(observedCRPGeneration, 10) != wantCRPGeneration { - return fmt.Errorf("policy snapshot observed CRP generation not match: want %s, got %d", wantCRPGeneration, observedCRPGeneration) + // Verify that the observed RP generation field is populated correctly. 
+ wantRPGeneration := policySnapshot.GetAnnotations()[placementv1beta1.CRPGenerationAnnotation] + observedRPGeneration := policySnapshot.GetPolicySnapshotStatus().ObservedCRPGeneration + if strconv.FormatInt(observedRPGeneration, 10) != wantRPGeneration { + return fmt.Errorf("policy snapshot observed RP generation not match: want %s, got %d", wantRPGeneration, observedRPGeneration) } // Verify that cluster decisions are populated correctly. @@ -385,16 +489,16 @@ func pickAllPolicySnapshotStatusUpdatedActual(scored, filtered []string, policyS Selected: false, }) } - if diff := cmp.Diff(policySnapshot.Status.ClusterDecisions, wantClusterDecisions, ignoreClusterDecisionReasonField, cmpopts.SortSlices(lessFuncClusterDecision)); diff != "" { + if diff := cmp.Diff(policySnapshot.GetPolicySnapshotStatus().ClusterDecisions, wantClusterDecisions, ignoreClusterDecisionReasonField, cmpopts.SortSlices(lessFuncClusterDecision)); diff != "" { return fmt.Errorf("policy snapshot status cluster decisions (-got, +want): %s", diff) } // Verify that the scheduled condition is added correctly. - scheduledCondition := meta.FindStatusCondition(policySnapshot.Status.Conditions, string(placementv1beta1.PolicySnapshotScheduled)) + scheduledCondition := meta.FindStatusCondition(policySnapshot.GetPolicySnapshotStatus().Conditions, string(placementv1beta1.PolicySnapshotScheduled)) wantScheduledCondition := &metav1.Condition{ Type: string(placementv1beta1.PolicySnapshotScheduled), Status: metav1.ConditionTrue, - ObservedGeneration: policySnapshot.Generation, + ObservedGeneration: policySnapshot.GetGeneration(), } if diff := cmp.Diff(scheduledCondition, wantScheduledCondition, ignoreConditionTimeReasonAndMessageFields); diff != "" { @@ -405,30 +509,28 @@ func pickAllPolicySnapshotStatusUpdatedActual(scored, filtered []string, policyS } } -func hasNScheduledOrBoundBindingsPresentActual(crpName string, clusters []string) func() error { +func hasNScheduledOrBoundBindingsPresentActual(placementKey string, clusters []string) func() error { clusterMap := make(map[string]bool) for _, name := range clusters { clusterMap[name] = true } return func() error { - bindingList := &placementv1beta1.ClusterResourceBindingList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) - listOptions := &client.ListOptions{LabelSelector: labelSelector} - if err := hubClient.List(ctx, bindingList, listOptions); err != nil { - return err + bindingList, err := listBindings(placementKey) + if err != nil { + return fmt.Errorf("failed to list bindings for placement %s: %w", placementKey, err) } matchedScheduledOrBoundBindingCount := 0 - for _, binding := range bindingList.Items { + for _, binding := range bindingList.GetBindingObjs() { // A match is found iff the binding is of the scheduled or bound state, and its // target cluster is in the given list. // // We do not simply check against the state here as there exists a rare case where // the system might be in an in-between state and happen to have just the enough // number of bindings (though not the wanted ones). 
- _, matched := clusterMap[binding.Spec.TargetCluster] - if (binding.Spec.State == placementv1beta1.BindingStateBound || binding.Spec.State == placementv1beta1.BindingStateScheduled) && matched { + _, matched := clusterMap[binding.GetBindingSpec().TargetCluster] + if (binding.GetBindingSpec().State == placementv1beta1.BindingStateBound || binding.GetBindingSpec().State == placementv1beta1.BindingStateScheduled) && matched { matchedScheduledOrBoundBindingCount++ } } @@ -445,20 +547,20 @@ func pickNPolicySnapshotStatusUpdatedActual( numOfClusters int, picked, notPicked, filtered []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, - policySnapshotName string, + policySnapshotKey string, opts []cmp.Option, ) func() error { return func() error { - policySnapshot := &placementv1beta1.ClusterSchedulingPolicySnapshot{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: policySnapshotName}, policySnapshot); err != nil { - return err + policySnapshot, err := getSchedulingPolicySnapshot(policySnapshotKey) + if err != nil { + return fmt.Errorf("failed to get policy snapshot %s: %w", policySnapshotKey, err) } - // Verify that the observed CRP generation field is populated correctly. - wantCRPGeneration := policySnapshot.Annotations[placementv1beta1.CRPGenerationAnnotation] - observedCRPGeneration := policySnapshot.Status.ObservedCRPGeneration - if strconv.FormatInt(observedCRPGeneration, 10) != wantCRPGeneration { - return fmt.Errorf("policy snapshot observed CRP generation not match: want %s, got %d", wantCRPGeneration, observedCRPGeneration) + // Verify that the observed RP generation field is populated correctly. + wantRPGeneration := policySnapshot.GetAnnotations()[placementv1beta1.CRPGenerationAnnotation] + observedRPGeneration := policySnapshot.GetPolicySnapshotStatus().ObservedCRPGeneration + if strconv.FormatInt(observedRPGeneration, 10) != wantRPGeneration { + return fmt.Errorf("policy snapshot observed RP generation not match: want %s, got %d", wantRPGeneration, observedRPGeneration) } // Verify that cluster decisions are populated correctly. @@ -484,24 +586,24 @@ func pickNPolicySnapshotStatusUpdatedActual( }) } if diff := cmp.Diff( - policySnapshot.Status.ClusterDecisions, wantClusterDecisions, + policySnapshot.GetPolicySnapshotStatus().ClusterDecisions, wantClusterDecisions, opts..., ); diff != "" { return fmt.Errorf("policy snapshot status cluster decisions (-got, +want): %s", diff) } // Verify that the scheduled condition is added correctly. 
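// listBindings and the GetBindingObjs/GetBindingSpec accessors used above hide the
// cluster-scoped vs. namespaced split behind one call. A rough sketch, assuming
// bindings stay labeled with PlacementTrackingLabel and that the placement key
// follows the same "namespace/name" convention; the namespaced branch below is a
// stand-in for illustration only, not the repository's actual helper.
func listBindingsSketch(placementKey string) (*placementv1beta1.ClusterResourceBindingList, error) {
	ns, name, err := cache.SplitMetaNamespaceKey(placementKey)
	if err != nil {
		return nil, err
	}
	bindingList := &placementv1beta1.ClusterResourceBindingList{}
	opts := []client.ListOption{
		client.MatchingLabels{placementv1beta1.PlacementTrackingLabel: name},
	}
	if ns != "" {
		// The real helper presumably lists a namespaced binding kind here instead;
		// scoping the list to the namespace stands in for that.
		opts = append(opts, client.InNamespace(ns))
	}
	return bindingList, hubClient.List(ctx, bindingList, opts...)
}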
- scheduledCondition := meta.FindStatusCondition(policySnapshot.Status.Conditions, string(placementv1beta1.PolicySnapshotScheduled)) + scheduledCondition := meta.FindStatusCondition(policySnapshot.GetPolicySnapshotStatus().Conditions, string(placementv1beta1.PolicySnapshotScheduled)) wantScheduledCondition := &metav1.Condition{ Type: string(placementv1beta1.PolicySnapshotScheduled), Status: metav1.ConditionTrue, - ObservedGeneration: policySnapshot.Generation, + ObservedGeneration: policySnapshot.GetGeneration(), } if len(picked) != numOfClusters { wantScheduledCondition = &metav1.Condition{ Type: string(placementv1beta1.PolicySnapshotScheduled), Status: metav1.ConditionFalse, - ObservedGeneration: policySnapshot.Generation, + ObservedGeneration: policySnapshot.GetGeneration(), } } diff --git a/test/scheduler/pickall_integration_test.go b/test/scheduler/pickall_integration_test.go index b144f436a..7d71e5998 100644 --- a/test/scheduler/pickall_integration_test.go +++ b/test/scheduler/pickall_integration_test.go @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" ) var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { @@ -36,7 +37,7 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP with no scheduling policy specified, along with its associated policy snapshot. @@ -44,7 +45,7 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -68,7 +69,7 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -91,7 +92,7 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP with no scheduling policy specified, along with its associated policy snapshot. @@ -112,7 +113,7 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) // Delete the provisional cluster. 
ensureProvisionalClusterDeletion(newUnhealthyMemberClusterName) @@ -138,7 +139,7 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP with no scheduling policy specified, along with its associated policy snapshot. @@ -167,7 +168,7 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newUnhealthyMemberClusterName) @@ -182,7 +183,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -190,7 +191,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -214,7 +215,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -238,7 +239,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -271,7 +272,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -295,7 +296,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { AfterAll(func() { // Delete the CRP. 
- ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -319,7 +320,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -359,7 +360,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -383,7 +384,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -419,7 +420,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -495,7 +496,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -525,7 +526,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -547,7 +548,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") policy := &placementv1beta1.PlacementPolicy{ @@ -587,7 +588,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -605,7 +606,452 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) + }) + }) +}) + +var _ = Describe("scheduling RPs of the PickAll placement type", func() { + Context("pick all valid clusters", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. 
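// createPickAllRPWithPolicySnapshot is presumably the namespaced counterpart of the
// CRP helper used earlier in this suite: it is expected to create the RP in
// testNamespace (a nil policy standing in for the default PickAll behavior) and to
// create policy snapshot 1 for it, annotated with the RP generation and marked as
// the latest snapshot. The helper's internals are not shown in this patch.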
+ createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, nil) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all healthy clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(healthyClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for unhealthy clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unhealthyClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(healthyClusters, unhealthyClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("pick clusters with specific affinities (single term, multiple selectors)", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + + wantTargetClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster6WestProd, + } + wantIgnoredClusters := []string{ + memberCluster3EastCanary, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster7WestCanary, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. 
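// The single selector term below combines MatchLabels and MatchExpressions, so a
// cluster must satisfy both: env=prod AND region in {east, west}. That is why only
// memberCluster1EastProd, memberCluster2EastProd, and memberCluster6WestProd appear
// in wantTargetClusters, while the canary, central, unhealthy, and departed
// clusters are expected to be ignored.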
+ policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabel: "prod", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"east", "west"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all matching clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("pick clusters with specific affinities (multiple terms, single selector)", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + + wantTargetClusters := []string{ + memberCluster3EastCanary, + memberCluster6WestProd, + memberCluster7WestCanary, + } + wantIgnoredClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. 
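// In this context the two selector terms defined in the policy below are expected
// to be ORed by the scheduler: a cluster matches if env=canary OR its region is
// outside {east, central}. The wantTargetClusters list above (clusters 3, 6, and 7)
// reflects that union, with the unhealthy and departed clusters again left out.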
+ noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabel: "canary", + }, + }, + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabel, + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{ + "east", + "central", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all matching clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. 
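// ensurePlacementAndAllRelatedResourcesDeletion is assumed to mirror the CRP
// cleanup helper it replaces: delete the placement referenced by the key, wait for
// the scheduler to drop its cleanup finalizer, and remove the associated policy
// snapshots and bindings so later contexts start from a clean slate.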
+ ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("affinities updated", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 2) + policySnapshotKey2 := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName2) + + wantTargetClusters1 := []string{ + memberCluster3EastCanary, + memberCluster7WestCanary, + } + wantTargetClusters2 := []string{ + memberCluster3EastCanary, + memberCluster6WestProd, + memberCluster7WestCanary, + } + wantIgnoredClusters2 := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + boundClusters := []string{ + memberCluster3EastCanary, + } + scheduledClusters := []string{ + memberCluster6WestProd, + memberCluster7WestCanary, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabel: "canary", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + "east", + "west", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName1, policy) + + // Verify that bindings have been created as expected. + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters1, zeroScoreByCluster, rpKey, policySnapshotName1) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + + // Bind some bindings. + markBindingsAsBoundForClusters(rpKey, boundClusters) + + // Update the CRP with a new affinity. 
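// The object updated here is the namespaced RP. updatePickAllRPWithNewAffinity is
// presumably expected to rewrite the RP's affinity, bump its generation, create
// policy snapshot 2 carrying the refreshed generation annotation, and demote
// snapshot 1, so that the scheduler reconciles against the new snapshot.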
+ affinity := &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabel: "canary", + }, + }, + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabel, + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{ + "east", + "central", + }, + }, + }, + }, + }, + }, + }, + }, + } + updatePickAllRPWithNewAffinity(testNamespace, rpName, affinity, policySnapshotName1, policySnapshotName2) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create/update scheduled bindings for newly matched clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(scheduledClusters, zeroScoreByCluster, rpKey, policySnapshotName2) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should update bound bindings for newly matched clusters", func() { + boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(boundClusters, zeroScoreByCluster, rpKey, policySnapshotName2) + Eventually(boundBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + Consistently(boundBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters2, wantIgnoredClusters2, policySnapshotKey2) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. 
+ ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("no matching clusters", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + + wantIgnoredClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster3EastCanary, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster6WestProd, + memberCluster7WestCanary, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabel: "wonderland", + }, + }, + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabel, + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{ + "east", + "central", + "west", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual([]string{}, wantIgnoredClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) }) }) }) diff --git a/test/scheduler/pickfixed_integration_test.go b/test/scheduler/pickfixed_integration_test.go index c6aa4a6b0..8a8928845 100644 --- a/test/scheduler/pickfixed_integration_test.go +++ b/test/scheduler/pickfixed_integration_test.go @@ -22,6 +22,7 @@ package tests import ( "fmt" + "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) @@ -40,7 +41,7 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create the CRP and its associated policy snapshot. @@ -48,7 +49,7 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -65,7 +66,7 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { }) AfterAll(func() { - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -99,7 +100,7 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create the CRP and its associated policy snapshot. @@ -107,7 +108,7 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -130,7 +131,7 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { }) AfterAll(func() { - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -166,7 +167,7 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create the CRP and its associated policy snapshot. @@ -209,7 +210,7 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { }) AfterAll(func() { - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -244,7 +245,7 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create the CRP and its associated policy snapshot. @@ -281,7 +282,275 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { }) AfterAll(func() { - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) + }) + }) +}) + +var _ = Describe("scheduling RPs of the PickFixed placement type", func() { + Context("with valid target clusters", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + + targetClusters := []string{ + memberCluster1EastProd, + memberCluster4CentralProd, + memberCluster6WestProd, + } + + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create the RP and its associated policy snapshot. + createPickFixedRPWithPolicySnapshot(testNamespace, rpName, targetClusters, policySnapshotName) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for valid target clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters, nilScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters, []string{}, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") + }) + + AfterAll(func() { + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("with both valid and invalid/non-existent target clusters", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + + targetClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster6WestProd, + memberCluster8UnhealthyEastProd, // An invalid cluster (unhealthy). + memberCluster9LeftCentralProd, // An invalid cluster (left). 
+ memberCluster10NonExistent, // A cluster that cannot be found in the fleet. + } + validClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster6WestProd, + } + invalidClusters := []string{ + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + memberCluster10NonExistent, + } + + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create the RP and its associated policy snapshot. + createPickFixedRPWithPolicySnapshot(testNamespace, rpName, targetClusters, policySnapshotName) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for valid target clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(validClusters, nilScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create bindings for invalid target clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(invalidClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Created a binding for invalid or not found cluster") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Created a binding for invalid or not found cluster") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(validClusters, invalidClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update policy snapshot status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update policy snapshot status") + }) + + AfterAll(func() { + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("policy snapshot refresh with added clusters", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + + targetClusters1 := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + } + targetClusters2 := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster6WestProd, + } + previouslyBoundClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + } + previouslyScheduledClusters := []string{ + memberCluster4CentralProd, + } + 
newScheduledClusters := []string{ + memberCluster5CentralProd, + memberCluster6WestProd, + } + + policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 2) + policySnapshotKey2 := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName2) + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create the RP and its associated policy snapshot. + createPickFixedRPWithPolicySnapshot(testNamespace, rpName, targetClusters1, policySnapshotName1) + + // Make sure that the bindings have been created. + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters1, nilScoreByCluster, rpKey, policySnapshotName1) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + + // Mark all previously created bindings as bound. + markBindingsAsBoundForClusters(rpKey, previouslyBoundClusters) + + // Update the CRP with new target clusters and refresh scheduling policy snapshots. + updatePickFixedRPWithNewTargetClustersAndRefreshSnapshots(testNamespace, rpName, targetClusters2, policySnapshotName1, policySnapshotName2) + }) + + It("should create scheduled bindings for newly added valid target clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(newScheduledClusters, nilScoreByCluster, rpKey, policySnapshotName2) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should update bound bindings for previously added valid target clusters", func() { + boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(previouslyBoundClusters, nilScoreByCluster, rpKey, policySnapshotName2) + Eventually(boundBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + Consistently(boundBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + }) + + It("should update scheduled bindings for previously added valid target clusters", func() { + scheduledBindingsUpdatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(previouslyScheduledClusters, nilScoreByCluster, rpKey, policySnapshotName2) + Eventually(scheduledBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + Consistently(scheduledBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters2, []string{}, policySnapshotKey2) + 
Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update policy snapshot status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update policy snapshot status") + }) + + AfterAll(func() { + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("policy snapshot refresh with removed clusters", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + + targetClusters1 := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + } + targetClusters2 := []string{ + memberCluster5CentralProd, + memberCluster6WestProd, + } + previouslyBoundClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + } + scheduledClusters := []string{ + memberCluster5CentralProd, + memberCluster6WestProd, + } + unscheduledClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + } + + policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 2) + policySnapshotKey2 := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName2) + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create the RP and its associated policy snapshot. + createPickFixedRPWithPolicySnapshot(testNamespace, rpName, targetClusters1, policySnapshotName1) + + // Make sure that the bindings have been created. + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters1, nilScoreByCluster, rpKey, policySnapshotName1) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + + // Mark some previously created bindings as bound. + markBindingsAsBoundForClusters(rpKey, previouslyBoundClusters) + + // Update the RP with new target clusters and refresh scheduling policy snapshots. 
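// updatePickFixedRPWithNewTargetClustersAndRefreshSnapshots is presumably expected
// to rewrite the RP's fixed cluster list to targetClusters2, create policy snapshot
// 2 with the refreshed generation annotation, and demote snapshot 1. The scheduler
// should then create scheduled bindings for clusters 5 and 6 against snapshot 2 and
// flip the bindings for the removed clusters 1, 2, and 4 to the unscheduled state,
// which is what the It blocks below assert.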
+ updatePickFixedRPWithNewTargetClustersAndRefreshSnapshots(testNamespace, rpName, targetClusters2, policySnapshotName1, policySnapshotName2) + }) + + It("should create scheduled bindings for newly added valid target clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(scheduledClusters, nilScoreByCluster, rpKey, policySnapshotName2) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should mark bindings as unscheduled for removed target clusters", func() { + unscheduledBindingsCreatedActual := unscheduledBindingsCreatedOrUpdatedForClustersActual(unscheduledClusters, nilScoreByCluster, rpKey, policySnapshotName1) + Eventually(unscheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to mark bindings as unscheduled") + Consistently(unscheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to mark bindings as unscheduled") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(scheduledClusters, []string{}, policySnapshotKey2) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update policy snapshot status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update policy snapshot status") + }) + + AfterAll(func() { + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) }) }) }) diff --git a/test/scheduler/pickn_integration_test.go b/test/scheduler/pickn_integration_test.go index 247206008..28f823208 100644 --- a/test/scheduler/pickn_integration_test.go +++ b/test/scheduler/pickn_integration_test.go @@ -32,6 +32,7 @@ import ( "k8s.io/utils/ptr" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" ) var ( @@ -70,7 +71,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -82,7 +83,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -106,7 +107,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. 
- ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -135,7 +136,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -147,7 +148,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -171,7 +172,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -183,7 +184,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -195,7 +196,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -213,7 +214,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -239,7 +240,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
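// placementSchedulerFinalizerAddedActual replaces the CRP-only finalizer check used
// before this patch. A minimal sketch, assuming the same "namespace/name" key
// convention, a namespaced ResourcePlacement kind, and a scheduler cleanup
// finalizer constant exposed by the placement API; those names are assumptions for
// illustration, not the repository's actual helper. Assumed extra import:
// sigs.k8s.io/controller-runtime/pkg/controller/controllerutil.
func placementSchedulerFinalizerAddedActualSketch(placementKey string) func() error {
	return func() error {
		ns, name, err := cache.SplitMetaNamespaceKey(placementKey)
		if err != nil {
			return err
		}
		var placement client.Object
		if ns == "" {
			placement = &placementv1beta1.ClusterResourcePlacement{}
		} else {
			placement = &placementv1beta1.ResourcePlacement{}
		}
		if err := hubClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: name}, placement); err != nil {
			return fmt.Errorf("failed to get placement %s: %w", placementKey, err)
		}
		if !controllerutil.ContainsFinalizer(placement, placementv1beta1.SchedulerCleanupFinalizer) {
			return fmt.Errorf("scheduler cleanup finalizer is not present on placement %s", placementKey)
		}
		return nil
	}
}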
@@ -275,7 +276,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -299,7 +300,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -326,7 +327,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -378,7 +379,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -402,7 +403,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -446,7 +447,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -483,7 +484,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -507,7 +508,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -554,7 +555,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -610,7 +611,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -634,7 +635,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -688,7 +689,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -707,7 +708,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -731,7 +732,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -811,7 +812,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -835,7 +836,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -859,7 +860,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. 
- ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -916,7 +917,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -935,7 +936,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -959,7 +960,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -1048,7 +1049,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1072,7 +1073,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -1096,7 +1097,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -1151,7 +1152,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -1191,7 +1192,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -1215,7 +1216,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -1296,7 +1297,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1331,7 +1332,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -1355,7 +1356,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -1426,7 +1427,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1477,7 +1478,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -1501,7 +1502,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -1537,7 +1538,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1574,7 +1575,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -1598,7 +1599,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -1628,7 +1629,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1665,7 +1666,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -1689,7 +1690,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -1812,7 +1813,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1898,7 +1899,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -1922,7 +1923,489 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { AfterAll(func() { // Delete the CRP. 
-			ensureCRPAndAllRelatedResourcesDeletion(crpName)
+			ensurePlacementAndAllRelatedResourcesDeletion(crpName)
+		})
+	})
+})
+
+var _ = Describe("scheduling RPs of the PickN placement type", func() {
+	Context("pick N clusters with no affinities/topology spread constraints specified", Ordered, func() {
+		rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
+		rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName)
+		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1)
+		policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName)
+
+		numOfClusters := int32(3) // Less than the number of clusters available (7) in the fleet.
+
+		// The scheduler is designed to produce only deterministic decisions; if there are no
+		// comparable scores available for selected clusters, the scheduler will rank the clusters
+		// by their names.
+		wantPickedClusters := []string{
+			memberCluster5CentralProd,
+			memberCluster6WestProd,
+			memberCluster7WestCanary,
+		}
+		wantNotPickedClusters := []string{
+			memberCluster1EastProd,
+			memberCluster2EastProd,
+			memberCluster3EastCanary,
+			memberCluster4CentralProd,
+		}
+		wantFilteredClusters := []string{
+			memberCluster8UnhealthyEastProd,
+			memberCluster9LeftCentralProd,
+		}
+
+		BeforeAll(func() {
+			// Ensure that no bindings have been created so far.
+			noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey)
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+
+			// Create a RP of the PickN placement type, along with its associated policy snapshot.
+			policy := &placementv1beta1.PlacementPolicy{
+				PlacementType:    placementv1beta1.PickNPlacementType,
+				NumberOfClusters: &numOfClusters,
+			}
+			createPickNRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy)
+		})
+
+		It("should add scheduler cleanup finalizer to the RP", func() {
+			finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey)
+			Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP")
+		})
+
+		It("should create N bindings", func() {
+			hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, wantPickedClusters)
+			Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings")
+			Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings")
+		})
+
+		It("should create scheduled bindings for selected clusters", func() {
+			scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, rpKey, policySnapshotName)
+			Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters")
+			Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters")
+		})
+
+		It("should report status correctly", func() {
+			crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, zeroScoreByCluster, policySnapshotKey, pickNCmpOpts)
+			Eventually(crpStatusUpdatedActual, eventuallyDuration,
eventuallyInterval).Should(Succeed(), "Failed to report status correctly")
+			Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly")
+		})
+
+		AfterAll(func() {
+			// Delete the RP.
+			ensurePlacementAndAllRelatedResourcesDeletion(rpKey)
+		})
+	})
+
+	Context("not enough clusters to pick", Ordered, func() {
+		rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
+		rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName)
+		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1)
+		policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName)
+
+		numOfClusters := int32(10) // More than the number of clusters available (7) in the fleet.
+
+		// The scheduler is designed to produce only deterministic decisions; if there are no
+		// comparable scores available for selected clusters, the scheduler will rank the clusters
+		// by their names.
+		wantPickedClusters := []string{
+			memberCluster1EastProd,
+			memberCluster2EastProd,
+			memberCluster3EastCanary,
+			memberCluster4CentralProd,
+			memberCluster5CentralProd,
+			memberCluster6WestProd,
+			memberCluster7WestCanary,
+		}
+		wantFilteredClusters := []string{
+			memberCluster8UnhealthyEastProd,
+			memberCluster9LeftCentralProd,
+		}
+
+		BeforeAll(func() {
+			// Ensure that no bindings have been created so far.
+			noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey)
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+
+			// Create a RP of the PickN placement type, along with its associated policy snapshot.
+			policy := &placementv1beta1.PlacementPolicy{
+				PlacementType:    placementv1beta1.PickNPlacementType,
+				NumberOfClusters: &numOfClusters,
+			}
+			createPickNRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy)
+		})
+
+		It("should add scheduler cleanup finalizer to the RP", func() {
+			finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey)
+			Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP")
+		})
+
+		It("should create N bindings", func() {
+			hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, wantPickedClusters)
+			Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings")
+			Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings")
+		})
+
+		It("should create scheduled bindings for selected clusters", func() {
+			scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, rpKey, policySnapshotName)
+			Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters")
+			Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters")
+		})
+
+		It("should report status correctly", func() {
+			crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotKey, pickNCmpOpts)
+			Eventually(crpStatusUpdatedActual,
eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly")
+			Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly")
+		})
+
+		AfterAll(func() {
+			// Delete the RP.
+			ensurePlacementAndAllRelatedResourcesDeletion(rpKey)
+		})
+	})
+
+	Context("pick 0 clusters", Ordered, func() {
+		rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
+		rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName)
+		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1)
+		policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName)
+
+		numOfClusters := int32(0)
+
+		BeforeAll(func() {
+			// Ensure that no bindings have been created so far.
+			noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey)
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+
+			// Create a RP of the PickN placement type, along with its associated policy snapshot.
+			policy := &placementv1beta1.PlacementPolicy{
+				PlacementType:    placementv1beta1.PickNPlacementType,
+				NumberOfClusters: &numOfClusters,
+			}
+			createPickNRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy)
+		})
+
+		It("should add scheduler cleanup finalizer to the RP", func() {
+			finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey)
+			Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP")
+		})
+
+		It("should create N bindings", func() {
+			hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, []string{})
+			Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings")
+			Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings")
+		})
+
+		It("should report status correctly", func() {
+			crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), []string{}, []string{}, []string{}, zeroScoreByCluster, policySnapshotKey, pickNCmpOpts)
+			Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly")
+			Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly")
+		})
+
+		AfterAll(func() {
+			// Delete the RP.
+			ensurePlacementAndAllRelatedResourcesDeletion(rpKey)
+		})
+	})
+
+	Context("pick with required affinity", Ordered, func() {
+		rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
+		rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName)
+		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1)
+		policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName)
+
+		numOfClusters := int32(2)
+
+		wantPickedClusters := []string{
+			memberCluster1EastProd,
+			memberCluster2EastProd,
+		}
+		wantFilteredClusters := []string{
+			memberCluster3EastCanary,
+			memberCluster4CentralProd,
+			memberCluster5CentralProd,
+			memberCluster6WestProd,
+			memberCluster7WestCanary,
+			memberCluster8UnhealthyEastProd,
+			memberCluster9LeftCentralProd,
+		}
+
+		BeforeAll(func() {
+			// Ensure that no bindings have been created so far.
+ noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickN placement type, along with its associated policy snapshot. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabel: "east", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + "prod", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickNRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create N bindings", func() { + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, wantPickedClusters) + Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") + Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") + }) + + It("should create scheduled bindings for selected clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + }) + + It("should report status correctly", func() { + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotKey, pickNCmpOpts) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") + Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("pick with required affinity, multiple terms", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + + numOfClusters := int32(4) + + // Note that the number of matching clusters is less than the desired one. 
+ wantPickedClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster7WestCanary, + } + wantFilteredClusters := []string{ + memberCluster3EastCanary, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster6WestProd, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickN placement type, along with its associated policy snapshot. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabel: "east", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + "prod", + }, + }, + }, + }, + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabel: "west", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabel, + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{ + "prod", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickNRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create N bindings", func() { + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, wantPickedClusters) + Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") + Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") + }) + + It("should create scheduled bindings for selected clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + }) + + It("should report status correctly", func() { + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotKey, pickNCmpOpts) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") + Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), 
"Failed to report status correctly") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("pick with preferred affinity", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + + numOfClusters := int32(4) + + wantPickedClusters := []string{ + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster7WestCanary, + memberCluster6WestProd, + } + wantNotPickedClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster3EastCanary, + } + wantFilteredClusters := []string{ + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + scoreByCluster := map[string]*placementv1beta1.ClusterScore{ + memberCluster1EastProd: &zeroScore, + memberCluster2EastProd: &zeroScore, + memberCluster3EastCanary: &zeroScore, + memberCluster4CentralProd: { + AffinityScore: ptr.To(int32(10)), + TopologySpreadScore: ptr.To(int32(0)), + }, + memberCluster5CentralProd: { + AffinityScore: ptr.To(int32(10)), + TopologySpreadScore: ptr.To(int32(0)), + }, + memberCluster6WestProd: &zeroScore, + memberCluster7WestCanary: &zeroScore, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickN placement type, along with its associated policy snapshot. 
+ policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []placementv1beta1.PreferredClusterSelector{ + { + Weight: 10, + Preference: placementv1beta1.ClusterSelectorTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabel: "central", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + "prod", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickNRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create N bindings", func() { + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, wantPickedClusters) + Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") + Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") + }) + + It("should create scheduled bindings for selected clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + }) + + It("should report status correctly", func() { + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotKey, pickNCmpOpts) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") + Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) }) }) }) diff --git a/test/scheduler/property_based_scheduling_integration_test.go b/test/scheduler/property_based_scheduling_integration_test.go index 7564830b6..a3458023c 100644 --- a/test/scheduler/property_based_scheduling_integration_test.go +++ b/test/scheduler/property_based_scheduling_integration_test.go @@ -33,6 +33,7 @@ import ( clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" "github.com/kubefleet-dev/kubefleet/pkg/propertyprovider" + "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" ) const ( @@ -60,7 +61,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -113,7 +114,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -137,7 +138,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -161,7 +162,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -232,7 +233,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -256,7 +257,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -280,7 +281,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -317,7 +318,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -341,7 +342,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr AfterAll(func() { // Delete the CRP. 
- ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -365,7 +366,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -412,7 +413,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -436,7 +437,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -498,7 +499,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -537,7 +538,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -627,7 +628,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -649,7 +650,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. 
@@ -681,7 +682,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -699,7 +700,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -743,7 +744,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -775,7 +776,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -851,7 +852,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) // Reset the cluster properties. for idx := range wantTargetClusters2 { @@ -915,7 +916,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -942,7 +943,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -966,7 +967,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -1029,7 +1030,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -1056,7 +1057,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -1080,7 +1081,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -1135,7 +1136,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop } // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1162,7 +1163,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -1186,7 +1187,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) // Reset the cluster properties. for clusterName := range propertiesByCluster { @@ -1224,7 +1225,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -1251,7 +1252,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -1275,7 +1276,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) // Reset the cluster properties. for clusterName := range propertiesByCluster { @@ -1343,7 +1344,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1388,7 +1389,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -1412,7 +1413,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -1460,7 +1461,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -1492,7 +1493,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -1516,7 +1517,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -1570,7 +1571,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -1616,7 +1617,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -1640,7 +1641,611 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) + }) + }) +}) + +var _ = Describe("scheduling RPs of the PickAll placement type using cluster properties", func() { + Context("pick clusters with specific properties (single term, multiple expressions)", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + + wantTargetClusters := []string{ + memberCluster3EastCanary, + } + wantIgnoredClusters := []string{ + memberCluster1EastProd, + memberCluster2EastProd, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster6WestProd, + memberCluster7WestCanary, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. 
+ policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "4", + }, + }, + { + Name: energyEfficiencyRatingPropertyName, + Operator: placementv1beta1.PropertySelectorLessThan, + Values: []string{ + "45", + }, + }, + { + Name: propertyprovider.AllocatableCPUCapacityProperty, + Operator: placementv1beta1.PropertySelectorNotEqualTo, + Values: []string{ + "14", + }, + }, + { + Name: propertyprovider.AvailableMemoryCapacityProperty, + Operator: placementv1beta1.PropertySelectorGreaterThan, + Values: []string{ + "4Gi", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all matching clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. 
+ ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("pick clusters with specific properties (multiple terms, single expression)", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + + wantTargetClusters := []string{ + memberCluster1EastProd, + memberCluster3EastCanary, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster7WestCanary, + } + wantIgnoredClusters := []string{ + memberCluster2EastProd, + memberCluster6WestProd, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "8", + }, + }, + }, + }, + }, + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: energyEfficiencyRatingPropertyName, + Operator: placementv1beta1.PropertySelectorGreaterThan, + Values: []string{ + "99", + }, + }, + }, + }, + }, + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.TotalCPUCapacityProperty, + Operator: placementv1beta1.PropertySelectorEqualTo, + Values: []string{ + "12", + }, + }, + }, + }, + }, + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.TotalMemoryCapacityProperty, + Operator: placementv1beta1.PropertySelectorLessThanOrEqualTo, + Values: []string{ + "4Gi", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all matching clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + 
Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + Context("pick clusters with both label and property selectors (single term)", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + + wantTargetClusters := []string{ + memberCluster2EastProd, + memberCluster3EastCanary, + } + wantIgnoredClusters := []string{ + memberCluster1EastProd, + memberCluster4CentralProd, + memberCluster5CentralProd, + memberCluster6WestProd, + memberCluster7WestCanary, + memberCluster8UnhealthyEastProd, + memberCluster9LeftCentralProd, + } + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. 
+ policy := &placementv1beta1.PlacementPolicy{
+ PlacementType: placementv1beta1.PickAllPlacementType,
+ Affinity: &placementv1beta1.Affinity{
+ ClusterAffinity: &placementv1beta1.ClusterAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{
+ ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{
+ {
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ regionLabel: "east",
+ },
+ },
+ PropertySelector: &placementv1beta1.PropertySelector{
+ MatchExpressions: []placementv1beta1.PropertySelectorRequirement{
+ {
+ Name: propertyprovider.NodeCountProperty,
+ Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo,
+ Values: []string{
+ "4",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy)
+ })
+
+ It("should add scheduler cleanup finalizer to the RP", func() {
+ finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey)
+ Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP")
+ })
+
+ It("should create scheduled bindings for all matching clusters", func() {
+ scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, rpKey, policySnapshotName)
+ Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+ Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+ })
+
+ It("should not create any binding for non-matching clusters", func() {
+ noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, rpKey)
+ Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+ Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+ })
+
+ It("should report status correctly", func() {
+ statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotKey)
+ Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status")
+ Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status")
+ })
+
+ AfterAll(func() {
+ // Delete the RP.
+ ensurePlacementAndAllRelatedResourcesDeletion(rpKey)
+ })
+ })
+
+ Context("pick clusters with both label and property selectors (multiple terms)", Ordered, func() {
+ rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
+ rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName)
+ policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1)
+ policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName)
+
+ wantTargetClusters := []string{
+ memberCluster5CentralProd,
+ memberCluster6WestProd,
+ }
+ wantIgnoredClusters := []string{
+ memberCluster1EastProd,
+ memberCluster2EastProd,
+ memberCluster3EastCanary,
+ memberCluster4CentralProd,
+ memberCluster7WestCanary,
+ memberCluster8UnhealthyEastProd,
+ memberCluster9LeftCentralProd,
+ }
+
+ BeforeAll(func() {
+ // Ensure that no bindings have been created so far.
+ noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Create a RP of the PickAll placement type, along with its associated policy snapshot. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabel: "prod", + regionLabel: "west", + }, + }, + }, + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: energyEfficiencyRatingPropertyName, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "40", + }, + }, + { + Name: propertyprovider.TotalCPUCapacityProperty, + Operator: placementv1beta1.PropertySelectorGreaterThan, + Values: []string{ + "12", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all matching clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. 
+ ensurePlacementAndAllRelatedResourcesDeletion(rpKey)
+ })
+ })
+
+ Context("property selector updated", Ordered, func() {
+ rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
+ rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName)
+ policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1)
+ policySnapshotKey1 := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName1)
+ policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 2)
+ policySnapshotKey2 := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName2)
+
+ // wantScheduledClusters1, wantIgnoredClusters1, and wantBoundClusters1 are, respectively,
+ // the clusters expected to be scheduled, to be ignored, and to have their bindings marked
+ // as bound under the original property selector (before the property selector update).
+ wantScheduledClusters1 := []string{
+ memberCluster1EastProd,
+ memberCluster2EastProd,
+ memberCluster3EastCanary,
+ memberCluster4CentralProd,
+ memberCluster6WestProd,
+ }
+ wantIgnoredClusters1 := []string{
+ memberCluster5CentralProd,
+ memberCluster7WestCanary,
+ memberCluster8UnhealthyEastProd,
+ memberCluster9LeftCentralProd,
+ }
+ wantBoundClusters1 := []string{
+ memberCluster1EastProd,
+ memberCluster2EastProd,
+ memberCluster4CentralProd,
+ }
+
+ // wantScheduledClusters2, wantBoundClusters2, wantUnscheduledClusters2, and wantIgnoredClusters2
+ // are, respectively, the clusters expected to be scheduled, to stay bound, to become unscheduled,
+ // and to be ignored under the new property selector (after the property selector update).
+ wantScheduledClusters2 := []string{
+ memberCluster3EastCanary,
+ memberCluster5CentralProd,
+ memberCluster7WestCanary,
+ }
+ wantBoundClusters2 := []string{
+ memberCluster2EastProd,
+ }
+ wantUnscheduledClusters2 := []string{
+ memberCluster1EastProd,
+ memberCluster4CentralProd,
+ memberCluster6WestProd,
+ }
+ wantIgnoredClusters2 := []string{
+ memberCluster8UnhealthyEastProd,
+ memberCluster9LeftCentralProd,
+ }
+ // wantTargetClusters and wantUnselectedClusters are the clusters picked
+ // and unpicked respectively after the property selector update.
+ wantTargetClusters := []string{}
+ wantTargetClusters = append(wantTargetClusters, wantScheduledClusters2...)
+ wantTargetClusters = append(wantTargetClusters, wantBoundClusters2...)
+ wantUnselectedClusters := []string{}
+ wantUnselectedClusters = append(wantUnselectedClusters, wantUnscheduledClusters2...)
+ wantUnselectedClusters = append(wantUnselectedClusters, wantIgnoredClusters2...)
+
+ BeforeAll(func() {
+ // Ensure that no bindings have been created so far.
+ noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey)
+ Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+
+ // Create a RP of the PickAll placement type, along with its associated policy snapshot.
+ policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorLessThanOrEqualTo, + Values: []string{ + "6", + }, + }, + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "2", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName1, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all matching clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantScheduledClusters1, zeroScoreByCluster, rpKey, policySnapshotName1) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters1, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantScheduledClusters1, wantIgnoredClusters1, policySnapshotKey1) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + It("can mark some bindings as bound", func() { + markBindingsAsBoundForClusters(rpKey, wantBoundClusters1) + }) + + It("can update the scheduling policy with a new property selector", func() { + affinity := &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorLessThanOrEqualTo, + Values: []string{ + "8", + }, + }, + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "4", + }, + }, + }, + }, + }, + }, + }, + }, + } + 
updatePickAllRPWithNewAffinity(testNamespace, rpName, affinity, policySnapshotName1, policySnapshotName2) + }) + + It("should create/update scheduled bindings for newly matched clusters", func() { + scheduledBindingsCreatedOrUpdatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantScheduledClusters2, zeroScoreByCluster, rpKey, policySnapshotName2) + Eventually(scheduledBindingsCreatedOrUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create/update the expected set of bindings") + Consistently(scheduledBindingsCreatedOrUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create/update the expected set of bindings") + }) + + It("should update bound bindings for newly matched clusters", func() { + boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(wantBoundClusters2, zeroScoreByCluster, rpKey, policySnapshotName2) + Eventually(boundBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + Consistently(boundBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + }) + + It("should not create any binding for non-matching clusters", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should mark bindings as unscheduled for clusters that were unselected", func() { + unscheduledBindingsUpdatedActual := unscheduledBindingsCreatedOrUpdatedForClustersActual(wantUnscheduledClusters2, zeroScoreByCluster, rpKey, policySnapshotName1) + Eventually(unscheduledBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + Consistently(unscheduledBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantUnselectedClusters, policySnapshotKey2) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. 
+ ensurePlacementAndAllRelatedResourcesDeletion(rpKey) }) }) }) diff --git a/test/scheduler/suite_test.go b/test/scheduler/suite_test.go index d22afb123..60e7202d9 100644 --- a/test/scheduler/suite_test.go +++ b/test/scheduler/suite_test.go @@ -524,6 +524,14 @@ func setupResources() { for clusterName := range propertiesByCluster { resetClusterPropertiesFor(clusterName) } + + // Create test namespace + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + }, + } + Expect(hubClient.Create(ctx, namespace)).Should(Succeed()) } func beforeSuiteForProcess1() []byte { @@ -582,13 +590,27 @@ func beforeSuiteForProcess1() []byte { err = crpReconciler.SetupWithManagerForClusterResourcePlacement(ctrlMgr) Expect(err).NotTo(HaveOccurred(), "Failed to set up CRP watcher with controller manager") - policySnapshotWatcher := clusterschedulingpolicysnapshot.Reconciler{ + rpReconciler := clusterresourceplacement.Reconciler{ + Client: hubClient, + SchedulerWorkQueue: schedulerWorkQueue, + } + err = rpReconciler.SetupWithManagerForResourcePlacement(ctrlMgr) + Expect(err).NotTo(HaveOccurred(), "Failed to set up RP watcher with controller manager") + + clusterPolicySnapshotWatcher := clusterschedulingpolicysnapshot.Reconciler{ Client: hubClient, SchedulerWorkQueue: schedulerWorkQueue, } - err = policySnapshotWatcher.SetupWithManagerForClusterSchedulingPolicySnapshot(ctrlMgr) + err = clusterPolicySnapshotWatcher.SetupWithManagerForClusterSchedulingPolicySnapshot(ctrlMgr) Expect(err).NotTo(HaveOccurred(), "Failed to set up cluster policy snapshot watcher with controller manager") + policySnapshotWatcher := clusterschedulingpolicysnapshot.Reconciler{ + Client: hubClient, + SchedulerWorkQueue: schedulerWorkQueue, + } + err = policySnapshotWatcher.SetupWithManagerForSchedulingPolicySnapshot(ctrlMgr) + Expect(err).NotTo(HaveOccurred(), "Failed to set up policy snapshot watcher with controller manager") + memberClusterWatcher := membercluster.Reconciler{ Client: hubClient, SchedulerWorkQueue: schedulerWorkQueue, @@ -604,6 +626,13 @@ func beforeSuiteForProcess1() []byte { err = clusterResourceBindingWatcher.SetupWithManagerForClusterResourceBinding(ctrlMgr) Expect(err).NotTo(HaveOccurred(), "Failed to set up cluster resource binding watcher with controller manager") + resourceBindingWatcher := clusterresourcebinding.Reconciler{ + Client: hubClient, + SchedulerWorkQueue: schedulerWorkQueue, + } + err = resourceBindingWatcher.SetupWithManagerForResourceBinding(ctrlMgr) + Expect(err).NotTo(HaveOccurred(), "Failed to set up resource binding watcher with controller manager") + // Set up the scheduler. fw := buildSchedulerFramework(ctrlMgr, clusterEligibilityChecker) sched := scheduler.NewScheduler(defaultSchedulerName, fw, schedulerWorkQueue, ctrlMgr, 3) diff --git a/test/scheduler/tainttoleration_integration_test.go b/test/scheduler/tainttoleration_integration_test.go index 8fea0167f..a39e71439 100644 --- a/test/scheduler/tainttoleration_integration_test.go +++ b/test/scheduler/tainttoleration_integration_test.go @@ -30,6 +30,7 @@ import ( clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" ) var ( @@ -52,7 +53,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Add taints to some member clusters 1, 4, 6 from all regions. @@ -63,7 +64,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -83,7 +84,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // Remove taints removeTaintsFromMemberClusters(taintClusters) // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -97,7 +98,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Add taints to some member clusters 1, 4, 7 from all regions. @@ -108,7 +109,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -134,7 +135,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // Remove taints removeTaintsFromMemberClusters(taintClusters) // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -150,7 +151,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Add taints to some member clusters 1, 4, 7 from all regions. @@ -161,7 +162,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -208,7 +209,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", AfterAll(func() { // Delete the CRP. 
- ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -223,7 +224,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Add taints to some member clusters 1, 2, 6 from all regions. @@ -261,7 +262,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -287,7 +288,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // Remove taints removeTaintsFromMemberClusters(taintClusters) // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -301,7 +302,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") policy := &placementv1beta1.PlacementPolicy{ @@ -312,7 +313,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -361,7 +362,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // Remove taints removeTaintsFromMemberClusters(taintClusters) // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -382,7 +383,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Add taints to some member clusters 1, 2. 
@@ -421,7 +422,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -470,7 +471,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // Remove taints removeTaintsFromMemberClusters(taintClusters) // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) }) }) @@ -483,7 +484,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP with no scheduling policy specified, along with its associated policy snapshot, no tolerations specified. @@ -504,7 +505,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newUnhealthyMemberClusterName) }) @@ -526,7 +527,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP with no scheduling policy specified, along with its associated policy snapshot, and toleration for new cluster. @@ -550,7 +551,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newUnhealthyMemberClusterName) }) @@ -619,7 +620,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot, no tolerations specified. 
@@ -638,7 +639,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -715,7 +716,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newClusterName) }) @@ -784,7 +785,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -806,7 +807,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) @@ -883,9 +884,455 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", AfterAll(func() { // Delete the CRP. - ensureCRPAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpName) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newClusterName) }) }) }) + +var _ = Describe("scheduling RPs on member clusters with taints & tolerations", func() { + // This is a serial test as adding taints can affect other tests + Context("pickFixed, valid target clusters with taints", Serial, Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + targetClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster6WestProd} + taintClusters := targetClusters + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Add taints to some member clusters 1, 4, 6 from all regions. + addTaintsToMemberClusters(taintClusters, buildTaints(taintClusters)) + + // Create the RP and its associated policy snapshot. 
+ createPickFixedRPWithPolicySnapshot(testNamespace, rpName, targetClusters, policySnapshotName) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for valid target clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters, nilScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters, []string{}, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") + }) + + AfterAll(func() { + // Remove taints + removeTaintsFromMemberClusters(taintClusters) + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + // This is a serial test as adding taints can affect other tests. + Context("pick all valid cluster with no taints, ignore valid cluster with taints, RP with no matching toleration", Serial, Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + taintClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary} + selectedClusters := []string{memberCluster2EastProd, memberCluster3EastCanary, memberCluster5CentralProd, memberCluster6WestProd} + unSelectedClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary, memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Add taints to some member clusters 1, 4, 7 from all regions. + addTaintsToMemberClusters(taintClusters, buildTaints(taintClusters)) + + // Create a RP with no scheduling policy specified, along with its associated policy snapshot, with no tolerations specified. 
+ createNilSchedulingPolicyRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, nil) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all healthy clusters with no taints", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for unhealthy clusters, healthy cluster with taints", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Remove taints + removeTaintsFromMemberClusters(taintClusters) + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + // This is a serial test as adding taints can affect other tests. + Context("pick all valid cluster with no taints, ignore valid cluster with taints, then remove taints after which RP selects all clusters", Serial, Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + taintClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary} + selectedClusters1 := []string{memberCluster2EastProd, memberCluster3EastCanary, memberCluster5CentralProd, memberCluster6WestProd} + unSelectedClusters1 := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary, memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} + selectedClusters2 := []string{memberCluster1EastProd, memberCluster2EastProd, memberCluster3EastCanary, memberCluster4CentralProd, memberCluster5CentralProd, memberCluster6WestProd, memberCluster7WestCanary} + unSelectedClusters2 := []string{memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} + + BeforeAll(func() { + // Ensure that no bindings have been created so far. 
+ noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Add taints to some member clusters 1, 4, 7 from all regions. + addTaintsToMemberClusters(taintClusters, buildTaints(taintClusters)) + + // Create a RP with no scheduling policy specified, along with its associated policy snapshot, with no tolerations specified. + createNilSchedulingPolicyRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, nil) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for all healthy clusters with no taints", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters1, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for unhealthy clusters, healthy cluster with taints", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters1, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters1, unSelectedClusters1, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + It("remove taints from member clusters", func() { + // Remove taints + removeTaintsFromMemberClusters(taintClusters) + }) + + It("should create scheduled bindings for all healthy clusters with no taints", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters2, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for unhealthy clusters, healthy cluster with taints", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters2, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should 
report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters2, unSelectedClusters2, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + // This is a serial test as adding taints, tolerations can affect other tests. + Context("pick all valid cluster with tolerated taints, ignore valid clusters with taints, RP has some matching tolerations on creation", Serial, Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + taintClusters := []string{memberCluster1EastProd, memberCluster2EastProd, memberCluster6WestProd} + tolerateClusters := []string{memberCluster1EastProd, memberCluster2EastProd} + selectedClusters := tolerateClusters + unSelectedClusters := []string{memberCluster3EastCanary, memberCluster4CentralProd, memberCluster5CentralProd, memberCluster6WestProd, memberCluster7WestCanary, memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Add taints to some member clusters 1, 2, 6 from all regions. + addTaintsToMemberClusters(taintClusters, buildTaints(taintClusters)) + + // Create a RP with affinity, tolerations for clusters 1,2. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabel: "prod", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"east", "west"}, + }, + }, + }, + }, + }, + }, + }, + }, + Tolerations: buildTolerations(tolerateClusters), + } + // Create RP . 
+ createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create scheduled bindings for clusters with tolerated taints", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") + }) + + It("should not create any binding for clusters with untolerated taints", func() { + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, rpKey) + Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + }) + + It("should report status correctly", func() { + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotKey) + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") + Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") + }) + + AfterAll(func() { + // Remove taints + removeTaintsFromMemberClusters(taintClusters) + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) + + // This is a serial test as adding taints, tolerations can affect other tests. + Context("pickAll valid cluster without taints, add a taint to a cluster that's already picked", Serial, Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) + policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + selectedClusters := healthyClusters + unSelectedClusters := []string{memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} + taintClusters := []string{memberCluster1EastProd, memberCluster2EastProd} + + BeforeAll(func() { + // Ensure that no bindings have been created so far. + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + } + // Create RP with PickAll, no tolerations specified. 
+ createPickAllRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy)
+ })
+
+ It("should add scheduler cleanup finalizer to the RP", func() {
+ finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey)
+ Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP")
+ })
+
+ It("should create scheduled bindings for valid clusters", func() {
+ scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, rpKey, policySnapshotName)
+ Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+ Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+ })
+
+ It("should not create any binding for invalid clusters", func() {
+ noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, rpKey)
+ Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+ Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+ })
+
+ It("should report status correctly", func() {
+ statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotKey)
+ Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status")
+ Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status")
+ })
+
+ It("add taint to existing clusters", func() {
+ // Add taints to some member clusters 1, 2.
+ addTaintsToMemberClusters(taintClusters, buildTaints(taintClusters))
+ })
+
+ It("should create scheduled bindings for valid clusters without taints, valid clusters with taint", func() {
+ scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, rpKey, policySnapshotName)
+ Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+ Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+ })
+
+ It("should not create any binding for invalid clusters", func() {
+ noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, rpKey)
+ Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+ Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+ })
+
+ It("should report status correctly", func() {
+ statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotKey)
+ Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status")
+ Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status")
+ })
+
+ AfterAll(func() {
+ // Remove taints
+ removeTaintsFromMemberClusters(taintClusters)
+ // Delete the RP.
+ ensurePlacementAndAllRelatedResourcesDeletion(rpKey)
+ })
+ })
+
+ // This is a serial test as adding taints, tolerations can affect other tests.
+ Context("pick N clusters with affinity specified, ignore valid clusters with taints, RP has some matching tolerations after update", Serial, Ordered, func() {
+ rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
+ rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName)
+ policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1)
+ policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName)
+ policySnapshotNameAfter := fmt.Sprintf(policySnapshotNameTemplate, rpName, 2)
+ policySnapshotNameAfterKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotNameAfter)
+ numOfClusters := int32(2) // Less than the number of clusters available (7) in the fleet.
+ taintClusters := []string{memberCluster1EastProd, memberCluster2EastProd}
+ tolerateClusters := taintClusters
+ // The scheduler is designed to produce only deterministic decisions; if there are no
+ // comparable scores available for selected clusters, the scheduler will rank the clusters
+ // by their names.
+ wantFilteredClusters := []string{memberCluster1EastProd, memberCluster2EastProd, memberCluster3EastCanary, memberCluster4CentralProd, memberCluster5CentralProd, memberCluster6WestProd, memberCluster7WestCanary, memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd}
+ wantPickedClustersAfter := taintClusters
+ wantFilteredClustersAfter := []string{memberCluster3EastCanary, memberCluster4CentralProd, memberCluster5CentralProd, memberCluster6WestProd, memberCluster7WestCanary, memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd}
+
+ BeforeAll(func() {
+ // Ensure that no bindings have been created so far.
+ noBindingsCreatedActual := noBindingsCreatedForPlacementActual(rpKey) + Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") + + // Add taints to some member clusters 1, 2. + addTaintsToMemberClusters(taintClusters, buildTaints(taintClusters)) + + // Create a RP of the PickN placement type, along with its associated policy snapshot, no tolerations specified. + policy := &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: &numOfClusters, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabel: "east", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabel, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + "prod", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + createPickNRPWithPolicySnapshot(testNamespace, rpName, policySnapshotName, policy) + }) + + It("should add scheduler cleanup finalizer to the RP", func() { + finalizerAddedActual := placementSchedulerFinalizerAddedActual(rpKey) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to RP") + }) + + It("should create N bindings", func() { + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, []string{}) + Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") + Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") + }) + + It("should create scheduled bindings for selected clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual([]string{}, zeroScoreByCluster, rpKey, policySnapshotName) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + }) + + It("should report status correctly", func() { + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(2, []string{}, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotKey, taintTolerationCmpOpts) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") + Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") + }) + + It("update RP with new tolerations", func() { + // Update RP with tolerations for clusters 1,2. 
+ updatePickNRPWithTolerations(testNamespace, rpName, buildTolerations(tolerateClusters), policySnapshotName, policySnapshotNameAfter) + }) + + It("should create N bindings", func() { + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(rpKey, wantPickedClustersAfter) + Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") + Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") + }) + + It("should create scheduled bindings for selected clusters", func() { + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual([]string{}, zeroScoreByCluster, rpKey, policySnapshotNameAfter) + Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") + }) + + It("should report status correctly", func() { + rpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(2, wantPickedClustersAfter, []string{}, wantFilteredClustersAfter, zeroScoreByCluster, policySnapshotNameAfterKey, taintTolerationCmpOpts) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") + Consistently(rpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") + }) + + AfterAll(func() { + // Remove taints + removeTaintsFromMemberClusters(taintClusters) + // Delete the RP. + ensurePlacementAndAllRelatedResourcesDeletion(rpKey) + }) + }) +}) diff --git a/test/scheduler/utils_test.go b/test/scheduler/utils_test.go index bb1cabd1f..2b3b8528f 100644 --- a/test/scheduler/utils_test.go +++ b/test/scheduler/utils_test.go @@ -48,18 +48,22 @@ import ( "github.com/kubefleet-dev/kubefleet/pkg/scheduler/framework/plugins/sameplacementaffinity" "github.com/kubefleet-dev/kubefleet/pkg/scheduler/framework/plugins/tainttoleration" "github.com/kubefleet-dev/kubefleet/pkg/scheduler/framework/plugins/topologyspreadconstraints" + "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" ) // This file features some utilities used in the test suites. 
 const (
 	crpNameTemplate            = "crp-%d"
+	rpNameTemplate             = "rp-%d"
 	policySnapshotNameTemplate = "%s-policy-snapshot-%d"
 
 	provisionalClusterNameTemplate = "provisional-cluster-%d"
 
 	policyHash = "policy-hash"
 
 	bindingNamePlaceholder = "binding"
+
+	testNamespace = "test-namespace"
 )
 
 var (
@@ -95,8 +99,8 @@ var (
 )
 
 var (
-	lessFuncBinding = func(binding1, binding2 placementv1beta1.ClusterResourceBinding) bool {
-		return binding1.Spec.TargetCluster < binding2.Spec.TargetCluster
+	lessFuncBinding = func(binding1, binding2 placementv1beta1.BindingObj) bool {
+		return binding1.GetBindingSpec().TargetCluster < binding2.GetBindingSpec().TargetCluster
 	}
 	lessFuncClusterDecision = func(decision1, decision2 placementv1beta1.ClusterDecision) bool {
 		return decision1.ClusterName < decision2.ClusterName
@@ -109,10 +113,12 @@ var (
 	ignoreObjectMetaNameField                 = cmpopts.IgnoreFields(metav1.ObjectMeta{}, "Name")
 	ignoreObjectMetaAnnotationField           = cmpopts.IgnoreFields(metav1.ObjectMeta{}, "Annotations")
 	ignoreObjectMetaAutoGeneratedFields       = cmpopts.IgnoreFields(metav1.ObjectMeta{}, "UID", "CreationTimestamp", "ResourceVersion", "Generation", "ManagedFields")
-	ignoreResourceBindingTypeMetaField        = cmpopts.IgnoreFields(placementv1beta1.ClusterResourceBinding{}, "TypeMeta")
+	ignoreClusterResourceBindingTypeMetaField = cmpopts.IgnoreFields(placementv1beta1.ClusterResourceBinding{}, "TypeMeta")
+	ignoreResourceBindingTypeMetaField        = cmpopts.IgnoreFields(placementv1beta1.ResourceBinding{}, "TypeMeta")
 	ignoreConditionTimeReasonAndMessageFields = cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "Reason", "Message")
 
 	ignoreResourceBindingFields = []cmp.Option{
+		ignoreClusterResourceBindingTypeMetaField,
 		ignoreResourceBindingTypeMetaField,
 		ignoreObjectMetaNameField,
 		ignoreObjectMetaAnnotationField,
@@ -313,6 +319,49 @@ func createPickFixedCRPWithPolicySnapshot(crpName string, targetClusters []strin
 	Expect(hubClient.Create(ctx, policySnapshot)).To(Succeed(), "Failed to create policy snapshot")
 }
 
+func createPickFixedRPWithPolicySnapshot(namespace, rpName string, targetClusters []string, policySnapshotName string) {
+	policy := &placementv1beta1.PlacementPolicy{
+		PlacementType: placementv1beta1.PickFixedPlacementType,
+		ClusterNames:  targetClusters,
+	}
+
+	// Create the RP.
+	rp := &placementv1beta1.ResourcePlacement{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       rpName,
+			Namespace:  namespace,
+			Finalizers: []string{customDeletionBlockerFinalizer},
+		},
+		Spec: placementv1beta1.PlacementSpec{
+			ResourceSelectors: defaultResourceSelectors,
+			Policy:            policy,
+		},
+	}
+	Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP")
+
+	rpGeneration := rp.Generation
+
+	// Create the associated policy snapshot.
+ policySnapshot := &placementv1beta1.SchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: policySnapshotName, + Namespace: namespace, + Labels: map[string]string{ + placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + placementv1beta1.PlacementTrackingLabel: rpName, + }, + Annotations: map[string]string{ + placementv1beta1.CRPGenerationAnnotation: strconv.FormatInt(rpGeneration, 10), + }, + }, + Spec: placementv1beta1.SchedulingPolicySnapshotSpec{ + Policy: policy, + PolicyHash: []byte(policyHash), + }, + } + Expect(hubClient.Create(ctx, policySnapshot)).To(Succeed(), "Failed to create policy snapshot") +} + func createNilSchedulingPolicyCRPWithPolicySnapshot(crpName string, policySnapshotName string, policy *placementv1beta1.PlacementPolicy) { // Create a CRP with no scheduling policy specified. crp := placementv1beta1.ClusterResourcePlacement{ @@ -349,6 +398,44 @@ func createNilSchedulingPolicyCRPWithPolicySnapshot(crpName string, policySnapsh Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot") } +func createNilSchedulingPolicyRPWithPolicySnapshot(namespace, rpName string, policySnapshotName string, policy *placementv1beta1.PlacementPolicy) { + // Create a RP with no scheduling policy specified. + rp := placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: namespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: defaultResourceSelectors, + Policy: policy, + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed(), "Failed to create RP") + + rpGeneration := rp.Generation + + // Create the associated policy snapshot. + policySnapshot := &placementv1beta1.SchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: policySnapshotName, + Namespace: namespace, + Labels: map[string]string{ + placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + placementv1beta1.PlacementTrackingLabel: rpName, + }, + Annotations: map[string]string{ + placementv1beta1.CRPGenerationAnnotation: strconv.FormatInt(rpGeneration, 10), + }, + }, + Spec: placementv1beta1.SchedulingPolicySnapshotSpec{ + Policy: policy, + PolicyHash: []byte(policyHash), + }, + } + Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot") +} + func updatePickFixedCRPWithNewTargetClustersAndRefreshSnapshots(crpName string, targetClusters []string, oldPolicySnapshotName, newPolicySnapshotName string) { // Update the CRP. crp := &placementv1beta1.ClusterResourcePlacement{} @@ -387,76 +474,142 @@ func updatePickFixedCRPWithNewTargetClustersAndRefreshSnapshots(crpName string, Expect(hubClient.Create(ctx, policySnapshot)).To(Succeed(), "Failed to create policy snapshot") } -func markBindingsAsBoundForClusters(crpName string, boundClusters []string) { - bindingList := &placementv1beta1.ClusterResourceBindingList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) - listOptions := &client.ListOptions{LabelSelector: labelSelector} - Expect(hubClient.List(ctx, bindingList, listOptions)).To(Succeed(), "Failed to list bindings") +func updatePickFixedRPWithNewTargetClustersAndRefreshSnapshots(namespace, rpName string, targetClusters []string, oldPolicySnapshotName, newPolicySnapshotName string) { + // Update the RP. 
+ rp := &placementv1beta1.ResourcePlacement{} + Expect(hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: namespace}, rp)).To(Succeed(), "Failed to get RP") + + policy := rp.Spec.Policy.DeepCopy() + policy.ClusterNames = targetClusters + rp.Spec.Policy = policy + Expect(hubClient.Update(ctx, rp)).To(Succeed(), "Failed to update RP") + + rpGeneration := rp.Generation + + // Mark the old policy snapshot as inactive. + policySnapshot := &placementv1beta1.SchedulingPolicySnapshot{} + Expect(hubClient.Get(ctx, types.NamespacedName{Name: oldPolicySnapshotName, Namespace: namespace}, policySnapshot)).To(Succeed(), "Failed to get policy snapshot") + policySnapshot.Labels[placementv1beta1.IsLatestSnapshotLabel] = strconv.FormatBool(false) + Expect(hubClient.Update(ctx, policySnapshot)).To(Succeed(), "Failed to update policy snapshot") + + // Create a new policy snapshot. + policySnapshot = &placementv1beta1.SchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: newPolicySnapshotName, + Namespace: namespace, + Labels: map[string]string{ + placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + placementv1beta1.PlacementTrackingLabel: rpName, + }, + Annotations: map[string]string{ + placementv1beta1.CRPGenerationAnnotation: strconv.FormatInt(rpGeneration, 10), + }, + }, + Spec: placementv1beta1.SchedulingPolicySnapshotSpec{ + Policy: policy, + PolicyHash: []byte(policyHash), + }, + } + Expect(hubClient.Create(ctx, policySnapshot)).To(Succeed(), "Failed to create policy snapshot") +} + +func markBindingsAsBoundForClusters(placementKey string, boundClusters []string) { + bindingList, err := listBindings(placementKey) + Expect(err).ToNot(HaveOccurred(), "Failed to list bindings") + boundClusterMap := make(map[string]bool) for _, cluster := range boundClusters { boundClusterMap[cluster] = true } - for idx := range bindingList.Items { - binding := bindingList.Items[idx] - if _, ok := boundClusterMap[binding.Spec.TargetCluster]; ok && binding.Spec.State == placementv1beta1.BindingStateScheduled { - binding.Spec.State = placementv1beta1.BindingStateBound - Expect(hubClient.Update(ctx, &binding)).To(Succeed(), "Failed to update binding") + + for _, bindingObj := range bindingList.GetBindingObjs() { + if _, ok := boundClusterMap[bindingObj.GetBindingSpec().TargetCluster]; ok && bindingObj.GetBindingSpec().State == placementv1beta1.BindingStateScheduled { + bindingObj.GetBindingSpec().State = placementv1beta1.BindingStateBound + Expect(hubClient.Update(ctx, bindingObj)).To(Succeed(), "Failed to update binding") } } } -func ensureCRPAndAllRelatedResourcesDeletion(crpName string) { - // Delete the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - }, +func ensurePlacementAndAllRelatedResourcesDeletion(placementKey string) { + namespace, placementName, err := controller.ExtractNamespaceNameFromKeyStr(placementKey) + Expect(err).ToNot(HaveOccurred(), "Failed to extract namespace and name from placement key") + + // Delete the placement. + var placement placementv1beta1.PlacementObj + if namespace == "" { + // Delete CRP. + placement = &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: placementName, + }, + } + } else { + // Delete RP. 
+ placement = &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: placementName, + Namespace: namespace, + }, + } } - Expect(hubClient.Delete(ctx, crp)).To(Succeed(), "Failed to delete CRP") + Expect(hubClient.Delete(ctx, placement)).To(Succeed(), "Failed to delete placement") // Ensure that all the bindings are deleted. - noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(placementKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to clear all bindings") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to clear all bindings") // Ensure that the scheduler finalizer is removed. - finalizerRemovedActual := crpSchedulerFinalizerRemovedActual(crpName) - Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove scheduler cleanup finalizer from CRP") + finalizerRemovedActual := placementSchedulerFinalizerRemovedActual(placementKey) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove scheduler cleanup finalizer from placement") - // Remove all the other finalizers from the CRP. + // Remove all the other finalizers from the placement. Eventually(func() error { - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + if err := hubClient.Get(ctx, types.NamespacedName{Name: placementName, Namespace: namespace}, placement); err != nil { return err } - crp.Finalizers = []string{} - return hubClient.Update(ctx, crp) - }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove all finalizers from CRP") + placement.SetFinalizers([]string{}) + return hubClient.Update(ctx, placement) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove all finalizers from placement") - // Ensure that the CRP is deleted. + // Ensure that the placement is deleted. Eventually(func() error { - err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, &placementv1beta1.ClusterResourcePlacement{}) + err := hubClient.Get(ctx, types.NamespacedName{Name: placementName, Namespace: namespace}, placement) if errors.IsNotFound(err) { return nil } return err - }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to delete CRP") + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to delete placement") // List all policy snapshots. - policySnapshotList := &placementv1beta1.ClusterSchedulingPolicySnapshotList{} - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: crpName}) + var policySnapshotList placementv1beta1.PolicySnapshotList + labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: placementName}) listOptions := &client.ListOptions{LabelSelector: labelSelector} + + if namespace == "" { + // List CSPS. + policySnapshotList = &placementv1beta1.ClusterSchedulingPolicySnapshotList{} + } else { + // List SPS. + policySnapshotList = &placementv1beta1.SchedulingPolicySnapshotList{} + listOptions.Namespace = namespace + } Expect(hubClient.List(ctx, policySnapshotList, listOptions)).To(Succeed(), "Failed to list policy snapshots") // Delete all policy snapshots and ensure their deletion. 
- for idx := range policySnapshotList.Items { - policySnapshot := policySnapshotList.Items[idx] - Expect(hubClient.Delete(ctx, &policySnapshot)).To(Succeed(), "Failed to delete policy snapshot") + for _, policySnapshot := range policySnapshotList.GetPolicySnapshotObjs() { + Expect(hubClient.Delete(ctx, policySnapshot)).To(Succeed(), "Failed to delete policy snapshot") Eventually(func() error { - err := hubClient.Get(ctx, types.NamespacedName{Name: policySnapshot.Name}, &placementv1beta1.ClusterSchedulingPolicySnapshot{}) + var ps placementv1beta1.PolicySnapshotObj + if namespace == "" { + ps = &placementv1beta1.ClusterSchedulingPolicySnapshot{} + } else { + ps = &placementv1beta1.SchedulingPolicySnapshot{} + } + err := hubClient.Get(ctx, types.NamespacedName{Name: policySnapshot.GetName(), Namespace: policySnapshot.GetNamespace()}, ps) if errors.IsNotFound(err) { return nil } @@ -522,6 +675,44 @@ func createPickAllCRPWithPolicySnapshot(crpName string, policySnapshotName strin Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot") } +func createPickAllRPWithPolicySnapshot(namespace, rpName, policySnapshotName string, policy *placementv1beta1.PlacementPolicy) { + // Create a RP of the PickAll placement type. + rp := placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: namespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: defaultResourceSelectors, + Policy: policy, + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed(), "Failed to create RP") + + rpGeneration := rp.Generation + + // Create the associated policy snapshot. + policySnapshot := &placementv1beta1.SchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: policySnapshotName, + Namespace: namespace, + Labels: map[string]string{ + placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + placementv1beta1.PlacementTrackingLabel: rpName, + }, + Annotations: map[string]string{ + placementv1beta1.CRPGenerationAnnotation: strconv.FormatInt(rpGeneration, 10), + }, + }, + Spec: placementv1beta1.SchedulingPolicySnapshotSpec{ + Policy: policy, + PolicyHash: []byte(policyHash), + }, + } + Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot") +} + func updatePickAllCRPWithNewAffinity(crpName string, affinity *placementv1beta1.Affinity, oldPolicySnapshotName, newPolicySnapshotName string) { // Update the CRP. crp := &placementv1beta1.ClusterResourcePlacement{} @@ -560,6 +751,45 @@ func updatePickAllCRPWithNewAffinity(crpName string, affinity *placementv1beta1. Expect(hubClient.Create(ctx, policySnapshot)).To(Succeed(), "Failed to create policy snapshot") } +func updatePickAllRPWithNewAffinity(namespace, rpName string, affinity *placementv1beta1.Affinity, oldPolicySnapshotName, newPolicySnapshotName string) { + // Update the RP. + rp := &placementv1beta1.ResourcePlacement{} + Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: rpName}, rp)).To(Succeed(), "Failed to get RP") + + policy := rp.Spec.Policy.DeepCopy() + policy.Affinity = affinity + rp.Spec.Policy = policy + Expect(hubClient.Update(ctx, rp)).To(Succeed(), "Failed to update RP") + + rpGeneration := rp.Generation + + // Mark the old policy snapshot as inactive. 
+	policySnapshot := &placementv1beta1.SchedulingPolicySnapshot{}
+	Expect(hubClient.Get(ctx, types.NamespacedName{Name: oldPolicySnapshotName, Namespace: namespace}, policySnapshot)).To(Succeed(), "Failed to get policy snapshot")
+	policySnapshot.Labels[placementv1beta1.IsLatestSnapshotLabel] = strconv.FormatBool(false)
+	Expect(hubClient.Update(ctx, policySnapshot)).To(Succeed(), "Failed to update policy snapshot")
+
+	// Create a new policy snapshot.
+	policySnapshot = &placementv1beta1.SchedulingPolicySnapshot{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      newPolicySnapshotName,
+			Namespace: namespace,
+			Labels: map[string]string{
+				placementv1beta1.IsLatestSnapshotLabel:  strconv.FormatBool(true),
+				placementv1beta1.PlacementTrackingLabel: rpName,
+			},
+			Annotations: map[string]string{
+				placementv1beta1.CRPGenerationAnnotation: strconv.FormatInt(rpGeneration, 10),
+			},
+		},
+		Spec: placementv1beta1.SchedulingPolicySnapshotSpec{
+			Policy:     policy,
+			PolicyHash: []byte(policyHash),
+		},
+	}
+	Expect(hubClient.Create(ctx, policySnapshot)).To(Succeed(), "Failed to create policy snapshot")
+}
+
 func createPickNCRPWithPolicySnapshot(crpName string, policySnapshotName string, policy *placementv1beta1.PlacementPolicy) {
 	// Create a CRP of the PickN placement type.
 	crp := placementv1beta1.ClusterResourcePlacement{
@@ -597,6 +827,45 @@ func createPickNCRPWithPolicySnapshot(crpName string, policySnapshotName string,
 	Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot")
 }
 
+func createPickNRPWithPolicySnapshot(namespace, rpName string, policySnapshotName string, policy *placementv1beta1.PlacementPolicy) {
+	// Create a RP of the PickN placement type.
+	rp := placementv1beta1.ResourcePlacement{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       rpName,
+			Namespace:  namespace,
+			Finalizers: []string{customDeletionBlockerFinalizer},
+		},
+		Spec: placementv1beta1.PlacementSpec{
+			ResourceSelectors: defaultResourceSelectors,
+			Policy:            policy,
+		},
+	}
+	Expect(hubClient.Create(ctx, &rp)).Should(Succeed(), "Failed to create RP")
+
+	rpGeneration := rp.Generation
+
+	// Create the associated policy snapshot.
+	policySnapshot := &placementv1beta1.SchedulingPolicySnapshot{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      policySnapshotName,
+			Namespace: namespace,
+			Labels: map[string]string{
+				placementv1beta1.IsLatestSnapshotLabel:  strconv.FormatBool(true),
+				placementv1beta1.PlacementTrackingLabel: rpName,
+			},
+			Annotations: map[string]string{
+				placementv1beta1.CRPGenerationAnnotation:    strconv.FormatInt(rpGeneration, 10),
+				placementv1beta1.NumberOfClustersAnnotation: strconv.FormatInt(int64(*policy.NumberOfClusters), 10),
+			},
+		},
+		Spec: placementv1beta1.SchedulingPolicySnapshotSpec{
+			Policy:     policy,
+			PolicyHash: []byte(policyHash),
+		},
+	}
+	Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot")
+}
+
 func updatePickNCRPWithNewAffinityAndTopologySpreadConstraints(
 	crpName string,
 	affinity *placementv1beta1.Affinity,
@@ -681,6 +950,45 @@ func updatePickNCRPWithTolerations(crpName string, tolerations []placementv1beta
 	Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot")
 }
 
+func updatePickNRPWithTolerations(namespace, rpName string, tolerations []placementv1beta1.Toleration, oldPolicySnapshotName, newPolicySnapshotName string) {
+	rp := &placementv1beta1.ResourcePlacement{}
+	Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: rpName}, rp)).To(Succeed(), "Failed to get resource placement")
+
+	policy := rp.Spec.Policy.DeepCopy()
+	policy.Tolerations = tolerations
+	numOfClusters := policy.NumberOfClusters
+	rp.Spec.Policy = policy
+	Expect(hubClient.Update(ctx, rp)).To(Succeed(), "Failed to update resource placement")
+
+	rpGeneration := rp.Generation
+
+	// Mark the old policy snapshot as inactive.
+	policySnapshot := &placementv1beta1.SchedulingPolicySnapshot{}
+	Expect(hubClient.Get(ctx, types.NamespacedName{Name: oldPolicySnapshotName, Namespace: namespace}, policySnapshot)).To(Succeed(), "Failed to get policy snapshot")
+	policySnapshot.Labels[placementv1beta1.IsLatestSnapshotLabel] = strconv.FormatBool(false)
+	Expect(hubClient.Update(ctx, policySnapshot)).To(Succeed(), "Failed to update policy snapshot")
+
+	// Create a new policy snapshot.
+ policySnapshot = &placementv1beta1.SchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: newPolicySnapshotName, + Namespace: namespace, + Labels: map[string]string{ + placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + placementv1beta1.PlacementTrackingLabel: rpName, + }, + Annotations: map[string]string{ + placementv1beta1.CRPGenerationAnnotation: strconv.FormatInt(rpGeneration, 10), + placementv1beta1.NumberOfClustersAnnotation: strconv.FormatInt(int64(*numOfClusters), 10), + }, + }, + Spec: placementv1beta1.SchedulingPolicySnapshotSpec{ + Policy: policy, + PolicyHash: []byte(policyHash), + }, + } + Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot") +} + func buildTaints(memberClusterNames []string) []clusterv1beta1.Taint { var labels map[string]string taints := make([]clusterv1beta1.Taint, len(memberClusterNames)) @@ -768,3 +1076,49 @@ func resetClusterPropertiesFor(clusterName string) { return nil }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to reset cluster properties") } + +func listBindings(placementKey string) (placementv1beta1.BindingObjList, error) { + namespace, placementName, err := controller.ExtractNamespaceNameFromKeyStr(placementKey) + if err != nil { + return nil, fmt.Errorf("failed to extract namespace and name from placement key %s: %w", placementKey, err) + } + + var bindingList placementv1beta1.BindingObjList + labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: placementName}) + listOptions := &client.ListOptions{LabelSelector: labelSelector} + + if namespace == "" { + // List ClusterResourceBindings. + bindingList = &placementv1beta1.ClusterResourceBindingList{} + } else { + // List ResourceBindings. + bindingList = &placementv1beta1.ResourceBindingList{} + listOptions.Namespace = namespace + } + + if err := hubClient.List(ctx, bindingList, listOptions); err != nil { + return nil, err + } + return bindingList, nil +} + +func getSchedulingPolicySnapshot(policySnapshotKey string) (placementv1beta1.PolicySnapshotObj, error) { + namespace, policySnapshotName, err := controller.ExtractNamespaceNameFromKeyStr(policySnapshotKey) + if err != nil { + return nil, fmt.Errorf("failed to extract namespace and name from policy snapshot key %s: %w", policySnapshotKey, err) + } + + // Get the policy snapshot. + var policySnapshot placementv1beta1.PolicySnapshotObj + if namespace == "" { + // Get ClusterSchedulingPolicySnapshot. + policySnapshot = &placementv1beta1.ClusterSchedulingPolicySnapshot{} + } else { + // Get SchedulingPolicySnapshot. 
+ policySnapshot = &placementv1beta1.SchedulingPolicySnapshot{} + } + if err := hubClient.Get(ctx, types.NamespacedName{Name: policySnapshotName, Namespace: namespace}, policySnapshot); err != nil { + return nil, err + } + return policySnapshot, nil +} From fa3c42e0b013fd6bfe74ece151e76afd97f9d91e Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Wed, 13 Aug 2025 10:13:20 +0800 Subject: [PATCH 03/38] fix: set the correct value for work applier fast backoff (#179) Minor fixes Signed-off-by: michaelawyu --- cmd/memberagent/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/memberagent/main.go b/cmd/memberagent/main.go index fc7516ec1..3d560a18b 100644 --- a/cmd/memberagent/main.go +++ b/cmd/memberagent/main.go @@ -99,7 +99,7 @@ var ( workApplierRequeueRateLimiterExponentialBaseForSlowBackoff = flag.Float64("work-applier-requeue-rate-limiter-exponential-base-for-slow-backoff", 1.2, "If set, the work applier will start to back off slowly at this factor after it finished requeueing with fixed delays, until it reaches the slow backoff delay cap. Its value should be larger than 1.0 and no larger than 100.0") workApplierRequeueRateLimiterInitialSlowBackoffDelaySeconds = flag.Float64("work-applier-requeue-rate-limiter-initial-slow-backoff-delay-seconds", 2, "If set, the work applier will start to back off slowly at this delay in seconds.") workApplierRequeueRateLimiterMaxSlowBackoffDelaySeconds = flag.Float64("work-applier-requeue-rate-limiter-max-slow-backoff-delay-seconds", 15, "If set, the work applier will not back off longer than this value in seconds when it is in the slow backoff stage.") - workApplierRequeueRateLimiterExponentialBaseForFastBackoff = flag.Float64("work-applier-requeue-rate-limiter-exponential-base-for-fast-backoff", 1.2, "If set, the work applier will start to back off fast at this factor after it completes the slow backoff stage, until it reaches the fast backoff delay cap. Its value should be larger than the base value for the slow backoff stage.") + workApplierRequeueRateLimiterExponentialBaseForFastBackoff = flag.Float64("work-applier-requeue-rate-limiter-exponential-base-for-fast-backoff", 1.5, "If set, the work applier will start to back off fast at this factor after it completes the slow backoff stage, until it reaches the fast backoff delay cap. 
Its value should be larger than the base value for the slow backoff stage.") workApplierRequeueRateLimiterMaxFastBackoffDelaySeconds = flag.Float64("work-applier-requeue-rate-limiter-max-fast-backoff-delay-seconds", 900, "If set, the work applier will not back off longer than this value in seconds when it is in the fast backoff stage.") workApplierRequeueRateLimiterSkipToFastBackoffForAvailableOrDiffReportedWorkObjs = flag.Bool("work-applier-requeue-rate-limiter-skip-to-fast-backoff-for-available-or-diff-reported-work-objs", true, "If set, the rate limiter will skip the slow backoff stage and start fast backoff immediately for work objects that are available or have diff reported.") ) From a722522f25f153dbc774c35c551700f3343122c6 Mon Sep 17 00:00:00 2001 From: Wantong Date: Wed, 13 Aug 2025 13:18:20 -0700 Subject: [PATCH 04/38] test: refactor scheduler integration tests (#186) --- test/scheduler/actuals_test.go | 84 ++---- test/scheduler/pickall_integration_test.go | 120 ++++---- test/scheduler/pickfixed_integration_test.go | 74 ++--- test/scheduler/pickn_integration_test.go | 256 ++++++++++-------- ...perty_based_scheduling_integration_test.go | 221 ++++++++------- .../tainttoleration_integration_test.go | 173 ++++++------ test/scheduler/utils_test.go | 37 +-- 7 files changed, 489 insertions(+), 476 deletions(-) diff --git a/test/scheduler/actuals_test.go b/test/scheduler/actuals_test.go index bb7db1c69..b4a15412f 100644 --- a/test/scheduler/actuals_test.go +++ b/test/scheduler/actuals_test.go @@ -29,12 +29,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" ) // This file features common actuals (and utilities for generating actuals) in the test suites. -func noBindingsCreatedForPlacementActual(placementKey string) func() error { +func noBindingsCreatedForPlacementActual(placementKey types.NamespacedName) func() error { return func() error { bindingList, err := listBindings(placementKey) if err != nil { @@ -50,23 +49,18 @@ func noBindingsCreatedForPlacementActual(placementKey string) func() error { } } -func placementSchedulerFinalizerAddedActual(placementKey string) func() error { +func placementSchedulerFinalizerAddedActual(placementKey types.NamespacedName) func() error { return func() error { - namespace, placementName, err := controller.ExtractNamespaceNameFromKeyStr(placementKey) - if err != nil { - return fmt.Errorf("failed to extract namespace and name from placement key %s: %w", placementKey, err) - } - // Retrieve the placement. var placement placementv1beta1.PlacementObj - if namespace == "" { + if placementKey.Namespace == "" { // Retrieve CRP. placement = &placementv1beta1.ClusterResourcePlacement{} } else { // Retrieve RP. 
placement = &placementv1beta1.ResourcePlacement{} } - if err := hubClient.Get(ctx, types.NamespacedName{Name: placementName, Namespace: namespace}, placement); err != nil { + if err := hubClient.Get(ctx, types.NamespacedName{Name: placementKey.Name, Namespace: placementKey.Namespace}, placement); err != nil { return err } @@ -79,23 +73,18 @@ func placementSchedulerFinalizerAddedActual(placementKey string) func() error { } } -func placementSchedulerFinalizerRemovedActual(placementKey string) func() error { +func placementSchedulerFinalizerRemovedActual(placementKey types.NamespacedName) func() error { return func() error { - namespace, placementName, err := controller.ExtractNamespaceNameFromKeyStr(placementKey) - if err != nil { - return fmt.Errorf("failed to extract namespace and name from placement key %s: %w", placementKey, err) - } - // Retrieve the placement. var placement placementv1beta1.PlacementObj - if namespace == "" { + if placementKey.Namespace == "" { // Retrieve CRP. placement = &placementv1beta1.ClusterResourcePlacement{} } else { // Retrieve RP. placement = &placementv1beta1.ResourcePlacement{} } - if err := hubClient.Get(ctx, types.NamespacedName{Name: placementName, Namespace: namespace}, placement); err != nil { + if err := hubClient.Get(ctx, types.NamespacedName{Name: placementKey.Name, Namespace: placementKey.Namespace}, placement); err != nil { return err } @@ -108,13 +97,8 @@ func placementSchedulerFinalizerRemovedActual(placementKey string) func() error } } -func scheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, placementKey, policySnapshotName string) func() error { +func scheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, placementKey types.NamespacedName, policySnapshotName string) func() error { return func() error { - namespace, placementName, err := controller.ExtractNamespaceNameFromKeyStr(placementKey) - if err != nil { - return fmt.Errorf("failed to extract namespace and name from placement key %s: %w", placementKey, err) - } - bindingList, err := listBindings(placementKey) if err != nil { return fmt.Errorf("failed to list bindings for placement %s: %w", placementKey, err) @@ -136,13 +120,13 @@ func scheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, score for _, name := range clusters { score := scoreByCluster[name] var binding placementv1beta1.BindingObj - if namespace == "" { + if placementKey.Namespace == "" { // Create CRB. 
binding = &placementv1beta1.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ Name: bindingNamePlaceholder, Labels: map[string]string{ - placementv1beta1.PlacementTrackingLabel: placementName, + placementv1beta1.PlacementTrackingLabel: placementKey.Name, }, Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, }, @@ -162,9 +146,9 @@ func scheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, score binding = &placementv1beta1.ResourceBinding{ ObjectMeta: metav1.ObjectMeta{ Name: bindingNamePlaceholder, - Namespace: namespace, + Namespace: placementKey.Namespace, Labels: map[string]string{ - placementv1beta1.PlacementTrackingLabel: placementName, + placementv1beta1.PlacementTrackingLabel: placementKey.Name, }, Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, }, @@ -189,7 +173,7 @@ func scheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, score // Verify that binding names are formatted correctly. for _, binding := range bindingList.GetBindingObjs() { - wantPrefix := fmt.Sprintf("%s-%s", placementName, binding.GetBindingSpec().TargetCluster) + wantPrefix := fmt.Sprintf("%s-%s", placementKey.Name, binding.GetBindingSpec().TargetCluster) if !strings.HasPrefix(binding.GetName(), wantPrefix) { return fmt.Errorf("binding name %s is not formatted correctly; want prefix %s", binding.GetName(), wantPrefix) } @@ -199,12 +183,8 @@ func scheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, score } } -func boundBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, placementKey, policySnapshotName string) func() error { +func boundBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, placementKey types.NamespacedName, policySnapshotName string) func() error { return func() error { - namespace, placementName, err := controller.ExtractNamespaceNameFromKeyStr(placementKey) - if err != nil { - return fmt.Errorf("failed to extract namespace and name from placement key %s: %w", placementKey, err) - } bindingList, err := listBindings(placementKey) if err != nil { return fmt.Errorf("failed to list bindings for placement %s: %w", placementKey, err) @@ -222,14 +202,14 @@ func boundBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCl } wantBound := []placementv1beta1.BindingObj{} - if namespace == "" { + if placementKey.Namespace == "" { for _, name := range clusters { score := scoreByCluster[name] binding := &placementv1beta1.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ Name: bindingNamePlaceholder, Labels: map[string]string{ - placementv1beta1.PlacementTrackingLabel: placementName, + placementv1beta1.PlacementTrackingLabel: placementKey.Name, }, Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, }, @@ -252,9 +232,9 @@ func boundBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCl binding := &placementv1beta1.ResourceBinding{ ObjectMeta: metav1.ObjectMeta{ Name: bindingNamePlaceholder, - Namespace: namespace, + Namespace: placementKey.Namespace, Labels: map[string]string{ - placementv1beta1.PlacementTrackingLabel: placementName, + placementv1beta1.PlacementTrackingLabel: placementKey.Name, }, Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, }, @@ -279,7 +259,7 @@ func boundBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCl // Verify that binding names are formatted correctly. 
for _, binding := range bindingList.GetBindingObjs() { - wantPrefix := fmt.Sprintf("%s-%s", placementName, binding.GetBindingSpec().TargetCluster) + wantPrefix := fmt.Sprintf("%s-%s", placementKey.Name, binding.GetBindingSpec().TargetCluster) if !strings.HasPrefix(binding.GetName(), wantPrefix) { return fmt.Errorf("binding name %s is not formatted correctly; want prefix %s", binding.GetName(), wantPrefix) } @@ -289,12 +269,8 @@ func boundBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCl } } -func unscheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, placementKey string, policySnapshotName string) func() error { +func unscheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, placementKey types.NamespacedName, policySnapshotName string) func() error { return func() error { - namespace, placementName, err := controller.ExtractNamespaceNameFromKeyStr(placementKey) - if err != nil { - return fmt.Errorf("failed to extract namespace and name from placement key %s: %w", placementKey, err) - } bindingList, err := listBindings(placementKey) if err != nil { return fmt.Errorf("failed to list bindings for placement %s: %w", placementKey, err) @@ -312,14 +288,14 @@ func unscheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, sco } // TODO (rzhang): fix me, compare the annotations when we know its previous state wantUnscheduled := []placementv1beta1.BindingObj{} - if namespace == "" { + if placementKey.Namespace == "" { for _, name := range clusters { score := scoreByCluster[name] binding := &placementv1beta1.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ Name: bindingNamePlaceholder, Labels: map[string]string{ - placementv1beta1.PlacementTrackingLabel: placementName, + placementv1beta1.PlacementTrackingLabel: placementKey.Name, }, Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, }, @@ -342,9 +318,9 @@ func unscheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, sco binding := &placementv1beta1.ResourceBinding{ ObjectMeta: metav1.ObjectMeta{ Name: bindingNamePlaceholder, - Namespace: namespace, + Namespace: placementKey.Namespace, Labels: map[string]string{ - placementv1beta1.PlacementTrackingLabel: placementName, + placementv1beta1.PlacementTrackingLabel: placementKey.Name, }, Finalizers: []string{placementv1beta1.SchedulerBindingCleanupFinalizer}, }, @@ -369,7 +345,7 @@ func unscheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, sco // Verify that binding names are formatted correctly. for _, binding := range bindingList.GetBindingObjs() { - wantPrefix := fmt.Sprintf("%s-%s", placementName, binding.GetBindingSpec().TargetCluster) + wantPrefix := fmt.Sprintf("%s-%s", placementKey.Name, binding.GetBindingSpec().TargetCluster) if !strings.HasPrefix(binding.GetName(), wantPrefix) { return fmt.Errorf("binding name %s is not formatted correctly; want prefix %s", binding.GetName(), wantPrefix) } @@ -379,7 +355,7 @@ func unscheduledBindingsCreatedOrUpdatedForClustersActual(clusters []string, sco } } -func noBindingsCreatedForClustersActual(clusters []string, placementKey string) func() error { +func noBindingsCreatedForClustersActual(clusters []string, placementKey types.NamespacedName) func() error { // Build a map for clusters for quicker lookup. 
clusterMap := map[string]bool{} for _, name := range clusters { @@ -404,7 +380,7 @@ func noBindingsCreatedForClustersActual(clusters []string, placementKey string) } } -func pickFixedPolicySnapshotStatusUpdatedActual(valid, invalidOrNotFound []string, policySnapshotKey string) func() error { +func pickFixedPolicySnapshotStatusUpdatedActual(valid, invalidOrNotFound []string, policySnapshotKey types.NamespacedName) func() error { return func() error { policySnapshot, err := getSchedulingPolicySnapshot(policySnapshotKey) if err != nil { @@ -460,7 +436,7 @@ func pickFixedPolicySnapshotStatusUpdatedActual(valid, invalidOrNotFound []strin } } -func pickAllPolicySnapshotStatusUpdatedActual(scored, filtered []string, policySnapshotKey string) func() error { +func pickAllPolicySnapshotStatusUpdatedActual(scored, filtered []string, policySnapshotKey types.NamespacedName) func() error { return func() error { policySnapshot, err := getSchedulingPolicySnapshot(policySnapshotKey) if err != nil { @@ -509,7 +485,7 @@ func pickAllPolicySnapshotStatusUpdatedActual(scored, filtered []string, policyS } } -func hasNScheduledOrBoundBindingsPresentActual(placementKey string, clusters []string) func() error { +func hasNScheduledOrBoundBindingsPresentActual(placementKey types.NamespacedName, clusters []string) func() error { clusterMap := make(map[string]bool) for _, name := range clusters { clusterMap[name] = true @@ -547,7 +523,7 @@ func pickNPolicySnapshotStatusUpdatedActual( numOfClusters int, picked, notPicked, filtered []string, scoreByCluster map[string]*placementv1beta1.ClusterScore, - policySnapshotKey string, + policySnapshotKey types.NamespacedName, opts []cmp.Option, ) func() error { return func() error { diff --git a/test/scheduler/pickall_integration_test.go b/test/scheduler/pickall_integration_test.go index 7d71e5998..8e60d4ff8 100644 --- a/test/scheduler/pickall_integration_test.go +++ b/test/scheduler/pickall_integration_test.go @@ -25,19 +25,20 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" ) var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { Context("pick all valid clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP with no scheduling policy specified, along with its associated policy snapshot. 
@@ -45,37 +46,38 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all healthy clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(healthyClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(healthyClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for unhealthy clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(unhealthyClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unhealthyClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(healthyClusters, unhealthyClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(healthyClusters, unhealthyClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) // This is a serial test as adding a new member cluster may interrupt other test cases. Context("add a new healthy cluster", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) // Prepare a new cluster to avoid interrupting other concurrently running test cases. @@ -92,7 +94,7 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP with no scheduling policy specified, along with its associated policy snapshot. 
@@ -106,14 +108,14 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { }) It("should create scheduled bindings for the newly recovered cluster", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newUnhealthyMemberClusterName) @@ -123,6 +125,7 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { // This is a serial test as adding a new member cluster may interrupt other test cases. Context("a healthy cluster becomes unhealthy", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) // Prepare a new cluster to avoid interrupting other concurrently running test cases. @@ -139,7 +142,7 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP with no scheduling policy specified, along with its associated policy snapshot. @@ -152,7 +155,7 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { markClusterAsHealthy(newUnhealthyMemberClusterName) // Verify that a binding has been created for the cluster. 
- scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") @@ -161,14 +164,14 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { }) It("should not remove binding for the cluster that just becomes unhealthy", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newUnhealthyMemberClusterName) @@ -179,11 +182,12 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() { var _ = Describe("scheduling CRPs of the PickAll placement type", func() { Context("pick all valid clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. 
@@ -191,36 +195,37 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all healthy clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(healthyClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(healthyClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for unhealthy clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(unhealthyClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unhealthyClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(healthyClusters, unhealthyClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(healthyClusters, unhealthyClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with specific affinities (single term, multiple selectors)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) wantTargetClusters := []string{ @@ -239,7 +244,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. 
@@ -272,36 +277,37 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with specific affinities (multiple terms, single selector)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) wantTargetClusters := []string{ @@ -320,7 +326,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. 
@@ -360,36 +366,37 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("affinities updated", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, crpName, 2) @@ -420,7 +427,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -455,12 +462,12 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { createPickAllCRPWithPolicySnapshot(crpName, policySnapshotName1, policy) // Verify that bindings have been created as expected. 
- scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters1, zeroScoreByCluster, crpName, policySnapshotName1) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters1, zeroScoreByCluster, crpKey, policySnapshotName1) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") // Bind some bindings. - markBindingsAsBoundForClusters(crpName, boundClusters) + markBindingsAsBoundForClusters(crpKey, boundClusters) // Update the CRP with a new affinity. affinity := &placementv1beta1.Affinity{ @@ -496,42 +503,43 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create/update scheduled bindings for newly matched clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(scheduledClusters, zeroScoreByCluster, crpName, policySnapshotName2) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(scheduledClusters, zeroScoreByCluster, crpKey, policySnapshotName2) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should update bound bindings for newly matched clusters", func() { - boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(boundClusters, zeroScoreByCluster, crpName, policySnapshotName2) + boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(boundClusters, zeroScoreByCluster, crpKey, policySnapshotName2) Eventually(boundBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") Consistently(boundBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters2, wantIgnoredClusters2, policySnapshotName2) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters2, wantIgnoredClusters2, types.NamespacedName{Name: policySnapshotName2}) 
Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("no matching clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) wantIgnoredClusters := []string{ @@ -548,7 +556,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") policy := &placementv1beta1.PlacementPolicy{ @@ -588,25 +596,25 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual([]string{}, wantIgnoredClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual([]string{}, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. 
- ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) }) @@ -614,9 +622,9 @@ var _ = Describe("scheduling CRPs of the PickAll placement type", func() { var _ = Describe("scheduling RPs of the PickAll placement type", func() { Context("pick all valid clusters", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} BeforeAll(func() { // Ensure that no bindings have been created so far. @@ -658,9 +666,9 @@ var _ = Describe("scheduling RPs of the PickAll placement type", func() { Context("pick clusters with specific affinities (single term, multiple selectors)", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} wantTargetClusters := []string{ memberCluster1EastProd, @@ -741,9 +749,9 @@ var _ = Describe("scheduling RPs of the PickAll placement type", func() { Context("pick clusters with specific affinities (multiple terms, single selector)", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} wantTargetClusters := []string{ memberCluster3EastCanary, @@ -831,10 +839,10 @@ var _ = Describe("scheduling RPs of the PickAll placement type", func() { Context("affinities updated", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 2) - policySnapshotKey2 := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName2) + policySnapshotKey2 := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName2} wantTargetClusters1 := []string{ memberCluster3EastCanary, @@ -975,9 +983,9 @@ var _ = Describe("scheduling RPs of the PickAll placement type", func() { Context("no matching clusters", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - 
policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} wantIgnoredClusters := []string{ memberCluster1EastProd, diff --git a/test/scheduler/pickfixed_integration_test.go b/test/scheduler/pickfixed_integration_test.go index 8a8928845..b1054fe78 100644 --- a/test/scheduler/pickfixed_integration_test.go +++ b/test/scheduler/pickfixed_integration_test.go @@ -22,14 +22,15 @@ package tests import ( "fmt" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" ) var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { Context("with valid target clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} targetClusters := []string{ memberCluster1EastProd, @@ -41,7 +42,7 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create the CRP and its associated policy snapshot. @@ -49,29 +50,30 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for valid target clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters, nilScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters, nilScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should report status correctly", func() { - statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters, []string{}, policySnapshotName) + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters, []string{}, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") }) AfterAll(func() { - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("with both valid and invalid/non-existent target clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := 
types.NamespacedName{Name: crpName} targetClusters := []string{ memberCluster1EastProd, @@ -100,7 +102,7 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create the CRP and its associated policy snapshot. @@ -108,35 +110,36 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for valid target clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(validClusters, nilScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(validClusters, nilScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create bindings for invalid target clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(invalidClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(invalidClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Created a binding for invalid or not found cluster") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Created a binding for invalid or not found cluster") }) It("should report status correctly", func() { - statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(validClusters, invalidClusters, policySnapshotName) + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(validClusters, invalidClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update policy snapshot status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update policy snapshot status") }) AfterAll(func() { - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("policy snapshot refresh with added clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} targetClusters1 := []string{ memberCluster1EastProd, @@ -167,55 +170,56 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create the CRP and its associated policy snapshot. createPickFixedCRPWithPolicySnapshot(crpName, targetClusters1, policySnapshotName1) // Make sure that the bindings have been created. - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters1, nilScoreByCluster, crpName, policySnapshotName1) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters1, nilScoreByCluster, crpKey, policySnapshotName1) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") // Mark all previously created bindings as bound. - markBindingsAsBoundForClusters(crpName, previouslyBoundClusters) + markBindingsAsBoundForClusters(crpKey, previouslyBoundClusters) // Update the CRP with new target clusters and refresh scheduling policy snapshots. updatePickFixedCRPWithNewTargetClustersAndRefreshSnapshots(crpName, targetClusters2, policySnapshotName1, policySnapshotName2) }) It("should create scheduled bindings for newly added valid target clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(newScheduledClusters, nilScoreByCluster, crpName, policySnapshotName2) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(newScheduledClusters, nilScoreByCluster, crpKey, policySnapshotName2) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should update bound bindings for previously added valid target clusters", func() { - boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(previouslyBoundClusters, nilScoreByCluster, crpName, policySnapshotName2) + boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(previouslyBoundClusters, nilScoreByCluster, crpKey, policySnapshotName2) Eventually(boundBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") Consistently(boundBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") }) It("should update scheduled bindings for previously added valid target clusters", func() { - scheduledBindingsUpdatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(previouslyScheduledClusters, nilScoreByCluster, crpName, policySnapshotName2) + scheduledBindingsUpdatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(previouslyScheduledClusters, nilScoreByCluster, crpKey, policySnapshotName2) Eventually(scheduledBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings") Consistently(scheduledBindingsUpdatedActual, consistentlyDuration, 
consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings") }) It("should report status correctly", func() { - statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters2, []string{}, policySnapshotName2) + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters2, []string{}, types.NamespacedName{Name: policySnapshotName2}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update policy snapshot status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update policy snapshot status") }) AfterAll(func() { - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("policy snapshot refresh with removed clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} targetClusters1 := []string{ memberCluster1EastProd, @@ -245,44 +249,44 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create the CRP and its associated policy snapshot. createPickFixedCRPWithPolicySnapshot(crpName, targetClusters1, policySnapshotName1) // Make sure that the bindings have been created. - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters1, nilScoreByCluster, crpName, policySnapshotName1) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters1, nilScoreByCluster, crpKey, policySnapshotName1) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") // Mark some previously created bindings as bound. - markBindingsAsBoundForClusters(crpName, previouslyBoundClusters) + markBindingsAsBoundForClusters(crpKey, previouslyBoundClusters) // Update the CRP with new target clusters and refresh scheduling policy snapshots. 
updatePickFixedCRPWithNewTargetClustersAndRefreshSnapshots(crpName, targetClusters2, policySnapshotName1, policySnapshotName2) }) It("should create scheduled bindings for newly added valid target clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(scheduledClusters, nilScoreByCluster, crpName, policySnapshotName2) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(scheduledClusters, nilScoreByCluster, crpKey, policySnapshotName2) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should mark bindings as unscheduled for removed target clusters", func() { - unscheduledBindingsCreatedActual := unscheduledBindingsCreatedOrUpdatedForClustersActual(unscheduledClusters, nilScoreByCluster, crpName, policySnapshotName1) + unscheduledBindingsCreatedActual := unscheduledBindingsCreatedOrUpdatedForClustersActual(unscheduledClusters, nilScoreByCluster, crpKey, policySnapshotName1) Eventually(unscheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to mark bindings as unscheduled") Consistently(unscheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to mark bindings as unscheduled") }) It("should report status correctly", func() { - statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(scheduledClusters, []string{}, policySnapshotName2) + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(scheduledClusters, []string{}, types.NamespacedName{Name: policySnapshotName2}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update policy snapshot status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update policy snapshot status") }) AfterAll(func() { - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) }) @@ -290,7 +294,7 @@ var _ = Describe("scheduling CRPs of the PickFixed placement type", func() { var _ = Describe("scheduling RPs of the PickFixed placement type", func() { Context("with valid target clusters", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} targetClusters := []string{ memberCluster1EastProd, @@ -299,7 +303,7 @@ var _ = Describe("scheduling RPs of the PickFixed placement type", func() { } policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} BeforeAll(func() { // Ensure that no bindings have been created so far. 
@@ -334,7 +338,7 @@ var _ = Describe("scheduling RPs of the PickFixed placement type", func() { Context("with both valid and invalid/non-existent target clusters", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} targetClusters := []string{ memberCluster1EastProd, @@ -360,7 +364,7 @@ var _ = Describe("scheduling RPs of the PickFixed placement type", func() { } policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} BeforeAll(func() { // Ensure that no bindings have been created so far. @@ -401,7 +405,7 @@ var _ = Describe("scheduling RPs of the PickFixed placement type", func() { Context("policy snapshot refresh with added clusters", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} targetClusters1 := []string{ memberCluster1EastProd, @@ -429,7 +433,7 @@ var _ = Describe("scheduling RPs of the PickFixed placement type", func() { policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 2) - policySnapshotKey2 := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName2) + policySnapshotKey2 := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName2} BeforeAll(func() { // Ensure that no bindings have been created so far. @@ -482,7 +486,7 @@ var _ = Describe("scheduling RPs of the PickFixed placement type", func() { Context("policy snapshot refresh with removed clusters", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} targetClusters1 := []string{ memberCluster1EastProd, @@ -509,7 +513,7 @@ var _ = Describe("scheduling RPs of the PickFixed placement type", func() { policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 2) - policySnapshotKey2 := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName2) + policySnapshotKey2 := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName2} BeforeAll(func() { // Ensure that no bindings have been created so far. 
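Note on the recurring pattern in these scheduler integration test hunks: helpers that previously took a bare placement or policy snapshot name, or a string key built via controller.GetObjectKeyFromNamespaceName, now take a types.NamespacedName. Cluster-scoped CRPs build a key with only Name set, while namespaced RPs also set Namespace, so the same helpers serve both placement kinds. The sketch below shows the assumed shape of one such helper after the change; the helper bodies live in the suite's shared utility files, which this patch does not touch, so the function body here is illustrative only.

// Illustrative sketch only; the real helper is defined elsewhere in the
// scheduler test suite and is not part of this patch.
package tests

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

// noBindingsCreatedForPlacementActual is assumed to accept the placement key
// after this change, covering both cluster-scoped CRPs (Namespace left empty)
// and namespaced RPs (Namespace set to the test namespace).
func noBindingsCreatedForPlacementActual(placementKey types.NamespacedName) func() error {
	return func() error {
		// The real helper lists the bindings labeled with this placement and
		// fails if any exist; that lookup is omitted from this sketch.
		if placementKey.Name == "" {
			return fmt.Errorf("placement key %v has an empty name", placementKey)
		}
		return nil
	}
}

Call sites then build the key once per Context, e.g. types.NamespacedName{Name: crpName} for CRPs and types.NamespacedName{Namespace: testNamespace, Name: rpName} for RPs, instead of threading bare name strings through every assertion.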
diff --git a/test/scheduler/pickn_integration_test.go b/test/scheduler/pickn_integration_test.go index 28f823208..4bda74385 100644 --- a/test/scheduler/pickn_integration_test.go +++ b/test/scheduler/pickn_integration_test.go @@ -32,7 +32,6 @@ import ( "k8s.io/utils/ptr" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" ) var ( @@ -46,6 +45,7 @@ var ( var _ = Describe("scheduling CRPs of the PickN placement type", func() { Context("pick N clusters with no affinities/topology spread constraints specified", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(3) // Less than the number of clusters available (7) in the fleet. @@ -71,7 +71,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -83,36 +83,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, 
zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("not enough clusters to pick", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(10) // More than the number of clusters available (7) in the fleet. @@ -136,7 +137,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -148,43 +149,44 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, 
pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick 0 clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(0) BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -196,30 +198,31 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, []string{}) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, []string{}) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), []string{}, []string{}, []string{}, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), []string{}, []string{}, []string{}, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with required affinity", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(2) @@ -240,7 +243,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -276,36 +279,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with required affinity, multiple terms", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(4) @@ -327,7 +331,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -379,36 +383,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, []string{}, wantFilteredClusters, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with preferred affinity", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(4) @@ -447,7 +452,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -484,36 +489,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with preferred affinity, multiple terms", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(4) @@ -555,7 +561,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -611,36 +617,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with required topology spread constraints", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(2) @@ -689,7 +696,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -708,36 +715,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with required topology spread constraints, multiple terms", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(2) @@ -812,7 +820,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -836,36 +844,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with preferred topology spread constraints", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(2) @@ -917,7 +926,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -936,36 +945,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with preferred topology spread constraints, multiple terms", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(2) @@ -1049,7 +1059,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1073,36 +1083,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with mixed affinities and topology spread constraints, required only", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(2) @@ -1152,7 +1163,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1192,36 +1203,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with mixed affinities and topology spread constraints, preferred only", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(6) @@ -1297,7 +1309,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1332,36 +1344,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick with mixed affinities and topology spread constraints, mixed", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClusters := int32(3) @@ -1427,7 +1440,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1478,36 +1491,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("upscaling", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClustersBefore := int32(1) @@ -1538,7 +1552,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1549,11 +1563,11 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { createPickNCRPWithPolicySnapshot(crpName, policySnapshotName, policy) // Verify that scheduling has been completed. - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersBefore) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersBefore) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersBefore, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersBefore, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") @@ -1575,36 +1589,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersAfter) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersAfter) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - 
crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClustersAfter, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClustersAfter, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("downscaling", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numOfClustersBefore := int32(3) @@ -1629,7 +1644,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1640,11 +1655,11 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { createPickNCRPWithPolicySnapshot(crpName, policySnapshotName, policy) // Verify that scheduling has been completed. 
- hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersBefore) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersBefore) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersBefore, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersBefore, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") @@ -1666,36 +1681,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersAfter) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersAfter) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClustersAfter, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClustersAfter, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") 
Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("affinities and topology spread constraints updated", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotNameBefore := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) policySnapshotNameAfter := fmt.Sprintf(policySnapshotNameTemplate, crpName, 2) @@ -1813,7 +1829,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1863,11 +1879,11 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { createPickNCRPWithPolicySnapshot(crpName, policySnapshotNameBefore, policy) // Verify that scheduling has been completed. - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersBefore) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersBefore) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersBefore, scoreByClusterBefore, crpName, policySnapshotNameBefore) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersBefore, scoreByClusterBefore, crpKey, policySnapshotNameBefore) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") @@ -1899,31 +1915,31 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersAfter) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersAfter) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, 
consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, scoreByClusterAfter, crpName, policySnapshotNameAfter) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, scoreByClusterAfter, crpKey, policySnapshotNameAfter) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClustersAfter, scoreByClusterAfter, policySnapshotNameAfter, pickNCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClustersAfter, scoreByClusterAfter, types.NamespacedName{Name: policySnapshotNameAfter}, pickNCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) }) @@ -1931,9 +1947,9 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { var _ = Describe("scheduling RPs of the PickN placement type", func() { Context("pick N clusters with no affinities/topology spread constraints specified", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} numOfClusters := int32(3) // Less than the number of clusters available (7) in the fleet. @@ -2000,9 +2016,9 @@ var _ = Describe("scheduling RPs of the PickN placement type", func() { Context("not enough clusters to pick", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} numOfClusters := int32(10) // More than the number of clusters available (7) in the fleet. 
@@ -2067,9 +2083,9 @@ var _ = Describe("scheduling RPs of the PickN placement type", func() {
 
 	Context("pick 0 clusters", Ordered, func() {
 		rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
-		rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName)
+		rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName}
 		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1)
-		policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName)
+		policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName}
 
 		numOfClusters := int32(0)
 
@@ -2111,9 +2127,9 @@ var _ = Describe("scheduling RPs of the PickN placement type", func() {
 
 	Context("pick with required affinity", Ordered, func() {
 		rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
-		rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName)
+		rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName}
 		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1)
-		policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName)
+		policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName}
 
 		numOfClusters := int32(2)
 
@@ -2199,9 +2215,9 @@ var _ = Describe("scheduling RPs of the PickN placement type", func() {
 
 	Context("pick with required affinity, multiple terms", Ordered, func() {
 		rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
-		rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName)
+		rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName}
 		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1)
-		policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName)
+		policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName}
 
 		numOfClusters := int32(4)
 
@@ -2304,9 +2320,9 @@ var _ = Describe("scheduling RPs of the PickN placement type", func() {
 
 	Context("pick with preferred affinity", Ordered, func() {
 		rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
-		rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName)
+		rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName}
 		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1)
-		policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName)
+		policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName}
 
 		numOfClusters := int32(4)
 
diff --git a/test/scheduler/property_based_scheduling_integration_test.go b/test/scheduler/property_based_scheduling_integration_test.go
index a3458023c..45223cde4 100644
--- a/test/scheduler/property_based_scheduling_integration_test.go
+++ b/test/scheduler/property_based_scheduling_integration_test.go
@@ -33,7 +33,6 @@ import (
 	clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1"
 	placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1"
 	"github.com/kubefleet-dev/kubefleet/pkg/propertyprovider"
-	"github.com/kubefleet-dev/kubefleet/pkg/utils/controller"
 )
 
 const (
@@ -43,6 +42,7 @@
 var _ = Describe("scheduling CRPs of the PickAll placement type using cluster properties", func() {
 	Context("pick clusters with specific properties (single term, multiple expressions)", Ordered, func() {
 		crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+		crpKey := types.NamespacedName{Name: crpName}
 		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1)
 
 		wantTargetClusters := []string{
@@ -61,7 +61,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr
 
 		BeforeAll(func() {
 			// Ensure that no bindings have been created so far.
-			noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName)
+			noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey)
 			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
 
 			// Create a CRP of the PickAll placement type, along with its associated policy snapshot.
@@ -114,36 +114,37 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr
 		})
 
 		It("should add scheduler cleanup finalizer to the CRP", func() {
-			finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName)
+			finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey)
 			Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP")
 		})
 
 		It("should create scheduled bindings for all matching clusters", func() {
-			scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpName, policySnapshotName)
+			scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpKey, policySnapshotName)
 			Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
 			Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
 		})
 
 		It("should not create any binding for non-matching clusters", func() {
-			noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName)
+			noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey)
 			Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
 			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
 		})
 
 		It("should report status correctly", func() {
-			statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotName)
+			statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName})
 			Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status")
 			Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status")
 		})
 
 		AfterAll(func() {
 			// Delete the CRP.
- ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with specific properties (multiple terms, single expression)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) wantTargetClusters := []string{ @@ -162,7 +163,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -233,36 +234,37 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. 
- ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with both label and property selectors (single term)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) wantTargetClusters := []string{ @@ -281,7 +283,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -318,36 +320,37 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. 
- ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with both label and property selectors (multiple terms)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) wantTargetClusters := []string{ @@ -366,7 +369,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -413,36 +416,37 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. 
-			ensurePlacementAndAllRelatedResourcesDeletion(crpName)
+			ensurePlacementAndAllRelatedResourcesDeletion(crpKey)
 		})
 	})
 
 	Context("property selector updated", Ordered, func() {
 		crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+		crpKey := types.NamespacedName{Name: crpName}
 		policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1)
 		policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, crpName, 2)
 
@@ -499,7 +503,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr
 
 		BeforeAll(func() {
 			// Ensure that no bindings have been created so far.
-			noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName)
+			noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey)
 			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
 
 			// Create a CRP of the PickAll placement type, along with its associated policy snapshot.
@@ -538,30 +542,30 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr
 		})
 
 		It("should add scheduler cleanup finalizer to the CRP", func() {
-			finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName)
+			finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey)
 			Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP")
 		})
 
 		It("should create scheduled bindings for all matching clusters", func() {
-			scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantScheduledClusters1, zeroScoreByCluster, crpName, policySnapshotName1)
+			scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantScheduledClusters1, zeroScoreByCluster, crpKey, policySnapshotName1)
 			Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
 			Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
 		})
 
 		It("should not create any binding for non-matching clusters", func() {
-			noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters1, crpName)
+			noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters1, crpKey)
 			Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
 			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
 		})
 
 		It("should report status correctly", func() {
-			statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantScheduledClusters1, wantIgnoredClusters1, policySnapshotName1)
+			statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantScheduledClusters1, wantIgnoredClusters1, types.NamespacedName{Name: policySnapshotName1})
 			Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status")
 			Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status")
 		})
 
 		It("can mark some bindings as bound", func() {
-			markBindingsAsBoundForClusters(crpName, wantBoundClusters1)
+			markBindingsAsBoundForClusters(crpKey, wantBoundClusters1)
 		})
 
 		It("can update the scheduling policy with a new property selector", func() {
@@ -597,43 +601,44 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr
 		})
 
 		It("should create/update scheduled bindings for newly matched clusters", func() {
-			scheduledBindingsCreatedOrUpdatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantScheduledClusters2, zeroScoreByCluster, crpName, policySnapshotName2)
+			scheduledBindingsCreatedOrUpdatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantScheduledClusters2, zeroScoreByCluster, crpKey, policySnapshotName2)
 			Eventually(scheduledBindingsCreatedOrUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create/update the expected set of bindings")
 			Consistently(scheduledBindingsCreatedOrUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create/update the expected set of bindings")
 		})
 
 		It("should update bound bindings for newly matched clusters", func() {
-			boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(wantBoundClusters2, zeroScoreByCluster, crpName, policySnapshotName2)
+			boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(wantBoundClusters2, zeroScoreByCluster, crpKey, policySnapshotName2)
 			Eventually(boundBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings")
 			Consistently(boundBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings")
 		})
 
 		It("should not create any binding for non-matching clusters", func() {
-			noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, crpName)
+			noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, crpKey)
 			Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
 			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
 		})
 
 		It("should mark bindings as unscheduled for clusters that were unselected", func() {
-			unscheduledBindingsUpdatedActual := unscheduledBindingsCreatedOrUpdatedForClustersActual(wantUnscheduledClusters2, zeroScoreByCluster, crpName, policySnapshotName1)
+			unscheduledBindingsUpdatedActual := unscheduledBindingsCreatedOrUpdatedForClustersActual(wantUnscheduledClusters2, zeroScoreByCluster, crpKey, policySnapshotName1)
 			Eventually(unscheduledBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings")
 			Consistently(unscheduledBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings")
 		})
 
 		It("should report status correctly", func() {
-			statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantUnselectedClusters, policySnapshotName2)
+			statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantUnselectedClusters, types.NamespacedName{Name: policySnapshotName2})
 			Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status")
 			Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status")
 		})
 
 		AfterAll(func() {
 			// Delete the CRP.
- ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("no matching clusters", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) wantIgnoredClusters := []string{ @@ -650,7 +655,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -682,25 +687,25 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual([]string{}, wantIgnoredClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual([]string{}, wantIgnoredClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) @@ -708,6 +713,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr // interfere with other specs if run in parallel. Context("cluster properties refreshed", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) // wantTargetClusters1 and wantIgnoredClusters1 are the picked and unpicked clusters @@ -744,7 +750,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -776,24 +782,24 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters1, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters1, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters1, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters1, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters1, wantIgnoredClusters1, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters1, wantIgnoredClusters1, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) @@ -833,26 +839,26 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr }) It("should create scheduled bindings for newly matched clusters while retaining old ones", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters2, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters2, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching 
clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters2, wantIgnoredClusters2, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters2, wantIgnoredClusters2, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Reset the cluster properties. for idx := range wantTargetClusters2 { @@ -865,6 +871,7 @@ var _ = Describe("scheduling CRPs of the PickAll placement type using cluster pr var _ = Describe("scheduling CRPs of the PickN placement type using cluster properties", func() { Context("pick clusters with specific properties (single sorter, ascending)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numberOfClusters := 3 @@ -916,7 +923,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -943,36 +950,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with specific properties (single sorter, descending)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numberOfClusters := 3 @@ -1030,7 +1038,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. 
@@ -1057,31 +1065,31 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) @@ -1089,6 +1097,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop // interfere with other specs if run in parallel. Context("pick clusters with specific properties (single sorter, same property value across the board)", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numberOfClusters := 3 @@ -1136,7 +1145,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop } // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1163,31 +1172,31 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Reset the cluster properties. 
for clusterName := range propertiesByCluster { @@ -1198,6 +1207,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop Context("pick clusters with specific properties (single sorter, specified property not available across the board)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numberOfClusters := 3 @@ -1225,7 +1235,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1252,31 +1262,31 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, zeroScoreByCluster, policySnapshotName, pickNCmpOpts) + statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. 
- ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Reset the cluster properties. for clusterName := range propertiesByCluster { @@ -1287,6 +1297,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop Context("pick clusters with specific properties (multiple sorters)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numberOfClusters := 3 @@ -1344,7 +1355,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. @@ -1389,36 +1400,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, 
consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with both label selector and property sorter (single preferred term)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numberOfClusters := 4 @@ -1461,7 +1473,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -1493,36 +1505,37 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) 
AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) Context("pick clusters with both label selectors and property sorters (multiple preferred terms)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) numberOfClusters := 4 @@ -1571,7 +1584,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickAll placement type, along with its associated policy snapshot. @@ -1617,31 +1630,31 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all matching clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for non-matching clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantNotPickedOrFilteredClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, pickNCmpOpts) + statusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(numberOfClusters, wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, pickNCmpOpts) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. 
- ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) }) @@ -1649,9 +1662,9 @@ var _ = Describe("scheduling CRPs of the PickN placement type using cluster prop var _ = Describe("scheduling RPs of the PickAll placement type using cluster properties", func() { Context("pick clusters with specific properties (single term, multiple expressions)", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} wantTargetClusters := []string{ memberCluster3EastCanary, @@ -1752,9 +1765,9 @@ var _ = Describe("scheduling RPs of the PickAll placement type using cluster pro Context("pick clusters with specific properties (multiple terms, single expression)", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} wantTargetClusters := []string{ memberCluster1EastProd, @@ -1873,9 +1886,9 @@ var _ = Describe("scheduling RPs of the PickAll placement type using cluster pro Context("pick clusters with both label and property selectors (single term)", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} wantTargetClusters := []string{ memberCluster2EastProd, @@ -1960,9 +1973,9 @@ var _ = Describe("scheduling RPs of the PickAll placement type using cluster pro Context("pick clusters with both label and property selectors (multiple terms)", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} wantTargetClusters := []string{ memberCluster5CentralProd, @@ -2057,11 +2070,11 @@ var _ = Describe("scheduling RPs of the PickAll placement type using cluster pro Context("property selector updated", Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} 
policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey1 := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName1) + policySnapshotKey1 := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName1} policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, rpName, 2) - policySnapshotKey2 := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName2) + policySnapshotKey2 := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName2} // wantScheduledClusters1, wantIgnoredClusters1, and wantBoundClusters1 are // the clusters picked (bound) and unpicked respectively with the original diff --git a/test/scheduler/tainttoleration_integration_test.go b/test/scheduler/tainttoleration_integration_test.go index a39e71439..3d3ad79cc 100644 --- a/test/scheduler/tainttoleration_integration_test.go +++ b/test/scheduler/tainttoleration_integration_test.go @@ -30,7 +30,6 @@ import ( clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" ) var ( @@ -47,13 +46,14 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // This is a serial test as adding taints can affect other tests Context("pickFixed, valid target clusters with taints", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) targetClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster6WestProd} taintClusters := targetClusters BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Add taints to some member clusters 1, 4, 6 from all regions. 
@@ -64,18 +64,18 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for valid target clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters, nilScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(targetClusters, nilScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should report status correctly", func() { - statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters, []string{}, policySnapshotName) + statusUpdatedActual := pickFixedPolicySnapshotStatusUpdatedActual(targetClusters, []string{}, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report correct policy snapshot status") }) @@ -84,13 +84,14 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // Remove taints removeTaintsFromMemberClusters(taintClusters) // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) // This is a serial test as adding taints can affect other tests. Context("pick all valid cluster with no taints, ignore valid cluster with taints, CRP with no matching toleration", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) taintClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary} selectedClusters := []string{memberCluster2EastProd, memberCluster3EastCanary, memberCluster5CentralProd, memberCluster6WestProd} @@ -98,7 +99,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Add taints to some member clusters 1, 4, 7 from all regions. 
@@ -109,24 +110,24 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all healthy clusters with no taints", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for unhealthy clusters, healthy cluster with taints", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) @@ -135,13 +136,14 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // Remove taints removeTaintsFromMemberClusters(taintClusters) // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) // This is a serial test as adding taints can affect other tests. Context("pick all valid cluster with no taints, ignore valid cluster with taints, then remove taints after which CRP selects all clusters", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) taintClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary} selectedClusters1 := []string{memberCluster2EastProd, memberCluster3EastCanary, memberCluster5CentralProd, memberCluster6WestProd} @@ -151,7 +153,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Add taints to some member clusters 1, 4, 7 from all regions. @@ -162,24 +164,24 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for all healthy clusters with no taints", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters1, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters1, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for unhealthy clusters, healthy cluster with taints", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters1, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters1, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters1, unSelectedClusters1, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters1, unSelectedClusters1, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) @@ -190,32 +192,33 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should create scheduled bindings for all healthy clusters with no taints", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters2, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters2, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for unhealthy clusters, healthy 
cluster with taints", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters2, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters2, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters2, unSelectedClusters2, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters2, unSelectedClusters2, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) // This is a serial test as adding taints, tolerations can affect other tests. Context("pick all valid cluster with tolerated taints, ignore valid clusters with taints, CRP has some matching tolerations on creation", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) taintClusters := []string{memberCluster1EastProd, memberCluster2EastProd, memberCluster6WestProd} tolerateClusters := []string{memberCluster1EastProd, memberCluster2EastProd} @@ -224,7 +227,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Add taints to some member clusters 1, 2, 6 from all regions. 
@@ -262,24 +265,24 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for clusters with tolerated taints", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for clusters with untolerated taints", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) @@ -288,13 +291,14 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // Remove taints removeTaintsFromMemberClusters(taintClusters) // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) // This is a serial test as adding taints, tolerations can affect other tests. Context("pickAll valid cluster without taints, add a taint to a cluster that's already picked", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) selectedClusters := healthyClusters unSelectedClusters := []string{memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} @@ -302,7 +306,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") policy := &placementv1beta1.PlacementPolicy{ @@ -313,24 +317,24 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create scheduled bindings for valid clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for valid clusters", func() { - noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) @@ -341,19 +345,19 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should create scheduled bindings for valid clusters without taints, valid clusters with taint", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(selectedClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) It("should not create any binding for valid clusters without taints, valid clusters with taint", func() { - 
noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, crpName) + noBindingsCreatedActual := noBindingsCreatedForClustersActual(unSelectedClusters, crpKey) Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") }) It("should report status correctly", func() { - statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, policySnapshotName) + statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(selectedClusters, unSelectedClusters, types.NamespacedName{Name: policySnapshotName}) Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status") Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status") }) @@ -362,13 +366,14 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // Remove taints removeTaintsFromMemberClusters(taintClusters) // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) // This is a serial test as adding taints, tolerations can affect other tests. Context("pick N clusters with affinity specified, ignore valid clusters with taints, CRP has some matching tolerations after update", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) policySnapshotNameAfter := fmt.Sprintf(policySnapshotNameTemplate, crpName, 2) numOfClusters := int32(2) // Less than the number of clusters available (7) in the fleet. @@ -383,7 +388,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Add taints to some member clusters 1, 2. 
@@ -422,24 +427,24 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, []string{}) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, []string{}) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual([]string{}, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual([]string{}, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(2, []string{}, []string{}, wantFilteredClusters, zeroScoreByCluster, policySnapshotName, taintTolerationCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(2, []string{}, []string{}, wantFilteredClusters, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotName}, taintTolerationCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) @@ -450,19 +455,19 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersAfter) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersAfter) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual([]string{}, zeroScoreByCluster, crpName, policySnapshotNameAfter) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual([]string{}, zeroScoreByCluster, crpKey, policySnapshotNameAfter) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled 
bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(2, wantPickedClustersAfter, []string{}, wantFilteredClustersAfter, zeroScoreByCluster, policySnapshotNameAfter, taintTolerationCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(2, wantPickedClustersAfter, []string{}, wantFilteredClustersAfter, zeroScoreByCluster, types.NamespacedName{Name: policySnapshotNameAfter}, taintTolerationCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) @@ -471,20 +476,21 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // Remove taints removeTaintsFromMemberClusters(taintClusters) // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) }) }) // This is a serial test as adding a new member cluster may interrupt other test cases. Context("pickAll, add a new healthy cluster with taint", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) // Prepare a new cluster to avoid interrupting other concurrently running test cases. newUnhealthyMemberClusterName := fmt.Sprintf(provisionalClusterNameTemplate, GinkgoParallelProcess()) BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP with no scheduling policy specified, along with its associated policy snapshot, no tolerations specified. @@ -498,14 +504,14 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should create scheduled bindings for existing clusters, and exclude new cluster with taint", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(healthyClusters, zeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(healthyClusters, zeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Delete the provisional cluster. 
ensureProvisionalClusterDeletion(newUnhealthyMemberClusterName) }) @@ -514,6 +520,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // This is a serial test as adding a new member cluster may interrupt other test cases. Context("pickAll, add a new healthy cluster with taint and matching toleration", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) // Prepare a new cluster to avoid interrupting other concurrently running test cases. newUnhealthyMemberClusterName := fmt.Sprintf(provisionalClusterNameTemplate, GinkgoParallelProcess()) @@ -527,7 +534,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP with no scheduling policy specified, along with its associated policy snapshot, and toleration for new cluster. @@ -544,14 +551,14 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should create scheduled bindings for the newly recovered cluster with tolerated taint", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(updatedHealthyClusters, updatedZeroScoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newUnhealthyMemberClusterName) }) @@ -560,6 +567,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // This is a serial test as adding a new member cluster may interrupt other test cases. Context("pickN with required topology spread constraints, add new cluster with taint, upscaling doesn't pick new cluster", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) // Prepare a new cluster to avoid interrupting other concurrently running test cases. newClusterName := fmt.Sprintf(provisionalClusterNameTemplate, GinkgoParallelProcess()) @@ -620,7 +628,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. 
- noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot, no tolerations specified. @@ -639,24 +647,24 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, taintTolerationCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, taintTolerationCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) @@ -697,26 +705,26 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersAfter) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersAfter) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) 
It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, scoreByClusterAfter, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, scoreByClusterAfter, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClusters, scoreByClusterAfter, policySnapshotName, taintTolerationCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClusters, scoreByClusterAfter, types.NamespacedName{Name: policySnapshotName}, taintTolerationCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newClusterName) }) @@ -725,6 +733,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", // This is a serial test as adding a new member cluster may interrupt other test cases. Context("pickN with required topology spread constraints, add new cluster with taint, upscaling picks new cluster with tolerated taint", Serial, Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: crpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1) // Prepare a new cluster to avoid interrupting other concurrently running test cases. newClusterName := fmt.Sprintf(provisionalClusterNameTemplate, GinkgoParallelProcess()) @@ -785,7 +794,7 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", BeforeAll(func() { // Ensure that no bindings have been created so far. - noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpName) + noBindingsCreatedActual := noBindingsCreatedForPlacementActual(crpKey) Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly") // Create a CRP of the PickN placement type, along with its associated policy snapshot. 
@@ -807,24 +816,24 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should add scheduler cleanup finalizer to the CRP", func() { - finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpName) + finalizerAddedActual := placementSchedulerFinalizerAddedActual(crpKey) Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP") }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClusters) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClusters) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClusters, scoreByCluster, crpKey, policySnapshotName) Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, policySnapshotName, taintTolerationCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClusters), wantPickedClusters, wantNotPickedClusters, wantFilteredClusters, scoreByCluster, types.NamespacedName{Name: policySnapshotName}, taintTolerationCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) @@ -865,26 +874,26 @@ var _ = Describe("scheduling CRPs on member clusters with taints & tolerations", }) It("should create N bindings", func() { - hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpName, wantPickedClustersAfter) + hasNScheduledOrBoundBindingsActual := hasNScheduledOrBoundBindingsPresentActual(crpKey, wantPickedClustersAfter) Eventually(hasNScheduledOrBoundBindingsActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create N bindings") Consistently(hasNScheduledOrBoundBindingsActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create N bindings") }) It("should create scheduled bindings for selected clusters", func() { - scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, scoreByClusterAfter, crpName, policySnapshotName) + scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantPickedClustersAfter, scoreByClusterAfter, crpKey, policySnapshotName) 
Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create scheduled bindings for selected clusters") }) It("should report status correctly", func() { - crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClusters, scoreByClusterAfter, policySnapshotName, taintTolerationCmpOpts) + crpStatusUpdatedActual := pickNPolicySnapshotStatusUpdatedActual(int(numOfClustersAfter), wantPickedClustersAfter, wantNotPickedClustersAfter, wantFilteredClusters, scoreByClusterAfter, types.NamespacedName{Name: policySnapshotName}, taintTolerationCmpOpts) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to report status correctly") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to report status correctly") }) AfterAll(func() { // Delete the CRP. - ensurePlacementAndAllRelatedResourcesDeletion(crpName) + ensurePlacementAndAllRelatedResourcesDeletion(crpKey) // Delete the provisional cluster. ensureProvisionalClusterDeletion(newClusterName) }) @@ -895,9 +904,9 @@ var _ = Describe("scheduling RPs on member clusters with taints & tolerations", // This is a serial test as adding taints can affect other tests Context("pickFixed, valid target clusters with taints", Serial, Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} targetClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster6WestProd} taintClusters := targetClusters @@ -941,9 +950,9 @@ var _ = Describe("scheduling RPs on member clusters with taints & tolerations", // This is a serial test as adding taints can affect other tests. 
Context("pick all valid cluster with no taints, ignore valid cluster with taints, RP with no matching toleration", Serial, Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} taintClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary} selectedClusters := []string{memberCluster2EastProd, memberCluster3EastCanary, memberCluster5CentralProd, memberCluster6WestProd} unSelectedClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary, memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} @@ -994,9 +1003,9 @@ var _ = Describe("scheduling RPs on member clusters with taints & tolerations", // This is a serial test as adding taints can affect other tests. Context("pick all valid cluster with no taints, ignore valid cluster with taints, then remove taints after which RP selects all clusters", Serial, Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} taintClusters := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary} selectedClusters1 := []string{memberCluster2EastProd, memberCluster3EastCanary, memberCluster5CentralProd, memberCluster6WestProd} unSelectedClusters1 := []string{memberCluster1EastProd, memberCluster4CentralProd, memberCluster7WestCanary, memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} @@ -1070,9 +1079,9 @@ var _ = Describe("scheduling RPs on member clusters with taints & tolerations", // This is a serial test as adding taints, tolerations can affect other tests. Context("pick all valid cluster with tolerated taints, ignore valid clusters with taints, RP has some matching tolerations on creation", Serial, Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} taintClusters := []string{memberCluster1EastProd, memberCluster2EastProd, memberCluster6WestProd} tolerateClusters := []string{memberCluster1EastProd, memberCluster2EastProd} selectedClusters := tolerateClusters @@ -1151,9 +1160,9 @@ var _ = Describe("scheduling RPs on member clusters with taints & tolerations", // This is a serial test as adding taints, tolerations can affect other tests. 
Context("pickAll valid cluster without taints, add a taint to a cluster that's already picked", Serial, Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} selectedClusters := healthyClusters unSelectedClusters := []string{memberCluster8UnhealthyEastProd, memberCluster9LeftCentralProd} taintClusters := []string{memberCluster1EastProd, memberCluster2EastProd} @@ -1227,11 +1236,11 @@ var _ = Describe("scheduling RPs on member clusters with taints & tolerations", // This is a serial test as adding taints, tolerations can affect other tests. Context("pick N clusters with affinity specified, ignore valid clusters with taints, RP has some matching tolerations after update", Serial, Ordered, func() { rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) - rpKey := controller.GetObjectKeyFromNamespaceName(testNamespace, rpName) + rpKey := types.NamespacedName{Namespace: testNamespace, Name: rpName} policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, rpName, 1) - policySnapshotKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotName) + policySnapshotKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotName} policySnapshotNameAfter := fmt.Sprintf(policySnapshotNameTemplate, rpName, 2) - policySnapshotNameAfterKey := controller.GetObjectKeyFromNamespaceName(testNamespace, policySnapshotNameAfter) + policySnapshotNameAfterKey := types.NamespacedName{Namespace: testNamespace, Name: policySnapshotNameAfter} numOfClusters := int32(2) // Less than the number of clusters available (7) in the fleet. taintClusters := []string{memberCluster1EastProd, memberCluster2EastProd} tolerateClusters := taintClusters diff --git a/test/scheduler/utils_test.go b/test/scheduler/utils_test.go index 2b3b8528f..af57f2f69 100644 --- a/test/scheduler/utils_test.go +++ b/test/scheduler/utils_test.go @@ -48,7 +48,6 @@ import ( "github.com/kubefleet-dev/kubefleet/pkg/scheduler/framework/plugins/sameplacementaffinity" "github.com/kubefleet-dev/kubefleet/pkg/scheduler/framework/plugins/tainttoleration" "github.com/kubefleet-dev/kubefleet/pkg/scheduler/framework/plugins/topologyspreadconstraints" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" ) // This file features some utilities used in the test suites. 
@@ -513,7 +512,7 @@ func updatePickFixedRPWithNewTargetClustersAndRefreshSnapshots(namespace, rpName Expect(hubClient.Create(ctx, policySnapshot)).To(Succeed(), "Failed to create policy snapshot") } -func markBindingsAsBoundForClusters(placementKey string, boundClusters []string) { +func markBindingsAsBoundForClusters(placementKey types.NamespacedName, boundClusters []string) { bindingList, err := listBindings(placementKey) Expect(err).ToNot(HaveOccurred(), "Failed to list bindings") @@ -530,10 +529,8 @@ func markBindingsAsBoundForClusters(placementKey string, boundClusters []string) } } -func ensurePlacementAndAllRelatedResourcesDeletion(placementKey string) { - namespace, placementName, err := controller.ExtractNamespaceNameFromKeyStr(placementKey) - Expect(err).ToNot(HaveOccurred(), "Failed to extract namespace and name from placement key") - +func ensurePlacementAndAllRelatedResourcesDeletion(placementKey types.NamespacedName) { + namespace, placementName := placementKey.Namespace, placementKey.Name // Delete the placement. var placement placementv1beta1.PlacementObj if namespace == "" { @@ -565,7 +562,7 @@ func ensurePlacementAndAllRelatedResourcesDeletion(placementKey string) { // Remove all the other finalizers from the placement. Eventually(func() error { - if err := hubClient.Get(ctx, types.NamespacedName{Name: placementName, Namespace: namespace}, placement); err != nil { + if err := hubClient.Get(ctx, placementKey, placement); err != nil { return err } @@ -575,7 +572,7 @@ func ensurePlacementAndAllRelatedResourcesDeletion(placementKey string) { // Ensure that the placement is deleted. Eventually(func() error { - err := hubClient.Get(ctx, types.NamespacedName{Name: placementName, Namespace: namespace}, placement) + err := hubClient.Get(ctx, placementKey, placement) if errors.IsNotFound(err) { return nil } @@ -1077,23 +1074,18 @@ func resetClusterPropertiesFor(clusterName string) { }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to reset cluster properties") } -func listBindings(placementKey string) (placementv1beta1.BindingObjList, error) { - namespace, placementName, err := controller.ExtractNamespaceNameFromKeyStr(placementKey) - if err != nil { - return nil, fmt.Errorf("failed to extract namespace and name from placement key %s: %w", placementKey, err) - } - +func listBindings(placementKey types.NamespacedName) (placementv1beta1.BindingObjList, error) { var bindingList placementv1beta1.BindingObjList - labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: placementName}) + labelSelector := labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: placementKey.Name}) listOptions := &client.ListOptions{LabelSelector: labelSelector} - if namespace == "" { + if placementKey.Namespace == "" { // List ClusterResourceBindings. bindingList = &placementv1beta1.ClusterResourceBindingList{} } else { // List ResourceBindings. 
bindingList = &placementv1beta1.ResourceBindingList{} - listOptions.Namespace = namespace + listOptions.Namespace = placementKey.Namespace } if err := hubClient.List(ctx, bindingList, listOptions); err != nil { @@ -1102,22 +1094,17 @@ func listBindings(placementKey string) (placementv1beta1.BindingObjList, error) return bindingList, nil } -func getSchedulingPolicySnapshot(policySnapshotKey string) (placementv1beta1.PolicySnapshotObj, error) { - namespace, policySnapshotName, err := controller.ExtractNamespaceNameFromKeyStr(policySnapshotKey) - if err != nil { - return nil, fmt.Errorf("failed to extract namespace and name from policy snapshot key %s: %w", policySnapshotKey, err) - } - +func getSchedulingPolicySnapshot(policySnapshotKey types.NamespacedName) (placementv1beta1.PolicySnapshotObj, error) { // Get the policy snapshot. var policySnapshot placementv1beta1.PolicySnapshotObj - if namespace == "" { + if policySnapshotKey.Namespace == "" { // Get ClusterSchedulingPolicySnapshot. policySnapshot = &placementv1beta1.ClusterSchedulingPolicySnapshot{} } else { // Get SchedulingPolicySnapshot. policySnapshot = &placementv1beta1.SchedulingPolicySnapshot{} } - if err := hubClient.Get(ctx, types.NamespacedName{Name: policySnapshotName, Namespace: namespace}, policySnapshot); err != nil { + if err := hubClient.Get(ctx, types.NamespacedName{Name: policySnapshotKey.Name, Namespace: policySnapshotKey.Namespace}, policySnapshot); err != nil { return nil, err } return policySnapshot, nil From 0bd9bfb217a403c790e976271a2ad671e6863a6f Mon Sep 17 00:00:00 2001 From: Zhiying Lin <54013513+zhiying-lin@users.noreply.github.com> Date: Thu, 14 Aug 2025 04:26:19 +0800 Subject: [PATCH 05/38] feat: update the resource_selector to support RP (#184) --- CLAUDE.md | 7 +- .../resource_selector.go | 103 +- .../resource_selector_test.go | 878 ++++++++++++++++++ test/utils/informer/manager.go | 41 +- 4 files changed, 990 insertions(+), 39 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 8d65cd224..0f60520ec 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -159,11 +159,16 @@ cmd/memberagent/ # Member agent main and setup ## Testing Patterns ### Unit Tests -- Use `testify` for assertions +- Avoid the use of ‘assert’ libraries. - Controllers use `envtest` for integration testing with real etcd - Mock external dependencies with `gomock` - Unit test files: `_test.go` in same directory - Table-driven test style preferred +- Use cmp.Equal for equality comparison and cmp.Diff to obtain a human-readable diff between objects. +- Test outputs should output the actual value that the function returned before printing the value that was expected. A usual format for printing test outputs is “YourFunc(%v) = %v, want %v”. +- If your function returns a struct, don’t write test code that performs an individual comparison for each field of the struct. Instead, construct the struct that you’re expecting your function to return, and compare in one shot using diffs or deep comparisons. The same rule applies to arrays and maps. +- If your struct needs to be compared for approximate equality or some other kind of semantic equality, or it contains fields that cannot be compared for equality (e.g. if one of the fields is an io.Reader), tweaking a cmp.Diff or cmp.Equal comparison with cmpopts options such as cmpopts.IgnoreInterfaces may meet your needs (example); otherwise, this technique just won’t work, so do whatever works. +- If your function returns multiple return values, you don’t need to wrap those in a struct before comparing them. 
Just compare the return values individually and print them. ### Integration Tests - Located in `test/integration/` and `test/scheduler/` diff --git a/pkg/controllers/clusterresourceplacement/resource_selector.go b/pkg/controllers/clusterresourceplacement/resource_selector.go index 82d769e4e..305dede86 100644 --- a/pkg/controllers/clusterresourceplacement/resource_selector.go +++ b/pkg/controllers/clusterresourceplacement/resource_selector.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" @@ -83,7 +84,7 @@ var ( // selectResources selects the resources according to the placement resourceSelectors. // It also generates an array of manifests obj based on the selected resources. func (r *Reconciler) selectResources(placement *fleetv1alpha1.ClusterResourcePlacement) ([]workv1alpha1.Manifest, error) { - selectedObjects, err := r.gatherSelectedResource(placement.GetName(), convertResourceSelector(placement.Spec.ResourceSelectors)) + selectedObjects, err := r.gatherSelectedResource(types.NamespacedName{Name: placement.GetName()}, convertResourceSelector(placement.Spec.ResourceSelectors)) if err != nil { return nil, err } @@ -128,8 +129,7 @@ func convertResourceSelector(old []fleetv1alpha1.ClusterResourceSelector) []flee } // gatherSelectedResource gets all the resources according to the resource selector. -// TODO: treat the RP selector differently to not allow RP to select cluster scoped resources -func (r *Reconciler) gatherSelectedResource(placement string, selectors []fleetv1beta1.ClusterResourceSelector) ([]*unstructured.Unstructured, error) { +func (r *Reconciler) gatherSelectedResource(placementKey types.NamespacedName, selectors []fleetv1beta1.ClusterResourceSelector) ([]*unstructured.Unstructured, error) { var resources []*unstructured.Unstructured var resourceMap = make(map[fleetv1beta1.ResourceIdentifier]bool) for _, selector := range selectors { @@ -145,10 +145,10 @@ func (r *Reconciler) gatherSelectedResource(placement string, selectors []fleetv } var objs []runtime.Object var err error - if gvk == utils.NamespaceGVK { - objs, err = r.fetchNamespaceResources(selector, placement) + if gvk == utils.NamespaceGVK && placementKey.Namespace == "" { + objs, err = r.fetchNamespaceResources(selector, placementKey.Name) } else { - objs, err = r.fetchClusterScopedResources(selector, placement) + objs, err = r.fetchResources(selector, placementKey) } if err != nil { return nil, err @@ -164,7 +164,7 @@ func (r *Reconciler) gatherSelectedResource(placement string, selectors []fleetv } if _, exist := resourceMap[ri]; exist { err = fmt.Errorf("found duplicate resource %+v", ri) - klog.ErrorS(err, "user selected one resource more than once", "resource", ri, "placement", placement) + klog.ErrorS(err, "User selected one resource more than once", "resource", ri, "placement", placementKey) return nil, controller.NewUserError(err) } resourceMap[ri] = true @@ -230,16 +230,16 @@ func buildApplyOrderMap() map[string]int { return ordering } -// fetchClusterScopedResources retrieves the objects based on the selector. 
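The unit-test conventions added to CLAUDE.md above (whole-struct comparison, cmp.Diff, actual-before-wanted output) are easiest to see in a small example. Everything below is made up for illustration; only the comparison and output pattern reflect the guidance:

package example

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

type config struct {
	Name     string
	Replicas int
}

// buildConfig is a stand-in function used only to demonstrate the testing style.
func buildConfig(name string) config {
	return config{Name: name, Replicas: 1}
}

func TestBuildConfig(t *testing.T) {
	want := config{Name: "web", Replicas: 1}
	got := buildConfig("web")
	// Construct the full struct you expect and compare it in one shot;
	// print the actual value before the wanted value.
	if diff := cmp.Diff(want, got); diff != "" {
		t.Errorf("buildConfig(%q) = %+v, want %+v, diff (-want +got):\n%s", "web", got, want, diff)
	}
}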
-func (r *Reconciler) fetchClusterScopedResources(selector fleetv1beta1.ClusterResourceSelector, placeName string) ([]runtime.Object, error) { - klog.V(2).InfoS("start to fetch the cluster scoped resources by the selector", "selector", selector) +// fetchResources retrieves the objects based on the selector. +func (r *Reconciler) fetchResources(selector fleetv1beta1.ClusterResourceSelector, placementKey types.NamespacedName) ([]runtime.Object, error) { + klog.V(2).InfoS("Start to fetch resources by the selector", "selector", selector, "placement", placementKey) gk := schema.GroupKind{ Group: selector.Group, Kind: selector.Kind, } restMapping, err := r.RestMapper.RESTMapping(gk, selector.Version) if err != nil { - return nil, controller.NewUserError(fmt.Errorf("invalid placement %s, failed to get GVR of the selector: %w", placeName, err)) + return nil, controller.NewUserError(fmt.Errorf("invalid placement %s, failed to get GVR of the selector: %w", placementKey, err)) } gvr := restMapping.Resource gvk := schema.GroupVersionKind{ @@ -247,26 +247,47 @@ func (r *Reconciler) fetchClusterScopedResources(selector fleetv1beta1.ClusterRe Version: selector.Version, Kind: selector.Kind, } - if !r.InformerManager.IsClusterScopedResources(gvk) { - return nil, controller.NewUserError(fmt.Errorf("invalid placement %s: %+v is not a cluster scoped resource", placeName, restMapping.Resource)) + + isNamespacedResource := !r.InformerManager.IsClusterScopedResources(gvk) + if isNamespacedResource && placementKey.Namespace == "" { + // If it's a namespace-scoped resource but placement has no namespace, return error. + err := fmt.Errorf("invalid placement %s: cannot select namespace-scoped resource %v in a clusterResourcePlacement", placementKey, gvr) + klog.ErrorS(err, "Invalid resource selector", "selector", selector) + return nil, controller.NewUserError(err) + } else if !isNamespacedResource && placementKey.Namespace != "" { + // If it's a cluster-scoped resource but placement has a namespace, return error. 
+ err := fmt.Errorf("invalid placement %s: cannot select cluster-scoped resource %v in a resourcePlacement", placementKey, gvr) + klog.ErrorS(err, "Invalid resource selector", "selector", selector) + return nil, controller.NewUserError(err) } + if !r.InformerManager.IsInformerSynced(gvr) { - return nil, controller.NewExpectedBehaviorError(fmt.Errorf("informer cache for %+v is not synced yet", restMapping.Resource)) + err := fmt.Errorf("informer cache for %+v is not synced yet", restMapping.Resource) + klog.ErrorS(err, "Informer cache is not synced", "gvr", gvr, "placement", placementKey) + return nil, controller.NewExpectedBehaviorError(err) } lister := r.InformerManager.Lister(gvr) + // TODO: validator should enforce the mutual exclusiveness between the `name` and `labelSelector` fields if len(selector.Name) != 0 { - obj, err := lister.Get(selector.Name) + var obj runtime.Object + var err error + + if isNamespacedResource { + obj, err = lister.ByNamespace(placementKey.Namespace).Get(selector.Name) + } else { + obj, err = lister.Get(selector.Name) + } + if err != nil { - klog.ErrorS(err, "cannot get the resource", "gvr", gvr, "name", selector.Name) + klog.ErrorS(err, "Cannot get the resource", "gvr", gvr, "name", selector.Name, "namespace", placementKey.Namespace) return nil, controller.NewAPIServerError(true, client.IgnoreNotFound(err)) } - uObj := obj.DeepCopyObject().(*unstructured.Unstructured) - if uObj.GetDeletionTimestamp() != nil { - // skip a to be deleted namespace - klog.V(2).InfoS("skip the deleting cluster scoped resources by the selector", - "selector", selector, "placeName", placeName, "resource name", uObj.GetName()) + if uObj := obj.DeepCopyObject().(*unstructured.Unstructured); uObj.GetDeletionTimestamp() != nil { + // skip a to be deleted resource + klog.V(2).InfoS("Skip the deleting resource by the selector", + "selector", selector, "placement", placementKey, "resourceName", uObj.GetName()) return []runtime.Object{}, nil } return []runtime.Object{obj}, nil @@ -282,18 +303,26 @@ func (r *Reconciler) fetchClusterScopedResources(selector fleetv1beta1.ClusterRe return nil, controller.NewUnexpectedBehaviorError(fmt.Errorf("cannot convert the label selector to a selector: %w", err)) } } + var selectedObjs []runtime.Object - objects, err := lister.List(labelSelector) + var objects []runtime.Object + + if isNamespacedResource { + objects, err = lister.ByNamespace(placementKey.Namespace).List(labelSelector) + } else { + objects, err = lister.List(labelSelector) + } if err != nil { - return nil, controller.NewAPIServerError(true, fmt.Errorf("cannot list all the objects: %w", err)) + klog.ErrorS(err, "Cannot list all the objects", "gvr", gvr, "labelSelector", labelSelector, "placement", placementKey) + return nil, controller.NewAPIServerError(true, err) } + // go ahead and claim all objects by adding a finalizer and insert the placement in its annotation for i := 0; i < len(objects); i++ { - uObj := objects[i].DeepCopyObject().(*unstructured.Unstructured) - if uObj.GetDeletionTimestamp() != nil { - // skip a to be deleted namespace - klog.V(2).InfoS("skip the deleting cluster scoped resources by the selector", - "selector", selector, "placeName", placeName, "resource name", uObj.GetName()) + if uObj := objects[i].DeepCopyObject().(*unstructured.Unstructured); uObj.GetDeletionTimestamp() != nil { + // skip a to be deleted resource + klog.V(2).InfoS("Skip the deleting resource by the selector", + "selector", selector, "placement", placementKey, "resourceName", uObj.GetName()) 
continue } selectedObjs = append(selectedObjs, objects[i]) @@ -330,7 +359,8 @@ func (r *Reconciler) fetchNamespaceResources(selector fleetv1beta1.ClusterResour } namespaces, err := r.InformerManager.Lister(utils.NamespaceGVR).List(labelSelector) if err != nil { - return nil, controller.NewAPIServerError(true, fmt.Errorf("cannot list all the namespaces given the label selector: %w", err)) + klog.ErrorS(err, "Cannot list all the namespaces by the label selector", "labelSelector", labelSelector, "placement", placeName) + return nil, controller.NewAPIServerError(true, err) } for _, namespace := range namespaces { @@ -384,10 +414,17 @@ func (r *Reconciler) fetchAllResourcesInOneNamespace(namespaceName string, place lister := r.InformerManager.Lister(gvr) objs, err := lister.ByNamespace(namespaceName).List(labels.Everything()) if err != nil { - return nil, controller.NewAPIServerError(true, fmt.Errorf("cannot list all the objects of type %+v in namespace %s: %w", gvr, namespaceName, err)) + klog.ErrorS(err, "Cannot list all the objects in namespace", "gvr", gvr, "namespace", namespaceName) + return nil, controller.NewAPIServerError(true, err) } for _, obj := range objs { uObj := obj.DeepCopyObject().(*unstructured.Unstructured) + if uObj.GetDeletionTimestamp() != nil { + // skip a to be deleted resource + klog.V(2).InfoS("skip the deleting resource by the selector", + "placeName", placeName, "namespace", namespaceName, "object", klog.KObj(uObj)) + continue + } shouldInclude, err := utils.ShouldPropagateObj(r.InformerManager, uObj) if err != nil { klog.ErrorS(err, "cannot determine if we should propagate an object", "object", klog.KObj(uObj)) @@ -520,8 +557,10 @@ func generateResourceContent(object *unstructured.Unstructured) (*fleetv1beta1.R // It also returns the number of envelope configmaps so the CRP controller can have the right expectation of the number of work objects. func (r *Reconciler) selectResourcesForPlacement(placementObj fleetv1beta1.PlacementObj) (int, []fleetv1beta1.ResourceContent, []fleetv1beta1.ResourceIdentifier, error) { envelopeObjCount := 0 - placementSpec := placementObj.GetPlacementSpec() - selectedObjects, err := r.gatherSelectedResource(placementObj.GetName(), placementSpec.ResourceSelectors) + selectedObjects, err := r.gatherSelectedResource(types.NamespacedName{ + Name: placementObj.GetName(), + Namespace: placementObj.GetNamespace(), + }, placementObj.GetPlacementSpec().ResourceSelectors) if err != nil { return 0, nil, nil, err } diff --git a/pkg/controllers/clusterresourceplacement/resource_selector_test.go b/pkg/controllers/clusterresourceplacement/resource_selector_test.go index bc17837a6..b42a50b8e 100644 --- a/pkg/controllers/clusterresourceplacement/resource_selector_test.go +++ b/pkg/controllers/clusterresourceplacement/resource_selector_test.go @@ -17,6 +17,7 @@ limitations under the License. 
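The fetchResources changes above reduce to a small scope matrix: a namespace-scoped resource may only be selected by a placement that has a namespace (an RP), and a cluster-scoped resource only by a placement that does not (a CRP); the namespaced path then lists through Lister(gvr).ByNamespace(...). A compact recap of the two error paths, paraphrased from the diff rather than copied:

package example

import "fmt"

// scopeCheck is an illustrative reduction of the validation in fetchResources;
// the real implementation wraps these as user errors via controller.NewUserError.
func scopeCheck(isNamespacedResource bool, placementNamespace string) error {
	switch {
	case isNamespacedResource && placementNamespace == "":
		return fmt.Errorf("cannot select a namespace-scoped resource in a ClusterResourcePlacement")
	case !isNamespacedResource && placementNamespace != "":
		return fmt.Errorf("cannot select a cluster-scoped resource in a ResourcePlacement")
	default:
		// Scope matches: cluster-scoped resources go through Lister(gvr),
		// namespaced ones through Lister(gvr).ByNamespace(placementNamespace).
		return nil
	}
}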
package clusterresourceplacement import ( + "errors" "math/rand" "testing" "time" @@ -26,15 +27,21 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" utilrand "k8s.io/apimachinery/pkg/util/rand" "k8s.io/utils/ptr" workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils" + "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" + testinformer "github.com/kubefleet-dev/kubefleet/test/utils/informer" ) func TestGenerateManifest(t *testing.T) { @@ -807,6 +814,877 @@ func createResourceContentForTest(t *testing.T, obj interface{}) *fleetv1beta1.R } } +func TestGatherSelectedResource(t *testing.T) { + // Common test deployment object used across multiple test cases. + testDeployment := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "test-ns", + }, + }, + } + testDeployment.SetGroupVersionKind(utils.DeploymentGVK) + + // Common test configmap object used across multiple test cases. + testConfigMap := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "test-configmap", + "namespace": "test-ns", + }, + }, + } + testConfigMap.SetGroupVersionKind(utils.ConfigMapGVK) + + kubeRootCAConfigMap := &unstructured.Unstructured{ // reserved configmap object + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "kube-root-ca.crt", + "namespace": "test-ns", + }, + }, + } + kubeRootCAConfigMap.SetGroupVersionKind(utils.ConfigMapGVK) + + // Common test deployment object in deleting state. + testDeletingDeployment := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deleting-deployment", + "namespace": "test-ns", + "deletionTimestamp": "2025-01-01T00:00:00Z", + "labels": map[string]interface{}{ + "tier": "api", + }, + }, + }, + } + testDeletingDeployment.SetGroupVersionKind(utils.DeploymentGVK) + + // Common test deployment with app=frontend label. + testFrontendDeployment := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "frontend-deployment", + "namespace": "test-ns", + "labels": map[string]interface{}{ + "app": "frontend", + "tier": "web", + }, + }, + }, + } + testFrontendDeployment.SetGroupVersionKind(utils.DeploymentGVK) + + // Common test deployment with app=backend label. 
+ testBackendDeployment := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "backend-deployment", + "namespace": "test-ns", + "labels": map[string]interface{}{ + "app": "backend", + "tier": "api", + }, + }, + }, + } + testBackendDeployment.SetGroupVersionKind(utils.DeploymentGVK) + + // Common test namespace object (cluster-scoped). + testNamespace := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": "test-ns", + "labels": map[string]interface{}{ + "environment": "test", + }, + }, + }, + } + testNamespace.SetGroupVersionKind(utils.NamespaceGVK) + + testDeletingNamespace := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": "deleting-ns", + "labels": map[string]interface{}{ + "environment": "test", + }, + "deletionTimestamp": "2025-01-01T00:00:00Z", + }, + }, + } + testDeletingNamespace.SetGroupVersionKind(utils.NamespaceGVK) + + prodNamespace := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": "prod-ns", + "labels": map[string]interface{}{ + "environment": "production", + }, + }, + }, + } + prodNamespace.SetGroupVersionKind(utils.NamespaceGVK) + + // Common test cluster role object (cluster-scoped). + testClusterRole := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRole", + "metadata": map[string]interface{}{ + "name": "test-cluster-role", + }, + }, + } + testClusterRole.SetGroupVersionKind(utils.ClusterRoleGVK) + + // Common test cluster role object #2 (cluster-scoped). 
+ testClusterRole2 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRole", + "metadata": map[string]interface{}{ + "name": "test-cluster-role-2", + }, + }, + } + testClusterRole2.SetGroupVersionKind(utils.ClusterRoleGVK) + + kubeSystemNamespace := &unstructured.Unstructured{ // reserved namespace object + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": "kube-system", + "labels": map[string]interface{}{ + "environment": "test", + }, + }, + }, + } + kubeSystemNamespace.SetGroupVersionKind(utils.NamespaceGVK) + + tests := []struct { + name string + placementName types.NamespacedName + selectors []fleetv1beta1.ClusterResourceSelector + resourceConfig *utils.ResourceConfig + informerManager *testinformer.FakeManager + want []*unstructured.Unstructured + wantError error + }{ + { + name: "should handle empty selectors", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ClusterResourceSelector{}, + want: nil, + }, + { + name: "should skip disabled resources", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(true), // deny list - empty means deny all + want: nil, + }, + { + name: "should return error for cluster-scoped resource", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + Name: "test-clusterrole", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{}, + }, + want: nil, + wantError: controller.ErrUserError, + }, + { + name: "should handle single resource selection successfully", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, + }, + } + }(), + want: []*unstructured.Unstructured{testDeployment}, + wantError: nil, + }, + { + name: "should return empty result when informer manager returns not found error", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: { + Objects: []runtime.Object{}, + Err: apierrors.NewNotFound(schema.GroupResource{Group: "apps", Resource: "deployments"}, 
"test-deployment"), + }, + }, + } + }(), + want: nil, // should return nil when informer returns not found error + }, + { + name: "should return error when informer manager returns non-NotFound error", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: { + Objects: []runtime.Object{}, + Err: errors.New("connection timeout"), + }, + }, + } + }(), + wantError: controller.ErrUnexpectedBehavior, + }, + { + name: "should return error using label selector when informer manager returns error", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: { + Objects: []runtime.Object{}, + Err: apierrors.NewNotFound(schema.GroupResource{Group: "apps", Resource: "deployments"}, "test-deployment"), + }, + }, + } + }(), + wantError: controller.ErrAPIServerError, + }, + { + name: "should return only non-deleting resources when mixed with deleting resources", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", // non-deleting deployment + }, + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deleting-deployment", // deleting deployment + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + }, + } + }(), + want: []*unstructured.Unstructured{testDeployment}, + wantError: nil, + }, + { + name: "should handle resource selection successfully by using label selector", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "frontend", + }, + }, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testFrontendDeployment, testBackendDeployment, testDeployment}}, + }, + } + }(), + want: []*unstructured.Unstructured{testFrontendDeployment}, + wantError: nil, + }, + { + name: "should handle label selector with MatchExpressions", + 
placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "tier", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"web", "api"}, + }, + }, + }, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testFrontendDeployment, testBackendDeployment, testDeployment, testDeletingDeployment}}, + }, + } + }(), + want: []*unstructured.Unstructured{testBackendDeployment, testFrontendDeployment}, // should return both deployments (order may vary) + wantError: nil, + }, + { + name: "should detect duplicate resources", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", // same deployment selected twice + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, + }, + } + }(), + wantError: controller.ErrUserError, + }, + { + name: "should sort resources according to apply order", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-configmap", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // Allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap}}, + }, + } + }(), + // ConfigMap should come first according to apply order. 
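+			// The ordering presumably comes from a fixed apply-order table in the controller
+			// (similar in spirit to the usual kubectl/Helm install order), where ConfigMap
+			// ranks ahead of workload kinds such as Deployment regardless of selector order.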
+ want: []*unstructured.Unstructured{testConfigMap, testDeployment}, + }, + // tests for cluster-scoped placements + { + name: "should return error for namespace-scoped resource for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{}, + }, + want: nil, + wantError: controller.ErrUserError, + }, + { + name: "should sort resources for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + // Empty name means select all ClusterRoles (or use label selector). + }, + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // Allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.ClusterRoleGVR: {Objects: []runtime.Object{testClusterRole, testClusterRole2}}, + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace}}, + }, + } + }(), + // Namespace should come first according to apply order (namespace comes before ClusterRole). + // Both ClusterRoles should be included since we're selecting all ClusterRoles with empty name. + want: []*unstructured.Unstructured{testNamespace, testClusterRole, testClusterRole2}, + }, + { + name: "should select resources by name for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + Name: "test-cluster-role", + }, + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // Allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.ClusterRoleGVR: {Objects: []runtime.Object{testClusterRole, testClusterRole2}}, + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace}}, + }, + } + }(), + // Namespace should come first according to apply order (namespace comes before ClusterRole). 
+ want: []*unstructured.Unstructured{testNamespace, testClusterRole}, + }, + { + name: "should select namespaces and its children resources by using label selector for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "environment": "test", + }, + }, + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + // Should select only non-reserved namespaces with matching labels and their children resources + want: []*unstructured.Unstructured{testNamespace, testConfigMap, testDeployment}, + }, + { + name: "should skip the resource for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "environment": "test", + }, + }, + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: func() *utils.ResourceConfig { + cfg := utils.NewResourceConfig(false) + cfg.AddGroupVersionKind(utils.DeploymentGVK) + return cfg + }(), + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + // should skip the deployment resource since it is not allowed by resource config + want: []*unstructured.Unstructured{testNamespace, testConfigMap}, + }, + { + name: "should select namespaces using nil label selector for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: 
[]runtime.Object{testConfigMap, kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + // Should select only non-reserved namespaces with matching labels and their children resources + want: []*unstructured.Unstructured{prodNamespace, testNamespace, testConfigMap, testDeployment}, + }, + { + name: "should return error when selecting a reserved namespace for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "environment": "test", + }, + }, + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace, kubeSystemNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + wantError: controller.ErrUserError, + }, + { + name: "should return empty result when informer manager returns not found error for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: { + Objects: []runtime.Object{}, + Err: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "namespaces"}, "test-ns"), + }, + }, + } + }(), + want: nil, // should return nil when informer returns not found error + }, + { + name: "should return error when informer manager returns non-NotFound error (getting namespace) for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: { + Objects: []runtime.Object{}, + Err: errors.New("connection timeout"), + }, + }, + } + }(), + wantError: controller.ErrUnexpectedBehavior, + }, + { + name: "should return error using label selector when informer manager returns error (getting namespace) for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + 
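+					// With neither Name nor LabelSelector set, namespaces are listed rather than
+					// fetched by name; a list failure, even a NotFound, surfaces as
+					// ErrAPIServerError instead of being treated as an empty result.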
SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: { + Objects: []runtime.Object{}, + Err: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "namespaces"}, "test-ns"), + }, + }, + } + }(), + wantError: controller.ErrAPIServerError, + }, + { + name: "should return error when informer manager returns non-NotFound error (getting deployment) for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // allow all resources + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace, kubeSystemNamespace}}, + utils.DeploymentGVR: { + Objects: []runtime.Object{}, + Err: errors.New("connection timeout"), + }, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR}, + } + }(), + wantError: controller.ErrUnexpectedBehavior, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &Reconciler{ + ResourceConfig: tt.resourceConfig, + InformerManager: tt.informerManager, + RestMapper: newFakeRESTMapper(), + } + + got, err := r.gatherSelectedResource(tt.placementName, tt.selectors) + if gotErr, wantErr := err != nil, tt.wantError != nil; gotErr != wantErr || !errors.Is(err, tt.wantError) { + t.Fatalf("gatherSelectedResource() = %v, want error %v", err, tt.wantError) + } + if tt.wantError != nil { + return + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("gatherSelectedResource() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +// fakeRESTMapper is a minimal RESTMapper implementation for testing +type fakeRESTMapper struct { + mappings map[schema.GroupKind]*meta.RESTMapping +} + +// newFakeRESTMapper creates a new fakeRESTMapper with default mappings +func newFakeRESTMapper() *fakeRESTMapper { + return &fakeRESTMapper{ + mappings: map[schema.GroupKind]*meta.RESTMapping{ + {Group: "", Kind: "Namespace"}: { + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}, + }, + {Group: "apps", Kind: "Deployment"}: { + Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, + }, + {Group: "", Kind: "ConfigMap"}: { + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}, + }, + {Group: "", Kind: "Node"}: { + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"}, + }, + {Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}: { + Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"}, + }, + }, + } +} + +func (f *fakeRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + if mapping, exists := f.mappings[gk]; exists { + return mapping, nil + } + return nil, errors.New("resource not found") +} + +func (f 
*fakeRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + mapping, err := f.RESTMapping(gk, versions...) + if err != nil { + return nil, err + } + return []*meta.RESTMapping{mapping}, nil +} + +func (f *fakeRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + return input, nil +} + +func (f *fakeRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return []schema.GroupVersionResource{input}, nil +} + +func (f *fakeRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + switch { + case resource.Group == "" && resource.Resource == "namespaces": + return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}, nil + case resource.Group == "apps" && resource.Resource == "deployments": + return schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, nil + case resource.Group == "" && resource.Resource == "configmaps": + return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, nil + case resource.Group == "" && resource.Resource == "nodes": + return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}, nil + } + return schema.GroupVersionKind{}, errors.New("kind not found") +} + +func (f *fakeRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + kind, err := f.KindFor(resource) + if err != nil { + return nil, err + } + return []schema.GroupVersionKind{kind}, nil +} + +func (f *fakeRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + return resource, nil +} + func TestSortResources(t *testing.T) { // Create the ingressClass object ingressClass := &unstructured.Unstructured{ diff --git a/test/utils/informer/manager.go b/test/utils/informer/manager.go index 301357c78..0a201573f 100644 --- a/test/utils/informer/manager.go +++ b/test/utils/informer/manager.go @@ -35,11 +35,24 @@ type FakeLister struct { Err error } -func (f *FakeLister) List(_ labels.Selector) ([]runtime.Object, error) { +func (f *FakeLister) List(selector labels.Selector) ([]runtime.Object, error) { if f.Err != nil { return nil, f.Err } - return f.Objects, nil + + if selector == nil { + return f.Objects, nil + } + + var filtered []runtime.Object + for _, obj := range f.Objects { + if uObj, ok := obj.(*unstructured.Unstructured); ok { + if selector.Matches(labels.Set(uObj.GetLabels())) { + filtered = append(filtered, obj) + } + } + } + return filtered, nil } func (f *FakeLister) Get(name string) (runtime.Object, error) { @@ -65,11 +78,25 @@ type FakeNamespaceLister struct { Err error } -func (f *FakeNamespaceLister) List(_ labels.Selector) ([]runtime.Object, error) { +func (f *FakeNamespaceLister) List(selector labels.Selector) ([]runtime.Object, error) { if f.Err != nil { return nil, f.Err } - return f.Objects, nil + + var filtered []runtime.Object + for _, obj := range f.Objects { + if uObj, ok := obj.(*unstructured.Unstructured); ok { + // Filter by namespace first + if uObj.GetNamespace() != f.Namespace { + continue + } + // Then filter by label selector if provided + if selector == nil || selector.Matches(labels.Set(uObj.GetLabels())) { + filtered = append(filtered, obj) + } + } + } + return filtered, nil } func (f *FakeNamespaceLister) Get(name string) (runtime.Object, error) { @@ -77,7 +104,7 @@ func (f *FakeNamespaceLister) Get(name string) (runtime.Object, error) { return nil, f.Err } for _, obj := range 
f.Objects { - if obj.(*unstructured.Unstructured).GetName() == name { + if uObj := obj.(*unstructured.Unstructured); uObj.GetName() == name && uObj.GetNamespace() == f.Namespace { return obj, nil } } @@ -96,6 +123,8 @@ type FakeManager struct { IsClusterScopedResource bool // Listers provides fake listers for testing. Listers map[schema.GroupVersionResource]*FakeLister + // NamespaceScopedResources is the list of namespace-scoped resources for testing. + NamespaceScopedResources []schema.GroupVersionResource } func (m *FakeManager) AddDynamicResources(_ []informer.APIResourceMeta, _ cache.ResourceEventHandler, _ bool) { @@ -122,7 +151,7 @@ func (m *FakeManager) Lister(gvr schema.GroupVersionResource) cache.GenericListe } func (m *FakeManager) GetNameSpaceScopedResources() []schema.GroupVersionResource { - return nil + return m.NamespaceScopedResources } func (m *FakeManager) IsClusterScopedResources(gvk schema.GroupVersionKind) bool { From d26fb8a87b9c8fb0b0e8e0932eb3474dc26ebc4d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Aug 2025 13:49:10 -0700 Subject: [PATCH 06/38] chore: bump docker/login-action from 3.4.0 to 3.5.0 (#178) Bumps [docker/login-action](https://github.com/docker/login-action) from 3.4.0 to 3.5.0. - [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/74a5d142397b4f367a81961eba4e8cd7edddf772...184bdaa0721073962dff0199f1fb9940f07167d1) --- updated-dependencies: - dependency-name: docker/login-action dependency-version: 3.5.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/trivy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index ca5094c18..c060f2c4a 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -47,7 +47,7 @@ jobs: uses: actions/checkout@v4 - name: Login to ${{ env.REGISTRY }} - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} From bf27823b2212751d526b854abddedfc5ec5ef990 Mon Sep 17 00:00:00 2001 From: Zhiying Lin <54013513+zhiying-lin@users.noreply.github.com> Date: Thu, 14 Aug 2025 09:24:27 +0800 Subject: [PATCH 07/38] chore: upgrade go version to 1.24.6 (#185) Signed-off-by: Zhiying Lin --- .github/workflows/ci.yml | 2 +- .github/workflows/code-lint.yml | 2 +- .github/workflows/trivy.yml | 2 +- .github/workflows/upgrade.yml | 2 +- .golangci.yml | 2 +- docker/hub-agent.Dockerfile | 2 +- docker/member-agent.Dockerfile | 2 +- docker/refresh-token.Dockerfile | 2 +- go.mod | 4 +--- 9 files changed, 9 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d2f899c22..b5118c377 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ on: paths-ignore: [docs/**, "**.md", "**.mdx", "**.png", "**.jpg"] env: - GO_VERSION: '1.24.4' + GO_VERSION: '1.24.6' jobs: detect-noop: diff --git a/.github/workflows/code-lint.yml b/.github/workflows/code-lint.yml index 92d14e2a6..fb96b9838 100644 --- a/.github/workflows/code-lint.yml +++ b/.github/workflows/code-lint.yml @@ -14,7 +14,7 @@ on: env: # Common versions - GO_VERSION: '1.24.4' + GO_VERSION: 
'1.24.6' jobs: diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index c060f2c4a..2d8b51fb7 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -18,7 +18,7 @@ env: MEMBER_AGENT_IMAGE_NAME: member-agent REFRESH_TOKEN_IMAGE_NAME: refresh-token - GO_VERSION: '1.24.4' + GO_VERSION: '1.24.6' jobs: export-registry: diff --git a/.github/workflows/upgrade.yml b/.github/workflows/upgrade.yml index 925d17123..459abd416 100644 --- a/.github/workflows/upgrade.yml +++ b/.github/workflows/upgrade.yml @@ -17,7 +17,7 @@ on: paths-ignore: [docs/**, "**.md", "**.mdx", "**.png", "**.jpg"] env: - GO_VERSION: '1.24.4' + GO_VERSION: '1.24.6' jobs: detect-noop: diff --git a/.golangci.yml b/.golangci.yml index 43b16e201..e0919ecc4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,6 +1,6 @@ run: timeout: 15m - go: '1.24.4' + go: '1.24.6' linters-settings: stylecheck: diff --git a/docker/hub-agent.Dockerfile b/docker/hub-agent.Dockerfile index d674ce51f..90b1ed64c 100644 --- a/docker/hub-agent.Dockerfile +++ b/docker/hub-agent.Dockerfile @@ -1,5 +1,5 @@ # Build the hubagent binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.4 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.6 AS builder WORKDIR /workspace # Copy the Go Modules manifests diff --git a/docker/member-agent.Dockerfile b/docker/member-agent.Dockerfile index 2b35ad2a8..9d9e75b62 100644 --- a/docker/member-agent.Dockerfile +++ b/docker/member-agent.Dockerfile @@ -1,5 +1,5 @@ # Build the memberagent binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.4 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.6 AS builder WORKDIR /workspace # Copy the Go Modules manifests diff --git a/docker/refresh-token.Dockerfile b/docker/refresh-token.Dockerfile index 2f7e764c8..6bea5ceb1 100644 --- a/docker/refresh-token.Dockerfile +++ b/docker/refresh-token.Dockerfile @@ -1,5 +1,5 @@ # Build the hubagent binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.4 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.6 AS builder WORKDIR /workspace # Copy the Go Modules manifests diff --git a/go.mod b/go.mod index bc76bfb5a..73c79ca16 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,6 @@ module github.com/kubefleet-dev/kubefleet -go 1.24.4 - -toolchain go1.24.6 +go 1.24.6 require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 From 0a1e1daba43722ae2fd0b9181bff96837314959a Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Thu, 14 Aug 2025 01:20:43 -0700 Subject: [PATCH 08/38] feat: add the namespace only option in the resource selector (#187) --------- Signed-off-by: Ryan Zhang Co-authored-by: Ryan Zhang --- apis/placement/v1alpha1/override_types.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 2 +- .../v1beta1/clusterresourceplacement_types.go | 20 +- apis/placement/v1beta1/override_types.go | 2 +- .../v1beta1/zz_generated.deepcopy.go | 10 +- ...tes-fleet.io_clusterresourceoverrides.yaml | 24 +-- ...t.io_clusterresourceoverridesnapshots.yaml | 24 +-- ...es-fleet.io_clusterresourceplacements.yaml | 12 +- ...ubernetes-fleet.io_resourceplacements.yaml | 12 +- hack/loadtest/util/help.go | 2 +- .../controller_integration_test.go | 10 +- .../controller_test.go | 2 +- .../placement_status_test.go | 4 +- .../resource_selector.go | 20 +- .../resource_selector_test.go | 171 +++++++++++++----- .../controller_intergration_test.go | 6 +- .../watcher_integration_test.go | 4 +- ...terresource_controller_integration_test.go | 2 +- .../resourcechange_controller.go | 15 +- 
.../resourcechange_controller_test.go | 78 ++++---- .../rollout/controller_integration_test.go | 2 +- pkg/controllers/rollout/controller_test.go | 2 +- .../updaterun/controller_integration_test.go | 4 +- .../workgenerator/override_test.go | 4 +- pkg/controllers/workgenerator/suite_test.go | 4 +- .../controller_integration_test.go | 2 +- .../watchers/membercluster/suite_test.go | 2 +- pkg/utils/overrider/overrider_test.go | 32 ++-- .../validator/clusterresourceoverride.go | 4 +- .../validator/clusterresourceoverride_test.go | 48 ++--- .../clusterresourceplacement_test.go | 14 +- ...resourceplacement_mutating_webhook_test.go | 22 +-- ...sourceplacement_validating_webhook_test.go | 22 +-- ...isruptionbudget_validating_webhook_test.go | 6 +- ...acementeviction_validating_webhook_test.go | 6 +- .../api_validation_integration_test.go | 34 ++-- test/e2e/enveloped_object_placement_test.go | 2 +- test/e2e/join_and_leave_test.go | 2 +- .../e2e/placement_selecting_resources_test.go | 26 +-- test/e2e/placement_with_custom_config_test.go | 4 +- test/e2e/resources_test.go | 8 +- test/e2e/rollout_test.go | 4 +- test/e2e/utils_test.go | 2 +- test/e2e/webhook_test.go | 30 +-- test/scheduler/utils_test.go | 2 +- test/upgrade/before/resources_test.go | 4 +- 46 files changed, 395 insertions(+), 319 deletions(-) diff --git a/apis/placement/v1alpha1/override_types.go b/apis/placement/v1alpha1/override_types.go index 32c7a56f6..66053087c 100644 --- a/apis/placement/v1alpha1/override_types.go +++ b/apis/placement/v1alpha1/override_types.go @@ -62,7 +62,7 @@ type ClusterResourceOverrideSpec struct { // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=20 // +required - ClusterResourceSelectors []placementv1beta1.ClusterResourceSelector `json:"clusterResourceSelectors"` + ClusterResourceSelectors []placementv1beta1.ResourceSelectorTerm `json:"clusterResourceSelectors"` // Policy defines how to override the selected resources on the target clusters. // +required diff --git a/apis/placement/v1alpha1/zz_generated.deepcopy.go b/apis/placement/v1alpha1/zz_generated.deepcopy.go index ccbe37c26..df9f5e6d7 100644 --- a/apis/placement/v1alpha1/zz_generated.deepcopy.go +++ b/apis/placement/v1alpha1/zz_generated.deepcopy.go @@ -308,7 +308,7 @@ func (in *ClusterResourceOverrideSpec) DeepCopyInto(out *ClusterResourceOverride } if in.ClusterResourceSelectors != nil { in, out := &in.ClusterResourceSelectors, &out.ClusterResourceSelectors - *out = make([]v1beta1.ClusterResourceSelector, len(*in)) + *out = make([]v1beta1.ResourceSelectorTerm, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/apis/placement/v1beta1/clusterresourceplacement_types.go b/apis/placement/v1beta1/clusterresourceplacement_types.go index 31a1b1da7..86bbdeb0c 100644 --- a/apis/placement/v1beta1/clusterresourceplacement_types.go +++ b/apis/placement/v1beta1/clusterresourceplacement_types.go @@ -129,7 +129,7 @@ type PlacementSpec struct { // +kubebuilder:validation:Required // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=100 - ResourceSelectors []ClusterResourceSelector `json:"resourceSelectors"` + ResourceSelectors []ResourceSelectorTerm `json:"resourceSelectors"` // Policy defines how to select member clusters to place the selected resources. // If unspecified, all the joined member clusters are selected. 
@@ -170,33 +170,31 @@ func (p *PlacementSpec) Tolerations() []Toleration { return nil } -// TODO: rename this to ResourceSelectorTerm - -// ClusterResourceSelector is used to select resources as the target resources to be placed. +// ResourceSelectorTerm is used to select resources as the target resources to be placed. // All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. -type ClusterResourceSelector struct { - // Group name of the cluster-scoped resource. +type ResourceSelectorTerm struct { + // Group name of the be selected resource. // Use an empty string to select resources under the core API group (e.g., namespaces). // +kubebuilder:validation:Required Group string `json:"group"` - // Version of the cluster-scoped resource. + // Version of the to be selected resource. // +kubebuilder:validation:Required Version string `json:"version"` - // Kind of the cluster-scoped resource. + // Kind of the to be selected resource. // Note: When `Kind` is `namespace`, by default ALL the resources under the selected namespaces are selected. // +kubebuilder:validation:Required Kind string `json:"kind"` // You can only specify at most one of the following two fields: Name and LabelSelector. - // If none is specified, all the cluster-scoped resources with the given group, version and kind are selected. + // If none is specified, all the be selected resources with the given group, version and kind are selected. - // Name of the cluster-scoped resource. + // Name of the be selected resource. // +kubebuilder:validation:Optional Name string `json:"name,omitempty"` - // A label query over all the cluster-scoped resources. Resources matching the query are selected. + // A label query over all the be selected resources. Resources matching the query are selected. // Note that namespace-scoped resources can't be selected even if they match the query. // +kubebuilder:validation:Optional LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` diff --git a/apis/placement/v1beta1/override_types.go b/apis/placement/v1beta1/override_types.go index 2245219da..193ca790d 100644 --- a/apis/placement/v1beta1/override_types.go +++ b/apis/placement/v1beta1/override_types.go @@ -61,7 +61,7 @@ type ClusterResourceOverrideSpec struct { // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=20 // +required - ClusterResourceSelectors []ClusterResourceSelector `json:"clusterResourceSelectors"` + ClusterResourceSelectors []ResourceSelectorTerm `json:"clusterResourceSelectors"` // Policy defines how to override the selected resources on the target clusters. // +required diff --git a/apis/placement/v1beta1/zz_generated.deepcopy.go b/apis/placement/v1beta1/zz_generated.deepcopy.go index 72b50ef34..9758c3799 100644 --- a/apis/placement/v1beta1/zz_generated.deepcopy.go +++ b/apis/placement/v1beta1/zz_generated.deepcopy.go @@ -631,7 +631,7 @@ func (in *ClusterResourceOverrideSpec) DeepCopyInto(out *ClusterResourceOverride } if in.ClusterResourceSelectors != nil { in, out := &in.ClusterResourceSelectors, &out.ClusterResourceSelectors - *out = make([]ClusterResourceSelector, len(*in)) + *out = make([]ResourceSelectorTerm, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -888,7 +888,7 @@ func (in *ClusterResourcePlacementStatusList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterResourceSelector) DeepCopyInto(out *ClusterResourceSelector) { +func (in *ResourceSelectorTerm) DeepCopyInto(out *ResourceSelectorTerm) { *out = *in if in.LabelSelector != nil { in, out := &in.LabelSelector, &out.LabelSelector @@ -898,11 +898,11 @@ func (in *ClusterResourceSelector) DeepCopyInto(out *ClusterResourceSelector) { } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceSelector. -func (in *ClusterResourceSelector) DeepCopy() *ClusterResourceSelector { +func (in *ResourceSelectorTerm) DeepCopy() *ResourceSelectorTerm { if in == nil { return nil } - out := new(ClusterResourceSelector) + out := new(ResourceSelectorTerm) in.DeepCopyInto(out) return out } @@ -1666,7 +1666,7 @@ func (in *PlacementSpec) DeepCopyInto(out *PlacementSpec) { *out = *in if in.ResourceSelectors != nil { in, out := &in.ResourceSelectors, &out.ResourceSelectors - *out = make([]ClusterResourceSelector, len(*in)) + *out = make([]ResourceSelectorTerm, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml index dbeb5b4ca..cefcd7c12 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml @@ -404,22 +404,22 @@ spec: We only support Name selector for now. items: description: |- - ClusterResourceSelector is used to select resources as the target resources to be placed. + ResourceSelectorTerm is used to select resources as the target resources to be placed. All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. properties: group: description: |- - Group name of the cluster-scoped resource. + Group name of the be selected resource. Use an empty string to select resources under the core API group (e.g., namespaces). type: string kind: description: |- - Kind of the cluster-scoped resource. + Kind of the to be selected resource. Note: When `Kind` is `namespace`, by default ALL the resources under the selected namespaces are selected. type: string labelSelector: description: |- - A label query over all the cluster-scoped resources. Resources matching the query are selected. + A label query over all the be selected resources. Resources matching the query are selected. Note that namespace-scoped resources can't be selected even if they match the query. properties: matchExpressions: @@ -466,7 +466,7 @@ spec: type: object x-kubernetes-map-type: atomic name: - description: Name of the cluster-scoped resource. + description: Name of the be selected resource. type: string selectionScope: default: NamespaceWithResources @@ -477,7 +477,7 @@ spec: - NamespaceWithResources type: string version: - description: Version of the cluster-scoped resource. + description: Version of the to be selected resource. type: string required: - group @@ -777,22 +777,22 @@ spec: We only support Name selector for now. items: description: |- - ClusterResourceSelector is used to select resources as the target resources to be placed. + ResourceSelectorTerm is used to select resources as the target resources to be placed. All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. properties: group: description: |- - Group name of the cluster-scoped resource. + Group name of the be selected resource. 
Use an empty string to select resources under the core API group (e.g., namespaces). type: string kind: description: |- - Kind of the cluster-scoped resource. + Kind of the to be selected resource. Note: When `Kind` is `namespace`, by default ALL the resources under the selected namespaces are selected. type: string labelSelector: description: |- - A label query over all the cluster-scoped resources. Resources matching the query are selected. + A label query over all the be selected resources. Resources matching the query are selected. Note that namespace-scoped resources can't be selected even if they match the query. properties: matchExpressions: @@ -839,7 +839,7 @@ spec: type: object x-kubernetes-map-type: atomic name: - description: Name of the cluster-scoped resource. + description: Name of the be selected resource. type: string selectionScope: default: NamespaceWithResources @@ -850,7 +850,7 @@ spec: - NamespaceWithResources type: string version: - description: Version of the cluster-scoped resource. + description: Version of the to be selected resource. type: string required: - group diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml index 8cf473808..eb8e96b83 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml @@ -436,22 +436,22 @@ spec: We only support Name selector for now. items: description: |- - ClusterResourceSelector is used to select resources as the target resources to be placed. + ResourceSelectorTerm is used to select resources as the target resources to be placed. All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. properties: group: description: |- - Group name of the cluster-scoped resource. + Group name of the be selected resource. Use an empty string to select resources under the core API group (e.g., namespaces). type: string kind: description: |- - Kind of the cluster-scoped resource. + Kind of the to be selected resource. Note: When `Kind` is `namespace`, by default ALL the resources under the selected namespaces are selected. type: string labelSelector: description: |- - A label query over all the cluster-scoped resources. Resources matching the query are selected. + A label query over all the be selected resources. Resources matching the query are selected. Note that namespace-scoped resources can't be selected even if they match the query. properties: matchExpressions: @@ -498,7 +498,7 @@ spec: type: object x-kubernetes-map-type: atomic name: - description: Name of the cluster-scoped resource. + description: Name of the be selected resource. type: string selectionScope: default: NamespaceWithResources @@ -509,7 +509,7 @@ spec: - NamespaceWithResources type: string version: - description: Version of the cluster-scoped resource. + description: Version of the to be selected resource. type: string required: - group @@ -823,22 +823,22 @@ spec: We only support Name selector for now. items: description: |- - ClusterResourceSelector is used to select resources as the target resources to be placed. + ResourceSelectorTerm is used to select resources as the target resources to be placed. All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. properties: group: description: |- - Group name of the cluster-scoped resource. 
+ Group name of the be selected resource. Use an empty string to select resources under the core API group (e.g., namespaces). type: string kind: description: |- - Kind of the cluster-scoped resource. + Kind of the to be selected resource. Note: When `Kind` is `namespace`, by default ALL the resources under the selected namespaces are selected. type: string labelSelector: description: |- - A label query over all the cluster-scoped resources. Resources matching the query are selected. + A label query over all the be selected resources. Resources matching the query are selected. Note that namespace-scoped resources can't be selected even if they match the query. properties: matchExpressions: @@ -885,7 +885,7 @@ spec: type: object x-kubernetes-map-type: atomic name: - description: Name of the cluster-scoped resource. + description: Name of the be selected resource. type: string selectionScope: default: NamespaceWithResources @@ -896,7 +896,7 @@ spec: - NamespaceWithResources type: string version: - description: Version of the cluster-scoped resource. + description: Version of the to be selected resource. type: string required: - group diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml index b2379cdf9..d7cb2bd4e 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml @@ -1589,22 +1589,22 @@ spec: You can have 1-100 selectors. items: description: |- - ClusterResourceSelector is used to select resources as the target resources to be placed. + ResourceSelectorTerm is used to select resources as the target resources to be placed. All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. properties: group: description: |- - Group name of the cluster-scoped resource. + Group name of the be selected resource. Use an empty string to select resources under the core API group (e.g., namespaces). type: string kind: description: |- - Kind of the cluster-scoped resource. + Kind of the to be selected resource. Note: When `Kind` is `namespace`, by default ALL the resources under the selected namespaces are selected. type: string labelSelector: description: |- - A label query over all the cluster-scoped resources. Resources matching the query are selected. + A label query over all the be selected resources. Resources matching the query are selected. Note that namespace-scoped resources can't be selected even if they match the query. properties: matchExpressions: @@ -1651,7 +1651,7 @@ spec: type: object x-kubernetes-map-type: atomic name: - description: Name of the cluster-scoped resource. + description: Name of the be selected resource. type: string selectionScope: default: NamespaceWithResources @@ -1662,7 +1662,7 @@ spec: - NamespaceWithResources type: string version: - description: Version of the cluster-scoped resource. + description: Version of the to be selected resource. type: string required: - group diff --git a/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml b/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml index 602567f8f..0583faa71 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml @@ -524,22 +524,22 @@ spec: You can have 1-100 selectors. 
items: description: |- - ClusterResourceSelector is used to select resources as the target resources to be placed. + ResourceSelectorTerm is used to select resources as the target resources to be placed. All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. properties: group: description: |- - Group name of the cluster-scoped resource. + Group name of the be selected resource. Use an empty string to select resources under the core API group (e.g., namespaces). type: string kind: description: |- - Kind of the cluster-scoped resource. + Kind of the to be selected resource. Note: When `Kind` is `namespace`, by default ALL the resources under the selected namespaces are selected. type: string labelSelector: description: |- - A label query over all the cluster-scoped resources. Resources matching the query are selected. + A label query over all the be selected resources. Resources matching the query are selected. Note that namespace-scoped resources can't be selected even if they match the query. properties: matchExpressions: @@ -586,7 +586,7 @@ spec: type: object x-kubernetes-map-type: atomic name: - description: Name of the cluster-scoped resource. + description: Name of the be selected resource. type: string selectionScope: default: NamespaceWithResources @@ -597,7 +597,7 @@ spec: - NamespaceWithResources type: string version: - description: Version of the cluster-scoped resource. + description: Version of the to be selected resource. type: string required: - group diff --git a/hack/loadtest/util/help.go b/hack/loadtest/util/help.go index b059dbecf..6d7d787b0 100644 --- a/hack/loadtest/util/help.go +++ b/hack/loadtest/util/help.go @@ -218,7 +218,7 @@ func createCRP(crp *v1beta1.ClusterResourcePlacement, crpFile string, crpName st crp.Name = crpName if useTestResources { - crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, v1beta1.ClusterResourceSelector{ + crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, v1beta1.ResourceSelectorTerm{ Group: "", Version: "v1", Kind: "Namespace", diff --git a/pkg/controllers/clusterresourceplacement/controller_integration_test.go b/pkg/controllers/clusterresourceplacement/controller_integration_test.go index a43a3c511..0365135a0 100644 --- a/pkg/controllers/clusterresourceplacement/controller_integration_test.go +++ b/pkg/controllers/clusterresourceplacement/controller_integration_test.go @@ -388,7 +388,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Name: testCRPName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -976,7 +976,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { By("Update CRP spec to add another resource selector") gotCRP.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, - placementv1beta1.ClusterResourceSelector{ + placementv1beta1.ResourceSelectorTerm{ Group: corev1.GroupName, Version: "v1", Kind: "Namespace", @@ -1400,7 +1400,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Name: testCRPName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -1918,7 +1918,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Name: testCRPName, }, Spec: 
placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -1980,7 +1980,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Name: testCRPName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", diff --git a/pkg/controllers/clusterresourceplacement/controller_test.go b/pkg/controllers/clusterresourceplacement/controller_test.go index 7d32d67d8..f86ff540a 100644 --- a/pkg/controllers/clusterresourceplacement/controller_test.go +++ b/pkg/controllers/clusterresourceplacement/controller_test.go @@ -105,7 +105,7 @@ func clusterResourcePlacementForTest() *fleetv1beta1.ClusterResourcePlacement { Generation: placementGeneration, }, Spec: fleetv1beta1.PlacementSpec{ - ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ + ResourceSelectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", diff --git a/pkg/controllers/clusterresourceplacement/placement_status_test.go b/pkg/controllers/clusterresourceplacement/placement_status_test.go index 2c72501a8..fc7078dc5 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status_test.go +++ b/pkg/controllers/clusterresourceplacement/placement_status_test.go @@ -5968,7 +5968,7 @@ func TestSetPlacementStatusForClusterResourcePlacement(t *testing.T) { Name: testCRPName, }, Spec: fleetv1beta1.PlacementSpec{ - ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ + ResourceSelectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -6647,7 +6647,7 @@ func TestSetResourcePlacementStatus(t *testing.T) { Namespace: testRPNamespace, }, Spec: fleetv1beta1.PlacementSpec{ - ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ + ResourceSelectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", diff --git a/pkg/controllers/clusterresourceplacement/resource_selector.go b/pkg/controllers/clusterresourceplacement/resource_selector.go index 305dede86..a6fdd8de9 100644 --- a/pkg/controllers/clusterresourceplacement/resource_selector.go +++ b/pkg/controllers/clusterresourceplacement/resource_selector.go @@ -113,10 +113,10 @@ func (r *Reconciler) selectResources(placement *fleetv1alpha1.ClusterResourcePla // Note: temporary solution to share the same set of utils between v1alpha1 and v1beta1 APIs so that v1alpha1 implementation // won't be broken. v1alpha1 implementation should be removed when new API is ready. // The clusterResourceSelect has no changes between different versions. -func convertResourceSelector(old []fleetv1alpha1.ClusterResourceSelector) []fleetv1beta1.ClusterResourceSelector { - res := make([]fleetv1beta1.ClusterResourceSelector, len(old)) +func convertResourceSelector(old []fleetv1alpha1.ClusterResourceSelector) []fleetv1beta1.ResourceSelectorTerm { + res := make([]fleetv1beta1.ResourceSelectorTerm, len(old)) for i, item := range old { - res[i] = fleetv1beta1.ClusterResourceSelector{ + res[i] = fleetv1beta1.ResourceSelectorTerm{ Group: item.Group, Version: item.Version, Kind: item.Kind, @@ -129,7 +129,7 @@ func convertResourceSelector(old []fleetv1alpha1.ClusterResourceSelector) []flee } // gatherSelectedResource gets all the resources according to the resource selector. 
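+// The placementKey's Namespace distinguishes the two placement scopes: an empty
+// Namespace means a cluster-scoped placement (CRP), while a non-empty Namespace
+// limits selection to that namespace for a ResourcePlacement.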
-func (r *Reconciler) gatherSelectedResource(placementKey types.NamespacedName, selectors []fleetv1beta1.ClusterResourceSelector) ([]*unstructured.Unstructured, error) { +func (r *Reconciler) gatherSelectedResource(placementKey types.NamespacedName, selectors []fleetv1beta1.ResourceSelectorTerm) ([]*unstructured.Unstructured, error) { var resources []*unstructured.Unstructured var resourceMap = make(map[fleetv1beta1.ResourceIdentifier]bool) for _, selector := range selectors { @@ -145,7 +145,7 @@ func (r *Reconciler) gatherSelectedResource(placementKey types.NamespacedName, s } var objs []runtime.Object var err error - if gvk == utils.NamespaceGVK && placementKey.Namespace == "" { + if gvk == utils.NamespaceGVK && placementKey.Namespace == "" && selector.SelectionScope != fleetv1beta1.NamespaceOnly { objs, err = r.fetchNamespaceResources(selector, placementKey.Name) } else { objs, err = r.fetchResources(selector, placementKey) @@ -231,7 +231,7 @@ func buildApplyOrderMap() map[string]int { } // fetchResources retrieves the objects based on the selector. -func (r *Reconciler) fetchResources(selector fleetv1beta1.ClusterResourceSelector, placementKey types.NamespacedName) ([]runtime.Object, error) { +func (r *Reconciler) fetchResources(selector fleetv1beta1.ResourceSelectorTerm, placementKey types.NamespacedName) ([]runtime.Object, error) { klog.V(2).InfoS("Start to fetch resources by the selector", "selector", selector, "placement", placementKey) gk := schema.GroupKind{ Group: selector.Group, @@ -332,13 +332,13 @@ func (r *Reconciler) fetchResources(selector fleetv1beta1.ClusterResourceSelecto } // fetchNamespaceResources retrieves all the objects for a ResourceSelectorTerm that is for namespace. -func (r *Reconciler) fetchNamespaceResources(selector fleetv1beta1.ClusterResourceSelector, placeName string) ([]runtime.Object, error) { +func (r *Reconciler) fetchNamespaceResources(selector fleetv1beta1.ResourceSelectorTerm, placementName string) ([]runtime.Object, error) { klog.V(2).InfoS("start to fetch the namespace resources by the selector", "selector", selector) var resources []runtime.Object if len(selector.Name) != 0 { // just a single namespace - objs, err := r.fetchAllResourcesInOneNamespace(selector.Name, placeName) + objs, err := r.fetchAllResourcesInOneNamespace(selector.Name, placementName) if err != nil { klog.ErrorS(err, "failed to fetch all the selected resource in a namespace", "namespace", selector.Name) return nil, err @@ -359,7 +359,7 @@ func (r *Reconciler) fetchNamespaceResources(selector fleetv1beta1.ClusterResour } namespaces, err := r.InformerManager.Lister(utils.NamespaceGVR).List(labelSelector) if err != nil { - klog.ErrorS(err, "Cannot list all the namespaces by the label selector", "labelSelector", labelSelector, "placement", placeName) + klog.ErrorS(err, "Cannot list all the namespaces by the label selector", "labelSelector", labelSelector, "placement", placementName) return nil, controller.NewAPIServerError(true, err) } @@ -368,7 +368,7 @@ func (r *Reconciler) fetchNamespaceResources(selector fleetv1beta1.ClusterResour if err != nil { return nil, controller.NewUnexpectedBehaviorError(fmt.Errorf("cannot get the name of a namespace object: %w", err)) } - objs, err := r.fetchAllResourcesInOneNamespace(ns.GetName(), placeName) + objs, err := r.fetchAllResourcesInOneNamespace(ns.GetName(), placementName) if err != nil { klog.ErrorS(err, "failed to fetch all the selected resource in a namespace", "namespace", ns.GetName()) return nil, err diff --git 
a/pkg/controllers/clusterresourceplacement/resource_selector_test.go b/pkg/controllers/clusterresourceplacement/resource_selector_test.go index b42a50b8e..428d7b58f 100644 --- a/pkg/controllers/clusterresourceplacement/resource_selector_test.go +++ b/pkg/controllers/clusterresourceplacement/resource_selector_test.go @@ -989,7 +989,7 @@ func TestGatherSelectedResource(t *testing.T) { tests := []struct { name string placementName types.NamespacedName - selectors []fleetv1beta1.ClusterResourceSelector + selectors []fleetv1beta1.ResourceSelectorTerm resourceConfig *utils.ResourceConfig informerManager *testinformer.FakeManager want []*unstructured.Unstructured @@ -998,13 +998,13 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should handle empty selectors", placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ClusterResourceSelector{}, + selectors: []fleetv1beta1.ResourceSelectorTerm{}, want: nil, }, { name: "should skip disabled resources", placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1012,13 +1012,13 @@ func TestGatherSelectedResource(t *testing.T) { Name: "test-deployment", }, }, - resourceConfig: utils.NewResourceConfig(true), // deny list - empty means deny all + resourceConfig: utils.NewResourceConfig(true), // make this allow list - nothing is allowed want: nil, }, { name: "should return error for cluster-scoped resource", placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -1026,7 +1026,7 @@ func TestGatherSelectedResource(t *testing.T) { Name: "test-clusterrole", }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: &testinformer.FakeManager{ IsClusterScopedResource: false, Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{}, @@ -1037,7 +1037,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should handle single resource selection successfully", placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1045,7 +1045,7 @@ func TestGatherSelectedResource(t *testing.T) { Name: "test-deployment", }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: true, @@ -1060,7 +1060,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should return empty result when informer manager returns not found error", placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1068,7 +1068,7 @@ func TestGatherSelectedResource(t *testing.T) { Name: "test-deployment", }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() 
*testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: true, @@ -1085,7 +1085,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should return error when informer manager returns non-NotFound error", placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1093,7 +1093,7 @@ func TestGatherSelectedResource(t *testing.T) { Name: "test-deployment", }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: true, @@ -1110,14 +1110,14 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should return error using label selector when informer manager returns error", placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", Kind: "Deployment", }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: true, @@ -1134,7 +1134,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should return only non-deleting resources when mixed with deleting resources", placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1148,7 +1148,7 @@ func TestGatherSelectedResource(t *testing.T) { Name: "test-deleting-deployment", // deleting deployment }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: true, @@ -1163,7 +1163,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should handle resource selection successfully by using label selector", placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1175,7 +1175,7 @@ func TestGatherSelectedResource(t *testing.T) { }, }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: true, @@ -1190,7 +1190,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should handle label selector with MatchExpressions", placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1206,7 +1206,7 @@ func TestGatherSelectedResource(t *testing.T) { }, }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list 
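The selector tests in this file now rely on the shared FakeManager and FakeLister test utilities instead of per-file fakes. Below is a minimal, hedged sketch (not from the patch) of wiring such a fake outside a test table; it assumes, as the test cases above suggest, that FakeManager exposes the informer manager's Lister method and that FakeLister serves the objects placed in its Objects field.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"

	testinformer "github.com/kubefleet-dev/kubefleet/test/utils/informer"
)

func main() {
	nsGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}
	// A stand-in for the namespace fixtures used in the tests above.
	ns := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Namespace",
		"metadata":   map[string]interface{}{"name": "test-ns"},
	}}

	fakeMgr := &testinformer.FakeManager{
		IsClusterScopedResource: false,
		Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{
			nsGVR: {Objects: []runtime.Object{ns}},
		},
	}

	// The reconciler under test only sees the informer interface, so listing
	// through the fake behaves like listing through a real informer cache.
	objs, err := fakeMgr.Lister(nsGVR).List(labels.Everything())
	fmt.Println(len(objs), err)
}
```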
informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: true, @@ -1221,7 +1221,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should detect duplicate resources", placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1235,7 +1235,7 @@ func TestGatherSelectedResource(t *testing.T) { Name: "test-deployment", // same deployment selected twice }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: true, @@ -1249,7 +1249,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should sort resources according to apply order", placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1263,7 +1263,7 @@ func TestGatherSelectedResource(t *testing.T) { Name: "test-configmap", }, }, - resourceConfig: utils.NewResourceConfig(false), // Allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: true, @@ -1280,7 +1280,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should return error for namespace-scoped resource for cluster scoped placement", placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1288,7 +1288,7 @@ func TestGatherSelectedResource(t *testing.T) { Name: "test-deployment", }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: &testinformer.FakeManager{ IsClusterScopedResource: true, Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{}, @@ -1299,7 +1299,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should sort resources for cluster scoped placement", placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -1313,7 +1313,7 @@ func TestGatherSelectedResource(t *testing.T) { Name: "test-ns", }, }, - resourceConfig: utils.NewResourceConfig(false), // Allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: false, @@ -1330,7 +1330,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should select resources by name for cluster scoped placement", placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -1344,7 +1344,7 @@ func TestGatherSelectedResource(t *testing.T) { Name: "test-ns", }, }, - resourceConfig: utils.NewResourceConfig(false), // Allow all resources + resourceConfig: 
utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: false, @@ -1360,7 +1360,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should select namespaces and its children resources by using label selector for cluster scoped placement", placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1373,7 +1373,7 @@ func TestGatherSelectedResource(t *testing.T) { SelectionScope: fleetv1beta1.NamespaceWithResources, }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: false, @@ -1391,7 +1391,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should skip the resource for cluster scoped placement", placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1426,7 +1426,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should select namespaces using nil label selector for cluster scoped placement", placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1434,7 +1434,7 @@ func TestGatherSelectedResource(t *testing.T) { SelectionScope: fleetv1beta1.NamespaceWithResources, }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: false, @@ -1446,13 +1446,92 @@ func TestGatherSelectedResource(t *testing.T) { NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, } }(), - // Should select only non-reserved namespaces with matching labels and their children resources + // Should select only non-reserved namespaces with matching labels and their child resources want: []*unstructured.Unstructured{prodNamespace, testNamespace, testConfigMap, testDeployment}, }, + { + name: "should select only namespaces for namespace only scope for a namespace", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceOnly, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + // Should select only the namespace with name "test-ns" and none of its child 
resources + want: []*unstructured.Unstructured{testNamespace}, + }, + { + name: "should select only namespaces for namespace only scope for namespaces with labels", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + SelectionScope: fleetv1beta1.NamespaceOnly, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + // Should select only non-deleting namespaces with matching labels and none of their child resources + want: []*unstructured.Unstructured{prodNamespace, testNamespace}, + }, + { + name: "should return error if a resourceplacement selects namespaces even for namespace only scope", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceOnly, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + wantError: controller.ErrUserError, + }, { name: "should return error when selecting a reserved namespace for cluster scoped placement", placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1465,7 +1544,7 @@ func TestGatherSelectedResource(t *testing.T) { SelectionScope: fleetv1beta1.NamespaceWithResources, }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: false, @@ -1482,7 +1561,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should return empty result when informer manager returns not found error for cluster scoped placement", placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1491,7 +1570,7 @@ func TestGatherSelectedResource(t *testing.T) { SelectionScope: fleetv1beta1.NamespaceWithResources, }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all 
resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: true, @@ -1508,7 +1587,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should return error when informer manager returns non-NotFound error (getting namespace) for cluster scoped placement", placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1517,7 +1596,7 @@ func TestGatherSelectedResource(t *testing.T) { SelectionScope: fleetv1beta1.NamespaceWithResources, }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: true, @@ -1534,7 +1613,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should return error using label selector when informer manager returns error (getting namespace) for cluster scoped placement", placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1542,7 +1621,7 @@ func TestGatherSelectedResource(t *testing.T) { SelectionScope: fleetv1beta1.NamespaceWithResources, }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: true, @@ -1559,7 +1638,7 @@ func TestGatherSelectedResource(t *testing.T) { { name: "should return error when informer manager returns non-NotFound error (getting deployment) for cluster scoped placement", placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ClusterResourceSelector{ + selectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1568,7 +1647,7 @@ func TestGatherSelectedResource(t *testing.T) { SelectionScope: fleetv1beta1.NamespaceWithResources, }, }, - resourceConfig: utils.NewResourceConfig(false), // allow all resources + resourceConfig: utils.NewResourceConfig(false), // default deny list informerManager: func() *testinformer.FakeManager { return &testinformer.FakeManager{ IsClusterScopedResource: true, diff --git a/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go b/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go index 85f0c33d3..5b1bae995 100644 --- a/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go +++ b/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go @@ -492,7 +492,7 @@ var _ = Describe("Test ClusterResourcePlacementEviction Controller", func() { PlacementType: placementv1beta1.PickFixedPlacementType, ClusterNames: []string{"test-cluster-1"}, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -537,7 +537,7 @@ var _ = Describe("Test ClusterResourcePlacementEviction Controller", func() { // Create the CRP. 
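The new resource_selector test cases above exercise the NamespaceOnly selection scope: the namespace object itself is placed, the walk into its child resources is skipped, and a namespaced ResourcePlacement that selects namespaces is rejected as a user error. As a hedged, standalone restatement of that branch (wantsNamespaceChildren is a hypothetical helper name, not a function in the repository), the decision made in gatherSelectedResource looks roughly like this:

```go
package main

import (
	"fmt"

	placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1"
)

// wantsNamespaceChildren restates the updated condition in gatherSelectedResource:
// a cluster-scoped placement (empty placement namespace) expands a Namespace
// selector into the namespace's child resources unless the scope is NamespaceOnly.
func wantsNamespaceChildren(term placementv1beta1.ResourceSelectorTerm, placementNamespace string) bool {
	isNamespaceKind := term.Group == "" && term.Version == "v1" && term.Kind == "Namespace"
	return isNamespaceKind && placementNamespace == "" && term.SelectionScope != placementv1beta1.NamespaceOnly
}

func main() {
	nsOnly := placementv1beta1.ResourceSelectorTerm{
		Group:          "",
		Version:        "v1",
		Kind:           "Namespace",
		Name:           "test-ns",
		SelectionScope: placementv1beta1.NamespaceOnly,
	}
	nsWithResources := nsOnly
	nsWithResources.SelectionScope = placementv1beta1.NamespaceWithResources

	fmt.Println(wantsNamespaceChildren(nsOnly, ""))          // false: only the namespace object is gathered
	fmt.Println(wantsNamespaceChildren(nsWithResources, "")) // true: child resources are gathered as well
}
```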
By("Create ClusterResourcePlacement", func() { crp := buildTestPickAllCRP(crpName) - crp.Spec.ResourceSelectors = []placementv1beta1.ClusterResourceSelector{ + crp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -709,7 +709,7 @@ func buildTestPickNCRP(crpName string, clusterCount int32) placementv1beta1.Clus PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(clusterCount), }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", diff --git a/pkg/controllers/clusterresourceplacementwatcher/watcher_integration_test.go b/pkg/controllers/clusterresourceplacementwatcher/watcher_integration_test.go index df2ca53ca..01bda4f34 100644 --- a/pkg/controllers/clusterresourceplacementwatcher/watcher_integration_test.go +++ b/pkg/controllers/clusterresourceplacementwatcher/watcher_integration_test.go @@ -45,7 +45,7 @@ func clusterResourcePlacementForTest() *fleetv1beta1.ClusterResourcePlacement { Name: testCRPName, }, Spec: fleetv1beta1.PlacementSpec{ - ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ + ResourceSelectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -67,7 +67,7 @@ func resourcePlacementForTest() *fleetv1beta1.ResourcePlacement { Namespace: testNamespace, }, Spec: fleetv1beta1.PlacementSpec{ - ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ + ResourceSelectors: []fleetv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", diff --git a/pkg/controllers/overrider/clusterresource_controller_integration_test.go b/pkg/controllers/overrider/clusterresource_controller_integration_test.go index 9e1b41808..cdc67a3cb 100644 --- a/pkg/controllers/overrider/clusterresource_controller_integration_test.go +++ b/pkg/controllers/overrider/clusterresource_controller_integration_test.go @@ -35,7 +35,7 @@ import ( func getClusterResourceOverrideSpec() placementv1beta1.ClusterResourceOverrideSpec { return placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", diff --git a/pkg/controllers/resourcechange/resourcechange_controller.go b/pkg/controllers/resourcechange/resourcechange_controller.go index 32935d4be..3738bb8dc 100644 --- a/pkg/controllers/resourcechange/resourcechange_controller.go +++ b/pkg/controllers/resourcechange/resourcechange_controller.go @@ -115,22 +115,21 @@ func (r *Reconciler) handleDeletedResource(key keys.ClusterWideKey, isClusterSco } // handleUpdatedResourceForClusterResourcePlacement handles the updated resource for cluster resource placement. 
-func (r *Reconciler) handleUpdatedResourceForClusterResourcePlacement(key keys.ClusterWideKey, clusterObj runtime.Object, isClusterScoped bool) error { +func (r *Reconciler) handleUpdatedResourceForClusterResourcePlacement(key keys.ClusterWideKey, obj runtime.Object, isClusterScoped bool) error { if isClusterScoped { klog.V(2).InfoS("Find clusterResourcePlacement that selects the cluster scoped object", "obj", key) - return r.triggerAffectedPlacementsForUpdatedRes(key, clusterObj.(*unstructured.Unstructured), true) + return r.triggerAffectedPlacementsForUpdatedRes(key, obj.(*unstructured.Unstructured), true) } klog.V(2).InfoS("Find namespace that contains the namespace scoped object", "obj", key) // we will use the parent namespace object to search for the affected placements - var err error - clusterObj, err = r.InformerManager.Lister(utils.NamespaceGVR).Get(key.Namespace) + nsObj, err := r.InformerManager.Lister(utils.NamespaceGVR).Get(key.Namespace) if err != nil { klog.ErrorS(err, "Failed to find the namespace the resource belongs to", "obj", key) return client.IgnoreNotFound(err) } klog.V(2).InfoS("Find clusterResourcePlacement that selects the namespace", "obj", key) - if err := r.triggerAffectedPlacementsForUpdatedRes(key, clusterObj.(*unstructured.Unstructured), true); err != nil { + if err := r.triggerAffectedPlacementsForUpdatedRes(key, nsObj.(*unstructured.Unstructured), true); err != nil { klog.ErrorS(err, "Failed to trigger affected placements for updated cluster resource", "obj", key) return err } @@ -405,7 +404,7 @@ func collectAllAffectedPlacementsV1Alpha1(res *unstructured.Unstructured, crpLis return placements } -func isSelectNamespaceOnly(selector placementv1beta1.ClusterResourceSelector) bool { +func isSelectNamespaceOnly(selector placementv1beta1.ResourceSelectorTerm) bool { return selector.Group == "" && selector.Version == "v1" && selector.Kind == "Namespace" && selector.SelectionScope == placementv1beta1.NamespaceOnly } @@ -491,7 +490,7 @@ func matchSelectorGVKV1Alpha1(targetGVK schema.GroupVersionKind, selector fleetv selector.Kind == targetGVK.Kind } -func matchSelectorGVKV1Beta1(targetGVK schema.GroupVersionKind, selector placementv1beta1.ClusterResourceSelector) bool { +func matchSelectorGVKV1Beta1(targetGVK schema.GroupVersionKind, selector placementv1beta1.ResourceSelectorTerm) bool { return selector.Group == targetGVK.Group && selector.Version == targetGVK.Version && selector.Kind == targetGVK.Kind } @@ -506,7 +505,7 @@ func matchSelectorLabelSelectorV1Alpha1(targetLabels map[string]string, selector return s.Matches(labels.Set(targetLabels)) } -func matchSelectorLabelSelectorV1Beta1(targetLabels map[string]string, selector placementv1beta1.ClusterResourceSelector) bool { +func matchSelectorLabelSelectorV1Beta1(targetLabels map[string]string, selector placementv1beta1.ResourceSelectorTerm) bool { if selector.LabelSelector == nil { // if the labelselector not set, it means select all return true diff --git a/pkg/controllers/resourcechange/resourcechange_controller_test.go b/pkg/controllers/resourcechange/resourcechange_controller_test.go index a5c9abd5a..7f31f548c 100644 --- a/pkg/controllers/resourcechange/resourcechange_controller_test.go +++ b/pkg/controllers/resourcechange/resourcechange_controller_test.go @@ -692,7 +692,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: 
[]placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -715,7 +715,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -740,7 +740,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, }, }, }, @@ -755,7 +755,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -776,7 +776,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -804,7 +804,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -827,7 +827,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -854,7 +854,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -890,7 +890,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -928,7 +928,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing }, Spec: placementv1beta1.PlacementSpec{ // the mis-matching resource selector - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -971,7 +971,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -1007,7 +1007,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: 
"resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -1028,7 +1028,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -1049,7 +1049,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing }, Spec: placementv1beta1.PlacementSpec{ // Selector that does not match the resource - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -1086,7 +1086,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing Name: "resource-not-selected", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -1156,7 +1156,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1182,7 +1182,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, }, }, }, @@ -1197,7 +1197,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1220,7 +1220,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1242,7 +1242,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1279,7 +1279,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1300,7 +1300,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: 
"", Version: "v1", @@ -1328,7 +1328,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { }, Spec: placementv1beta1.PlacementSpec{ // Selector that does not match the resource - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -1439,7 +1439,7 @@ func TestHandleUpdatedResource(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1460,7 +1460,7 @@ func TestHandleUpdatedResource(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -2109,7 +2109,7 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -2130,7 +2130,7 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -2247,7 +2247,7 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Version: "v1", @@ -2334,7 +2334,7 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { Name: "test-crp-2", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -2409,7 +2409,7 @@ func TestTriggerAffectedPlacementsForUpdatedRes(t *testing.T) { Name: "crp-namespace-only", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -2700,7 +2700,7 @@ func TestHandleDeletedResource(t *testing.T) { Name: "crp-namespace-only", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -2771,11 +2771,11 @@ func TestHandleDeletedResource(t *testing.T) { func TestIsSelectNamespaceOnly(t *testing.T) { tests := map[string]struct { - selector placementv1beta1.ClusterResourceSelector + selector placementv1beta1.ResourceSelectorTerm want bool }{ "namespace with namespace only scope": { - selector: placementv1beta1.ClusterResourceSelector{ + selector: placementv1beta1.ResourceSelectorTerm{ Group: "", Version: "v1", Kind: "Namespace", @@ -2784,7 +2784,7 @@ func TestIsSelectNamespaceOnly(t *testing.T) { want: true, }, "namespace with namespace with resources scope": { - selector: placementv1beta1.ClusterResourceSelector{ + selector: placementv1beta1.ResourceSelectorTerm{ Group: "", Version: "v1", Kind: "Namespace", @@ -2793,7 +2793,7 @@ func 
TestIsSelectNamespaceOnly(t *testing.T) { want: false, }, "configmap with namespace only scope": { - selector: placementv1beta1.ClusterResourceSelector{ + selector: placementv1beta1.ResourceSelectorTerm{ Group: "", Version: "v1", Kind: "ConfigMap", @@ -2802,7 +2802,7 @@ func TestIsSelectNamespaceOnly(t *testing.T) { want: false, }, "deployment with namespace only scope": { - selector: placementv1beta1.ClusterResourceSelector{ + selector: placementv1beta1.ResourceSelectorTerm{ Group: "apps", Version: "v1", Kind: "Deployment", @@ -2811,7 +2811,7 @@ func TestIsSelectNamespaceOnly(t *testing.T) { want: false, }, "namespace with wrong group": { - selector: placementv1beta1.ClusterResourceSelector{ + selector: placementv1beta1.ResourceSelectorTerm{ Group: "core", Version: "v1", Kind: "Namespace", @@ -2820,7 +2820,7 @@ func TestIsSelectNamespaceOnly(t *testing.T) { want: false, }, "namespace with wrong version": { - selector: placementv1beta1.ClusterResourceSelector{ + selector: placementv1beta1.ResourceSelectorTerm{ Group: "", Version: "v2", Kind: "Namespace", @@ -2829,7 +2829,7 @@ func TestIsSelectNamespaceOnly(t *testing.T) { want: false, }, "namespace with default selection scope (NamespaceWithResources)": { - selector: placementv1beta1.ClusterResourceSelector{ + selector: placementv1beta1.ResourceSelectorTerm{ Group: "", Version: "v1", Kind: "Namespace", diff --git a/pkg/controllers/rollout/controller_integration_test.go b/pkg/controllers/rollout/controller_integration_test.go index f92f276c8..359458fc2 100644 --- a/pkg/controllers/rollout/controller_integration_test.go +++ b/pkg/controllers/rollout/controller_integration_test.go @@ -1547,7 +1547,7 @@ func resourcePlacementForTest(namespace, rpName string, policy *placementv1beta1 Namespace: namespace, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "v1", Version: "v1", diff --git a/pkg/controllers/rollout/controller_test.go b/pkg/controllers/rollout/controller_test.go index a8a540885..34277f03f 100644 --- a/pkg/controllers/rollout/controller_test.go +++ b/pkg/controllers/rollout/controller_test.go @@ -2613,7 +2613,7 @@ func clusterResourcePlacementForTest(crpName string, policy *placementv1beta1.Pl Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", diff --git a/pkg/controllers/updaterun/controller_integration_test.go b/pkg/controllers/updaterun/controller_integration_test.go index 1fe57deca..35da5d6e4 100644 --- a/pkg/controllers/updaterun/controller_integration_test.go +++ b/pkg/controllers/updaterun/controller_integration_test.go @@ -350,7 +350,7 @@ func generateTestClusterResourcePlacement() *placementv1beta1.ClusterResourcePla Name: testCRPName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -607,7 +607,7 @@ func generateTestClusterResourceOverride() *placementv1beta1.ClusterResourceOver }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", diff --git 
a/pkg/controllers/workgenerator/override_test.go b/pkg/controllers/workgenerator/override_test.go index 9268af092..a1e9ed8f2 100644 --- a/pkg/controllers/workgenerator/override_test.go +++ b/pkg/controllers/workgenerator/override_test.go @@ -61,7 +61,7 @@ func TestFetchClusterResourceOverrideSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -87,7 +87,7 @@ func TestFetchClusterResourceOverrideSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", diff --git a/pkg/controllers/workgenerator/suite_test.go b/pkg/controllers/workgenerator/suite_test.go index b43a7ae30..a7b751b3b 100644 --- a/pkg/controllers/workgenerator/suite_test.go +++ b/pkg/controllers/workgenerator/suite_test.go @@ -176,7 +176,7 @@ func createOverrides() { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: utils.NamespaceGVK.Group, Version: utils.NamespaceGVK.Version, @@ -265,7 +265,7 @@ func createOverrides() { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: utils.NamespaceGVK.Group, Version: utils.NamespaceGVK.Version, diff --git a/pkg/scheduler/watchers/clusterresourceplacement/controller_integration_test.go b/pkg/scheduler/watchers/clusterresourceplacement/controller_integration_test.go index cbfb7985a..b5113286d 100644 --- a/pkg/scheduler/watchers/clusterresourceplacement/controller_integration_test.go +++ b/pkg/scheduler/watchers/clusterresourceplacement/controller_integration_test.go @@ -48,7 +48,7 @@ const ( ) var ( - resourceSelectors = []fleetv1beta1.ClusterResourceSelector{ + resourceSelectors = []fleetv1beta1.ResourceSelectorTerm{ { Group: "core", Kind: "Namespace", diff --git a/pkg/scheduler/watchers/membercluster/suite_test.go b/pkg/scheduler/watchers/membercluster/suite_test.go index 0071a8d53..d351b4270 100644 --- a/pkg/scheduler/watchers/membercluster/suite_test.go +++ b/pkg/scheduler/watchers/membercluster/suite_test.go @@ -51,7 +51,7 @@ var ( ) var ( - defaultResourceSelectors = []placementv1beta1.ClusterResourceSelector{ + defaultResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ { Group: "core", Kind: "Namespace", diff --git a/pkg/utils/overrider/overrider_test.go b/pkg/utils/overrider/overrider_test.go index b076eca49..de0c68ffa 100644 --- a/pkg/utils/overrider/overrider_test.go +++ b/pkg/utils/overrider/overrider_test.go @@ -146,7 +146,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: 
[]placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -213,7 +213,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -258,7 +258,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -322,7 +322,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -425,7 +425,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -445,7 +445,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -517,7 +517,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -651,7 +651,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { Placement: &placementv1beta1.PlacementRef{ Name: crpName, }, - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -671,7 +671,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -746,7 +746,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: 
[]placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -844,7 +844,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -864,7 +864,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -930,7 +930,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -950,7 +950,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1075,7 +1075,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -1098,7 +1098,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { Placement: &placementv1beta1.PlacementRef{ Name: "other-placement", }, - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", diff --git a/pkg/utils/validator/clusterresourceoverride.go b/pkg/utils/validator/clusterresourceoverride.go index a05f24e9e..327718654 100644 --- a/pkg/utils/validator/clusterresourceoverride.go +++ b/pkg/utils/validator/clusterresourceoverride.go @@ -51,7 +51,7 @@ func ValidateClusterResourceOverride(cro placementv1beta1.ClusterResourceOverrid // validateClusterResourceSelectors checks if override is selecting resource by name. 
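The validator below dedupes override selectors by using the selector struct itself as a map key, which the rename to ResourceSelectorTerm preserves since the type contains only comparable fields. A hedged, standalone sketch of that set-membership pattern follows; the ClusterRole selector values are illustrative only.

```go
package main

import (
	"fmt"

	placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1"
)

func main() {
	// A ResourceSelectorTerm value can serve directly as a map key, much as
	// selectorMap does in the validator below.
	seen := make(map[placementv1beta1.ResourceSelectorTerm]bool)
	selectors := []placementv1beta1.ResourceSelectorTerm{
		{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole", Name: "example"},
		{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole", Name: "example"}, // duplicate
	}
	for _, selector := range selectors {
		if seen[selector] {
			fmt.Printf("resource selector %+v already exists, and must be unique\n", selector)
			continue
		}
		seen[selector] = true
	}
}
```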
func validateClusterResourceSelectors(cro placementv1beta1.ClusterResourceOverride) error { - selectorMap := make(map[placementv1beta1.ClusterResourceSelector]bool) + selectorMap := make(map[placementv1beta1.ResourceSelectorTerm]bool) allErr := make([]error, 0) for _, selector := range cro.Spec.ClusterResourceSelectors { // Check if the resource is not being selected by label selector @@ -79,7 +79,7 @@ func validateClusterResourceOverrideResourceLimit(cro placementv1beta1.ClusterRe if croList == nil || len(croList.Items) == 0 { return nil } - overrideMap := make(map[placementv1beta1.ClusterResourceSelector]string) + overrideMap := make(map[placementv1beta1.ResourceSelectorTerm]string) // Add overrides and its selectors to the map for _, override := range croList.Items { selectors := override.Spec.ClusterResourceSelectors diff --git a/pkg/utils/validator/clusterresourceoverride_test.go b/pkg/utils/validator/clusterresourceoverride_test.go index 32cf650a9..c79267720 100644 --- a/pkg/utils/validator/clusterresourceoverride_test.go +++ b/pkg/utils/validator/clusterresourceoverride_test.go @@ -21,7 +21,7 @@ func TestValidateClusterResourceSelectors(t *testing.T) { "resource selected by label selector": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -40,7 +40,7 @@ func TestValidateClusterResourceSelectors(t *testing.T) { "resource selected by empty name": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -55,7 +55,7 @@ func TestValidateClusterResourceSelectors(t *testing.T) { "duplicate resources selected": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -72,12 +72,12 @@ func TestValidateClusterResourceSelectors(t *testing.T) { }, }, wantErrMsg: fmt.Errorf("resource selector %+v already exists, and must be unique", - placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "Kind", Name: "example"}), + placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "Kind", Name: "example"}), }, "resource selected by name": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -92,7 +92,7 @@ func TestValidateClusterResourceSelectors(t *testing.T) { "multiple invalid resources selected": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -124,9 +124,9 @@ func TestValidateClusterResourceSelectors(t *testing.T) { }, }, }, - wantErrMsg: apierrors.NewAggregate([]error{fmt.Errorf("label selector is not supported for resource selection 
%+v", placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "Kind", LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"key": "value"}}}), - fmt.Errorf("resource name is required for resource selection %+v", placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "Kind", Name: ""}), - fmt.Errorf("resource selector %+v already exists, and must be unique", placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "Kind", Name: "example"})}), + wantErrMsg: apierrors.NewAggregate([]error{fmt.Errorf("label selector is not supported for resource selection %+v", placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "Kind", LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"key": "value"}}}), + fmt.Errorf("resource name is required for resource selection %+v", placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "Kind", Name: ""}), + fmt.Errorf("resource selector %+v already exists, and must be unique", placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "Kind", Name: "example"})}), }, } for testName, tt := range tests { @@ -155,7 +155,7 @@ func TestValidateClusterResourceOverrideResourceLimit(t *testing.T) { Name: "override-1", }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -174,7 +174,7 @@ func TestValidateClusterResourceOverrideResourceLimit(t *testing.T) { Name: "override-2", }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -192,7 +192,7 @@ func TestValidateClusterResourceOverrideResourceLimit(t *testing.T) { }, overrideCount: 1, wantErrMsg: fmt.Errorf("invalid resource selector %+v: the resource has been selected by both %v and %v, which is not supported", - placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "kind", Name: "example-0"}, "override-2", "override-0"), + placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "kind", Name: "example-0"}, "override-2", "override-0"), }, "one override, which exists": { cro: placementv1beta1.ClusterResourceOverride{ @@ -200,7 +200,7 @@ func TestValidateClusterResourceOverrideResourceLimit(t *testing.T) { Name: "override-1", }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -219,7 +219,7 @@ func TestValidateClusterResourceOverrideResourceLimit(t *testing.T) { Name: "override-2", }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -242,7 +242,7 @@ func TestValidateClusterResourceOverrideResourceLimit(t *testing.T) { Name: fmt.Sprintf("override-%d", i), }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", 
Version: "v1", @@ -311,7 +311,7 @@ func TestValidateClusterResourceOverride(t *testing.T) { "valid cluster resource override": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -328,7 +328,7 @@ func TestValidateClusterResourceOverride(t *testing.T) { "invalid cluster resource override - fail validateResourceSelector": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -357,9 +357,9 @@ func TestValidateClusterResourceOverride(t *testing.T) { }, croList: &placementv1beta1.ClusterResourceOverrideList{}, wantErrMsg: apierrors.NewAggregate([]error{fmt.Errorf("resource selector %+v already exists, and must be unique", - placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "kind", Name: "example"}), + placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "kind", Name: "example"}), fmt.Errorf("label selector is not supported for resource selection %+v", - placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "kind", + placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "kind", LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"key": "value"}}})}), }, "invalid cluster resource override - fail ValidateClusterResourceOverrideResourceLimit": { @@ -368,7 +368,7 @@ func TestValidateClusterResourceOverride(t *testing.T) { Name: "override-1", }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -384,7 +384,7 @@ func TestValidateClusterResourceOverride(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{Name: "override-0"}, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "group", Version: "v1", @@ -398,12 +398,12 @@ func TestValidateClusterResourceOverride(t *testing.T) { }, }, wantErrMsg: fmt.Errorf("invalid resource selector %+v: the resource has been selected by both %v and %v, which is not supported", - placementv1beta1.ClusterResourceSelector{Group: "group", Version: "v1", Kind: "kind", Name: "duplicate-example"}, "override-1", "override-0"), + placementv1beta1.ResourceSelectorTerm{Group: "group", Version: "v1", Kind: "kind", Name: "duplicate-example"}, "override-1", "override-0"), }, "valid cluster resource override - empty croList": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -420,7 +420,7 @@ func TestValidateClusterResourceOverride(t *testing.T) { "valid cluster resource override - croList nil": { cro: placementv1beta1.ClusterResourceOverride{ Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: 
[]placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", diff --git a/pkg/utils/validator/clusterresourceplacement_test.go b/pkg/utils/validator/clusterresourceplacement_test.go index 912dbf6e6..a3cbab952 100644 --- a/pkg/utils/validator/clusterresourceplacement_test.go +++ b/pkg/utils/validator/clusterresourceplacement_test.go @@ -35,7 +35,7 @@ import ( var ( positiveNumberOfClusters int32 = 1 negativeNumberOfClusters int32 = -1 - resourceSelector = placementv1beta1.ClusterResourceSelector{ + resourceSelector = placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole", @@ -222,7 +222,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, }, @@ -239,7 +239,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { Name: "test-crp-with-very-long-name-field-exceeding-DNS1035LabelMaxLength", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, }, @@ -257,7 +257,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -285,7 +285,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -305,7 +305,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Kind: "Deployment", @@ -327,7 +327,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, }, }, resourceInformer: nil, diff --git a/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_mutating_webhook_test.go b/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_mutating_webhook_test.go index e032ce0d6..344d5e54e 100644 --- a/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_mutating_webhook_test.go +++ b/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_mutating_webhook_test.go @@ -46,7 +46,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-no-revisionhistory", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: 
[]placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -73,7 +73,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-no-policy", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, // Policy omitted Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -98,7 +98,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-no-strategy", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -112,7 +112,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-no-apply-strategy", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -134,7 +134,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-no-serverside-apply-config", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -162,7 +162,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-no-rolling-update-config", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, ClusterNames: []string{"cluster1", "cluster2"}, @@ -186,7 +186,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-no-toleration-operator", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, Tolerations: []placementv1beta1.Toleration{ @@ -215,7 +215,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-topology-spread-constraints", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, TopologySpreadConstraints: []placementv1beta1.TopologySpreadConstraint{ @@ -243,7 +243,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-all-fields", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: 
ptr.To(int32(3)), @@ -298,7 +298,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-update-missing", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, // Policy change is immutable NumberOfClusters: ptr.To(int32(3)), @@ -313,7 +313,7 @@ func TestMutatingHandle(t *testing.T) { Name: "test-crp-update-change-field", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(5)), // Changed from 3 to 5 diff --git a/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook_test.go b/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook_test.go index 377bc8467..30909093d 100644 --- a/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook_test.go +++ b/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook_test.go @@ -25,7 +25,7 @@ import ( ) var ( - resourceSelector = placementv1beta1.ClusterResourceSelector{ + resourceSelector = placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole", @@ -44,7 +44,7 @@ func TestHandle(t *testing.T) { Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -64,7 +64,7 @@ func TestHandle(t *testing.T) { Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -85,7 +85,7 @@ func TestHandle(t *testing.T) { Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -104,7 +104,7 @@ func TestHandle(t *testing.T) { Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -123,7 +123,7 @@ func TestHandle(t *testing.T) { Policy: 
&placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, }, @@ -135,7 +135,7 @@ func TestHandle(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, Tolerations: []placementv1beta1.Toleration{ @@ -160,7 +160,7 @@ func TestHandle(t *testing.T) { Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -180,7 +180,7 @@ func TestHandle(t *testing.T) { Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -197,7 +197,7 @@ func TestHandle(t *testing.T) { Finalizers: []string{placementv1beta1.PlacementCleanupFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -216,7 +216,7 @@ func TestHandle(t *testing.T) { PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(2)), }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, }, diff --git a/pkg/webhook/clusterresourceplacementdisruptionbudget/clusterresourceplacementdisruptionbudget_validating_webhook_test.go b/pkg/webhook/clusterresourceplacementdisruptionbudget/clusterresourceplacementdisruptionbudget_validating_webhook_test.go index 912ee3467..9e480f661 100644 --- a/pkg/webhook/clusterresourceplacementdisruptionbudget/clusterresourceplacementdisruptionbudget_validating_webhook_test.go +++ b/pkg/webhook/clusterresourceplacementdisruptionbudget/clusterresourceplacementdisruptionbudget_validating_webhook_test.go @@ -121,7 +121,7 @@ func TestHandle(t *testing.T) { Name: "pick-all-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -132,7 +132,7 @@ func TestHandle(t *testing.T) { Name: 
"crp-pickn", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(1)), @@ -144,7 +144,7 @@ func TestHandle(t *testing.T) { Name: "crp-pickfixed", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, ClusterNames: []string{"cluster1", "cluster2"}, diff --git a/pkg/webhook/clusterresourceplacementeviction/clusterresourceplacementeviction_validating_webhook_test.go b/pkg/webhook/clusterresourceplacementeviction/clusterresourceplacementeviction_validating_webhook_test.go index 3f449eeb3..6fbeab0e6 100644 --- a/pkg/webhook/clusterresourceplacementeviction/clusterresourceplacementeviction_validating_webhook_test.go +++ b/pkg/webhook/clusterresourceplacementeviction/clusterresourceplacementeviction_validating_webhook_test.go @@ -84,7 +84,7 @@ func TestHandle(t *testing.T) { Name: "test-crp", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -99,7 +99,7 @@ func TestHandle(t *testing.T) { Finalizers: []string{placementv1beta1.PlacementCleanupFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -110,7 +110,7 @@ func TestHandle(t *testing.T) { Name: "crp-pickfixed", }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, ClusterNames: []string{"cluster1", "cluster2"}, diff --git a/test/apis/placement/v1beta1/api_validation_integration_test.go b/test/apis/placement/v1beta1/api_validation_integration_test.go index 0144c804b..0ea1323dc 100644 --- a/test/apis/placement/v1beta1/api_validation_integration_test.go +++ b/test/apis/placement/v1beta1/api_validation_integration_test.go @@ -58,7 +58,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -112,7 +112,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -132,7 +132,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -164,7 +164,7 @@ var _ = 
Describe("Test placement v1beta1 API validation", func() { Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -202,7 +202,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -245,7 +245,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -280,7 +280,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -314,7 +314,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: crpName, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -337,7 +337,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should allow update of ClusterResourcePlacement with StatusReportingScope NamespaceAccessible, one namespace plus other cluster-scoped resources", func() { - crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ClusterResourceSelector{ + crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -356,7 +356,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should allow update of ClusterResourcePlacement with StatusReportingScope ClusterScopeOnly, multiple namespace selectors", func() { - crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ClusterResourceSelector{ + crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -381,7 +381,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should allow update of ClusterResourcePlacement with default StatusReportingScope, multiple namespace selectors", func() { - crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ClusterResourceSelector{ + crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -405,7 +405,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of ClusterResourcePlacement with StatusReportingScope NamespaceAccessible and multiple namespace selectors", func() { - crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ClusterResourceSelector{ + crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -427,7 +427,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of 
ClusterResourcePlacement with StatusReportingScope NamespaceAccessible, no namespace selectors", func() { - crp.Spec.ResourceSelectors = []placementv1beta1.ClusterResourceSelector{ + crp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -1041,7 +1041,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1079,7 +1079,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: "test-placement", Scope: placementv1beta1.ClusterScoped, }, - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1116,7 +1116,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Placement: &placementv1beta1.PlacementRef{ Name: "test-placement", }, - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", @@ -1156,7 +1156,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: "test-placement", Scope: placementv1beta1.NamespaceScoped, }, - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", diff --git a/test/e2e/enveloped_object_placement_test.go b/test/e2e/enveloped_object_placement_test.go index a0eb2b94f..4b54368b7 100644 --- a/test/e2e/enveloped_object_placement_test.go +++ b/test/e2e/enveloped_object_placement_test.go @@ -79,7 +79,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", diff --git a/test/e2e/join_and_leave_test.go b/test/e2e/join_and_leave_test.go index 6510c00e8..e31fe1cbd 100644 --- a/test/e2e/join_and_leave_test.go +++ b/test/e2e/join_and_leave_test.go @@ -91,7 +91,7 @@ var _ = Describe("Test member cluster join and leave flow", Label("joinleave"), Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", diff --git a/test/e2e/placement_selecting_resources_test.go b/test/e2e/placement_selecting_resources_test.go index 7bd8ec6a2..ffd923aa7 100644 --- a/test/e2e/placement_selecting_resources_test.go +++ b/test/e2e/placement_selecting_resources_test.go @@ -114,7 +114,7 @@ var _ = Describe("creating CRP and selecting resources by label", Ordered, func( Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -178,7 +178,7 @@ var _ = Describe("validating CRP when cluster-scoped resources become selected a Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: 
placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -264,7 +264,7 @@ var _ = Describe("validating CRP when cluster-scoped resources become unselected Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -620,7 +620,7 @@ var _ = Describe("validating CRP when selecting a reserved resource", Ordered, f Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -702,7 +702,7 @@ var _ = Describe("When creating a pickN ClusterResourcePlacement with duplicated Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -753,7 +753,7 @@ var _ = Describe("When creating a pickN ClusterResourcePlacement with duplicated It("updating the CRP to select one namespace", func() { gotCRP := &placementv1beta1.ClusterResourcePlacement{} Expect(hubClient.Get(ctx, types.NamespacedName{Name: crpName}, gotCRP)).Should(Succeed(), "Failed to get CRP %s", crpName) - gotCRP.Spec.ResourceSelectors = []placementv1beta1.ClusterResourceSelector{ + gotCRP.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ { Group: corev1.GroupName, Version: "v1", @@ -941,7 +941,7 @@ var _ = Describe("validating CRP when placing cluster scope resource (other than Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Kind: "ClusterRole", @@ -1041,7 +1041,7 @@ var _ = Describe("validating CRP revision history allowing single revision when Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -1078,7 +1078,7 @@ var _ = Describe("validating CRP revision history allowing single revision when return err } - crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, placementv1beta1.ClusterResourceSelector{ + crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, placementv1beta1.ResourceSelectorTerm{ Group: "", Kind: "Namespace", Version: "v1", @@ -1135,7 +1135,7 @@ var _ = Describe("validating CRP revision history allowing multiple revisions wh Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -1171,7 +1171,7 @@ var _ = Describe("validating CRP revision history allowing multiple revisions wh return err } - crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, placementv1beta1.ClusterResourceSelector{ + crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, 
placementv1beta1.ResourceSelectorTerm{ Group: "", Kind: "Namespace", Version: "v1", @@ -1232,7 +1232,7 @@ var _ = Describe("validating CRP when selected resources cross the 1MB limit", O PlacementType: placementv1beta1.PickFixedPlacementType, ClusterNames: []string{memberCluster1EastProdName, memberCluster2EastCanaryName}, }, - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -1367,7 +1367,7 @@ var _ = Describe("creating CRP and checking selected resources order", Ordered, Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", diff --git a/test/e2e/placement_with_custom_config_test.go b/test/e2e/placement_with_custom_config_test.go index 137ea255a..22b15bb2a 100644 --- a/test/e2e/placement_with_custom_config_test.go +++ b/test/e2e/placement_with_custom_config_test.go @@ -54,7 +54,7 @@ var _ = Describe("validating CRP when using customized resourceSnapshotCreationM Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -165,7 +165,7 @@ var _ = Describe("validating that CRP status can be updated after updating the r Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", diff --git a/test/e2e/resources_test.go b/test/e2e/resources_test.go index a4de8d4ec..7b61238db 100644 --- a/test/e2e/resources_test.go +++ b/test/e2e/resources_test.go @@ -54,8 +54,8 @@ const ( workNamespaceLabelName = "process" ) -func workResourceSelector() []placementv1beta1.ClusterResourceSelector { - return []placementv1beta1.ClusterResourceSelector{ +func workResourceSelector() []placementv1beta1.ResourceSelectorTerm { + return []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", @@ -76,8 +76,8 @@ func configMapSelector() []placementv1beta1.ResourceSelector { } } -func invalidWorkResourceSelector() []placementv1beta1.ClusterResourceSelector { - return []placementv1beta1.ClusterResourceSelector{ +func invalidWorkResourceSelector() []placementv1beta1.ResourceSelectorTerm { + return []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", diff --git a/test/e2e/rollout_test.go b/test/e2e/rollout_test.go index 270b43fb3..82b9a19fc 100644 --- a/test/e2e/rollout_test.go +++ b/test/e2e/rollout_test.go @@ -771,7 +771,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { It("create the CRP that select the namespace and CRD", func() { crp = buildCRPForSafeRollout() - crdClusterResourceSelector := placementv1beta1.ClusterResourceSelector{ + crdClusterResourceSelector := placementv1beta1.ResourceSelectorTerm{ Group: utils.CRDMetaGVK.Group, Kind: utils.CRDMetaGVK.Kind, Version: utils.CRDMetaGVK.Version, @@ -914,7 +914,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { It("create the CRP that select the namespace and CRD", func() { crp = buildCRPForSafeRollout() - crdClusterResourceSelector := placementv1beta1.ClusterResourceSelector{ + 
crdClusterResourceSelector := placementv1beta1.ResourceSelectorTerm{ Group: utils.CRDMetaGVK.Group, Kind: utils.CRDMetaGVK.Kind, Version: utils.CRDMetaGVK.Version, diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index d4325b7c8..1728a07d5 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -962,7 +962,7 @@ func createClusterResourceOverrides(number int) { Name: fmt.Sprintf(croNameTemplate, i), }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", diff --git a/test/e2e/webhook_test.go b/test/e2e/webhook_test.go index 3bae0ce62..724742c84 100644 --- a/test/e2e/webhook_test.go +++ b/test/e2e/webhook_test.go @@ -141,7 +141,7 @@ var _ = Describe("webhook tests for CRP CREATE operations", func() { Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "InvalidNamespace", @@ -171,7 +171,7 @@ var _ = Describe("webhook tests for CRP CREATE operations", func() { Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", Kind: "Deployment", @@ -814,7 +814,7 @@ var _ = Describe("webhook tests for MC taints", Ordered, func() { var _ = Describe("webhook tests for ClusterResourceOverride CREATE operations", func() { croName := fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()) - selector := placementv1beta1.ClusterResourceSelector{ + selector := placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", Version: "v1", @@ -854,7 +854,7 @@ var _ = Describe("webhook tests for ClusterResourceOverride CREATE operations", It("should deny create CRO with invalid resource selection ", func() { Consistently(func(g Gomega) error { - invalidSelector := placementv1beta1.ClusterResourceSelector{ + invalidSelector := placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", Version: "v1", @@ -863,7 +863,7 @@ var _ = Describe("webhook tests for ClusterResourceOverride CREATE operations", }, SelectionScope: placementv1beta1.NamespaceWithResources, } - invalidSelector1 := placementv1beta1.ClusterResourceSelector{ + invalidSelector1 := placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", Version: "v1", @@ -875,7 +875,7 @@ var _ = Describe("webhook tests for ClusterResourceOverride CREATE operations", Name: croName, }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ invalidSelector, selector, selector, invalidSelector1, }, Policy: policy, @@ -913,7 +913,7 @@ var _ = Describe("webhook tests for ClusterResourceOverride CREATE operation lim Name: "test-cro-101", }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", @@ -965,7 +965,7 @@ var _ = Describe("webhook tests 
for ClusterResourceOverride CREATE operation lim var _ = Describe("webhook tests for ClusterResourceOverride CREATE operations resource selection limitations", Ordered, Serial, func() { croName := fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()) - selector := placementv1beta1.ClusterResourceSelector{ + selector := placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", Version: "v1", @@ -979,7 +979,7 @@ var _ = Describe("webhook tests for ClusterResourceOverride CREATE operations re Name: croName, }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ selector, }, Policy: &placementv1beta1.OverridePolicy{ @@ -1029,7 +1029,7 @@ var _ = Describe("webhook tests for ClusterResourceOverride CREATE operations re Name: fmt.Sprintf("test-cro-%d", GinkgoParallelProcess()), }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ selector, }, Policy: &placementv1beta1.OverridePolicy{ @@ -1098,7 +1098,7 @@ var _ = Describe("webhook tests for CRO UPDATE operations", Ordered, func() { Name: croName, }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", @@ -1139,7 +1139,7 @@ var _ = Describe("webhook tests for CRO UPDATE operations", Ordered, func() { Eventually(func(g Gomega) error { var cro placementv1beta1.ClusterResourceOverride g.Expect(hubClient.Get(ctx, types.NamespacedName{Name: croName}, &cro)).Should(Succeed()) - invalidSelector := placementv1beta1.ClusterResourceSelector{ + invalidSelector := placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", Version: "v1", @@ -1148,7 +1148,7 @@ var _ = Describe("webhook tests for CRO UPDATE operations", Ordered, func() { }, SelectionScope: placementv1beta1.NamespaceWithResources, } - invalidSelector1 := placementv1beta1.ClusterResourceSelector{ + invalidSelector1 := placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", Version: "v1", @@ -1179,7 +1179,7 @@ var _ = Describe("webhook tests for CRO UPDATE operations", Ordered, func() { Name: cro1Name, }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", @@ -1207,7 +1207,7 @@ var _ = Describe("webhook tests for CRO UPDATE operations", Ordered, func() { Expect(hubClient.Create(ctx, cro1)).To(Succeed(), "Failed to create CRO %s", cro1.Name) var cro placementv1beta1.ClusterResourceOverride g.Expect(hubClient.Get(ctx, types.NamespacedName{Name: croName}, &cro)).Should(Succeed()) - selector := placementv1beta1.ClusterResourceSelector{ + selector := placementv1beta1.ResourceSelectorTerm{ Group: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", Version: "v1", diff --git a/test/scheduler/utils_test.go b/test/scheduler/utils_test.go index af57f2f69..318ea0f15 100644 --- a/test/scheduler/utils_test.go +++ b/test/scheduler/utils_test.go @@ -70,7 +70,7 @@ var ( // by any controller (the scheduler cares only 
about policy snapshots and manipulates // bindings accordingly), it is safe for all suites to select the same set of resources // (which is not even provisioned in the environment). - defaultResourceSelectors = []placementv1beta1.ClusterResourceSelector{ + defaultResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ { Group: "core", Kind: "Namespace", diff --git a/test/upgrade/before/resources_test.go b/test/upgrade/before/resources_test.go index acf96bf19..f7b7053eb 100644 --- a/test/upgrade/before/resources_test.go +++ b/test/upgrade/before/resources_test.go @@ -27,8 +27,8 @@ const ( workNamespaceLabelName = "target-test-spec" ) -func workResourceSelector(workNamespaceName string) []placementv1beta1.ClusterResourceSelector { - return []placementv1beta1.ClusterResourceSelector{ +func workResourceSelector(workNamespaceName string) []placementv1beta1.ResourceSelectorTerm { + return []placementv1beta1.ResourceSelectorTerm{ { Group: "", Kind: "Namespace", From 5009332121366df6f4c3a0fdfe564b5866950976 Mon Sep 17 00:00:00 2001 From: Wantong Date: Thu, 14 Aug 2025 01:21:57 -0700 Subject: [PATCH 09/38] test: add override test in rollout integration (#182) --------- Signed-off-by: Wantong Jiang --- Makefile | 3 +- .../rollout/controller_integration_test.go | 376 +++++++++++++++++- pkg/controllers/rollout/suite_test.go | 23 +- 3 files changed, 392 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index 9b3dceb00..8c7a8e190 100644 --- a/Makefile +++ b/Makefile @@ -135,13 +135,14 @@ test: manifests generate fmt vet local-unit-test integration-test## Run tests. ## ## workaround to bypass the pkg/controllers/workv1alpha1 tests failure +## rollout controller tests need a bit longer to complete, so we increase the timeout ## .PHONY: local-unit-test local-unit-test: $(ENVTEST) ## Run tests. export CGO_ENABLED=1 && \ export KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" && \ go test ./pkg/controllers/workv1alpha1 -race -coverprofile=ut-coverage.xml -covermode=atomic -v && \ - go test `go list ./pkg/... ./cmd/... | grep -v pkg/controllers/workv1alpha1` -race -coverpkg=./... -coverprofile=ut-coverage.xml -covermode=atomic -v + go test `go list ./pkg/... ./cmd/... | grep -v pkg/controllers/workv1alpha1` -race -coverpkg=./... -coverprofile=ut-coverage.xml -covermode=atomic -v -timeout=20m .PHONY: integration-test integration-test: $(ENVTEST) ## Run tests. diff --git a/pkg/controllers/rollout/controller_integration_test.go b/pkg/controllers/rollout/controller_integration_test.go index 359458fc2..48df5d274 100644 --- a/pkg/controllers/rollout/controller_integration_test.go +++ b/pkg/controllers/rollout/controller_integration_test.go @@ -25,6 +25,8 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -32,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" + clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" "github.com/kubefleet-dev/kubefleet/pkg/utils" "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" @@ -44,7 +47,7 @@ const ( consistentTimeout = time.Second * 60 consistentInterval = time.Second * 5 customBindingFinalizer = "custom-binding-finalizer" - testNamespace = "test-namespace" + testNamespace = "app" // to align with the test resources in rollout/manifests ) var ( @@ -314,6 +317,95 @@ var _ = Describe("Test the rollout Controller", func() { }, timeout, interval).Should(Succeed(), "Failed to verify that all the bindings have their status refreshed") }) + It("should trigger binding rollout for clusterResourceOverrideSnapshot but not resourceOverrideSnapshot with Namespaced scope", func() { + // Create a CRP. + targetClusterCount := int32(2) + rolloutCRP = clusterResourcePlacementForTest( + testCRPName, + createPlacementPolicyForTest(placementv1beta1.PickNPlacementType, targetClusterCount), + createPlacementRolloutStrategyForTest(placementv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)) + Expect(k8sClient.Create(ctx, rolloutCRP)).Should(Succeed(), "Failed to create CRP") + + // Create a master cluster resource snapshot. + resourceSnapshot := generateClusterResourceSnapshot(rolloutCRP.Name, 0, true) + Expect(k8sClient.Create(ctx, resourceSnapshot)).Should(Succeed(), "Failed to create cluster resource snapshot") + + // Create bindings. + clusters := make([]string, targetClusterCount) + for i := 0; i < int(targetClusterCount); i++ { + clusters[i] = "cluster-" + utils.RandStr() + binding := generateClusterResourceBinding(placementv1beta1.BindingStateScheduled, resourceSnapshot.Name, clusters[i]) + Expect(k8sClient.Create(ctx, binding)).Should(Succeed(), "Failed to create cluster resource binding") + bindings = append(bindings, binding) + + memberCluster := generateMemberCluster(i, clusters[i]) + Expect(k8sClient.Create(ctx, memberCluster)).Should(Succeed(), "Failed to create member cluster") + } + + // Verify that all the bindings are rolled out initially. + verifyBindingsRolledOut(controller.ConvertCRBArrayToBindingObjs(bindings), resourceSnapshot, timeout) + + // Mark the bindings to be available. + for _, binding := range bindings { + markBindingAvailable(binding, true) + } + + // Create a resourceOverrideSnapshot with the same placement name but Namespaced scope and verify bindings are not updated. + testROName1 := "ro" + utils.RandStr() + resourceOverrideSnapshot1 := generateResourceOverrideSnapshot(testROName1, testCRPName, placementv1beta1.NamespaceScoped) + By(fmt.Sprintf("Creating resourceOverrideSnapshot %s to refer a resourcePlacement", resourceOverrideSnapshot1.Name)) + Expect(k8sClient.Create(ctx, resourceOverrideSnapshot1)).Should(Succeed(), "Failed to create resource override snapshot") + + // Verify bindings are NOT updated (rollout not triggered) by resourceOverrideSnapshot. 
+ verifyBindingsNotUpdatedWithOverridesConsistently(controller.ConvertCRBArrayToBindingObjs(bindings), nil, nil) + + By(fmt.Sprintf("Updating resourceOverrideSnapshot %s to refer the clusterResourcePlacement instead", resourceOverrideSnapshot1.Name)) + resourceOverrideSnapshot1.Spec.OverrideSpec.Placement.Scope = placementv1beta1.ClusterScoped + Expect(k8sClient.Update(ctx, resourceOverrideSnapshot1)).Should(Succeed(), "Failed to update resource override snapshot") + + // Verify bindings are NOT updated (rollout not triggered) by resourceOverrideSnapshot. + // This is because rollout controller is not triggered by overrideSnapshot update events. + verifyBindingsNotUpdatedWithOverridesConsistently(controller.ConvertCRBArrayToBindingObjs(bindings), nil, nil) + + // Create a clusterResourceOverrideSnapshot and verify it triggers rollout. + testCROName := "cro" + utils.RandStr() + clusterResourceOverrideSnapshot := generateClusterResourceOverrideSnapshot(testCROName, testCRPName) + By(fmt.Sprintf("Creating clusterResourceOverrideSnapshot %s to refer the clusterResourcePlacement", clusterResourceOverrideSnapshot.Name)) + Expect(k8sClient.Create(ctx, clusterResourceOverrideSnapshot)).Should(Succeed(), "Failed to create cluster resource override snapshot") + + // Verify bindings are updated, note that both clusterResourceOverrideSnapshot and resourceOverrideSnapshot are set in the bindings. + waitUntilRolloutCompleted(controller.ConvertCRBArrayToBindingObjs(bindings), []string{clusterResourceOverrideSnapshot.Name}, + []placementv1beta1.NamespacedName{{Name: resourceOverrideSnapshot1.Name, Namespace: resourceOverrideSnapshot1.Namespace}}) + + // Create another resourceOverrideSnapshot referencing the same CRP and verify bindings are updated again. + testROName2 := "ro" + utils.RandStr() + resourceOverrideSnapshot2 := generateResourceOverrideSnapshot(testROName2, testCRPName, placementv1beta1.ClusterScoped) + By(fmt.Sprintf("Creating resourceOverrideSnapshot %s to refer a clusterResourcePlacement", resourceOverrideSnapshot2.Name)) + Expect(k8sClient.Create(ctx, resourceOverrideSnapshot2)).Should(Succeed(), "Failed to create resource override snapshot") + + // Verify bindings are updated, note that both clusterResourceOverrideSnapshot and resourceOverrideSnapshot are set in the bindings. + waitUntilRolloutCompleted(controller.ConvertCRBArrayToBindingObjs(bindings), []string{clusterResourceOverrideSnapshot.Name}, + []placementv1beta1.NamespacedName{ + {Name: resourceOverrideSnapshot1.Name, Namespace: resourceOverrideSnapshot1.Namespace}, + {Name: resourceOverrideSnapshot2.Name, Namespace: resourceOverrideSnapshot2.Namespace}, + }, + ) + + // Clean up the override snapshots. + Expect(k8sClient.Delete(ctx, resourceOverrideSnapshot1)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, clusterResourceOverrideSnapshot)).Should(Succeed()) + + // Clean up the member clusters. 
+ for _, cluster := range clusters { + memberCluster := &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster, + }, + } + Expect(k8sClient.Delete(ctx, memberCluster)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{})) + } + }) + It("Should rollout all the selected bindings when the rollout strategy is not set", func() { // create CRP var targetCluster int32 = 11 @@ -619,7 +711,7 @@ var _ = Describe("Test the rollout Controller", func() { By("Verified that the rollout is finally unblocked") }) - It("Should rollout both the old applied and failed to apply bond the new resources", func() { + It("Should rollout both the old applied and failed to apply bound the new resources", func() { // create CRP var targetCluster int32 = 5 rolloutCRP = clusterResourcePlacementForTest(testCRPName, @@ -1481,7 +1573,7 @@ var _ = Describe("Test the rollout Controller for ResourcePlacement", func() { By("Verified that the rollout is finally unblocked") }) - It("Should rollout both the old applied and failed to apply bond the new resources", func() { + It("Should rollout both the old applied and failed to apply bound the new resources", func() { // create RP var targetCluster int32 = 5 rolloutRP = resourcePlacementForTest(testNamespace, testRPName, @@ -1538,6 +1630,79 @@ var _ = Describe("Test the rollout Controller for ResourcePlacement", func() { return allMatch }, 5*defaultUnavailablePeriod*time.Second, interval).Should(BeTrue(), "rollout controller should roll all the bindings to use the latest resource snapshot") }) + + It("should trigger binding rollout for resourceOverrideSnapshot but not clusterResourceOverrideSnapshot", func() { + // Create a RP. + targetClusterCount := int32(2) + rolloutRP = resourcePlacementForTest( + testNamespace, testRPName, + createPlacementPolicyForTest(placementv1beta1.PickNPlacementType, targetClusterCount), + createPlacementRolloutStrategyForTest(placementv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)) + Expect(k8sClient.Create(ctx, rolloutRP)).Should(Succeed(), "Failed to create RP") + + // Create a master resource snapshot. + resourceSnapshot := generateResourceSnapshot(rolloutRP.Namespace, rolloutRP.Name, 0, true) + Expect(k8sClient.Create(ctx, resourceSnapshot)).Should(Succeed(), "Failed to create resource snapshot") + + // Create bindings. + clusters := make([]string, targetClusterCount) + for i := 0; i < int(targetClusterCount); i++ { + clusters[i] = "cluster-" + utils.RandStr() + binding := generateResourceBinding(placementv1beta1.BindingStateScheduled, resourceSnapshot.Name, clusters[i], testNamespace) + Expect(k8sClient.Create(ctx, binding)).Should(Succeed(), "Failed to create resource binding") + bindings = append(bindings, binding) + + memberCluster := generateMemberCluster(i, clusters[i]) + Expect(k8sClient.Create(ctx, memberCluster)).Should(Succeed(), "Failed to create member cluster") + } + + // Verify that all the bindings are rolled out initially. + verifyBindingsRolledOut(controller.ConvertRBArrayToBindingObjs(bindings), resourceSnapshot, timeout) + + // Mark the bindings to be available. + for _, binding := range bindings { + markBindingAvailable(binding, true) + } + + // Create a clusterResourceOverrideSnapshot and a resourceOverrideSnapshot with cluster-scope placement and verify bindings are not updated. 
+ testCROName := "cro" + utils.RandStr() + clusterResourceOverrideSnapshot := generateClusterResourceOverrideSnapshot(testCROName, testRPName) + By(fmt.Sprintf("Creating cluster resource override snapshot %s", clusterResourceOverrideSnapshot.Name)) + Expect(k8sClient.Create(ctx, clusterResourceOverrideSnapshot)).Should(Succeed(), "Failed to create cluster resource override snapshot") + + testROName1 := "ro" + utils.RandStr() + resourceOverrideSnapshot1 := generateResourceOverrideSnapshot(testROName1, testRPName, placementv1beta1.ClusterScoped) + By(fmt.Sprintf("Creating resource override snapshot %s", resourceOverrideSnapshot1.Name)) + Expect(k8sClient.Create(ctx, resourceOverrideSnapshot1)).Should(Succeed(), "Failed to create resource override snapshot") + + // Verify bindings are NOT updated (rollout not triggered) by clusterResourceOverrideSnapshot. + verifyBindingsNotUpdatedWithOverridesConsistently(controller.ConvertRBArrayToBindingObjs(bindings), nil, nil) + + // Create a resourceOverrideSnapshot and verify it triggers rollout. + testROName2 := "ro" + utils.RandStr() + resourceOverrideSnapshot2 := generateResourceOverrideSnapshot(testROName2, testRPName, placementv1beta1.NamespaceScoped) + By(fmt.Sprintf("Creating resource override snapshot %s", resourceOverrideSnapshot2.Name)) + Expect(k8sClient.Create(ctx, resourceOverrideSnapshot2)).Should(Succeed(), "Failed to create resource override snapshot") + + waitUntilRolloutCompleted(controller.ConvertRBArrayToBindingObjs(bindings), nil, []placementv1beta1.NamespacedName{ + {Name: resourceOverrideSnapshot2.Name, Namespace: resourceOverrideSnapshot2.Namespace}, + }) + + // Clean up the override snapshots. + Expect(k8sClient.Delete(ctx, resourceOverrideSnapshot1)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, resourceOverrideSnapshot2)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, clusterResourceOverrideSnapshot)).Should(Succeed()) + + // Clean up the member clusters. 
+ for _, cluster := range clusters { + memberCluster := &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster, + }, + } + Expect(k8sClient.Delete(ctx, memberCluster)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{})) + } + }) }) func resourcePlacementForTest(namespace, rpName string, policy *placementv1beta1.PlacementPolicy, strategy placementv1beta1.RolloutStrategy) *placementv1beta1.ResourcePlacement { @@ -1609,6 +1774,92 @@ func verifyBindingsRolledOut(bindings []placementv1beta1.BindingObj, masterSnaps }, timeout, interval).Should(Succeed(), "rollout controller should roll out all the bindings") } +func verifyBindingsNotUpdatedWithOverridesConsistently( + bindings []placementv1beta1.BindingObj, + wantClusterResourceOverrideSnapshots []string, + wantResourceOverrideSnapshots []placementv1beta1.NamespacedName, +) { + Consistently(func() error { + for _, binding := range bindings { + bindingKey := types.NamespacedName{Name: binding.GetName(), Namespace: binding.GetNamespace()} + if _, err := checkIfBindingUpdatedWithOverrides(bindingKey, wantClusterResourceOverrideSnapshots, wantResourceOverrideSnapshots); err != nil { + return fmt.Errorf("binding %s should not be updated with overrides: %w", bindingKey, err) + } + } + return nil + }, consistentTimeout, interval).Should(Succeed(), "Bindings should not be updated with new overrides consistently") +} + +func waitUntilRolloutCompleted( + bindings []placementv1beta1.BindingObj, + wantClusterResourceOverrideSnapshots []string, + wantResourceOverrideSnapshots []placementv1beta1.NamespacedName, +) { + notUpdatedBindings := make(map[types.NamespacedName]bool, len(bindings)) + for _, binding := range bindings { + notUpdatedBindings[types.NamespacedName{Name: binding.GetName(), Namespace: binding.GetNamespace()}] = true + } + + for len(notUpdatedBindings) > 0 { + // In each round, try to find a binding that has been updated and update it to available so rollout can proceed. + var gotBinding placementv1beta1.BindingObj + var err error + Eventually(func() error { + for bindingKey := range notUpdatedBindings { + gotBinding, err = checkIfBindingUpdatedWithOverrides(bindingKey, wantClusterResourceOverrideSnapshots, wantResourceOverrideSnapshots) + if err != nil { + continue // current binding not updated yet, continue to check the next one. + } + delete(notUpdatedBindings, bindingKey) + return nil // found an updated binding, can exit this round. + } + return fmt.Errorf("failed to find a binding with updated overrides") + }, timeout, interval).Should(Succeed(), "One of the bindings should be updated with overrides") + // Mark the binding as available so rollout can proceed. + markBindingAvailable(gotBinding, true) + } +} + +func checkIfBindingUpdatedWithOverrides( + bindingKey types.NamespacedName, + wantClusterResourceOverrideSnapshots []string, + wantResourceOverrideSnapshots []placementv1beta1.NamespacedName, +) (placementv1beta1.BindingObj, error) { + var gotBinding placementv1beta1.BindingObj + if bindingKey.Namespace == "" { + gotBinding = &placementv1beta1.ClusterResourceBinding{} + } else { + gotBinding = &placementv1beta1.ResourceBinding{} + } + if err := k8sClient.Get(ctx, bindingKey, gotBinding); err != nil { + return gotBinding, fmt.Errorf("failed to get binding %s: %w", bindingKey, err) + } + + // Check that RolloutStarted condition is True. 
+ if !condition.IsConditionStatusTrue(gotBinding.GetCondition(string(placementv1beta1.ResourceBindingRolloutStarted)), gotBinding.GetGeneration()) { + return gotBinding, fmt.Errorf("binding %s RolloutStarted condition is not True", bindingKey) + } + + // Check that override snapshots in spec are the want ones. + cmpOptions := []cmp.Option{ + cmpopts.EquateEmpty(), + cmpopts.SortSlices(func(a, b string) bool { return a < b }), + cmpopts.SortSlices(func(a, b placementv1beta1.NamespacedName) bool { + if a.Namespace == b.Namespace { + return a.Name < b.Name + } + return a.Namespace < b.Namespace + }), + } + if !cmp.Equal(gotBinding.GetBindingSpec().ClusterResourceOverrideSnapshots, wantClusterResourceOverrideSnapshots, cmpOptions...) || + !cmp.Equal(gotBinding.GetBindingSpec().ResourceOverrideSnapshots, wantResourceOverrideSnapshots, cmpOptions...) { + return gotBinding, fmt.Errorf("binding %s override snapshots mismatch: want %v and %v, got %v and %v", bindingKey, + wantClusterResourceOverrideSnapshots, wantResourceOverrideSnapshots, + gotBinding.GetBindingSpec().ClusterResourceOverrideSnapshots, gotBinding.GetBindingSpec().ResourceOverrideSnapshots) + } + return gotBinding, nil +} + func markBindingAvailable(binding placementv1beta1.BindingObj, trackable bool) { Eventually(func() error { reason := "trackable" @@ -1707,7 +1958,8 @@ func generateClusterResourceSnapshot(testCRPName string, resourceIndex int, isLa placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(isLatest), }, Annotations: map[string]string{ - placementv1beta1.ResourceGroupHashAnnotation: "hash", + placementv1beta1.ResourceGroupHashAnnotation: "hash", + placementv1beta1.NumberOfResourceSnapshotsAnnotation: "1", }, }, } @@ -1734,7 +1986,8 @@ func generateResourceSnapshot(namespace, testRPName string, resourceIndex int, i placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(isLatest), }, Annotations: map[string]string{ - placementv1beta1.ResourceGroupHashAnnotation: "hash", + placementv1beta1.ResourceGroupHashAnnotation: "hash", + placementv1beta1.NumberOfResourceSnapshotsAnnotation: "1", }, }, } @@ -1750,3 +2003,116 @@ func generateResourceSnapshot(namespace, testRPName string, resourceIndex int, i } return resourceSnapshot } + +func generateMemberCluster(idx int, clusterName string) *clusterv1beta1.MemberCluster { + clusterLabels := map[string]string{ + "index": strconv.Itoa(idx), + } + return &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Labels: clusterLabels, + }, + Spec: clusterv1beta1.MemberClusterSpec{ + Identity: rbacv1.Subject{ + Name: "testUser", + Kind: "ServiceAccount", + Namespace: utils.FleetSystemNamespace, + }, + HeartbeatPeriodSeconds: 60, + }, + } +} + +func generateClusterResourceOverrideSnapshot(testCROName, testPlacementName string) *placementv1beta1.ClusterResourceOverrideSnapshot { + return &placementv1beta1.ClusterResourceOverrideSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, testCROName, 0), + Labels: map[string]string{ + placementv1beta1.OverrideIndexLabel: "0", + placementv1beta1.IsLatestSnapshotLabel: "true", + placementv1beta1.OverrideTrackingLabel: testCROName, + }, + }, + Spec: placementv1beta1.ClusterResourceOverrideSnapshotSpec{ + OverrideHash: []byte("cluster-override-hash"), + OverrideSpec: placementv1beta1.ClusterResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: testPlacementName, + Scope: placementv1beta1.ClusterScoped, + }, + Policy: 
&placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/labels/test", + Value: apiextensionsv1.JSON{Raw: []byte(`"test"`)}, + }, + }, + }, + }, + }, + ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "app", // from manifests/test_namespace.yaml + }, + }, + }, + }, + } +} + +func generateResourceOverrideSnapshot(testROName, testPlacementName string, scope placementv1beta1.ResourceScope) *placementv1beta1.ResourceOverrideSnapshot { + return &placementv1beta1.ResourceOverrideSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, testROName, 0), + Namespace: testNamespace, + Labels: map[string]string{ + placementv1beta1.OverrideIndexLabel: "0", + placementv1beta1.IsLatestSnapshotLabel: "true", + placementv1beta1.OverrideTrackingLabel: testROName, + }, + }, + Spec: placementv1beta1.ResourceOverrideSnapshotSpec{ + OverrideHash: []byte("resource-override-hash"), + OverrideSpec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: testPlacementName, + Scope: scope, + }, + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/labels/test", + Value: apiextensionsv1.JSON{Raw: []byte(`"test"`)}, + }, + }, + }, + }, + }, + ResourceSelectors: []placementv1beta1.ResourceSelector{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-configmap", + }, + }, + }, + }, + } +} diff --git a/pkg/controllers/rollout/suite_test.go b/pkg/controllers/rollout/suite_test.go index 556a5c48a..5649838a3 100644 --- a/pkg/controllers/rollout/suite_test.go +++ b/pkg/controllers/rollout/suite_test.go @@ -29,6 +29,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/klog/v2" @@ -42,6 +43,8 @@ import ( clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" placementv1alpha1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1alpha1" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils" + "github.com/kubefleet-dev/kubefleet/pkg/utils/informer" ) var ( @@ -114,17 +117,29 @@ var _ = BeforeSuite(func() { } Expect(k8sClient.Create(ctx, namespace)).Should(Succeed()) + // setup informer manager for the reconciler + dynamicClient, err := dynamic.NewForConfig(cfg) + Expect(err).Should(Succeed()) + dynamicInformerManager := informer.NewInformerManager(dynamicClient, 0, ctx.Done()) + dynamicInformerManager.AddStaticResource(informer.APIResourceMeta{ + GroupVersionKind: utils.NamespaceGVK, + GroupVersionResource: utils.NamespaceGVR, + IsClusterScoped: true, + }, nil) + // setup our cluster scoped reconciler err = (&Reconciler{ - Client: k8sClient, - UncachedReader: mgr.GetAPIReader(), + Client: k8sClient, + 
UncachedReader: mgr.GetAPIReader(), + InformerManager: dynamicInformerManager, }).SetupWithManagerForClusterResourcePlacement(mgr) Expect(err).Should(Succeed()) // setup our namespace scoped reconciler err = (&Reconciler{ - Client: k8sClient, - UncachedReader: mgr.GetAPIReader(), + Client: k8sClient, + UncachedReader: mgr.GetAPIReader(), + InformerManager: dynamicInformerManager, }).SetupWithManagerForResourcePlacement(mgr) Expect(err).Should(Succeed()) From ec5e57a7c5b074ebbfc660d1ebca9550512d4bc5 Mon Sep 17 00:00:00 2001 From: Zhiying Lin <54013513+zhiying-lin@users.noreply.github.com> Date: Fri, 15 Aug 2025 01:19:56 +0800 Subject: [PATCH 10/38] fix: fix the rollout controller IT (#189) --- pkg/controllers/rollout/controller_integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controllers/rollout/controller_integration_test.go b/pkg/controllers/rollout/controller_integration_test.go index 48df5d274..46bddf8fc 100644 --- a/pkg/controllers/rollout/controller_integration_test.go +++ b/pkg/controllers/rollout/controller_integration_test.go @@ -2057,7 +2057,7 @@ func generateClusterResourceOverrideSnapshot(testCROName, testPlacementName stri }, }, }, - ClusterResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", From 630be5f37053ed6ba7955ed601156089f49646eb Mon Sep 17 00:00:00 2001 From: Wantong Date: Thu, 14 Aug 2025 16:20:37 -0700 Subject: [PATCH 11/38] feat: enable resourcePlacement controllers (#188) --- cmd/hubagent/options/options.go | 4 + cmd/hubagent/workload/setup.go | 108 +++++++++++++++++- .../watchers/membercluster/suite_test.go | 1 + .../watchers/membercluster/watcher.go | 13 ++- test/scheduler/suite_test.go | 1 + 5 files changed, 119 insertions(+), 8 deletions(-) diff --git a/cmd/hubagent/options/options.go b/cmd/hubagent/options/options.go index 908d9f20b..c271ececc 100644 --- a/cmd/hubagent/options/options.go +++ b/cmd/hubagent/options/options.go @@ -98,6 +98,8 @@ type Options struct { EnableStagedUpdateRunAPIs bool // EnableEvictionAPIs enables to agents to watch the eviction and placement disruption budget CRs. EnableEvictionAPIs bool + // EnableResourcePlacement enables the agents to watch the ResourcePlacement APIs. + EnableResourcePlacement bool // EnablePprof enables the pprof profiling. EnablePprof bool // PprofPort is the port for pprof profiling. 
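A minimal sketch of how the new switch is consumed, assuming the options package lives at cmd/hubagent/options as the diff header indicates; only NewOptions and the EnableResourcePlacement field come from this patch, everything else below is illustrative.

package main

import (
	"fmt"

	"github.com/kubefleet-dev/kubefleet/cmd/hubagent/options"
)

func main() {
	// EnableResourcePlacement defaults to true once this patch lands (see the
	// NewOptions hunk that follows).
	opts := options.NewOptions()
	fmt.Println(opts.EnableResourcePlacement) // true

	// Operators can opt out via --enable-resource-placement=false (the flag is
	// registered in AddFlags further down), or programmatically in tests.
	opts.EnableResourcePlacement = false
}
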
@@ -126,6 +128,7 @@ func NewOptions() *Options { EnableV1Alpha1APIs: false, EnableClusterInventoryAPIs: true, EnableStagedUpdateRunAPIs: true, + EnableResourcePlacement: true, EnablePprof: false, PprofPort: 6065, ResourceSnapshotCreationMinimumInterval: 30 * time.Second, @@ -173,6 +176,7 @@ func (o *Options) AddFlags(flags *flag.FlagSet) { flags.DurationVar(&o.ForceDeleteWaitTime.Duration, "force-delete-wait-time", 15*time.Minute, "The duration the hub agent waits before force deleting a member cluster.") flags.BoolVar(&o.EnableStagedUpdateRunAPIs, "enable-staged-update-run-apis", true, "If set, the agents will watch for the ClusterStagedUpdateRun APIs.") flags.BoolVar(&o.EnableEvictionAPIs, "enable-eviction-apis", true, "If set, the agents will watch for the Eviction and PlacementDisruptionBudget APIs.") + flags.BoolVar(&o.EnableResourcePlacement, "enable-resource-placement", true, "If set, the agents will watch for the ResourcePlacement APIs.") flags.BoolVar(&o.EnablePprof, "enable-pprof", false, "If set, the pprof profiling is enabled.") flags.IntVar(&o.PprofPort, "pprof-port", 6065, "The port for pprof profiling.") flags.BoolVar(&o.DenyModifyMemberClusterLabels, "deny-modify-member-cluster-labels", false, "If set, users not in the system:masters cannot modify member cluster labels.") diff --git a/cmd/hubagent/workload/setup.go b/cmd/hubagent/workload/setup.go index dfc620896..9049c4523 100644 --- a/cmd/hubagent/workload/setup.go +++ b/cmd/hubagent/workload/setup.go @@ -67,6 +67,7 @@ const ( crpControllerName = "cluster-resource-placement-controller" crpControllerV1Alpha1Name = crpControllerName + "-v1alpha1" crpControllerV1Beta1Name = crpControllerName + "-v1beta1" + rpControllerName = "resource-placement-controller" resourceChangeControllerName = "resource-change-controller" mcPlacementControllerName = "memberCluster-placement-controller" @@ -96,6 +97,14 @@ var ( placementv1beta1.GroupVersion.WithKind(placementv1beta1.ResourceOverrideSnapshotKind), } + // There's a prerequisite that v1Beta1RequiredGVKs must be installed too. 
+ rpRequiredGVKs = []schema.GroupVersionKind{ + placementv1beta1.GroupVersion.WithKind(placementv1beta1.ResourcePlacementKind), + placementv1beta1.GroupVersion.WithKind(placementv1beta1.ResourceBindingKind), + placementv1beta1.GroupVersion.WithKind(placementv1beta1.ResourceSnapshotKind), + placementv1beta1.GroupVersion.WithKind(placementv1beta1.SchedulingPolicySnapshotKind), + } + clusterStagedUpdateRunGVKs = []schema.GroupVersionKind{ placementv1beta1.GroupVersion.WithKind(placementv1beta1.ClusterStagedUpdateRunKind), placementv1beta1.GroupVersion.WithKind(placementv1beta1.ClusterStagedUpdateStrategyKind), @@ -167,6 +176,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, rateLimiter := options.DefaultControllerRateLimiter(opts.RateLimiterOpts) var clusterResourcePlacementControllerV1Alpha1 controller.Controller var clusterResourcePlacementControllerV1Beta1 controller.Controller + var resourcePlacementController controller.Controller var memberClusterPlacementController controller.Controller if opts.EnableV1Alpha1APIs { for _, gvk := range v1Alpha1RequiredGVKs { @@ -220,7 +230,43 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, return err } - // Set up a new controller to do rollout resources according to CRP rollout strategy + if opts.EnableResourcePlacement { + for _, gvk := range rpRequiredGVKs { + if err = utils.CheckCRDInstalled(discoverClient, gvk); err != nil { + klog.ErrorS(err, "unable to find the required CRD", "GVK", gvk) + return err + } + } + klog.Info("Setting up resourcePlacement controller") + resourcePlacementController = controller.NewController(rpControllerName, controller.NamespaceKeyFunc, crpc.Reconcile, rateLimiter) + klog.Info("Setting up resourcePlacement watcher") + if err := (&clusterresourceplacementwatcher.Reconciler{ + PlacementController: resourcePlacementController, + }).SetupWithManagerForResourcePlacement(mgr); err != nil { + klog.ErrorS(err, "Unable to set up the resourcePlacement watcher") + return err + } + + klog.Info("Setting up resourceBinding watcher") + if err := (&clusterresourcebindingwatcher.Reconciler{ + PlacementController: resourcePlacementController, + Client: mgr.GetClient(), + }).SetupWithManagerForResourceBinding(mgr); err != nil { + klog.ErrorS(err, "Unable to set up the resourceBinding watcher") + return err + } + + klog.Info("Setting up schedulingPolicySnapshot watcher") + if err := (&clusterschedulingpolicysnapshot.Reconciler{ + Client: mgr.GetClient(), + PlacementController: resourcePlacementController, + }).SetupWithManagerForSchedulingPolicySnapshot(mgr); err != nil { + klog.ErrorS(err, "Unable to set up the schedulingPolicySnapshot watcher") + return err + } + } + + // Set up a new controller to do rollout resources according to CRP/RP rollout strategy klog.Info("Setting up rollout controller") if err := (&rollout.Reconciler{ Client: mgr.GetClient(), @@ -228,10 +274,22 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, MaxConcurrentReconciles: int(math.Ceil(float64(opts.MaxFleetSizeSupported)/30) * math.Ceil(float64(opts.MaxConcurrentClusterPlacement)/10)), InformerManager: dynamicInformerManager, }).SetupWithManagerForClusterResourcePlacement(mgr); err != nil { - klog.ErrorS(err, "Unable to set up rollout controller") + klog.ErrorS(err, "Unable to set up rollout controller for clusterResourcePlacement") return err } + if opts.EnableResourcePlacement { + if err := (&rollout.Reconciler{ + Client: mgr.GetClient(), + UncachedReader: 
mgr.GetAPIReader(), + MaxConcurrentReconciles: int(math.Ceil(float64(opts.MaxFleetSizeSupported)/30) * math.Ceil(float64(opts.MaxConcurrentClusterPlacement)/10)), + InformerManager: dynamicInformerManager, + }).SetupWithManagerForResourcePlacement(mgr); err != nil { + klog.ErrorS(err, "Unable to set up rollout controller for resourcePlacement") + return err + } + } + if opts.EnableEvictionAPIs { for _, gvk := range evictionGVKs { if err = utils.CheckCRDInstalled(discoverClient, gvk); err != nil { @@ -274,10 +332,21 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, MaxConcurrentReconciles: int(math.Ceil(float64(opts.MaxFleetSizeSupported)/10) * math.Ceil(float64(opts.MaxConcurrentClusterPlacement)/10)), InformerManager: dynamicInformerManager, }).SetupWithManagerForClusterResourceBinding(mgr); err != nil { - klog.ErrorS(err, "Unable to set up work generator") + klog.ErrorS(err, "Unable to set up work generator for clusterResourceBinding") return err } + if opts.EnableResourcePlacement { + if err := (&workgenerator.Reconciler{ + Client: mgr.GetClient(), + MaxConcurrentReconciles: int(math.Ceil(float64(opts.MaxFleetSizeSupported)/10) * math.Ceil(float64(opts.MaxConcurrentClusterPlacement)/10)), + InformerManager: dynamicInformerManager, + }).SetupWithManagerForResourceBinding(mgr); err != nil { + klog.ErrorS(err, "Unable to set up work generator for resourceBinding") + return err + } + } + // Set up the scheduler klog.Info("Setting up scheduler") defaultProfile := profile.NewDefaultProfile() @@ -328,11 +397,41 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, return err } + if opts.EnableResourcePlacement { + klog.Info("Setting up the resourcePlacement watcher for scheduler") + if err := (&schedulercrpwatcher.Reconciler{ + Client: mgr.GetClient(), + SchedulerWorkQueue: defaultSchedulingQueue, + }).SetupWithManagerForResourcePlacement(mgr); err != nil { + klog.ErrorS(err, "Unable to set up resourcePlacement watcher for scheduler") + return err + } + + klog.Info("Setting up the schedulingPolicySnapshot watcher for scheduler") + if err := (&schedulercspswatcher.Reconciler{ + Client: mgr.GetClient(), + SchedulerWorkQueue: defaultSchedulingQueue, + }).SetupWithManagerForSchedulingPolicySnapshot(mgr); err != nil { + klog.ErrorS(err, "Unable to set up schedulingPolicySnapshot watcher for scheduler") + return err + } + + klog.Info("Setting up the resourceBinding watcher for scheduler") + if err := (&schedulercrbwatcher.Reconciler{ + Client: mgr.GetClient(), + SchedulerWorkQueue: defaultSchedulingQueue, + }).SetupWithManagerForResourceBinding(mgr); err != nil { + klog.ErrorS(err, "Unable to set up resourceBinding watcher for scheduler") + return err + } + } + klog.Info("Setting up the memberCluster watcher for scheduler") if err := (&membercluster.Reconciler{ Client: mgr.GetClient(), SchedulerWorkQueue: defaultSchedulingQueue, ClusterEligibilityChecker: clustereligibilitychecker.New(), + EnableResourcePlacement: opts.EnableResourcePlacement, }).SetupWithManager(mgr); err != nil { klog.ErrorS(err, "Unable to set up memberCluster watcher for scheduler") return err @@ -388,6 +487,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, InformerManager: dynamicInformerManager, PlacementControllerV1Alpha1: clusterResourcePlacementControllerV1Alpha1, PlacementControllerV1Beta1: clusterResourcePlacementControllerV1Beta1, + ResourcePlacementController: resourcePlacementController, } resourceChangeController := 
controller.NewController(resourceChangeControllerName, controller.ClusterWideKeyFunc, rcr.Reconcile, rateLimiter) @@ -397,7 +497,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, RESTMapper: mgr.GetRESTMapper(), ClusterResourcePlacementControllerV1Alpha1: clusterResourcePlacementControllerV1Alpha1, ClusterResourcePlacementControllerV1Beta1: clusterResourcePlacementControllerV1Beta1, - ResourcePlacementController: nil, // TODO: need to enable the resource placement controller when ready + ResourcePlacementController: resourcePlacementController, ResourceChangeController: resourceChangeController, MemberClusterPlacementController: memberClusterPlacementController, InformerManager: dynamicInformerManager, diff --git a/pkg/scheduler/watchers/membercluster/suite_test.go b/pkg/scheduler/watchers/membercluster/suite_test.go index d351b4270..5eacab0f0 100644 --- a/pkg/scheduler/watchers/membercluster/suite_test.go +++ b/pkg/scheduler/watchers/membercluster/suite_test.go @@ -190,6 +190,7 @@ var _ = BeforeSuite(func() { Client: hubClient, SchedulerWorkQueue: schedulerWorkQueue, ClusterEligibilityChecker: clustereligibilitychecker.New(), + EnableResourcePlacement: true, } err = reconciler.SetupWithManager(ctrlMgr) Expect(err).ToNot(HaveOccurred(), "Failed to set up controller with controller manager") diff --git a/pkg/scheduler/watchers/membercluster/watcher.go b/pkg/scheduler/watchers/membercluster/watcher.go index 01d3742bb..c2426822f 100644 --- a/pkg/scheduler/watchers/membercluster/watcher.go +++ b/pkg/scheduler/watchers/membercluster/watcher.go @@ -49,6 +49,9 @@ type Reconciler struct { // clusterEligibilityCheck helps check if a cluster is eligible for resource replacement. ClusterEligibilityChecker *clustereligibilitychecker.ClusterEligibilityChecker + + // enableResourcePlacement indicates whether the resource placement controller is enabled. + EnableResourcePlacement bool } // Reconcile reconciles a member cluster. @@ -141,10 +144,12 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return ctrl.Result{}, controller.NewAPIServerError(true, err) } rpList := &placementv1beta1.ResourcePlacementList{} - // Empty namespace provided to list RPs across all namespaces. - if err := r.Client.List(ctx, rpList, client.InNamespace("")); err != nil { - klog.ErrorS(err, "Failed to list RPs", "memberCluster", memberClusterRef) - return ctrl.Result{}, controller.NewAPIServerError(true, err) + if r.EnableResourcePlacement { + // Empty namespace provided to list RPs across all namespaces. + if err := r.Client.List(ctx, rpList, client.InNamespace("")); err != nil { + klog.ErrorS(err, "Failed to list RPs", "memberCluster", memberClusterRef) + return ctrl.Result{}, controller.NewAPIServerError(true, err) + } } placements := append(convertCRPArrayToPlacementObjs(crpList.Items), convertRPArrayToPlacementObjs(rpList.Items)...) 
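The gating above is the core of the watcher change: ClusterResourcePlacements are always listed, namespaced ResourcePlacements are listed only when the feature is enabled, and the two sets are merged before enqueueing. A self-contained sketch of that pattern follows; the stand-in types and names are assumptions for illustration, not the real kubefleet APIs.

package main

import "fmt"

// placementLister stands in for the controller-runtime client used by the watcher.
type placementLister struct {
	clusterPlacements    []string // stands in for ClusterResourcePlacementList items
	namespacedPlacements []string // stands in for ResourcePlacementList items
}

// placementsToEnqueue mimics the watcher logic: CRPs are always included, RPs only
// when resource placement support is enabled.
func placementsToEnqueue(l placementLister, enableResourcePlacement bool) []string {
	placements := append([]string{}, l.clusterPlacements...)
	if enableResourcePlacement {
		placements = append(placements, l.namespacedPlacements...)
	}
	return placements
}

func main() {
	l := placementLister{
		clusterPlacements:    []string{"crp-1"},
		namespacedPlacements: []string{"team-a/rp-1"},
	}
	fmt.Println(placementsToEnqueue(l, false)) // [crp-1]
	fmt.Println(placementsToEnqueue(l, true))  // [crp-1 team-a/rp-1]
}
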
diff --git a/test/scheduler/suite_test.go b/test/scheduler/suite_test.go index 60e7202d9..c813595a5 100644 --- a/test/scheduler/suite_test.go +++ b/test/scheduler/suite_test.go @@ -615,6 +615,7 @@ func beforeSuiteForProcess1() []byte { Client: hubClient, SchedulerWorkQueue: schedulerWorkQueue, ClusterEligibilityChecker: clusterEligibilityChecker, + EnableResourcePlacement: true, } err = memberClusterWatcher.SetupWithManager(ctrlMgr) Expect(err).NotTo(HaveOccurred(), "Failed to set up member cluster watcher with controller manager") From 97990f1634953956b14d5f3ff9bba485469dc436 Mon Sep 17 00:00:00 2001 From: Zhiying Lin <54013513+zhiying-lin@users.noreply.github.com> Date: Sat, 16 Aug 2025 04:19:56 +0800 Subject: [PATCH 12/38] fix: remove the owner reference from work (#193) --- pkg/controllers/workgenerator/controller.go | 19 +-- .../controller_integration_test.go | 117 ------------------ pkg/controllers/workgenerator/envelope.go | 14 +-- 3 files changed, 7 insertions(+), 143 deletions(-) diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go index 879350472..f334e86c1 100644 --- a/pkg/controllers/workgenerator/controller.go +++ b/pkg/controllers/workgenerator/controller.go @@ -38,7 +38,6 @@ import ( "k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - "k8s.io/utils/ptr" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -326,10 +325,9 @@ func (r *Reconciler) handleDelete(ctx context.Context, resourceBinding fleetv1be return controllerruntime.Result{}, err } - // delete all the listed works - // - // TO-DO: this controller should be able to garbage collect all works automatically via - // background/foreground cascade deletion. This may render the finalizer unnecessary. + // Note: This controller cannot garbage collect all works automatically via background/foreground + // cascade deletion as the namespaces of work and resourceBinding are different + // and we don't set the ownerReference for the works. for workName := range works { work := works[workName] if err := r.Client.Delete(ctx, work); err != nil && !apierrors.IsNotFound(err) { @@ -746,15 +744,8 @@ func generateSnapshotWorkObj(workName string, resourceBinding fleetv1beta1.Bindi fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation: resourceOverrideSnapshotHash, fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: clusterResourceOverrideSnapshotHash, }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: fleetv1beta1.GroupVersion.String(), - Kind: resourceBinding.GetObjectKind().GroupVersionKind().Kind, - Name: resourceBinding.GetName(), - UID: resourceBinding.GetUID(), - BlockOwnerDeletion: ptr.To(true), // make sure that the k8s will call work delete when the binding is deleted - }, - }, + // OwnerReferences cannot be added, as the namespaces of work and resourceBinding are different. + // Garbage collector will assume the resourceBinding is invalid as it cannot be found in the same namespace. 
}, Spec: fleetv1beta1.WorkSpec{ Workload: fleetv1beta1.WorkloadTemplate{ diff --git a/pkg/controllers/workgenerator/controller_integration_test.go b/pkg/controllers/workgenerator/controller_integration_test.go index 3e83ac896..834976485 100644 --- a/pkg/controllers/workgenerator/controller_integration_test.go +++ b/pkg/controllers/workgenerator/controller_integration_test.go @@ -336,15 +336,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -430,15 +421,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -632,15 +614,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -673,15 +646,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: envWork.Name, Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -758,15 +722,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -798,15 +753,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: work.Name, Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: 
placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -918,15 +864,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -958,15 +895,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: envWork.Name, Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -1117,15 +1045,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.WorkNameWithSubindexFmt, testCRPName, 1), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentResourceSnapshotIndexLabel: "2", @@ -1192,15 +1111,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.WorkNameWithSubindexFmt, testCRPName, 1), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentResourceSnapshotIndexLabel: "2", @@ -1491,15 +1401,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -1723,15 +1624,6 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: 
placementv1beta1.GroupVersion.String(), - Kind: "ClusterResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, @@ -4946,15 +4838,6 @@ var _ = Describe("Test Work Generator Controller for ResourcePlacement", func() ObjectMeta: metav1.ObjectMeta{ Name: workName, Namespace: memberClusterNamespaceName, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: placementv1beta1.GroupVersion.String(), - Kind: "ResourceBinding", - Name: binding.Name, - UID: binding.UID, - BlockOwnerDeletion: ptr.To(true), - }, - }, Labels: map[string]string{ placementv1beta1.PlacementTrackingLabel: testRPName, placementv1beta1.ParentBindingLabel: binding.Name, diff --git a/pkg/controllers/workgenerator/envelope.go b/pkg/controllers/workgenerator/envelope.go index f220039f6..ee49d1402 100644 --- a/pkg/controllers/workgenerator/envelope.go +++ b/pkg/controllers/workgenerator/envelope.go @@ -26,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/klog/v2" - "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" @@ -214,17 +213,8 @@ func buildNewWorkForEnvelopeCR( fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation: resourceOverrideSnapshotHash, fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: clusterResourceOverrideSnapshotHash, }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: fleetv1beta1.GroupVersion.String(), - Kind: resourceBinding.GetObjectKind().GroupVersionKind().Kind, - Name: resourceBinding.GetName(), - UID: resourceBinding.GetUID(), - // Make sure that the resource binding can only be deleted after - // all of its managed work objects have been deleted. - BlockOwnerDeletion: ptr.To(true), - }, - }, + // OwnerReferences cannot be added, as the namespaces of work and resourceBinding are different. + // Garbage collector will assume the resourceBinding is invalid as it cannot be found in the same namespace. }, Spec: fleetv1beta1.WorkSpec{ Workload: fleetv1beta1.WorkloadTemplate{ From 401170edcbf572b4778717e645947921db16be45 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Aug 2025 10:17:54 +0800 Subject: [PATCH 13/38] chore: bump actions/checkout from 4 to 5 (#196) Bumps [actions/checkout](https://github.com/actions/checkout) from 4 to 5. - [Release notes](https://github.com/actions/checkout/releases) - [Commits](https://github.com/actions/checkout/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: '5' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] --- .github/workflows/chart.yml | 2 +- .github/workflows/ci.yml | 6 +++--- .github/workflows/code-lint.yml | 4 ++-- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/codespell.yml | 2 +- .github/workflows/markdown-lint.yml | 2 +- .github/workflows/trivy.yml | 2 +- .github/workflows/upgrade.yml | 6 +++--- 8 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/chart.yml b/.github/workflows/chart.yml index b6664c7c6..9d16e834c 100644 --- a/.github/workflows/chart.yml +++ b/.github/workflows/chart.yml @@ -18,7 +18,7 @@ jobs: deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: submodules: true fetch-depth: 0 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b5118c377..136e608be 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -40,7 +40,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Set up Ginkgo CLI run: | @@ -70,7 +70,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Install Kind # Before updating the kind version to use, verify that the current kind image @@ -121,7 +121,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Install Ginkgo CLI run: | diff --git a/.github/workflows/code-lint.yml b/.github/workflows/code-lint.yml index fb96b9838..3cfabf9f6 100644 --- a/.github/workflows/code-lint.yml +++ b/.github/workflows/code-lint.yml @@ -43,7 +43,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: submodules: true @@ -64,7 +64,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: golangci-lint run: make lint diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 6563883d1..fb98847ad 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -38,7 +38,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v5 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index df3664f5e..207b0b0da 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -16,7 +16,7 @@ jobs: with: egress-policy: audit - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@ff7abcd0c3c05ccf6adc123a8cd1fd4fb30fb493 # v4.1.7 - uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # master with: check_filenames: true diff --git a/.github/workflows/markdown-lint.yml b/.github/workflows/markdown-lint.yml index e65a4999c..337f8be6a 100644 --- a/.github/workflows/markdown-lint.yml +++ b/.github/workflows/markdown-lint.yml @@ -10,7 +10,7 @@ jobs: markdown-link-check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: tcort/github-action-markdown-link-check@v1 with: # this will only show errors in the output diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 2d8b51fb7..6e15e88fc 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -44,7 +44,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Login to ${{ env.REGISTRY }} uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 diff --git a/.github/workflows/upgrade.yml b/.github/workflows/upgrade.yml index 459abd416..d040275e7 100644 --- a/.github/workflows/upgrade.yml +++ b/.github/workflows/upgrade.yml @@ -44,7 +44,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: # Fetch the history of all branches and tags. # This is needed for the test suite to switch between releases. @@ -127,7 +127,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: # Fetch the history of all branches and tags. # This is needed for the test suite to switch between releases. @@ -210,7 +210,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: # Fetch the history of all branches and tags. # This is needed for the test suite to switch between releases. 
From 11f130e362e485efa4e7be0a046a2ced1b29503e Mon Sep 17 00:00:00 2001 From: Zhiying Lin <54013513+zhiying-lin@users.noreply.github.com> Date: Mon, 18 Aug 2025 12:25:05 +0800 Subject: [PATCH 14/38] test: increase time in clusterresourcebinding watcher IT (#99) --------- Signed-off-by: Zhiying Lin --- .../clusterresourcebindingwatcher/watcher_integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controllers/clusterresourcebindingwatcher/watcher_integration_test.go b/pkg/controllers/clusterresourcebindingwatcher/watcher_integration_test.go index d08e2e6d4..f24db8198 100644 --- a/pkg/controllers/clusterresourcebindingwatcher/watcher_integration_test.go +++ b/pkg/controllers/clusterresourcebindingwatcher/watcher_integration_test.go @@ -40,7 +40,7 @@ const ( testReason1 = "testReason1" testReason2 = "testReason2" - eventuallyTimeout = time.Second * 10 + eventuallyTimeout = time.Second * 20 consistentlyDuration = time.Second * 10 interval = time.Millisecond * 250 ) From 15623ab958baf3d9e08d62ef33e2732d723fc089 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Mon, 18 Aug 2025 19:10:57 +1000 Subject: [PATCH 15/38] fix: refactor the work applier code to reduce conditional checking + address an issue in work applier where some errors might not get properly surfaced in ReportDiff mode (#183) --- .../workapplier/availability_tracker.go | 2 +- .../workapplier/availability_tracker_test.go | 56 +-- pkg/controllers/workapplier/backoff.go | 4 +- pkg/controllers/workapplier/backoff_test.go | 110 ++--- pkg/controllers/workapplier/controller.go | 117 ++--- .../controller_integration_migrated_test.go | 2 +- .../controller_integration_test.go | 427 +++++++++++++++--- pkg/controllers/workapplier/metrics_test.go | 34 +- pkg/controllers/workapplier/preprocess.go | 16 +- .../workapplier/preprocess_test.go | 12 +- pkg/controllers/workapplier/process.go | 54 +-- pkg/controllers/workapplier/status.go | 115 +++-- pkg/controllers/workapplier/status_test.go | 272 ++++++----- pkg/controllers/workapplier/utils.go | 6 +- .../controller_integration_test.go | 32 +- test/e2e/actuals_test.go | 74 +++ test/e2e/enveloped_object_placement_test.go | 4 +- test/e2e/placement_apply_strategy_test.go | 10 +- test/e2e/placement_drift_diff_test.go | 32 +- test/e2e/placement_negative_cases_test.go | 147 +++++- .../e2e/placement_selecting_resources_test.go | 2 +- test/upgrade/before/actuals_test.go | 4 +- 22 files changed, 1055 insertions(+), 477 deletions(-) diff --git a/pkg/controllers/workapplier/availability_tracker.go b/pkg/controllers/workapplier/availability_tracker.go index 1f7423972..944145134 100644 --- a/pkg/controllers/workapplier/availability_tracker.go +++ b/pkg/controllers/workapplier/availability_tracker.go @@ -48,7 +48,7 @@ func (r *Reconciler) trackInMemberClusterObjAvailability(ctx context.Context, bu doWork := func(pieces int) { bundle := bundles[pieces] - if !isManifestObjectApplied(bundle.applyResTyp) { + if !isManifestObjectApplied(bundle.applyOrReportDiffResTyp) { // The manifest object in the bundle has not been applied yet. No availability check // is needed. 
bundle.availabilityResTyp = ManifestProcessingAvailabilityResultTypeSkipped diff --git a/pkg/controllers/workapplier/availability_tracker_test.go b/pkg/controllers/workapplier/availability_tracker_test.go index a496a8eaa..77cf4379c 100644 --- a/pkg/controllers/workapplier/availability_tracker_test.go +++ b/pkg/controllers/workapplier/availability_tracker_test.go @@ -1047,36 +1047,36 @@ func TestTrackInMemberClusterObjAvailability(t *testing.T) { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 0, }, - gvr: &utils.DeploymentGVR, - inMemberClusterObj: toUnstructured(t, availableDeploy), - applyResTyp: ManifestProcessingApplyResultTypeApplied, + gvr: &utils.DeploymentGVR, + inMemberClusterObj: toUnstructured(t, availableDeploy), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, // A failed to get applied service. { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 1, }, - gvr: &utils.ServiceGVR, - inMemberClusterObj: nil, - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, + gvr: &utils.ServiceGVR, + inMemberClusterObj: nil, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, }, // An unavailable daemon set. { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 2, }, - gvr: &utils.DaemonSetGVR, - inMemberClusterObj: toUnstructured(t, unavailableDaemonSet), - applyResTyp: ManifestProcessingApplyResultTypeApplied, + gvr: &utils.DaemonSetGVR, + inMemberClusterObj: toUnstructured(t, unavailableDaemonSet), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, // An untrackable job. { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 3, }, - gvr: &utils.JobGVR, - inMemberClusterObj: toUnstructured(t, untrackableJob), - applyResTyp: ManifestProcessingApplyResultTypeApplied, + gvr: &utils.JobGVR, + inMemberClusterObj: toUnstructured(t, untrackableJob), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, }, wantBundles: []*manifestProcessingBundle{ @@ -1084,37 +1084,37 @@ func TestTrackInMemberClusterObjAvailability(t *testing.T) { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 0, }, - gvr: &utils.DeploymentGVR, - inMemberClusterObj: toUnstructured(t, availableDeploy), - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + gvr: &utils.DeploymentGVR, + inMemberClusterObj: toUnstructured(t, availableDeploy), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, }, { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 1, }, - gvr: &utils.ServiceGVR, - inMemberClusterObj: nil, - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + gvr: &utils.ServiceGVR, + inMemberClusterObj: nil, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, }, { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 2, }, - gvr: &utils.DaemonSetGVR, - inMemberClusterObj: toUnstructured(t, unavailableDaemonSet), - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + gvr: &utils.DaemonSetGVR, + inMemberClusterObj: toUnstructured(t, unavailableDaemonSet), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, }, { id: &fleetv1beta1.WorkResourceIdentifier{ Ordinal: 3, }, - gvr: &utils.JobGVR, - 
inMemberClusterObj: toUnstructured(t, untrackableJob), - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, + gvr: &utils.JobGVR, + inMemberClusterObj: toUnstructured(t, untrackableJob), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, }, }, }, diff --git a/pkg/controllers/workapplier/backoff.go b/pkg/controllers/workapplier/backoff.go index e9b159632..0470b88f9 100644 --- a/pkg/controllers/workapplier/backoff.go +++ b/pkg/controllers/workapplier/backoff.go @@ -60,7 +60,7 @@ const ( ) const ( - processingResultStrTpl = "%s,%s,%s" + processingResultStrTpl = "%s,%s" ) // RequeueMultiStageWithExponentialBackoffRateLimiter is a rate limiter that allows requeues of various @@ -287,7 +287,7 @@ func computeProcessingResultHash(work *fleetv1beta1.Work, bundles []*manifestPro // The order of manifests is stable in a bundle. processingResults := make([]string, 0, len(bundles)) for _, bundle := range bundles { - processingResults = append(processingResults, fmt.Sprintf(processingResultStrTpl, bundle.applyResTyp, bundle.availabilityResTyp, bundle.reportDiffResTyp)) + processingResults = append(processingResults, fmt.Sprintf(processingResultStrTpl, bundle.applyOrReportDiffResTyp, bundle.availabilityResTyp)) } processingResHash, err := resource.HashOf(processingResults) diff --git a/pkg/controllers/workapplier/backoff_test.go b/pkg/controllers/workapplier/backoff_test.go index 8c0bfe9b2..d46ee5d2f 100644 --- a/pkg/controllers/workapplier/backoff_test.go +++ b/pkg/controllers/workapplier/backoff_test.go @@ -846,7 +846,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, }, wantRequeueDelaySeconds: 5, // Use fixed delay for the third time, since the processing result has changed. @@ -862,7 +862,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, }, wantRequeueDelaySeconds: 10, // Start to slow back off for the third time. @@ -878,7 +878,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, }, wantRequeueDelaySeconds: 20, // The slow back off continues. @@ -894,7 +894,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, }, wantRequeueDelaySeconds: 100, // Start to fast back off again. @@ -910,7 +910,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, }, wantRequeueDelaySeconds: 200, // Reached the max. cap. 
@@ -926,7 +926,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, }, }, wantRequeueDelaySeconds: 5, // Use fixed delay for the fourth time, since both generation and processing result have changed. @@ -976,8 +976,8 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, }, }, wantRequeueDelaySeconds: 5, @@ -1000,8 +1000,8 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, }, }, wantRequeueDelaySeconds: 5, // Use fixed delay, since the processing result has changed. @@ -1024,8 +1024,8 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, }, }, wantRequeueDelaySeconds: 10, // Start the slow backoff. @@ -1048,8 +1048,8 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, }, }, wantRequeueDelaySeconds: 50, // Skip to fast back off. @@ -1072,8 +1072,8 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, }, }, wantRequeueDelaySeconds: 200, // Reached the max. cap. @@ -1098,7 +1098,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, }, }, wantRequeueDelaySeconds: 5, // Use fixed delay, since the processing result has changed. @@ -1123,7 +1123,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, }, }, wantRequeueDelaySeconds: 10, // Start the slow backoff. 
@@ -1148,7 +1148,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, }, }, wantRequeueDelaySeconds: 50, // Skip to fast back off. @@ -1173,7 +1173,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, }, }, wantRequeueDelaySeconds: 200, // Reached the max. cap. @@ -1198,7 +1198,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFoundDiff, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, }, }, wantRequeueDelaySeconds: 5, // Use fixed delay, since the processing result has changed. @@ -1223,7 +1223,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFoundDiff, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, }, }, wantRequeueDelaySeconds: 10, // Start the slow backoff. @@ -1248,7 +1248,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { }, bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFoundDiff, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, }, }, wantRequeueDelaySeconds: 50, // Skip to fast back off. @@ -1474,111 +1474,111 @@ func TestComputeProcessingResultHash(t *testing.T) { bundles: []*manifestProcessingBundle{ {}, }, - wantHash: "ec6e5a3a69851e2b956b6f682bad1d2355faa874e635b4d2f3e33ce84a8f788a", + wantHash: "9637daf658d40f9ab65fc1f86e78f8496692ec8160389758039f752756f0505a", }, { name: "single manifest, apply op failure (pre-processing)", bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeDecodingErred, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeDecodingErred, }, }, - wantHash: "a4cce45a59ced1c0b218b7e2b07920e6515a0bd4e80141f114cf29a1e2062790", + wantHash: "86ab4bd237c2fa247e493a58e91895fe11e7bd2fcfb422890b8c296eaf6cc4ce", }, { name: "single manifest, apply op failure (processing, no error message)", bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, }, }, - wantHash: "f4610fbac163e867a62672a3e95547e8321fa09709ecac73308dfff8fde49511", + wantHash: "0ecc47caf32d81607057dcfb22f60416fe1f1f7930761edb92d4cb7fee4a075f", }, { name: "single manifest, apply op failure (processing, with error message)", bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, - applyErr: fmt.Errorf("failed to apply manifest"), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, + applyOrReportDiffErr: fmt.Errorf("failed to apply manifest"), }, }, // Note that this expected hash value is the same as the previous one. 
- wantHash: "f4610fbac163e867a62672a3e95547e8321fa09709ecac73308dfff8fde49511", + wantHash: "0ecc47caf32d81607057dcfb22f60416fe1f1f7930761edb92d4cb7fee4a075f", }, { name: "single manifest, availability check failure", bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, }, }, - wantHash: "9110cc26c9559ba84e909593a089fd495eb6e86479c9430d5673229ebe2d1275", + wantHash: "339954d2619310502c70300409bdf65fd6f14d81c12cfade84879e713ea850ea", }, { name: "single manifest, apply op + availability check success", bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, }, }, - wantHash: "d922098ce1f87b79fc26fad06355ea4eba77cc5a86e742e9159c58cce5bd4a31", + wantHash: "708387dadaf07f43d46b032c3afb5d984868107b297dad9c99c2d258584d2377", }, { name: "single manifest, diff reporting failure", bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFailed, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToReportDiff, }, }, - wantHash: "dd541a034eb568cf92da960b884dece6d136460399ab68958ce8fc6730c91d45", + wantHash: "c5ffc29f5050ad825711a77012d6be36550035d848deb990082fff196f886906", }, { name: "single manifest, diff reporting success", bundles: []*manifestProcessingBundle{ { - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, }, }, - wantHash: "f9b66724190d196e1cf19247a0447a6ed0d71697dcb8016c0bc3b3726a757e1a", + wantHash: "4bc69d33a287d57e25a5406e47722b1cfa3965472cf9324d3ace2302dd0e9f02", }, { name: "multiple manifests (assorted)", bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, - applyErr: fmt.Errorf("failed to apply manifest"), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, + applyOrReportDiffErr: fmt.Errorf("failed to apply manifest"), }, { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, }, { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, }, }, - wantHash: "09c6195d94bfc84cdbb365bb615d3461a457a355b9f74049488a1db38e979018", + wantHash: "1a001803829ef5509d24d60806593cb5fbfb0445d32b9ab1301e5faea57bbaa9", }, { name: "multiple manifests (assorted, different order)", bundles: []*manifestProcessingBundle{ { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, }, { - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, - applyErr: fmt.Errorf("failed to apply manifest"), + 
applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, + applyOrReportDiffErr: fmt.Errorf("failed to apply manifest"), }, { - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, }, }, // Note that different orders of the manifests result in different hashes. - wantHash: "ef1a6e8d207f5b86a8c7f39417eede40abc6e4f1d5ef9feceb5797f14a834f58", + wantHash: "15461229a70cecc0096aea95c08dbda81990985d69bd6f6a4448254461b84886", }, } diff --git a/pkg/controllers/workapplier/controller.go b/pkg/controllers/workapplier/controller.go index 29a83bf4e..9bf469d43 100644 --- a/pkg/controllers/workapplier/controller.go +++ b/pkg/controllers/workapplier/controller.go @@ -34,6 +34,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" "k8s.io/utils/ptr" + "k8s.io/utils/set" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -231,9 +232,6 @@ type Reconciler struct { } // NewReconciler returns a new Work object reconciler for the work applier. -// -// TO-DO (chenyu1): evaluate if KubeFleet needs to expose the requeue rate limiter -// parameters as command-line arguments for user-side configuration. func NewReconciler( hubClient client.Client, workNameSpace string, spokeDynamicClient dynamic.Interface, spokeClient client.Client, restMapper meta.RESTMapper, @@ -266,33 +264,65 @@ func NewReconciler( } } -type manifestProcessingAppliedResultType string +type ManifestProcessingApplyOrReportDiffResultType string const ( - // The result types and descriptions for processing failures. - ManifestProcessingApplyResultTypeDecodingErred manifestProcessingAppliedResultType = "DecodingErred" - ManifestProcessingApplyResultTypeFoundGenerateName manifestProcessingAppliedResultType = "FoundGenerateName" - ManifestProcessingApplyResultTypeDuplicated manifestProcessingAppliedResultType = "Duplicated" - ManifestProcessingApplyResultTypeFailedToFindObjInMemberCluster manifestProcessingAppliedResultType = "FailedToFindObjInMemberCluster" - ManifestProcessingApplyResultTypeFailedToTakeOver manifestProcessingAppliedResultType = "FailedToTakeOver" - ManifestProcessingApplyResultTypeNotTakenOver manifestProcessingAppliedResultType = "NotTakenOver" - ManifestProcessingApplyResultTypeFailedToRunDriftDetection manifestProcessingAppliedResultType = "FailedToRunDriftDetection" - ManifestProcessingApplyResultTypeFoundDrifts manifestProcessingAppliedResultType = "FoundDrifts" + // The result types for apply op failures. 
+ ApplyOrReportDiffResTypeDecodingErred ManifestProcessingApplyOrReportDiffResultType = "DecodingErred" + ApplyOrReportDiffResTypeFoundGenerateName ManifestProcessingApplyOrReportDiffResultType = "FoundGenerateName" + ApplyOrReportDiffResTypeDuplicated ManifestProcessingApplyOrReportDiffResultType = "Duplicated" + ApplyOrReportDiffResTypeFailedToFindObjInMemberCluster ManifestProcessingApplyOrReportDiffResultType = "FailedToFindObjInMemberCluster" + ApplyOrReportDiffResTypeFailedToTakeOver ManifestProcessingApplyOrReportDiffResultType = "FailedToTakeOver" + ApplyOrReportDiffResTypeNotTakenOver ManifestProcessingApplyOrReportDiffResultType = "NotTakenOver" + ApplyOrReportDiffResTypeFailedToRunDriftDetection ManifestProcessingApplyOrReportDiffResultType = "FailedToRunDriftDetection" + ApplyOrReportDiffResTypeFoundDrifts ManifestProcessingApplyOrReportDiffResultType = "FoundDrifts" // Note that the reason string below uses the same value as kept in the old work applier. - ManifestProcessingApplyResultTypeFailedToApply manifestProcessingAppliedResultType = "ManifestApplyFailed" + ApplyOrReportDiffResTypeFailedToApply ManifestProcessingApplyOrReportDiffResultType = "ManifestApplyFailed" - // The result type and description for partially successfully processing attempts. - ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection manifestProcessingAppliedResultType = "AppliedWithFailedDriftDetection" + // The result type for successful apply ops. + ApplyOrReportDiffResTypeApplied ManifestProcessingApplyOrReportDiffResultType = "Applied" +) + +const ( + // The result type for partially successful apply ops and the description for successful apply ops. + + // The result type for partially successful apply ops. + ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection ManifestProcessingApplyOrReportDiffResultType = "AppliedWithFailedDriftDetection" + // The description for successful apply ops. + ApplyOrReportDiffResTypeAppliedDescription = "Manifest has been applied successfully" +) - ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetectionDescription = "Manifest has been applied successfully, but drift detection has failed" +const ( + // The result type for diff reporting failures. + ApplyOrReportDiffResTypeFailedToReportDiff ManifestProcessingApplyOrReportDiffResultType = "FailedToReportDiff" - // The result type and description for successful processing attempts. - ManifestProcessingApplyResultTypeApplied manifestProcessingAppliedResultType = "Applied" + // The result types for successful diff reporting ops. + ApplyOrReportDiffResTypeFoundDiff ManifestProcessingApplyOrReportDiffResultType = "FoundDiff" + ApplyOrReportDiffResTypeNoDiffFound ManifestProcessingApplyOrReportDiffResultType = "NoDiffFound" +) - ManifestProcessingApplyResultTypeAppliedDescription = "Manifest has been applied successfully" +const ( + // The descriptions for different diff reporting result types. + ApplyOrReportDiffResTypeFailedToReportDiffDescription = "Failed to report the diff between the hub cluster and the member cluster (error = %s)" + ApplyOrReportDiffResTypeNoDiffFoundDescription = "No diff has been found between the hub cluster and the member cluster" + ApplyOrReportDiffResTypeFoundDiffDescription = "Diff has been found between the hub cluster and the member cluster" +) - // A special result type for the case where no apply is performed (i.e., the ReportDiff mode). - ManifestProcessingApplyResultTypeNoApplyPerformed manifestProcessingAppliedResultType = "Skipped" +var ( + // A set of all apply-related result types.
+ manifestProcessingApplyResTypSet = set.New( + ApplyOrReportDiffResTypeDecodingErred, + ApplyOrReportDiffResTypeFoundGenerateName, + ApplyOrReportDiffResTypeDuplicated, + ApplyOrReportDiffResTypeFailedToFindObjInMemberCluster, + ApplyOrReportDiffResTypeFailedToTakeOver, + ApplyOrReportDiffResTypeNotTakenOver, + ApplyOrReportDiffResTypeFailedToRunDriftDetection, + ApplyOrReportDiffResTypeFoundDrifts, + ApplyOrReportDiffResTypeFailedToApply, + ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection, + ApplyOrReportDiffResTypeApplied, + ) ) type ManifestProcessingAvailabilityResultType string @@ -320,39 +350,18 @@ const ( ManifestProcessingAvailabilityResultTypeNotTrackableDescription = "Manifest's availability is not trackable; Fleet assumes that the applied manifest is available" ) -type ManifestProcessingReportDiffResultType string - -const ( - // The result type for the cases where ReportDiff mode is not enabled. - ManifestProcessingReportDiffResultTypeNotEnabled ManifestProcessingReportDiffResultType = "NotEnabled" - - // The result type for diff reporting failures. - ManifestProcessingReportDiffResultTypeFailed ManifestProcessingReportDiffResultType = "Failed" - - ManifestProcessingReportDiffResultTypeFailedDescription = "Failed to report the diff between the hub cluster and the member cluster (error = %s)" - - // The result type for completed diff reportings. - ManifestProcessingReportDiffResultTypeFoundDiff ManifestProcessingReportDiffResultType = "FoundDiff" - ManifestProcessingReportDiffResultTypeNoDiffFound ManifestProcessingReportDiffResultType = "NoDiffFound" - - ManifestProcessingReportDiffResultTypeNoDiffFoundDescription = "No diff has been found between the hub cluster and the member cluster" - ManifestProcessingReportDiffResultTypeFoundDiffDescription = "Diff has been found between the hub cluster and the member cluster" -) - type manifestProcessingBundle struct { - manifest *fleetv1beta1.Manifest - id *fleetv1beta1.WorkResourceIdentifier - manifestObj *unstructured.Unstructured - inMemberClusterObj *unstructured.Unstructured - gvr *schema.GroupVersionResource - applyResTyp manifestProcessingAppliedResultType - availabilityResTyp ManifestProcessingAvailabilityResultType - reportDiffResTyp ManifestProcessingReportDiffResultType - applyErr error - availabilityErr error - reportDiffErr error - drifts []fleetv1beta1.PatchDetail - diffs []fleetv1beta1.PatchDetail + manifest *fleetv1beta1.Manifest + id *fleetv1beta1.WorkResourceIdentifier + manifestObj *unstructured.Unstructured + inMemberClusterObj *unstructured.Unstructured + gvr *schema.GroupVersionResource + applyOrReportDiffResTyp ManifestProcessingApplyOrReportDiffResultType + availabilityResTyp ManifestProcessingAvailabilityResultType + applyOrReportDiffErr error + availabilityErr error + drifts []fleetv1beta1.PatchDetail + diffs []fleetv1beta1.PatchDetail } // Reconcile implement the control loop logic for Work object. 
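// An illustrative sketch of how the consolidated applyOrReportDiffResTyp field plus a result-type
// set can be used to tell which operation produced a bundle's outcome, now that apply results and
// diff-reporting results share one type. The helper below is an assumption for illustration; it
// mirrors the intent of manifestProcessingApplyResTypSet rather than reproducing the repo's code.
package sketch

import "k8s.io/utils/set"

type resultType string

const (
	resTypeApplied     resultType = "Applied"
	resTypeFoundDiff   resultType = "FoundDiff"
	resTypeNoDiffFound resultType = "NoDiffFound"
)

// applyResTypes plays the role of manifestProcessingApplyResTypSet: membership means the result
// came from an apply attempt rather than from diff reporting.
var applyResTypes = set.New(resTypeApplied)

// conditionTypeFor is a hypothetical helper that picks the Work condition a result feeds into,
// shown only to make the apply-vs-report-diff split concrete.
func conditionTypeFor(res resultType) string {
	if applyResTypes.Has(res) {
		return "Applied" // would correspond to WorkConditionTypeApplied
	}
	return "DiffReported" // would correspond to WorkConditionTypeDiffReported
}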
diff --git a/pkg/controllers/workapplier/controller_integration_migrated_test.go b/pkg/controllers/workapplier/controller_integration_migrated_test.go index 738c2383c..20c27bf6d 100644 --- a/pkg/controllers/workapplier/controller_integration_migrated_test.go +++ b/pkg/controllers/workapplier/controller_integration_migrated_test.go @@ -83,7 +83,7 @@ var _ = Describe("Work Controller", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, diff --git a/pkg/controllers/workapplier/controller_integration_test.go b/pkg/controllers/workapplier/controller_integration_test.go index 1bf9a453f..3cdebfc1a 100644 --- a/pkg/controllers/workapplier/controller_integration_test.go +++ b/pkg/controllers/workapplier/controller_integration_test.go @@ -714,7 +714,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -739,7 +739,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { @@ -896,7 +896,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -921,7 +921,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { @@ -1014,7 +1014,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -1193,7 +1193,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -1217,7 +1217,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundGenerateName), + Reason: string(ApplyOrReportDiffResTypeFoundGenerateName), ObservedGeneration: 0, }, }, @@ -1352,7 +1352,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -1376,7 +1376,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeDecodingErred), + Reason: string(ApplyOrReportDiffResTypeDecodingErred), }, }, }, @@ -1393,7 +1393,7 @@ var _ = 
Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, @@ -1463,6 +1463,161 @@ var _ = Describe("applying manifests", func() { // deletion; consequently this test suite would not attempt so verify its deletion. }) }) + + Context("apply op failure (decoding error)", Ordered, func() { + workName := fmt.Sprintf(workNameTemplate, utils.RandStr()) + // The environment prepared by the envtest package does not support namespace + // deletion; each test case would use a new namespace. + nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr()) + + var regularNS *corev1.Namespace + var malformedConfigMap *corev1.ConfigMap + + BeforeAll(func() { + // Prepare a NS object. + regularNS = ns.DeepCopy() + regularNS.Name = nsName + regularNSJSON := marshalK8sObjJSON(regularNS) + + malformedConfigMap = configMap.DeepCopy() + malformedConfigMap.Namespace = nsName + // This will trigger a decoding error on the work applier side as this API is not registered. + malformedConfigMap.TypeMeta = metav1.TypeMeta{ + APIVersion: "malformed/v10", + Kind: "Unknown", + } + malformedConfigMapJSON := marshalK8sObjJSON(malformedConfigMap) + + // Create a new Work object with all the manifest JSONs and proper apply strategy. + createWorkObject(workName, nil, regularNSJSON, malformedConfigMapJSON) + }) + + It("should add cleanup finalizer to the Work object", func() { + finalizerAddedActual := workFinalizerAddedActual(workName) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add cleanup finalizer to the Work object") + }) + + It("should prepare an AppliedWork object", func() { + appliedWorkCreatedActual := appliedWorkCreatedActual(workName) + Eventually(appliedWorkCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to prepare an AppliedWork object") + + appliedWorkOwnerRef = prepareAppliedWorkOwnerRef(workName) + }) + + It("should not apply malformed manifest", func() { + Consistently(func() error { + configMap := &corev1.ConfigMap{} + objKey := client.ObjectKey{Namespace: nsName, Name: malformedConfigMap.Name} + if err := memberClient.Get(ctx, objKey, configMap); !errors.IsNotFound(err) { + return fmt.Errorf("the config map exists, or an unexpected error has occurred: %w", err) + } + return nil + }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Manifests are applied unexpectedly") + }) + + It("should apply the other manifests", func() { + // Ensure that the NS object has been applied as expected. + regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) + Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") + + Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + }) + + It("should update the Work object status", func() { + // Prepare the status information. 
+ workConds := []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: condition.WorkNotAllManifestsAppliedReason, + }, + } + manifestConds := []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 0, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + ObservedGeneration: 0, + }, + }, + }, + { + // Note that this specific decoding error will not block the work applier from extracting + // the GVR, hence the populated API group, version and kind information. + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "malformed", + Version: "v10", + Kind: "Unknown", + Resource: "", + Name: malformedConfigMap.Name, + Namespace: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeDecodingErred), + }, + }, + }, + } + + workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") + }) + + It("should update the AppliedWork object status", func() { + // Prepare the status information. + appliedResourceMeta := []fleetv1beta1.AppliedResourceMeta{ + { + WorkResourceIdentifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + UID: regularNS.UID, + }, + } + + appliedWorkStatusUpdatedActual := appliedWorkStatusUpdated(workName, appliedResourceMeta) + Eventually(appliedWorkStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update appliedWork status") + }) + + AfterAll(func() { + // Delete the Work object and related resources. + deleteWorkObject(workName) + + // Ensure that the AppliedWork object has been removed. + appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName) + Eventually(appliedWorkRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the AppliedWork object") + + workRemovedActual := workRemovedActual(workName) + Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") + + // The environment prepared by the envtest package does not support namespace + // deletion; consequently this test suite would not attempt so verify its deletion. 
+ }) + }) }) var _ = Describe("work applier garbage collection", func() { @@ -1556,7 +1711,7 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -1581,7 +1736,7 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { @@ -1827,7 +1982,7 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -1852,7 +2007,7 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { @@ -1876,7 +2031,7 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -2146,7 +2301,7 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -2171,7 +2326,7 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { @@ -2195,7 +2350,7 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -2463,7 +2618,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, @@ -2486,7 +2641,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 2, }, { @@ -2739,7 +2894,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -2764,7 +2919,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: 
string(ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 1, }, }, @@ -2996,7 +3151,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 0, }, }, @@ -3031,7 +3186,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 1, }, }, @@ -3191,7 +3346,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -3216,7 +3371,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { @@ -3431,7 +3586,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -3456,7 +3611,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), ObservedGeneration: 2, }, }, @@ -3594,7 +3749,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -3717,7 +3872,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), ObservedGeneration: 0, }, }, @@ -3834,7 +3989,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -3960,7 +4115,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -4091,7 +4246,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -4214,7 +4369,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: 
fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), ObservedGeneration: 0, }, }, @@ -4321,7 +4476,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -4452,7 +4607,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -4541,7 +4696,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), ObservedGeneration: 0, }, }, @@ -4615,7 +4770,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), ObservedGeneration: 0, }, }, @@ -4761,7 +4916,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeNotTakenOver), + Reason: string(ApplyOrReportDiffResTypeNotTakenOver), ObservedGeneration: 0, }, }, @@ -4780,7 +4935,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { @@ -4877,7 +5032,7 @@ var _ = Describe("report diff", func() { It("should not apply the manifests", func() { // Ensure that the NS object has not been applied. 
regularNSObjectNotAppliedActual := regularNSObjectNotAppliedActual(nsName) - Eventually(regularNSObjectNotAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to avoid applying the namespace object") + Consistently(regularNSObjectNotAppliedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to avoid applying the namespace object") }) It("should update the Work object status", func() { @@ -4903,7 +5058,7 @@ var _ = Describe("report diff", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 0, }, }, @@ -5114,7 +5269,7 @@ var _ = Describe("report diff", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 0, }, }, @@ -5133,7 +5288,7 @@ var _ = Describe("report diff", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 1, }, }, @@ -5214,7 +5369,7 @@ var _ = Describe("report diff", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 0, }, }, @@ -5233,7 +5388,7 @@ var _ = Describe("report diff", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 2, }, }, @@ -5321,7 +5476,7 @@ var _ = Describe("report diff", func() { It("should not apply any manifest", func() { // Verify that the NS manifest has not been applied. - Eventually(func() error { + Consistently(func() error { // Retrieve the NS object. updatedNS := &corev1.Namespace{} if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { @@ -5342,10 +5497,10 @@ var _ = Describe("report diff", func() { } return nil - }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to leave the NS object alone") + }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to leave the NS object alone") // Verify that the Deployment manifest has not been applied. - Eventually(func() error { + Consistently(func() error { // Retrieve the Deployment object. 
updatedDeploy := &appsv1.Deployment{} if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, updatedDeploy); err != nil { @@ -5393,7 +5548,7 @@ var _ = Describe("report diff", func() { return fmt.Errorf("deployment diff (-got +want):\n%s", diff) } return nil - }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to leave the Deployment object alone") + }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to leave the Deployment object alone") }) It("should update the Work object status", func() { @@ -5419,7 +5574,7 @@ var _ = Describe("report diff", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 0, }, }, @@ -5438,7 +5593,7 @@ var _ = Describe("report diff", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 1, }, }, @@ -5486,6 +5641,146 @@ var _ = Describe("report diff", func() { // deletion; consequently this test suite would not attempt so verify its deletion. }) }) + + Context("report diff failure (decoding error)", Ordered, func() { + workName := fmt.Sprintf(workNameTemplate, utils.RandStr()) + // The environment prepared by the envtest package does not support namespace + // deletion; each test case would use a new namespace. + nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr()) + + var regularNS *corev1.Namespace + var malformedConfigMap *corev1.ConfigMap + + BeforeAll(func() { + // Prepare a NS object. + regularNS = ns.DeepCopy() + regularNS.Name = nsName + regularNSJSON := marshalK8sObjJSON(regularNS) + + malformedConfigMap = configMap.DeepCopy() + malformedConfigMap.Namespace = nsName + // This will trigger a decoding error on the work applier side as this API is not registered. + malformedConfigMap.TypeMeta = metav1.TypeMeta{ + APIVersion: "malformed/v10", + Kind: "Unknown", + } + malformedConfigMapJSON := marshalK8sObjJSON(malformedConfigMap) + + // Create a new Work object with all the manifest JSONs and proper apply strategy. 
+ applyStrategy := &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + } + createWorkObject(workName, applyStrategy, regularNSJSON, malformedConfigMapJSON) + }) + + It("should add cleanup finalizer to the Work object", func() { + finalizerAddedActual := workFinalizerAddedActual(workName) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add cleanup finalizer to the Work object") + }) + + It("should prepare an AppliedWork object", func() { + appliedWorkCreatedActual := appliedWorkCreatedActual(workName) + Eventually(appliedWorkCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to prepare an AppliedWork object") + + appliedWorkOwnerRef = prepareAppliedWorkOwnerRef(workName) + }) + + It("should not apply any manifest", func() { + Consistently(func() error { + configMap := &corev1.ConfigMap{} + objKey := client.ObjectKey{Namespace: nsName, Name: malformedConfigMap.Name} + if err := memberClient.Get(ctx, objKey, configMap); !errors.IsNotFound(err) { + return fmt.Errorf("the config map exists, or an unexpected error has occurred: %w", err) + } + return nil + }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "The config map has been applied unexpectedly") + + Consistently(regularNSObjectNotAppliedActual(nsName), consistentlyDuration, consistentlyInterval).Should(Succeed(), "The namespace object has been applied unexpectedly") + }) + + It("should update the Work object status", func() { + // Prepare the status information. + workConds := []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeDiffReported, + Status: metav1.ConditionFalse, + Reason: condition.WorkNotAllManifestsDiffReportedReason, + }, + } + manifestConds := []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeDiffReported, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeFoundDiff), + ObservedGeneration: 0, + }, + }, + DiffDetails: &fleetv1beta1.DiffDetails{ + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/", + ValueInHub: "(the whole object)", + }, + }, + }, + }, + { + // Note that this specific decoding error will not block the work applier from extracting + // the GVR, hence the populated API group, version and kind information. + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "malformed", + Version: "v10", + Kind: "Unknown", + Resource: "", + Name: malformedConfigMap.Name, + Namespace: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeDiffReported, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeFailedToReportDiff), + }, + }, + }, + } + + workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") + }) + + It("should update the AppliedWork object status", func() { + // Prepare the status information. + appliedWorkStatusUpdatedActual := appliedWorkStatusUpdated(workName, nil) + Eventually(appliedWorkStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update appliedWork status") + }) + + AfterAll(func() { + // Delete the Work object and related resources. 
+ deleteWorkObject(workName) + + // Ensure that the AppliedWork object has been removed. + appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName) + Eventually(appliedWorkRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the AppliedWork object") + + workRemovedActual := workRemovedActual(workName) + Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") + + // The environment prepared by the envtest package does not support namespace + // deletion; consequently this test suite would not attempt so verify its deletion. + }) + }) }) var _ = Describe("handling different apply strategies", func() { @@ -5659,7 +5954,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 0, }, }, @@ -5678,7 +5973,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 1, }, }, @@ -5755,7 +6050,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -5780,7 +6075,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 2, }, { @@ -5969,7 +6264,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -5994,7 +6289,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, { @@ -6052,7 +6347,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 0, }, }, @@ -6071,7 +6366,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 1, }, }, @@ -6270,7 +6565,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeNotTakenOver), + Reason: string(ApplyOrReportDiffResTypeNotTakenOver), ObservedGeneration: 0, }, }, @@ -6289,7 
+6584,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeNotTakenOver), + Reason: string(ApplyOrReportDiffResTypeNotTakenOver), ObservedGeneration: 1, }, }, @@ -6409,7 +6704,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -6434,7 +6729,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 1, }, }, @@ -6649,7 +6944,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { @@ -6674,7 +6969,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 0, }, { diff --git a/pkg/controllers/workapplier/metrics_test.go b/pkg/controllers/workapplier/metrics_test.go index e5f279cb6..d4f63016f 100644 --- a/pkg/controllers/workapplier/metrics_test.go +++ b/pkg/controllers/workapplier/metrics_test.go @@ -71,7 +71,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { Conditions: []metav1.Condition{ { Type: placementv1beta1.WorkConditionTypeApplied, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), Status: metav1.ConditionTrue, }, { @@ -113,7 +113,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToApply), + Reason: string(ApplyOrReportDiffResTypeFailedToApply), }, }, }, @@ -155,7 +155,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { Conditions: []metav1.Condition{ { Type: placementv1beta1.WorkConditionTypeApplied, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), Status: metav1.ConditionTrue, }, { @@ -201,7 +201,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), }, }, }, @@ -243,7 +243,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingReportDiffResultTypeFailed), + Reason: string(ApplyOrReportDiffResTypeFailedToReportDiff), }, }, }, @@ -263,12 +263,12 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { 
fleet_manifest_processing_requests_total{apply_status="Applied",availability_status="Available",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Applied",availability_status="ManifestNotAvailableYet",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="ManifestApplyFailed",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 - fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Failed",drift_detection_status="NotFound"} 1 + fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="FailedToReportDiff",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="NoDiffFound",drift_detection_status="NotFound"} 1 `, }, { - name: "applied failed, found drifts, multiple manifests", + name: "apply op failed, found drifts, multiple manifests", work: &placementv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, @@ -287,7 +287,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), }, }, DriftDetails: &placementv1beta1.DriftDetails{}, @@ -297,7 +297,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: placementv1beta1.WorkConditionTypeAvailable, @@ -323,7 +323,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { fleet_manifest_processing_requests_total{apply_status="Applied",availability_status="ManifestNotAvailableYet",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="FoundDrifts",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="Found"} 1 fleet_manifest_processing_requests_total{apply_status="ManifestApplyFailed",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 - fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Failed",drift_detection_status="NotFound"} 1 + fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="FailedToReportDiff",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="NoDiffFound",drift_detection_status="NotFound"} 1 `, }, @@ -347,7 +347,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: 
placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), }, }, DiffDetails: &placementv1beta1.DiffDetails{}, @@ -357,7 +357,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), }, }, }, @@ -379,7 +379,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { fleet_manifest_processing_requests_total{apply_status="FoundDrifts",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="Found"} 1 fleet_manifest_processing_requests_total{apply_status="ManifestApplyFailed",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="Found",diff_reporting_status="FoundDiff",drift_detection_status="NotFound"} 1 - fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Failed",drift_detection_status="NotFound"} 1 + fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="FailedToReportDiff",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="NoDiffFound",drift_detection_status="NotFound"} 2 `, }, @@ -441,7 +441,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { fleet_manifest_processing_requests_total{apply_status="FoundDrifts",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="Found"} 1 fleet_manifest_processing_requests_total{apply_status="ManifestApplyFailed",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="Found",diff_reporting_status="FoundDiff",drift_detection_status="NotFound"} 1 - fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Failed",drift_detection_status="NotFound"} 1 + fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="FailedToReportDiff",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="NoDiffFound",drift_detection_status="NotFound"} 2 fleet_manifest_processing_requests_total{apply_status="Unknown",availability_status="Unknown",diff_detection_status="NotFound",diff_reporting_status="Unknown",drift_detection_status="NotFound"} 1 `, @@ -477,7 +477,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: 
string(ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection), + Reason: string(ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection), }, { Type: placementv1beta1.WorkConditionTypeAvailable, @@ -487,7 +487,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), }, }, }, @@ -510,7 +510,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { fleet_manifest_processing_requests_total{apply_status="FoundDrifts",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="Found"} 1 fleet_manifest_processing_requests_total{apply_status="ManifestApplyFailed",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Skipped",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="Found",diff_reporting_status="FoundDiff",drift_detection_status="NotFound"} 1 - fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="Failed",drift_detection_status="NotFound"} 1 + fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="FailedToReportDiff",drift_detection_status="NotFound"} 1 fleet_manifest_processing_requests_total{apply_status="Skipped",availability_status="Skipped",diff_detection_status="NotFound",diff_reporting_status="NoDiffFound",drift_detection_status="NotFound"} 2 fleet_manifest_processing_requests_total{apply_status="Unknown",availability_status="Unknown",diff_detection_status="NotFound",diff_reporting_status="Unknown",drift_detection_status="NotFound"} 1 `, diff --git a/pkg/controllers/workapplier/preprocess.go b/pkg/controllers/workapplier/preprocess.go index b28a607e5..7352ef8a9 100644 --- a/pkg/controllers/workapplier/preprocess.go +++ b/pkg/controllers/workapplier/preprocess.go @@ -61,8 +61,8 @@ func (r *Reconciler) preProcessManifests( bundle.id = buildWorkResourceIdentifier(pieces, gvr, manifestObj) if err != nil { klog.ErrorS(err, "Failed to decode the manifest", "ordinal", pieces, "work", klog.KObj(work)) - bundle.applyErr = fmt.Errorf("failed to decode manifest: %w", err) - bundle.applyResTyp = ManifestProcessingApplyResultTypeDecodingErred + bundle.applyOrReportDiffErr = fmt.Errorf("failed to decode manifest: %w", err) + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeDecodingErred return } @@ -70,8 +70,8 @@ func (r *Reconciler) preProcessManifests( if len(manifestObj.GetGenerateName()) > 0 && len(manifestObj.GetName()) == 0 { // The manifest object has a generate name but no name. 
klog.V(2).InfoS("Rejected an object with only generate name", "manifestObj", klog.KObj(manifestObj), "work", klog.KObj(work)) - bundle.applyErr = fmt.Errorf("objects with only generate name are not supported") - bundle.applyResTyp = ManifestProcessingApplyResultTypeFoundGenerateName + bundle.applyOrReportDiffErr = fmt.Errorf("objects with only generate name are not supported") + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFoundGenerateName return } @@ -146,7 +146,7 @@ func (r *Reconciler) writeAheadManifestProcessingAttempts( checked := make(map[string]bool, len(bundles)) for idx := range bundles { bundle := bundles[idx] - if bundle.applyErr != nil { + if bundle.applyOrReportDiffErr != nil { // Skip a manifest if it cannot be pre-processed, i.e., it can only be identified by // its ordinal. // @@ -154,7 +154,7 @@ func (r *Reconciler) writeAheadManifestProcessingAttempts( // reconciliation loop), it is just that they are not relevant in the write-ahead // process. klog.V(2).InfoS("Skipped a manifest in the write-ahead process as it has failed pre-processing", "work", workRef, - "ordinal", idx, "applyErr", bundle.applyErr, "applyResTyp", bundle.applyResTyp) + "ordinal", idx, "applyErr", bundle.applyOrReportDiffErr, "applyResTyp", bundle.applyOrReportDiffResTyp) continue } @@ -180,8 +180,8 @@ func (r *Reconciler) writeAheadManifestProcessingAttempts( if _, found := checked[wriStr]; found { klog.V(2).InfoS("A duplicate manifest has been found", "ordinal", idx, "work", workRef, "workResourceID", wriStr) - bundle.applyErr = fmt.Errorf("a duplicate manifest has been found") - bundle.applyResTyp = ManifestProcessingApplyResultTypeDuplicated + bundle.applyOrReportDiffErr = fmt.Errorf("a duplicate manifest has been found") + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeDuplicated continue } checked[wriStr] = true diff --git a/pkg/controllers/workapplier/preprocess_test.go b/pkg/controllers/workapplier/preprocess_test.go index 63f6d1621..4f229a52b 100644 --- a/pkg/controllers/workapplier/preprocess_test.go +++ b/pkg/controllers/workapplier/preprocess_test.go @@ -448,14 +448,14 @@ func TestPrepareManifestCondForWA(t *testing.T) { { Identifier: *nsWRI(0, nsName), Conditions: []metav1.Condition{ - manifestAppliedCond(workGeneration, metav1.ConditionTrue, string(ManifestProcessingApplyResultTypeApplied), ManifestProcessingApplyResultTypeAppliedDescription), + manifestAppliedCond(workGeneration, metav1.ConditionTrue, string(ApplyOrReportDiffResTypeApplied), ApplyOrReportDiffResTypeAppliedDescription), }, }, }, wantManifestCondForWA: &fleetv1beta1.ManifestCondition{ Identifier: *nsWRI(0, nsName), Conditions: []metav1.Condition{ - manifestAppliedCond(workGeneration, metav1.ConditionTrue, string(ManifestProcessingApplyResultTypeApplied), ManifestProcessingApplyResultTypeAppliedDescription), + manifestAppliedCond(workGeneration, metav1.ConditionTrue, string(ApplyOrReportDiffResTypeApplied), ApplyOrReportDiffResTypeAppliedDescription), }, }, }, @@ -515,7 +515,7 @@ func TestFindLeftOverManifests(t *testing.T) { { Identifier: *nsWRI(1, nsName1), Conditions: []metav1.Condition{ - manifestAppliedCond(workGeneration0, metav1.ConditionTrue, string(ManifestProcessingApplyResultTypeApplied), ManifestProcessingApplyResultTypeAppliedDescription), + manifestAppliedCond(workGeneration0, metav1.ConditionTrue, string(ApplyOrReportDiffResTypeApplied), ApplyOrReportDiffResTypeAppliedDescription), }, }, }, @@ -530,21 +530,21 @@ func TestFindLeftOverManifests(t *testing.T) { { Identifier: *nsWRI(1, 
nsName1), Conditions: []metav1.Condition{ - manifestAppliedCond(workGeneration0, metav1.ConditionTrue, string(ManifestProcessingApplyResultTypeApplied), ManifestProcessingApplyResultTypeAppliedDescription), + manifestAppliedCond(workGeneration0, metav1.ConditionTrue, string(ApplyOrReportDiffResTypeApplied), ApplyOrReportDiffResTypeAppliedDescription), }, }, // Manifest condition that corresponds to a previously applied and now gone manifest. { Identifier: *nsWRI(2, nsName2), Conditions: []metav1.Condition{ - manifestAppliedCond(workGeneration0, metav1.ConditionTrue, string(ManifestProcessingApplyResultTypeApplied), ManifestProcessingApplyResultTypeAppliedDescription), + manifestAppliedCond(workGeneration0, metav1.ConditionTrue, string(ApplyOrReportDiffResTypeApplied), ApplyOrReportDiffResTypeAppliedDescription), }, }, // Manifest condition that corresponds to a gone manifest that failed to be applied. { Identifier: *nsWRI(3, nsName3), Conditions: []metav1.Condition{ - manifestAppliedCond(workGeneration0, metav1.ConditionFalse, string(ManifestProcessingApplyResultTypeFailedToApply), ""), + manifestAppliedCond(workGeneration0, metav1.ConditionFalse, string(ApplyOrReportDiffResTypeFailedToApply), ""), }, }, // Manifest condition that corresponds to a gone manifest that has been marked as to be applied (preparing to be processed). diff --git a/pkg/controllers/workapplier/process.go b/pkg/controllers/workapplier/process.go index 9b660e528..646433b1f 100644 --- a/pkg/controllers/workapplier/process.go +++ b/pkg/controllers/workapplier/process.go @@ -40,7 +40,7 @@ func (r *Reconciler) processManifests( // TODO: We have to apply the namespace/crd/secret/configmap/pvc first // then we can process some of the manifests in parallel. for _, bundle := range bundles { - if bundle.applyErr != nil { + if bundle.applyOrReportDiffErr != nil { // Skip a manifest if it has failed pre-processing. continue } @@ -95,8 +95,8 @@ func (r *Reconciler) processOneManifest( if !canApplyWithOwnership(bundle.inMemberClusterObj, expectedAppliedWorkOwnerRef) { klog.V(2).InfoS("Ownership is not established yet; skip the apply op", "manifestObj", manifestObjRef, "GVR", *bundle.gvr, "work", workRef) - bundle.applyErr = fmt.Errorf("no ownership of the object in the member cluster; takeover is needed") - bundle.applyResTyp = ManifestProcessingApplyResultTypeNotTakenOver + bundle.applyOrReportDiffErr = fmt.Errorf("no ownership of the object in the member cluster; takeover is needed") + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeNotTakenOver return } @@ -109,8 +109,8 @@ func (r *Reconciler) processOneManifest( // Perform the apply op. appliedObj, err := r.apply(ctx, bundle.gvr, bundle.manifestObj, bundle.inMemberClusterObj, work.Spec.ApplyStrategy, expectedAppliedWorkOwnerRef) if err != nil { - bundle.applyErr = fmt.Errorf("failed to apply the manifest: %w", err) - bundle.applyResTyp = ManifestProcessingApplyResultTypeFailedToApply + bundle.applyOrReportDiffErr = fmt.Errorf("failed to apply the manifest: %w", err) + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFailedToApply klog.ErrorS(err, "Failed to apply the manifest", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), "inMemberClusterObj", klog.KObj(bundle.inMemberClusterObj), "expectedAppliedWorkOwnerRef", *expectedAppliedWorkOwnerRef) @@ -138,7 +138,7 @@ func (r *Reconciler) processOneManifest( } // All done. 
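// The preprocess.go and process.go changes above collapse the bundle's former
// applyResTyp/applyErr and reportDiffResTyp/reportDiffErr pairs into a single
// applyOrReportDiffResTyp/applyOrReportDiffErr pair. The following is a minimal,
// self-contained sketch of that idea only; the type, constant, and field names
// below are hypothetical stand-ins rather than the actual work applier definitions.
package main

import (
	"errors"
	"fmt"
)

// result stands in for ManifestProcessingApplyOrReportDiffResultType.
type result string

const (
	resApplied            result = "Applied"
	resFailedToApply      result = "ManifestApplyFailed"
	resFoundDiff          result = "FoundDiff"
	resFailedToReportDiff result = "FailedToReportDiff"
)

// bundle is a trimmed-down, hypothetical per-manifest bundle.
type bundle struct {
	applyOrReportDiffResTyp result
	applyOrReportDiffErr    error
}

func main() {
	b := &bundle{}

	// In apply mode the shared fields record the apply outcome...
	b.applyOrReportDiffResTyp = resFailedToApply
	b.applyOrReportDiffErr = errors.New("failed to apply the manifest")

	// ...and in ReportDiff mode the very same fields record the diff-reporting
	// outcome, so status and metrics code downstream inspects one result only.
	b.applyOrReportDiffResTyp = resFoundDiff
	b.applyOrReportDiffErr = nil

	fmt.Println(b.applyOrReportDiffResTyp, b.applyOrReportDiffErr)
}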
- bundle.applyResTyp = ManifestProcessingApplyResultTypeApplied + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeApplied klog.V(2).InfoS("Manifest processing completed", "manifestObj", manifestObjRef, "GVR", *bundle.gvr, "work", workRef) } @@ -172,8 +172,8 @@ func (r *Reconciler) findInMemberClusterObjectFor( default: // An unexpected error has occurred. wrappedErr := controller.NewAPIServerError(true, err) - bundle.applyErr = fmt.Errorf("failed to find the corresponding object for the manifest object in the member cluster: %w", wrappedErr) - bundle.applyResTyp = ManifestProcessingApplyResultTypeFailedToFindObjInMemberCluster + bundle.applyOrReportDiffErr = fmt.Errorf("failed to find the corresponding object for the manifest object in the member cluster: %w", wrappedErr) + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFailedToFindObjInMemberCluster klog.ErrorS(wrappedErr, "Failed to find the corresponding object for the manifest object in the member cluster", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), @@ -204,8 +204,8 @@ func (r *Reconciler) takeOverInMemberClusterObjectIfApplicable( switch { case err != nil: // An unexpected error has occurred. - bundle.applyErr = fmt.Errorf("failed to take over a pre-existing object: %w", err) - bundle.applyResTyp = ManifestProcessingApplyResultTypeFailedToTakeOver + bundle.applyOrReportDiffErr = fmt.Errorf("failed to take over a pre-existing object: %w", err) + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFailedToTakeOver klog.ErrorS(err, "Failed to take over a pre-existing object", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), "inMemberClusterObj", klog.KObj(bundle.inMemberClusterObj), "expectedAppliedWorkOwnerRef", *expectedAppliedWorkOwnerRef) @@ -214,8 +214,8 @@ func (r *Reconciler) takeOverInMemberClusterObjectIfApplicable( // Takeover cannot be performed as configuration differences are found between the manifest // object and the object in the member cluster. bundle.diffs = configDiffs - bundle.applyErr = fmt.Errorf("cannot take over object: configuration differences are found between the manifest object and the corresponding object in the member cluster") - bundle.applyResTyp = ManifestProcessingApplyResultTypeFailedToTakeOver + bundle.applyOrReportDiffErr = fmt.Errorf("cannot take over object: configuration differences are found between the manifest object and the corresponding object in the member cluster") + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFailedToTakeOver klog.V(2).InfoS("Cannot take over object as configuration differences are found between the manifest object and the corresponding object in the member cluster", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), "expectedAppliedWorkOwnerRef", *expectedAppliedWorkOwnerRef) @@ -273,20 +273,16 @@ func (r *Reconciler) reportDiffOnlyIfApplicable( expectedAppliedWorkOwnerRef *metav1.OwnerReference, ) (shouldSkipProcessing bool) { if work.Spec.ApplyStrategy.Type != fleetv1beta1.ApplyStrategyTypeReportDiff { - // ReportDiff mode is not enabled; proceed with the processing. - bundle.reportDiffResTyp = ManifestProcessingReportDiffResultTypeNotEnabled klog.V(2).InfoS("ReportDiff mode is not enabled; skip the step") return false } - bundle.applyResTyp = ManifestProcessingApplyResultTypeNoApplyPerformed - if bundle.inMemberClusterObj == nil { // The object has not created in the member cluster yet. 
// // In this case, the diff found would be the full object; for simplicity reasons, // Fleet will use a placeholder here rather than including the full JSON representation. - bundle.reportDiffResTyp = ManifestProcessingReportDiffResultTypeFoundDiff + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFoundDiff bundle.diffs = []fleetv1beta1.PatchDetail{ { // The root path. @@ -311,8 +307,8 @@ func (r *Reconciler) reportDiffOnlyIfApplicable( switch { case err != nil: // Failed to calculate the configuration diffs. - bundle.reportDiffErr = fmt.Errorf("failed to calculate configuration diffs between the manifest object and the object from the member cluster: %w", err) - bundle.reportDiffResTyp = ManifestProcessingReportDiffResultTypeFailed + bundle.applyOrReportDiffErr = fmt.Errorf("failed to calculate configuration diffs between the manifest object and the object from the member cluster: %w", err) + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFailedToReportDiff klog.ErrorS(err, "Failed to calculate configuration diffs between the manifest object and the object from the member cluster", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), @@ -320,14 +316,14 @@ func (r *Reconciler) reportDiffOnlyIfApplicable( case len(configDiffs) > 0: // Configuration diffs are found. bundle.diffs = configDiffs - bundle.reportDiffResTyp = ManifestProcessingReportDiffResultTypeFoundDiff + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFoundDiff klog.V(2).InfoS("Diff report completed; configuration diffs are found", "diffCount", len(configDiffs), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), "work", klog.KObj(work)) default: // No configuration diffs are found. - bundle.reportDiffResTyp = ManifestProcessingReportDiffResultTypeNoDiffFound + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeNoDiffFound klog.V(2).InfoS("Diff report completed; no configuration diffs are found", "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), "work", klog.KObj(work)) @@ -380,8 +376,8 @@ func (r *Reconciler) performPreApplyDriftDetectionIfApplicable( // For completion purposes, Fleet will still attempt to catch this and // report this as an unexpected error. _ = controller.NewUnexpectedBehaviorError(fmt.Errorf("failed to determine if pre-apply drift detection is needed: %w", err)) - bundle.applyErr = fmt.Errorf("failed to determine if pre-apply drift detection is needed: %w", err) - bundle.applyResTyp = ManifestProcessingApplyResultTypeFailedToRunDriftDetection + bundle.applyOrReportDiffErr = fmt.Errorf("failed to determine if pre-apply drift detection is needed: %w", err) + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFailedToRunDriftDetection return true case !isPreApplyDriftDetectionNeeded: // Drift detection is not needed; proceed with the processing. @@ -396,8 +392,8 @@ func (r *Reconciler) performPreApplyDriftDetectionIfApplicable( switch { case err != nil: // An unexpected error has occurred. 
- bundle.applyErr = fmt.Errorf("failed to calculate pre-apply drifts between the manifest and the object from the member cluster: %w", err) - bundle.applyResTyp = ManifestProcessingApplyResultTypeFailedToRunDriftDetection + bundle.applyOrReportDiffErr = fmt.Errorf("failed to calculate pre-apply drifts between the manifest and the object from the member cluster: %w", err) + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFailedToRunDriftDetection klog.ErrorS(err, "Failed to calculate pre-apply drifts between the manifest and the object from the member cluster", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), @@ -406,8 +402,8 @@ func (r *Reconciler) performPreApplyDriftDetectionIfApplicable( case len(drifts) > 0: // Drifts are found in the pre-apply drift detection process. bundle.drifts = drifts - bundle.applyErr = fmt.Errorf("cannot apply manifest: drifts are found between the manifest and the object from the member cluster") - bundle.applyResTyp = ManifestProcessingApplyResultTypeFoundDrifts + bundle.applyOrReportDiffErr = fmt.Errorf("cannot apply manifest: drifts are found between the manifest and the object from the member cluster") + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeFoundDrifts klog.V(2).InfoS("Cannot apply manifest: drifts are found between the manifest and the object from the member cluster", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), "inMemberClusterObj", klog.KObj(bundle.inMemberClusterObj), "expectedAppliedWorkOwnerRef", *expectedAppliedWorkOwnerRef) @@ -467,10 +463,10 @@ func (r *Reconciler) performPostApplyDriftDetectionIfApplicable( switch { case err != nil: // An unexpected error has occurred. - bundle.applyErr = fmt.Errorf("failed to calculate post-apply drifts between the manifest object and the object from the member cluster: %w", err) + bundle.applyOrReportDiffErr = fmt.Errorf("failed to calculate post-apply drifts between the manifest object and the object from the member cluster: %w", err) // This case counts as a partial error; the apply op has been completed, but Fleet // cannot determine if there are any drifts. 
- bundle.applyResTyp = ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection + bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection klog.ErrorS(err, "Failed to calculate post-apply drifts between the manifest object and the object from the member cluster", "work", klog.KObj(work), "GVR", *bundle.gvr, "manifestObj", klog.KObj(bundle.manifestObj), diff --git a/pkg/controllers/workapplier/status.go b/pkg/controllers/workapplier/status.go index 85d22ffe6..5ace70115 100644 --- a/pkg/controllers/workapplier/status.go +++ b/pkg/controllers/workapplier/status.go @@ -86,6 +86,7 @@ func (r *Reconciler) refreshWorkStatus( } } + isReportDiffModeOn := work.Spec.ApplyStrategy != nil && work.Spec.ApplyStrategy.Type == fleetv1beta1.ApplyStrategyTypeReportDiff for idx := range bundles { bundle := bundles[idx] @@ -102,9 +103,9 @@ func (r *Reconciler) refreshWorkStatus( if bundle.inMemberClusterObj != nil { inMemberClusterObjGeneration = bundle.inMemberClusterObj.GetGeneration() } - setManifestAppliedCondition(manifestCond, bundle.applyResTyp, bundle.applyErr, inMemberClusterObjGeneration) + setManifestAppliedCondition(manifestCond, isReportDiffModeOn, bundle.applyOrReportDiffResTyp, bundle.applyOrReportDiffErr, inMemberClusterObjGeneration) setManifestAvailableCondition(manifestCond, bundle.availabilityResTyp, bundle.availabilityErr, inMemberClusterObjGeneration) - setManifestDiffReportedCondition(manifestCond, bundle.reportDiffResTyp, bundle.reportDiffErr, inMemberClusterObjGeneration) + setManifestDiffReportedCondition(manifestCond, isReportDiffModeOn, bundle.applyOrReportDiffResTyp, bundle.applyOrReportDiffErr, inMemberClusterObjGeneration) // Check if a first drifted timestamp has been set; if not, set it to the current time. firstDriftedTimestamp := &now @@ -151,7 +152,7 @@ func (r *Reconciler) refreshWorkStatus( } // Tally the stats. - if isManifestObjectApplied(bundle.applyResTyp) { + if isManifestObjectApplied(bundle.applyOrReportDiffResTyp) { appliedManifestsCount++ } if isAppliedObjectAvailable(bundle.availabilityResTyp) { @@ -160,7 +161,7 @@ func (r *Reconciler) refreshWorkStatus( if bundle.availabilityResTyp == ManifestProcessingAvailabilityResultTypeNotTrackable { untrackableAppliedObjectsCount++ } - if isManifestObjectDiffReported(bundle.reportDiffResTyp) { + if isManifestObjectDiffReported(bundle.applyOrReportDiffResTyp) { diffReportedObjectsCount++ } } @@ -210,7 +211,7 @@ func (r *Reconciler) refreshAppliedWorkStatus( for idx := range bundles { bundle := bundles[idx] - if isManifestObjectApplied(bundle.applyResTyp) { + if isManifestObjectApplied(bundle.applyOrReportDiffResTyp) { appliedResources = append(appliedResources, fleetv1beta1.AppliedResourceMeta{ WorkResourceIdentifier: *bundle.id, UID: bundle.inMemberClusterObj.GetUID(), @@ -237,29 +238,33 @@ func isAppliedObjectAvailable(availabilityResTyp ManifestProcessingAvailabilityR // isManifestObjectDiffReported returns if a diff report result type indicates that a manifest // object has been checked for configuration differences. 
-func isManifestObjectDiffReported(reportDiffResTyp ManifestProcessingReportDiffResultType) bool {
-	return reportDiffResTyp == ManifestProcessingReportDiffResultTypeFoundDiff || reportDiffResTyp == ManifestProcessingReportDiffResultTypeNoDiffFound
+func isManifestObjectDiffReported(reportDiffResTyp ManifestProcessingApplyOrReportDiffResultType) bool {
+	return reportDiffResTyp == ApplyOrReportDiffResTypeFoundDiff || reportDiffResTyp == ApplyOrReportDiffResTypeNoDiffFound
 }
 
 // setManifestAppliedCondition sets the Applied condition on an applied manifest.
 func setManifestAppliedCondition(
 	manifestCond *fleetv1beta1.ManifestCondition,
-	appliedResTyp manifestProcessingAppliedResultType,
-	applyError error,
+	isReportDiffModeOn bool,
+	applyOrReportDiffResTyp ManifestProcessingApplyOrReportDiffResultType,
+	applyOrReportDiffError error,
 	inMemberClusterObjGeneration int64,
 ) {
 	var appliedCond *metav1.Condition
-	switch appliedResTyp {
-	case ManifestProcessingApplyResultTypeApplied:
+	switch {
+	case isReportDiffModeOn:
+		// ReportDiff mode is on and no apply op has been performed. In this case, Fleet
+		// will reset the Applied condition.
+	case applyOrReportDiffResTyp == ApplyOrReportDiffResTypeApplied:
 		// The manifest has been successfully applied.
 		appliedCond = &metav1.Condition{
 			Type:               fleetv1beta1.WorkConditionTypeApplied,
 			Status:             metav1.ConditionTrue,
-			Reason:             string(ManifestProcessingApplyResultTypeApplied),
-			Message:            ManifestProcessingApplyResultTypeAppliedDescription,
+			Reason:             string(ApplyOrReportDiffResTypeApplied),
+			Message:            ApplyOrReportDiffResTypeAppliedDescription,
 			ObservedGeneration: inMemberClusterObjGeneration,
 		}
-	case ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection:
+	case applyOrReportDiffResTyp == ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection:
 		// The manifest has been successfully applied, but drift detection has failed.
 		//
 		// At this moment Fleet does not prepare a dedicated condition for drift detection
@@ -267,20 +272,35 @@ func setManifestAppliedCondition(
 		appliedCond = &metav1.Condition{
 			Type:               fleetv1beta1.WorkConditionTypeApplied,
 			Status:             metav1.ConditionTrue,
-			Reason:             string(ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection),
-			Message:            ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetectionDescription,
+			Reason:             string(ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection),
+			Message:            string(ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection),
+			ObservedGeneration: inMemberClusterObjGeneration,
+		}
+	case !manifestProcessingApplyResTypSet.Has(applyOrReportDiffResTyp):
+		// Do a sanity check; verify if the returned result type is a valid one.
+		// Normally this branch should never run.
+		wrappedErr := fmt.Errorf("found an unexpected apply result type %s", applyOrReportDiffResTyp)
+		klog.ErrorS(wrappedErr, "Failed to set Applied condition",
+			"workResourceID", manifestCond.Identifier,
+			"applyOrReportDiffResTyp", applyOrReportDiffResTyp,
+			"applyOrReportDiffError", applyOrReportDiffError)
+		_ = controller.NewUnexpectedBehaviorError(wrappedErr)
+		// The work applier will consider this to be an apply failure.
+ appliedCond = &metav1.Condition{ + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeFailedToApply), + Message: fmt.Sprintf("An unexpected apply result is yielded (%s, error: %s)", + applyOrReportDiffResTyp, applyOrReportDiffError), ObservedGeneration: inMemberClusterObjGeneration, } - case ManifestProcessingApplyResultTypeNoApplyPerformed: - // ReportDiff mode is on and no apply op has been performed. In this case, Fleet - // will reset the Applied condition. default: // The apply op fails. appliedCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(appliedResTyp), - Message: fmt.Sprintf("Failed to apply the manifest (error: %s)", applyError), + Reason: string(applyOrReportDiffResTyp), + Message: fmt.Sprintf("Failed to apply the manifest (error: %s)", applyOrReportDiffError), ObservedGeneration: inMemberClusterObjGeneration, } } @@ -289,7 +309,7 @@ func setManifestAppliedCondition( meta.SetStatusCondition(&manifestCond.Conditions, *appliedCond) klog.V(2).InfoS("Applied condition set in ManifestCondition", "workResourceID", manifestCond.Identifier, - "applyResTyp", appliedResTyp, "applyError", applyError, + "applyOrReportDiffResTyp", applyOrReportDiffResTyp, "applyOrReportDiffError", applyOrReportDiffError, "inMemberClusterObjGeneration", inMemberClusterObjGeneration) } else { // As the conditions are ported back; removal must be performed if the Applied @@ -297,7 +317,7 @@ func setManifestAppliedCondition( meta.RemoveStatusCondition(&manifestCond.Conditions, fleetv1beta1.WorkConditionTypeApplied) klog.V(2).InfoS("Applied condition removed from ManifestCondition", "workResourceID", manifestCond.Identifier, - "applyResTyp", appliedResTyp, "applyError", applyError, + "applyOrReportDiffResTyp", applyOrReportDiffResTyp, "applyOrReportDiffError", applyOrReportDiffError, "inMemberClusterObjGeneration", inMemberClusterObjGeneration) } } @@ -373,43 +393,52 @@ func setManifestAvailableCondition( // setManifestDiffReportedCondition sets the DiffReported condition on a manifest. func setManifestDiffReportedCondition( manifestCond *fleetv1beta1.ManifestCondition, - reportDiffResTyp ManifestProcessingReportDiffResultType, - reportDiffError error, + isReportDiffModeOn bool, + applyOrReportDiffResTyp ManifestProcessingApplyOrReportDiffResultType, + applyOrReportDiffErr error, inMemberClusterObjGeneration int64, ) { var diffReportedCond *metav1.Condition - switch reportDiffResTyp { - case ManifestProcessingReportDiffResultTypeFailed: + switch { + case !isReportDiffModeOn: + // ReportDiff mode is not on; Fleet will remove DiffReported condition. + case applyOrReportDiffResTyp == ApplyOrReportDiffResTypeFailedToReportDiff: // Diff reporting has failed. diffReportedCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingReportDiffResultTypeFailed), - Message: fmt.Sprintf(ManifestProcessingReportDiffResultTypeFailedDescription, reportDiffError), + Reason: string(ApplyOrReportDiffResTypeFailedToReportDiff), + Message: fmt.Sprintf(ApplyOrReportDiffResTypeFailedToReportDiffDescription, applyOrReportDiffErr), ObservedGeneration: inMemberClusterObjGeneration, } - case ManifestProcessingReportDiffResultTypeNotEnabled: - // Diff reporting is not enabled. 
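// Both setManifestAppliedCondition and setManifestDiffReportedCondition in the
// hunks above follow the same set-or-remove pattern: build a condition only when
// the current mode calls for it, and otherwise leave the pointer nil so that any
// condition ported over from a previous run is removed. A compact, runnable
// sketch of that pattern follows; the condition type string and helper name are
// illustrative, not the actual work applier code.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// setOrRemoveAppliedCond sets the Applied condition in apply mode and removes it
// in ReportDiff mode, mirroring the nil-pointer convention used above.
func setOrRemoveAppliedCond(conditions *[]metav1.Condition, isReportDiffModeOn bool, generation int64) {
	var appliedCond *metav1.Condition
	if !isReportDiffModeOn {
		appliedCond = &metav1.Condition{
			Type:               "Applied",
			Status:             metav1.ConditionTrue,
			Reason:             "Applied",
			ObservedGeneration: generation,
		}
	}

	if appliedCond != nil {
		meta.SetStatusCondition(conditions, *appliedCond)
	} else {
		meta.RemoveStatusCondition(conditions, "Applied")
	}
}

func main() {
	conds := []metav1.Condition{}
	setOrRemoveAppliedCond(&conds, false, 1)
	fmt.Println(len(conds)) // 1: the Applied condition is set.
	setOrRemoveAppliedCond(&conds, true, 1)
	fmt.Println(len(conds)) // 0: the Applied condition is removed in ReportDiff mode.
}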
- // - // For simplicity reasons, the DiffReported condition will only appear when - // the ReportDiff mode is on; in other configurations, the condition will be - // removed. - case ManifestProcessingReportDiffResultTypeNoDiffFound: + case applyOrReportDiffResTyp == ApplyOrReportDiffResTypeNoDiffFound: // No diff has been found. diffReportedCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), - Message: ManifestProcessingReportDiffResultTypeNoDiffFoundDescription, + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), + Message: ApplyOrReportDiffResTypeNoDiffFoundDescription, ObservedGeneration: inMemberClusterObjGeneration, } - case ManifestProcessingReportDiffResultTypeFoundDiff: + case applyOrReportDiffResTyp == ApplyOrReportDiffResTypeFoundDiff: // Found diffs. diffReportedCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), - Message: ManifestProcessingReportDiffResultTypeFoundDiffDescription, + Reason: string(ApplyOrReportDiffResTypeFoundDiff), + Message: ApplyOrReportDiffResTypeFoundDiffDescription, + ObservedGeneration: inMemberClusterObjGeneration, + } + default: + // There are cases where the work applier might not be able to complete the diff reporting + // due to failures in the pre-processing or processing stage (e.g., the manifest cannot be decoded, + // or the user sets up a takeover strategy that cannot be completed). This is not considered + // as a system error. + diffReportedCond = &metav1.Condition{ + Type: fleetv1beta1.WorkConditionTypeDiffReported, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeFailedToReportDiff), + Message: fmt.Sprintf("An error blocks the diff reporting process (%s, error: %s)", applyOrReportDiffResTyp, applyOrReportDiffErr), ObservedGeneration: inMemberClusterObjGeneration, } } @@ -418,7 +447,7 @@ func setManifestDiffReportedCondition( meta.SetStatusCondition(&manifestCond.Conditions, *diffReportedCond) klog.V(2).InfoS("DiffReported condition set in ManifestCondition", "workResourceID", manifestCond.Identifier, - "reportDiffResTyp", reportDiffResTyp, "reportDiffError", reportDiffError, + "applyOrReportDiffResTyp", applyOrReportDiffResTyp, "applyOrReportDiffErr", applyOrReportDiffErr, "inMemberClusterObjGeneration", inMemberClusterObjGeneration) } else { // As the conditions are ported back; removal must be performed if the DiffReported @@ -426,7 +455,7 @@ func setManifestDiffReportedCondition( meta.RemoveStatusCondition(&manifestCond.Conditions, fleetv1beta1.WorkConditionTypeDiffReported) klog.V(2).InfoS("DiffReported condition removed from ManifestCondition", "workResourceID", manifestCond.Identifier, - "reportDiffResTyp", reportDiffResTyp, "reportDiffError", reportDiffError, + "applyOrReportDiffResTyp", applyOrReportDiffResTyp, "applyOrReportDiffErr", applyOrReportDiffErr, "inMemberClusterObjGeneration", inMemberClusterObjGeneration) } } diff --git a/pkg/controllers/workapplier/status_test.go b/pkg/controllers/workapplier/status_test.go index 1e64d0fd9..423080118 100644 --- a/pkg/controllers/workapplier/status_test.go +++ b/pkg/controllers/workapplier/status_test.go @@ -18,6 +18,7 @@ package workapplier import ( "context" + "fmt" "testing" "time" @@ -93,10 +94,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: 
toUnstructured(t, deploy1.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy1.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -129,7 +129,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 2, }, { @@ -163,10 +163,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy2.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy2.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -178,10 +177,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy3.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeFailedToTakeOver, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy3.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToTakeOver, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -208,7 +206,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection), + Reason: string(ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection), }, }, }, @@ -226,7 +224,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(ApplyOrReportDiffResTypeFailedToTakeOver), }, }, }, @@ -252,10 +250,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeFailed, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeFailed, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -267,10 +264,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, - 
reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -282,10 +278,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "jobs", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -316,7 +311,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, @@ -339,7 +334,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, @@ -362,7 +357,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, @@ -406,7 +401,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), }, }, DriftDetails: &fleetv1beta1.DriftDetails{ @@ -427,7 +422,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(ApplyOrReportDiffResTypeFailedToTakeOver), }, }, DiffDetails: &fleetv1beta1.DiffDetails{ @@ -449,10 +444,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeFoundDrifts, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDrifts, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, drifts: []fleetv1beta1.PatchDetail{ { Path: "/spec/replicas", @@ -470,10 +464,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy2.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeFailedToTakeOver, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + inMemberClusterObj: toUnstructured(t, deploy2.DeepCopy()), + applyOrReportDiffResTyp: 
ApplyOrReportDiffResTypeFailedToTakeOver, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, diffs: []fleetv1beta1.PatchDetail{ { Path: "/spec/replicas", @@ -507,7 +500,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), }, }, DriftDetails: &fleetv1beta1.DriftDetails{ @@ -534,7 +527,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(ApplyOrReportDiffResTypeFailedToTakeOver), }, }, DiffDetails: &fleetv1beta1.DiffDetails{ @@ -589,7 +582,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), }, }, DriftDetails: &fleetv1beta1.DriftDetails{ @@ -610,10 +603,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeNoApplyPerformed, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFoundDiff, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, diffs: []fleetv1beta1.PatchDetail{ { Path: "/x", @@ -646,7 +638,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), }, }, DiffDetails: &fleetv1beta1.DiffDetails{ @@ -700,7 +692,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, @@ -723,11 +715,10 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeNoApplyPerformed, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, - diffs: []fleetv1beta1.PatchDetail{}, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + diffs: []fleetv1beta1.PatchDetail{}, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -754,7 +745,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), }, }, }, @@ -799,7 +790,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: 
string(ApplyOrReportDiffResTypeFoundDrifts), }, }, DriftDetails: &fleetv1beta1.DriftDetails{ @@ -819,7 +810,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, @@ -842,10 +833,10 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeNoApplyPerformed, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFoundDiff, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + diffs: []fleetv1beta1.PatchDetail{ { Path: "/x", @@ -862,10 +853,9 @@ func TestRefreshWorkStatus(t *testing.T) { Name: nsName, Resource: "namespaces", }, - inMemberClusterObj: toUnstructured(t, ns.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeNoApplyPerformed, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + inMemberClusterObj: toUnstructured(t, ns.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -892,7 +882,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), }, }, DiffDetails: &fleetv1beta1.DiffDetails{ @@ -918,7 +908,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), }, }, }, @@ -963,7 +953,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(ApplyOrReportDiffResTypeFoundDrifts), }, }, DriftDetails: &fleetv1beta1.DriftDetails{ @@ -983,7 +973,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), }, { Type: fleetv1beta1.WorkConditionTypeAvailable, @@ -1006,10 +996,9 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeNoApplyPerformed, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFailed, + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToReportDiff, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -1020,10 +1009,9 @@ func TestRefreshWorkStatus(t *testing.T) { Name: nsName, Resource: "namespaces", 
}, - inMemberClusterObj: toUnstructured(t, ns.DeepCopy()), - applyResTyp: ManifestProcessingApplyResultTypeNoApplyPerformed, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + inMemberClusterObj: toUnstructured(t, ns.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, + availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -1050,7 +1038,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingReportDiffResultTypeFailed), + Reason: string(ApplyOrReportDiffResTypeFailedToReportDiff), }, }, }, @@ -1067,7 +1055,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), }, }, }, @@ -1166,8 +1154,8 @@ func TestRefreshAppliedWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy1), - applyResTyp: ManifestProcessingApplyResultTypeApplied, + inMemberClusterObj: toUnstructured(t, deploy1), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -1178,8 +1166,8 @@ func TestRefreshAppliedWorkStatus(t *testing.T) { Name: nsName, Resource: "namespaces", }, - inMemberClusterObj: toUnstructured(t, ns1), - applyResTyp: ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection, + inMemberClusterObj: toUnstructured(t, ns1), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -1191,8 +1179,8 @@ func TestRefreshAppliedWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy2), - applyResTyp: ManifestProcessingApplyResultTypeFailedToFindObjInMemberCluster, + inMemberClusterObj: toUnstructured(t, deploy2), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToFindObjInMemberCluster, }, }, wantAppliedWorkStatus: &fleetv1beta1.AppliedWorkStatus{ @@ -1258,22 +1246,23 @@ func TestSetManifestAppliedCondition(t *testing.T) { testCases := []struct { name string manifestCond *fleetv1beta1.ManifestCondition - applyResTyp manifestProcessingAppliedResultType - applyErr error + isReportDiffModeOn bool + applyOrReportDiffResTyp ManifestProcessingApplyOrReportDiffResultType + applyOrReportDiffErr error observedInMemberClusterGeneration int64 wantManifestCond *fleetv1beta1.ManifestCondition }{ { name: "applied", manifestCond: &fleetv1beta1.ManifestCondition{}, - applyResTyp: ManifestProcessingApplyResultTypeApplied, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, observedInMemberClusterGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, }, @@ -1286,19 +1275,19 @@ func TestSetManifestAppliedCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), 
ObservedGeneration: 1, }, }, }, - applyResTyp: ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection, observedInMemberClusterGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection), + Reason: string(ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection), ObservedGeneration: 1, }, }, @@ -1311,19 +1300,19 @@ func TestSetManifestAppliedCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, }, }, - applyResTyp: ManifestProcessingApplyResultTypeFailedToApply, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, observedInMemberClusterGeneration: 2, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingApplyResultTypeFailedToApply), + Reason: string(ApplyOrReportDiffResTypeFailedToApply), ObservedGeneration: 2, }, }, @@ -1336,22 +1325,51 @@ func TestSetManifestAppliedCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingApplyResultTypeApplied), + Reason: string(ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, }, }, }, - applyResTyp: ManifestProcessingApplyResultTypeNoApplyPerformed, + isReportDiffModeOn: true, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, observedInMemberClusterGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{}, }, }, + { + // Normally this should never occur. 
+ name: "encountered an unexpected result type", + manifestCond: &fleetv1beta1.ManifestCondition{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 1, + }, + }, + }, + isReportDiffModeOn: false, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, + applyOrReportDiffErr: nil, + observedInMemberClusterGeneration: 1, + wantManifestCond: &fleetv1beta1.ManifestCondition{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeFailedToApply), + ObservedGeneration: 1, + }, + }, + }, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - setManifestAppliedCondition(tc.manifestCond, tc.applyResTyp, tc.applyErr, tc.observedInMemberClusterGeneration) + setManifestAppliedCondition(tc.manifestCond, tc.isReportDiffModeOn, tc.applyOrReportDiffResTyp, tc.applyOrReportDiffErr, tc.observedInMemberClusterGeneration) if diff := cmp.Diff(tc.manifestCond, tc.wantManifestCond, ignoreFieldConditionLTTMsg); diff != "" { t.Errorf("set manifest cond mismatches (-got, +want):\n%s", diff) } @@ -1486,22 +1504,24 @@ func TestSetManifestDiffReportedCondition(t *testing.T) { testCases := []struct { name string manifestCond *fleetv1beta1.ManifestCondition - reportDiffResTyp ManifestProcessingReportDiffResultType - reportDiffError error + isReportDiffModeOn bool + applyOrReportDiffResTyp ManifestProcessingApplyOrReportDiffResultType + applyOrReportDiffErr error inMemberClusterObjGeneration int64 wantManifestCond *fleetv1beta1.ManifestCondition }{ { name: "failed", manifestCond: &fleetv1beta1.ManifestCondition{}, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFailed, + isReportDiffModeOn: true, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToReportDiff, inMemberClusterObjGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingReportDiffResultTypeFailed), + Reason: string(ApplyOrReportDiffResTypeFailedToReportDiff), ObservedGeneration: 1, }, }, @@ -1514,19 +1534,20 @@ func TestSetManifestDiffReportedCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 1, }, }, }, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeFoundDiff, + isReportDiffModeOn: true, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, inMemberClusterObjGeneration: 2, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 2, }, }, @@ -1539,19 +1560,20 @@ func TestSetManifestDiffReportedCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 1, }, }, }, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNoDiffFound, + isReportDiffModeOn: true, + 
applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, inMemberClusterObjGeneration: 2, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 2, }, }, @@ -1564,22 +1586,50 @@ func TestSetManifestDiffReportedCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 1, }, }, }, - reportDiffResTyp: ManifestProcessingReportDiffResultTypeNotEnabled, + isReportDiffModeOn: false, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, inMemberClusterObjGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{}, }, }, + { + name: "decoding error", + manifestCond: &fleetv1beta1.ManifestCondition{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeDiffReported, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeNoDiffFound), + ObservedGeneration: 1, + }, + }, + }, + isReportDiffModeOn: true, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeDecodingErred, + applyOrReportDiffErr: fmt.Errorf("decoding error"), + inMemberClusterObjGeneration: 1, + wantManifestCond: &fleetv1beta1.ManifestCondition{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeDiffReported, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeFailedToReportDiff), + ObservedGeneration: 1, + }, + }, + }, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - setManifestDiffReportedCondition(tc.manifestCond, tc.reportDiffResTyp, tc.reportDiffError, tc.inMemberClusterObjGeneration) + setManifestDiffReportedCondition(tc.manifestCond, tc.isReportDiffModeOn, tc.applyOrReportDiffResTyp, tc.applyOrReportDiffErr, tc.inMemberClusterObjGeneration) if diff := cmp.Diff(tc.manifestCond, tc.wantManifestCond, ignoreFieldConditionLTTMsg); diff != "" { t.Errorf("set manifest cond mismatches (-got, +want):\n%s", diff) } diff --git a/pkg/controllers/workapplier/utils.go b/pkg/controllers/workapplier/utils.go index 09d72014c..86f3b2b5b 100644 --- a/pkg/controllers/workapplier/utils.go +++ b/pkg/controllers/workapplier/utils.go @@ -44,9 +44,9 @@ func formatWRIString(wri *fleetv1beta1.WorkResourceIdentifier) (string, error) { // isManifestObjectApplied returns if an applied result type indicates that a manifest // object in a bundle has been successfully applied. 
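// The table-driven tests above compare got/want conditions with cmp.Diff and an
// ignoreFieldConditionLTTMsg option whose definition sits outside this excerpt.
// One plausible way to build such an option, shown here only as an assumption, is
// cmpopts.IgnoreFields over the volatile metav1.Condition fields.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Ignore LastTransitionTime and Message so each case only pins down
	// Type, Status, Reason, and ObservedGeneration.
	ignoreLTTMsg := cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "Message")

	got := []metav1.Condition{
		{Type: "Applied", Status: metav1.ConditionTrue, Reason: "Applied", Message: "all good", LastTransitionTime: metav1.Now()},
	}
	want := []metav1.Condition{
		{Type: "Applied", Status: metav1.ConditionTrue, Reason: "Applied"},
	}

	if diff := cmp.Diff(got, want, ignoreLTTMsg); diff != "" {
		fmt.Printf("conditions mismatch (-got, +want):\n%s\n", diff)
	} else {
		fmt.Println("conditions match")
	}
}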
-func isManifestObjectApplied(appliedResTyp manifestProcessingAppliedResultType) bool { - return appliedResTyp == ManifestProcessingApplyResultTypeApplied || - appliedResTyp == ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection +func isManifestObjectApplied(appliedResTyp ManifestProcessingApplyOrReportDiffResultType) bool { + return appliedResTyp == ApplyOrReportDiffResTypeApplied || + appliedResTyp == ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection } // isPlacedByFleetInDuplicate checks if the object has already been placed by Fleet via another diff --git a/pkg/controllers/workgenerator/controller_integration_test.go b/pkg/controllers/workgenerator/controller_integration_test.go index 834976485..5fdd7cbae 100644 --- a/pkg/controllers/workgenerator/controller_integration_test.go +++ b/pkg/controllers/workgenerator/controller_integration_test.go @@ -2309,7 +2309,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(workapplier.ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2330,7 +2330,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingReportDiffResultTypeFoundDiff), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDiff), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2395,7 +2395,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingReportDiffResultTypeFailed), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToReportDiff), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2448,7 +2448,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeDiffReported, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingReportDiffResultTypeNoDiffFound), + Reason: string(workapplier.ApplyOrReportDiffResTypeNoDiffFound), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2686,7 +2686,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2718,7 +2718,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection), + Reason: string(workapplier.ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2792,7 +2792,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToApply), + 
Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToApply), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2822,7 +2822,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", Condition: metav1.Condition{ Type: string(placementv1beta1.WorkConditionTypeApplied), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2839,7 +2839,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", Condition: metav1.Condition{ Type: string(placementv1beta1.WorkConditionTypeApplied), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToApply), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToApply), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2932,7 +2932,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingApplyResultTypeApplied), + Reason: string(workapplier.ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2972,7 +2972,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", Condition: metav1.Condition{ Type: string(placementv1beta1.WorkConditionTypeApplied), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3060,7 +3060,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingApplyResultTypeApplied), + Reason: string(workapplier.ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3089,7 +3089,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection), + Reason: string(workapplier.ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3213,7 +3213,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingApplyResultTypeApplied), + Reason: string(workapplier.ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3242,7 +3242,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingApplyResultTypeAppliedWithFailedDriftDetection), + Reason: string(workapplier.ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3349,7 +3349,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: 
placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingApplyResultTypeApplied), + Reason: string(workapplier.ApplyOrReportDiffResTypeApplied), ObservedGeneration: 1, Message: "", LastTransitionTime: now, diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index 191f11aeb..aed42282d 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -390,6 +390,45 @@ func crpDiffReportedConditions(generation int64, hasOverride bool) []metav1.Cond } } +func crpDiffReportingFailedConditions(generation int64, hasOverride bool) []metav1.Condition { + overrideConditionReason := condition.OverrideNotSpecifiedReason + if hasOverride { + overrideConditionReason = condition.OverriddenSucceededReason + } + return []metav1.Condition{ + { + Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: overrideConditionReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementDiffReportedConditionType), + Status: metav1.ConditionFalse, + Reason: condition.DiffReportedStatusFalseReason, + ObservedGeneration: generation, + }, + } +} + func crpRolloutCompletedConditions(generation int64, hasOverride bool) []metav1.Condition { overrideConditionReason := condition.OverrideNotSpecifiedReason if hasOverride { @@ -539,6 +578,41 @@ func resourcePlacementDiffReportedConditions(generation int64) []metav1.Conditio } } +func resourcePlacementDiffReportingFailedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.PerClusterScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.PerClusterRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.PerClusterOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.PerClusterWorkSynchronizedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.AllWorkSyncedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourceBindingDiffReported), + Status: metav1.ConditionFalse, + Reason: condition.WorkNotDiffReportedReason, + ObservedGeneration: generation, + }, + } +} + func resourcePlacementRolloutCompletedConditions(generation int64, resourceIsTrackable bool, hasOverride bool) []metav1.Condition { availableConditionReason := condition.WorkNotAvailabilityTrackableReason if resourceIsTrackable { diff --git a/test/e2e/enveloped_object_placement_test.go b/test/e2e/enveloped_object_placement_test.go index 4b54368b7..ff13cf054 100644 
--- a/test/e2e/enveloped_object_placement_test.go +++ b/test/e2e/enveloped_object_placement_test.go @@ -609,7 +609,7 @@ var _ = Describe("Process objects with generate name", Ordered, func() { Condition: metav1.Condition{ Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundGenerateName), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundGenerateName), ObservedGeneration: 0, }, }, @@ -689,7 +689,7 @@ func checkForRolloutStuckOnOneFailedClusterStatus(wantSelectedResources []placem Condition: metav1.Condition{ Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToApply), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToApply), }, }, } diff --git a/test/e2e/placement_apply_strategy_test.go b/test/e2e/placement_apply_strategy_test.go index a297f4049..c87a263e3 100644 --- a/test/e2e/placement_apply_strategy_test.go +++ b/test/e2e/placement_apply_strategy_test.go @@ -278,7 +278,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { Condition: metav1.Condition{ Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 0, }, }, @@ -516,7 +516,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { Condition: metav1.Condition{ Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, { @@ -529,7 +529,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { Condition: metav1.Condition{ Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, }, @@ -983,7 +983,7 @@ var _ = Describe("switching apply strategies", func() { Condition: metav1.Condition{ Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeNotTakenOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeNotTakenOver), }, }, }, @@ -1050,7 +1050,7 @@ var _ = Describe("switching apply strategies", func() { Condition: metav1.Condition{ Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeNotTakenOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeNotTakenOver), }, }, }, diff --git a/test/e2e/placement_drift_diff_test.go b/test/e2e/placement_drift_diff_test.go index 02f6bc63b..002e34381 100644 --- a/test/e2e/placement_drift_diff_test.go +++ b/test/e2e/placement_drift_diff_test.go @@ -199,7 +199,7 @@ var _ = Describe("take over existing resources", func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, }, @@ -370,7 +370,7 @@ var _ = Describe("take over 
existing resources", func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, { @@ -384,7 +384,7 @@ var _ = Describe("take over existing resources", func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, }, @@ -715,7 +715,7 @@ var _ = Describe("detect drifts on placed resources", func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, }, @@ -898,7 +898,7 @@ var _ = Describe("detect drifts on placed resources", func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, { @@ -912,7 +912,7 @@ var _ = Describe("detect drifts on placed resources", func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, }, @@ -1531,7 +1531,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 1, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, }, @@ -1572,7 +1572,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 2, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, }, @@ -1611,7 +1611,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, { @@ -1626,7 +1626,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 2, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, }, @@ -1778,7 +1778,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 2, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, }, 
@@ -1819,7 +1819,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 3, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, }, @@ -1858,7 +1858,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, { @@ -1873,7 +1873,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 2, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, }, @@ -2027,7 +2027,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 0, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), }, }, { @@ -2042,7 +2042,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionFalse, ObservedGeneration: 2, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFoundDrifts), + Reason: string(workapplier.ApplyOrReportDiffResTypeFoundDrifts), }, }, }, diff --git a/test/e2e/placement_negative_cases_test.go b/test/e2e/placement_negative_cases_test.go index cb0d59de5..b0c754441 100644 --- a/test/e2e/placement_negative_cases_test.go +++ b/test/e2e/placement_negative_cases_test.go @@ -12,6 +12,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -23,23 +24,23 @@ import ( ) var _ = Describe("handling errors and failures gracefully", func() { + envelopeName := "wrapper" + wrappedCMName1 := "app-1" + wrappedCMName2 := "app-2" + + cmDataKey := "foo" + cmDataVal1 := "bar" + cmDataVal2 := "baz" + // This test spec uses envelopes for placement as it is a bit tricky to simulate // decoding errors with resources created directly in the hub cluster. // // TO-DO (chenyu1): reserve an API group exclusively on the hub cluster so that - // envelopes do not need to used for this test spec. - Context("pre-processing failure (decoding errors)", Ordered, func() { + // envelopes do not need to be used for this test spec. + Context("pre-processing failure in apply ops (decoding errors)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) - envelopeName := "wrapper" - wrappedCMName1 := "app-1" - wrappedCMName2 := "app-2" - - cmDataKey := "foo" - cmDataVal1 := "bar" - cmDataVal2 := "baz" - BeforeAll(func() { // Use an envelope to create duplicate resource entries. 
ns := appNamespace() @@ -145,7 +146,7 @@ var _ = Describe("handling errors and failures gracefully", func() { Condition: metav1.Condition{ Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeDecodingErred), + Reason: string(workapplier.ApplyOrReportDiffResTypeDecodingErred), ObservedGeneration: 0, }, }, @@ -217,4 +218,128 @@ var _ = Describe("handling errors and failures gracefully", func() { ensureCRPAndRelatedResourcesDeleted(crpName, []*framework.Cluster{memberCluster1EastProd}) }) }) + + Context("pre-processing failure in report diff mode (decoding errors)", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Use an envelope to create duplicate resource entries. + ns := appNamespace() + Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name) + + // Create an envelope resource to wrap the configMaps. + resourceEnvelope := &placementv1beta1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: envelopeName, + Namespace: ns.Name, + }, + Data: map[string]runtime.RawExtension{}, + } + + // Create a malformed config map as a wrapped resource. + badConfigMap := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "malformed/v10", + Kind: "Unknown", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + Name: wrappedCMName1, + }, + Data: map[string]string{ + cmDataKey: cmDataVal1, + }, + } + badCMBytes, err := json.Marshal(badConfigMap) + Expect(err).To(BeNil(), "Failed to marshal configMap %s", badConfigMap.Name) + resourceEnvelope.Data["cm1.yaml"] = runtime.RawExtension{Raw: badCMBytes} + Expect(hubClient.Create(ctx, resourceEnvelope)).To(Succeed(), "Failed to create configMap %s", resourceEnvelope.Name) + + // Create a CRP. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: workResourceSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + ApplyStrategy: &placementv1beta1.ApplyStrategy{ + Type: placementv1beta1.ApplyStrategyTypeReportDiff, + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + }) + + It("should update CRP status as expected", func() { + Eventually(func() error { + crp := &placementv1beta1.ClusterResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + return err + } + + wantStatus := placementv1beta1.PlacementStatus{ + Conditions: crpDiffReportingFailedConditions(crp.Generation, false), + PerClusterPlacementStatuses: []placementv1beta1.PerClusterPlacementStatus{ + { + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementDiffReportingFailedConditions(crp.Generation), + }, + }, + SelectedResources: []placementv1beta1.ResourceIdentifier{ + { + Kind: "Namespace", + Name: workNamespaceName, + Version: "v1", + }, + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: envelopeName, + Namespace: workNamespaceName, + }, + }, + ObservedResourceIndex: "0", + } + if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + } + return nil + }, eventuallyDuration*20, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("should not apply any resource", func() { + Consistently(func() error { + cm := &corev1.ConfigMap{} + if err := memberCluster1EastProdClient.Get(ctx, types.NamespacedName{Name: wrappedCMName1, Namespace: workNamespaceName}, cm); !errors.IsNotFound(err) { + return fmt.Errorf("the config map exists, or an unexpected error has occurred: %w", err) + } + return nil + }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "The malformed configMap has been applied unexpectedly") + + Consistently(workNamespaceRemovedFromClusterActual(memberCluster1EastProd)).Should(Succeed(), "The namespace object has been applied unexpectedly") + }) + + AfterAll(func() { + // Remove the CRP and the namespace from the hub cluster. 
+ ensureCRPAndRelatedResourcesDeleted(crpName, []*framework.Cluster{memberCluster1EastProd}) + }) + }) }) diff --git a/test/e2e/placement_selecting_resources_test.go b/test/e2e/placement_selecting_resources_test.go index ffd923aa7..c0f1e716a 100644 --- a/test/e2e/placement_selecting_resources_test.go +++ b/test/e2e/placement_selecting_resources_test.go @@ -841,7 +841,7 @@ var _ = Describe("validating CRP when failed to apply resources", Ordered, func( Condition: metav1.Condition{ Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToTakeOver), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), ObservedGeneration: 0, }, }, diff --git a/test/upgrade/before/actuals_test.go b/test/upgrade/before/actuals_test.go index 8108b5c22..287a72f02 100644 --- a/test/upgrade/before/actuals_test.go +++ b/test/upgrade/before/actuals_test.go @@ -486,7 +486,7 @@ func crpWithOneFailedApplyOpStatusUpdatedActual( Status: metav1.ConditionFalse, // The new and old applier uses the same reason string to make things // a bit easier. - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToApply), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToApply), ObservedGeneration: wantFailedResourceObservedGeneration, }, }, @@ -694,7 +694,7 @@ func crpWithStuckRolloutDueToOneFailedApplyOpStatusUpdatedActual( Status: metav1.ConditionFalse, // The new and old applier uses the same reason string to make things // a bit easier. - Reason: string(workapplier.ManifestProcessingApplyResultTypeFailedToApply), + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToApply), ObservedGeneration: failedResourceObservedGeneration, }, }, From 4d24e413f2cfe113b244ae45b1d99c24b7b2e343 Mon Sep 17 00:00:00 2001 From: Wantong Date: Mon, 18 Aug 2025 12:25:00 -0700 Subject: [PATCH 16/38] docs: add rp examples (#192) --- examples/resourceplacement/rp-cm.yaml | 21 ++++++++++++++++++ examples/resourceplacement/rp-deploy.yaml | 26 +++++++++++++++++++++++ examples/resourceplacement/test-crp.yaml | 20 +++++++++++++++++ 3 files changed, 67 insertions(+) create mode 100644 examples/resourceplacement/rp-cm.yaml create mode 100644 examples/resourceplacement/rp-deploy.yaml create mode 100644 examples/resourceplacement/test-crp.yaml diff --git a/examples/resourceplacement/rp-cm.yaml b/examples/resourceplacement/rp-cm.yaml new file mode 100644 index 000000000..ebd0946eb --- /dev/null +++ b/examples/resourceplacement/rp-cm.yaml @@ -0,0 +1,21 @@ +# This tests selecting a single resource in a namespace, +# and applying it to all clusters. +# Prerequisite: create a configMap named "test-cm" in namespace "test-ns". +apiVersion: placement.kubernetes-fleet.io/v1beta1 +kind: ResourcePlacement +metadata: + name: rp-cm + namespace: test-ns +spec: + resourceSelectors: + - group: "" + kind: ConfigMap + name: test-cm + version: v1 + policy: + placementType: PickAll + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 1 diff --git a/examples/resourceplacement/rp-deploy.yaml b/examples/resourceplacement/rp-deploy.yaml new file mode 100644 index 000000000..2d4f32220 --- /dev/null +++ b/examples/resourceplacement/rp-deploy.yaml @@ -0,0 +1,26 @@ +# This tests selecting multiple resources in a namespace, +# and only applying to a subset of clusters. +# Prerequisite: create and expose a deployment named "test-nginx" in namespace "test-ns". 
+apiVersion: placement.kubernetes-fleet.io/v1beta1 +kind: ResourcePlacement +metadata: + name: rp-nginx + namespace: test-ns +spec: + resourceSelectors: + - group: apps + kind: Deployment + name: test-nginx + version: v1 + - group: "" + kind: Service + name: test-nginx + version: v1 + policy: + placementType: PickN + numberOfClusters: 2 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 1 diff --git a/examples/resourceplacement/test-crp.yaml b/examples/resourceplacement/test-crp.yaml new file mode 100644 index 000000000..b497c9b1c --- /dev/null +++ b/examples/resourceplacement/test-crp.yaml @@ -0,0 +1,20 @@ +# This tests a CRP selecting a namespace only. +# Prerequisite: create a namespace named "test-ns". +apiVersion: placement.kubernetes-fleet.io/v1beta1 +kind: ClusterResourcePlacement +metadata: + name: ns-only-crp +spec: + resourceSelectors: + - group: "" + kind: Namespace + name: test-ns + version: v1 + selectionScope: NamespaceOnly # only namespace itself is placed, no resources within the namespace + policy: + placementType: PickAll + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 1 From d8d26187bc186be74f47372d56929b4d54c29947 Mon Sep 17 00:00:00 2001 From: Wantong Date: Mon, 18 Aug 2025 13:02:53 -0700 Subject: [PATCH 17/38] feat: enable RP metrics (#195) --- cmd/hubagent/main.go | 13 ++++++-- .../clusterresourceplacement/controller.go | 10 +++---- .../controller_integration_test.go | 30 ++++++++++++++++++- .../clusterresourceplacement/suite_test.go | 5 ++++ .../controller.go | 2 +- .../controller_intergration_test.go | 2 +- .../controller_test.go | 2 +- .../suite_test.go | 10 +++++++ pkg/controllers/updaterun/controller.go | 2 +- .../updaterun/controller_integration_test.go | 2 +- pkg/controllers/updaterun/suite_test.go | 11 +++++-- pkg/metrics/metrics.go | 20 +++++++++++++ pkg/utils/controller/metrics/metrics.go | 23 -------------- 13 files changed, 92 insertions(+), 40 deletions(-) diff --git a/cmd/hubagent/main.go b/cmd/hubagent/main.go index 0848f31a2..2257f3664 100644 --- a/cmd/hubagent/main.go +++ b/cmd/hubagent/main.go @@ -85,9 +85,16 @@ func init() { // +kubebuilder:scaffold:scheme klog.InitFlags(nil) - metrics.Registry.MustRegister(fleetmetrics.JoinResultMetrics, fleetmetrics.LeaveResultMetrics, - fleetmetrics.PlacementApplyFailedCount, fleetmetrics.PlacementApplySucceedCount, - fleetmetrics.SchedulingCycleDurationMilliseconds, fleetmetrics.SchedulerActiveWorkers) + metrics.Registry.MustRegister( + fleetmetrics.JoinResultMetrics, + fleetmetrics.LeaveResultMetrics, + fleetmetrics.PlacementApplyFailedCount, + fleetmetrics.PlacementApplySucceedCount, + fleetmetrics.SchedulingCycleDurationMilliseconds, + fleetmetrics.SchedulerActiveWorkers, + fleetmetrics.FleetPlacementStatusLastTimeStampSeconds, + fleetmetrics.FleetEvictionStatus, + ) } func main() { diff --git a/pkg/controllers/clusterresourceplacement/controller.go b/pkg/controllers/clusterresourceplacement/controller.go index eeabce9c4..63729682c 100644 --- a/pkg/controllers/clusterresourceplacement/controller.go +++ b/pkg/controllers/clusterresourceplacement/controller.go @@ -37,11 +37,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/metrics" "github.com/kubefleet-dev/kubefleet/pkg/scheduler/queue" "github.com/kubefleet-dev/kubefleet/pkg/utils/annotations" "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" 
"github.com/kubefleet-dev/kubefleet/pkg/utils/controller" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller/metrics" "github.com/kubefleet-dev/kubefleet/pkg/utils/defaulter" "github.com/kubefleet-dev/kubefleet/pkg/utils/labels" "github.com/kubefleet-dev/kubefleet/pkg/utils/resource" @@ -112,7 +112,7 @@ func (r *Reconciler) handleDelete(ctx context.Context, placementObj fleetv1beta1 return ctrl.Result{}, err } // change the metrics to add nameplace of namespace - metrics.FleetPlacementStatusLastTimeStampSeconds.DeletePartialMatch(prometheus.Labels{"name": placementObj.GetName()}) + metrics.FleetPlacementStatusLastTimeStampSeconds.DeletePartialMatch(prometheus.Labels{"namespace": placementObj.GetNamespace(), "name": placementObj.GetName()}) controllerutil.RemoveFinalizer(placementObj, fleetv1beta1.PlacementCleanupFinalizer) if err := r.Client.Update(ctx, placementObj); err != nil { klog.ErrorS(err, "Failed to remove placement finalizer", "placement", placementKObj) @@ -1238,7 +1238,7 @@ func emitPlacementStatusMetric(placementObj fleetv1beta1.PlacementObj) { status = string(cond.Status) reason = cond.Reason } - metrics.FleetPlacementStatusLastTimeStampSeconds.WithLabelValues(placementObj.GetName(), strconv.FormatInt(placementObj.GetGeneration(), 10), scheduledConditionType, status, reason).SetToCurrentTime() + metrics.FleetPlacementStatusLastTimeStampSeconds.WithLabelValues(placementObj.GetNamespace(), placementObj.GetName(), strconv.FormatInt(placementObj.GetGeneration(), 10), scheduledConditionType, status, reason).SetToCurrentTime() return } @@ -1252,12 +1252,12 @@ func emitPlacementStatusMetric(placementObj fleetv1beta1.PlacementObj) { status = string(cond.Status) reason = cond.Reason } - metrics.FleetPlacementStatusLastTimeStampSeconds.WithLabelValues(placementObj.GetName(), strconv.FormatInt(placementObj.GetGeneration(), 10), conditionType, status, reason).SetToCurrentTime() + metrics.FleetPlacementStatusLastTimeStampSeconds.WithLabelValues(placementObj.GetNamespace(), placementObj.GetName(), strconv.FormatInt(placementObj.GetGeneration(), 10), conditionType, status, reason).SetToCurrentTime() return } } // Emit the "Completed" condition metric to indicate that the placement has completed. // This condition is used solely for metric reporting purposes. 
- metrics.FleetPlacementStatusLastTimeStampSeconds.WithLabelValues(placementObj.GetName(), strconv.FormatInt(placementObj.GetGeneration(), 10), "Completed", string(metav1.ConditionTrue), "Completed").SetToCurrentTime() + metrics.FleetPlacementStatusLastTimeStampSeconds.WithLabelValues(placementObj.GetNamespace(), placementObj.GetName(), strconv.FormatInt(placementObj.GetGeneration(), 10), "Completed", string(metav1.ConditionTrue), "Completed").SetToCurrentTime() } diff --git a/pkg/controllers/clusterresourceplacement/controller_integration_test.go b/pkg/controllers/clusterresourceplacement/controller_integration_test.go index 0365135a0..58993421d 100644 --- a/pkg/controllers/clusterresourceplacement/controller_integration_test.go +++ b/pkg/controllers/clusterresourceplacement/controller_integration_test.go @@ -35,9 +35,9 @@ import ( ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/metrics" "github.com/kubefleet-dev/kubefleet/pkg/utils" "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller/metrics" "github.com/kubefleet-dev/kubefleet/pkg/utils/resource" metricsUtils "github.com/kubefleet-dev/kubefleet/test/utils/metrics" ) @@ -475,6 +475,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -487,6 +488,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType))}, @@ -531,6 +533,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -543,6 +546,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -639,6 +643,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: 
ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -651,6 +656,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType))}, @@ -754,6 +760,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { By("Ensure placement status metric was emitted") wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType))}, @@ -847,6 +854,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -859,6 +867,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType))}, @@ -962,6 +971,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { By("Ensure placement status metric was emitted for 1st generation") wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType))}, @@ -1024,6 +1034,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { // In this case we have 2 metrics for Scheduled condition type as crp generation goes from 1 to 2. 
wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -1145,6 +1156,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { // In this case we have 2 metrics for different condition types as crp updates and its generation goes from 1 to 2. wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType))}, @@ -1266,6 +1278,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -1278,6 +1291,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType))}, @@ -1290,6 +1304,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType))}, @@ -1333,6 +1348,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { By("Ensure placement status applied metric was emitted") wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(crp.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementAppliedConditionType))}, @@ -1375,6 +1391,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { By("Ensure placement status completed metric was emitted") wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: 
ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To("Completed")}, @@ -1560,6 +1577,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -1572,6 +1590,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType))}, @@ -1584,6 +1603,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType))}, @@ -1627,6 +1647,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { By("Ensure placement status metric for reportDiff was emitted") wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementDiffReportedConditionType))}, @@ -1743,6 +1764,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -1755,6 +1777,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType))}, @@ -1767,6 +1790,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: 
ptr.To(string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType))}, @@ -1884,6 +1908,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To("Completed")}, @@ -1954,6 +1979,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -2036,6 +2062,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantMetrics := []*prometheusclientmodel.Metric{ { Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementScheduledConditionType))}, @@ -2111,6 +2138,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { By("Ensure placement status metric for rollout external was emitted") wantMetrics = append(wantMetrics, &prometheusclientmodel.Metric{ Label: []*prometheusclientmodel.LabelPair{ + {Name: ptr.To("namespace"), Value: ptr.To(gotCRP.Namespace)}, {Name: ptr.To("name"), Value: ptr.To(gotCRP.Name)}, {Name: ptr.To("generation"), Value: ptr.To(strconv.FormatInt(gotCRP.Generation, 10))}, {Name: ptr.To("conditionType"), Value: ptr.To(string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType))}, diff --git a/pkg/controllers/clusterresourceplacement/suite_test.go b/pkg/controllers/clusterresourceplacement/suite_test.go index 13a6c6ec7..50c2af260 100644 --- a/pkg/controllers/clusterresourceplacement/suite_test.go +++ b/pkg/controllers/clusterresourceplacement/suite_test.go @@ -35,6 +35,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/manager" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" @@ -42,6 +43,7 @@ import ( "github.com/kubefleet-dev/kubefleet/pkg/controllers/clusterresourcebindingwatcher" "github.com/kubefleet-dev/kubefleet/pkg/controllers/clusterresourceplacementwatcher" "github.com/kubefleet-dev/kubefleet/pkg/controllers/clusterschedulingpolicysnapshot" + "github.com/kubefleet-dev/kubefleet/pkg/metrics" "github.com/kubefleet-dev/kubefleet/pkg/utils" "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" "github.com/kubefleet-dev/kubefleet/pkg/utils/informer" @@ -150,6 +152,9 @@ var _ = BeforeSuite(func() { }).SetupWithManagerForClusterResourceBinding(mgr) Expect(err).Should(Succeed(), "failed to create clusterResourceBinding watcher") + // Register metrics. 
+ ctrlmetrics.Registry.MustRegister(metrics.FleetPlacementStatusLastTimeStampSeconds) + ctx, cancel = context.WithCancel(context.TODO()) // Run the controller manager go func() { diff --git a/pkg/controllers/clusterresourceplacementeviction/controller.go b/pkg/controllers/clusterresourceplacementeviction/controller.go index 8cc629a4a..15f351894 100644 --- a/pkg/controllers/clusterresourceplacementeviction/controller.go +++ b/pkg/controllers/clusterresourceplacementeviction/controller.go @@ -35,10 +35,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/metrics" bindingutils "github.com/kubefleet-dev/kubefleet/pkg/utils/binding" "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller/metrics" "github.com/kubefleet-dev/kubefleet/pkg/utils/defaulter" evictionutils "github.com/kubefleet-dev/kubefleet/pkg/utils/eviction" ) diff --git a/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go b/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go index 5b1bae995..b127503d8 100644 --- a/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go +++ b/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go @@ -33,8 +33,8 @@ import ( ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/metrics" "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller/metrics" testutilseviction "github.com/kubefleet-dev/kubefleet/test/utils/eviction" ) diff --git a/pkg/controllers/clusterresourceplacementeviction/controller_test.go b/pkg/controllers/clusterresourceplacementeviction/controller_test.go index cb88e505b..5bbb7a203 100644 --- a/pkg/controllers/clusterresourceplacementeviction/controller_test.go +++ b/pkg/controllers/clusterresourceplacementeviction/controller_test.go @@ -38,8 +38,8 @@ import ( ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/metrics" "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller/metrics" "github.com/kubefleet-dev/kubefleet/pkg/utils/defaulter" ) diff --git a/pkg/controllers/clusterresourceplacementeviction/suite_test.go b/pkg/controllers/clusterresourceplacementeviction/suite_test.go index 2ce4cc241..5724d8de3 100644 --- a/pkg/controllers/clusterresourceplacementeviction/suite_test.go +++ b/pkg/controllers/clusterresourceplacementeviction/suite_test.go @@ -19,6 +19,7 @@ package clusterresourceplacementeviction import ( "context" "flag" + "os" "path/filepath" "testing" @@ -33,9 +34,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/metrics" ) var ( @@ -47,6 +50,13 @@ var ( cancel context.CancelFunc ) +func TestMain(m *testing.M) { + // 
Register here as the metric is both tested in ginkgo tests and go unit tests. + ctrlmetrics.Registry.MustRegister(metrics.FleetEvictionStatus) + + os.Exit(m.Run()) +} + func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) diff --git a/pkg/controllers/updaterun/controller.go b/pkg/controllers/updaterun/controller.go index 9858b300c..0d427ab79 100644 --- a/pkg/controllers/updaterun/controller.go +++ b/pkg/controllers/updaterun/controller.go @@ -41,10 +41,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/metrics" "github.com/kubefleet-dev/kubefleet/pkg/utils" "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller/metrics" "github.com/kubefleet-dev/kubefleet/pkg/utils/informer" ) diff --git a/pkg/controllers/updaterun/controller_integration_test.go b/pkg/controllers/updaterun/controller_integration_test.go index 35da5d6e4..7864ed993 100644 --- a/pkg/controllers/updaterun/controller_integration_test.go +++ b/pkg/controllers/updaterun/controller_integration_test.go @@ -43,9 +43,9 @@ import ( clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/metrics" "github.com/kubefleet-dev/kubefleet/pkg/utils" "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller/metrics" metricsutils "github.com/kubefleet-dev/kubefleet/test/utils/metrics" ) diff --git a/pkg/controllers/updaterun/suite_test.go b/pkg/controllers/updaterun/suite_test.go index 27badf4e6..9ff93f02f 100644 --- a/pkg/controllers/updaterun/suite_test.go +++ b/pkg/controllers/updaterun/suite_test.go @@ -35,11 +35,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" placementv1alpha1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1alpha1" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/metrics" "github.com/kubefleet-dev/kubefleet/pkg/utils" "github.com/kubefleet-dev/kubefleet/pkg/utils/informer" ) @@ -96,11 +98,11 @@ var _ = BeforeSuite(func() { }) Expect(err).Should(Succeed()) - // make sure the k8s client is same as the controller client, or we can have cache delay + // Make sure the k8s client is same as the controller client, or we can have cache delay. By("set k8s client same as the controller manager") k8sClient = mgr.GetClient() - // setup informer manager for the reconciler + // Setup informer manager for the reconciler. dynamicClient, err := dynamic.NewForConfig(cfg) Expect(err).Should(Succeed()) dynamicInformerManager := informer.NewInformerManager(dynamicClient, 0, ctx.Done()) @@ -110,13 +112,16 @@ var _ = BeforeSuite(func() { IsClusterScoped: true, }, nil) - // setup our main reconciler + // Setup our main reconciler. err = (&Reconciler{ Client: k8sClient, InformerManager: dynamicInformerManager, }).SetupWithManager(mgr) Expect(err).Should(Succeed()) + // Register metrics. 
+ ctrlmetrics.Registry.MustRegister(metrics.FleetUpdateRunStatusLastTimestampSeconds) + go func() { defer GinkgoRecover() err = mgr.Start(ctx) diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index b92c9c01b..8e4756781 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -43,6 +43,26 @@ var ( Name: "placement_apply_succeed_counter", Help: "Number of successfully applied cluster resource placement", }, []string{"name"}) + + // FleetPlacementStatusLastTimeStampSeconds is a prometheus metric which keeps track of the last placement status. + FleetPlacementStatusLastTimeStampSeconds = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "fleet_workload_placement_status_last_timestamp_seconds", + Help: "Last update timestamp of placement status in seconds", + }, []string{"namespace", "name", "generation", "conditionType", "status", "reason"}) + + // FleetEvictionStatus is prometheus metrics which holds the + // status of eviction completion. + FleetEvictionStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "fleet_workload_eviction_complete", + Help: "Last update timestamp of eviction complete status in seconds", + }, []string{"name", "isCompleted", "isValid"}) + + // FleetUpdateRunStatusLastTimestampSeconds is a prometheus metric which holds the + // last update timestamp of update run status in seconds. + FleetUpdateRunStatusLastTimestampSeconds = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "fleet_workload_update_run_status_last_timestamp_seconds", + Help: "Last update timestamp of update run status in seconds", + }, []string{"name", "generation", "condition", "status", "reason"}) ) var ( diff --git a/pkg/utils/controller/metrics/metrics.go b/pkg/utils/controller/metrics/metrics.go index 853df00ab..d68ab3f5d 100644 --- a/pkg/utils/controller/metrics/metrics.go +++ b/pkg/utils/controller/metrics/metrics.go @@ -60,26 +60,6 @@ var ( Name: "fleet_workload_active_workers", Help: "Number of currently used workers per controller", }, []string{"controller"}) - - // FleetPlacementStatusLastTimeStampSeconds is a prometheus metric which keeps track of the last placement status. - FleetPlacementStatusLastTimeStampSeconds = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "fleet_workload_placement_status_last_timestamp_seconds", - Help: "Timestamp in seconds of the last current placement status condition of crp.", - }, []string{"name", "generation", "conditionType", "status", "reason"}) - - // FleetEvictionStatus is prometheus metrics which holds the - // status of eviction completion. - FleetEvictionStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "fleet_workload_eviction_complete", - Help: "Eviction complete status ", - }, []string{"name", "isCompleted", "isValid"}) - - // FleetUpdateRunStatusLastTimestampSeconds is a prometheus metric which holds the - // last update timestamp of update run status in seconds. 
- FleetUpdateRunStatusLastTimestampSeconds = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "fleet_workload_update_run_status_last_timestamp_seconds", - Help: "Last update timestamp of update run status in seconds", - }, []string{"name", "generation", "condition", "status", "reason"}) ) func init() { @@ -89,8 +69,5 @@ func init() { FleetReconcileTime, FleetWorkerCount, FleetActiveWorkers, - FleetPlacementStatusLastTimeStampSeconds, - FleetEvictionStatus, - FleetUpdateRunStatusLastTimestampSeconds, ) } From 2aaeb6a9167043ab33f3b1e8f1424daa76fd4440 Mon Sep 17 00:00:00 2001 From: Wantong Date: Tue, 19 Aug 2025 00:08:24 -0700 Subject: [PATCH 18/38] test: update e2e utils for RP and add pickAll tests for RP (#200) Signed-off-by: Wantong Jiang --- .../v1beta1/zz_generated.deepcopy.go | 132 +++---- test/e2e/actuals_test.go | 364 +++++++++++++----- test/e2e/enveloped_object_placement_test.go | 26 +- test/e2e/join_and_leave_test.go | 6 +- test/e2e/placement_apply_strategy_test.go | 68 ++-- test/e2e/placement_drift_diff_test.go | 88 ++--- test/e2e/placement_negative_cases_test.go | 8 +- test/e2e/placement_ro_test.go | 24 +- .../e2e/placement_selecting_resources_test.go | 40 +- test/e2e/placement_with_custom_config_test.go | 4 +- test/e2e/resource_placement_pickall_test.go | 103 +++++ test/e2e/resources_test.go | 26 +- test/e2e/rollout_test.go | 16 +- test/e2e/setup_test.go | 12 +- test/e2e/updaterun_test.go | 2 +- test/e2e/utils_test.go | 74 +++- 16 files changed, 662 insertions(+), 331 deletions(-) create mode 100644 test/e2e/resource_placement_pickall_test.go diff --git a/apis/placement/v1beta1/zz_generated.deepcopy.go b/apis/placement/v1beta1/zz_generated.deepcopy.go index 9758c3799..45163b157 100644 --- a/apis/placement/v1beta1/zz_generated.deepcopy.go +++ b/apis/placement/v1beta1/zz_generated.deepcopy.go @@ -887,26 +887,6 @@ func (in *ClusterResourcePlacementStatusList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceSelectorTerm) DeepCopyInto(out *ResourceSelectorTerm) { - *out = *in - if in.LabelSelector != nil { - in, out := &in.LabelSelector, &out.LabelSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceSelector. -func (in *ResourceSelectorTerm) DeepCopy() *ResourceSelectorTerm { - if in == nil { - return nil - } - out := new(ResourceSelectorTerm) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterResourceSnapshot) DeepCopyInto(out *ClusterResourceSnapshot) { *out = *in @@ -1542,6 +1522,59 @@ func (in *PatchDetail) DeepCopy() *PatchDetail { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PerClusterPlacementStatus) DeepCopyInto(out *PerClusterPlacementStatus) { + *out = *in + if in.ApplicableResourceOverrides != nil { + in, out := &in.ApplicableResourceOverrides, &out.ApplicableResourceOverrides + *out = make([]NamespacedName, len(*in)) + copy(*out, *in) + } + if in.ApplicableClusterResourceOverrides != nil { + in, out := &in.ApplicableClusterResourceOverrides, &out.ApplicableClusterResourceOverrides + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.FailedPlacements != nil { + in, out := &in.FailedPlacements, &out.FailedPlacements + *out = make([]FailedResourcePlacement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DriftedPlacements != nil { + in, out := &in.DriftedPlacements, &out.DriftedPlacements + *out = make([]DriftedResourcePlacement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DiffedPlacements != nil { + in, out := &in.DiffedPlacements, &out.DiffedPlacements + *out = make([]DiffedResourcePlacement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerClusterPlacementStatus. +func (in *PerClusterPlacementStatus) DeepCopy() *PerClusterPlacementStatus { + if in == nil { + return nil + } + out := new(PerClusterPlacementStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PlacementDisruptionBudgetSpec) DeepCopyInto(out *PlacementDisruptionBudgetSpec) { *out = *in @@ -2263,69 +2296,36 @@ func (in *ResourcePlacementList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PerClusterPlacementStatus) DeepCopyInto(out *PerClusterPlacementStatus) { +func (in *ResourceSelector) DeepCopyInto(out *ResourceSelector) { *out = *in - if in.ApplicableResourceOverrides != nil { - in, out := &in.ApplicableResourceOverrides, &out.ApplicableResourceOverrides - *out = make([]NamespacedName, len(*in)) - copy(*out, *in) - } - if in.ApplicableClusterResourceOverrides != nil { - in, out := &in.ApplicableClusterResourceOverrides, &out.ApplicableClusterResourceOverrides - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.FailedPlacements != nil { - in, out := &in.FailedPlacements, &out.FailedPlacements - *out = make([]FailedResourcePlacement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DriftedPlacements != nil { - in, out := &in.DriftedPlacements, &out.DriftedPlacements - *out = make([]DriftedResourcePlacement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DiffedPlacements != nil { - in, out := &in.DiffedPlacements, &out.DiffedPlacements - *out = make([]DiffedResourcePlacement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePlacementStatus. 
-func (in *PerClusterPlacementStatus) DeepCopy() *PerClusterPlacementStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSelector. +func (in *ResourceSelector) DeepCopy() *ResourceSelector { if in == nil { return nil } - out := new(PerClusterPlacementStatus) + out := new(ResourceSelector) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceSelector) DeepCopyInto(out *ResourceSelector) { +func (in *ResourceSelectorTerm) DeepCopyInto(out *ResourceSelectorTerm) { *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSelector. -func (in *ResourceSelector) DeepCopy() *ResourceSelector { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSelectorTerm. +func (in *ResourceSelectorTerm) DeepCopy() *ResourceSelectorTerm { if in == nil { return nil } - out := new(ResourceSelector) + out := new(ResourceSelectorTerm) in.DeepCopyInto(out) return out } diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index aed42282d..b5ad36c8a 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -185,6 +185,127 @@ func workNamespacePlacedOnClusterActual(cluster *framework.Cluster) func() error } } +func placementRolloutCompletedConditions(placementKey types.NamespacedName, generation int64, hasOverride bool) []metav1.Condition { + if placementKey.Namespace == "" { + return crpRolloutCompletedConditions(generation, hasOverride) + } else { + return rpRolloutCompletedConditions(generation, hasOverride) + } +} + +func placementSchedulePartiallyFailedConditions(placementKey types.NamespacedName, generation int64) []metav1.Condition { + if placementKey.Namespace == "" { + return crpSchedulePartiallyFailedConditions(generation) + } else { + return rpSchedulePartiallyFailedConditions(generation) + } +} + +func placementScheduleFailedConditions(placementKey types.NamespacedName, generation int64) []metav1.Condition { + if placementKey.Namespace == "" { + return crpScheduleFailedConditions(generation) + } else { + return rpScheduleFailedConditions(generation) + } +} + +func rpRolloutCompletedConditions(generation int64, hasOverride bool) []metav1.Condition { + overrideConditionReason := condition.OverrideNotSpecifiedReason + if hasOverride { + overrideConditionReason = condition.OverriddenSucceededReason + } + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: overrideConditionReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementWorkSynchronizedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementAppliedConditionType), + Status: 
metav1.ConditionTrue, + Reason: condition.ApplySucceededReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementAvailableConditionType), + Status: metav1.ConditionTrue, + Reason: condition.AvailableReason, + ObservedGeneration: generation, + }, + } +} + +func rpSchedulePartiallyFailedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionFalse, + ObservedGeneration: generation, + Reason: scheduler.NotFullyScheduledReason, + }, + { + Type: string(placementv1beta1.ResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementWorkSynchronizedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementAppliedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.ApplySucceededReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementAvailableConditionType), + Status: metav1.ConditionTrue, + Reason: condition.AvailableReason, + ObservedGeneration: generation, + }, + } +} + +func rpScheduleFailedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionFalse, + ObservedGeneration: generation, + Reason: scheduler.NotFullyScheduledReason, + }, + } +} + func crpScheduleFailedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { @@ -474,7 +595,63 @@ func crpRolloutCompletedConditions(generation int64, hasOverride bool) []metav1. 
} } -func resourcePlacementSyncPendingConditions(generation int64) []metav1.Condition { +func crpWorkSynchronizedFailedConditions(generation int64, hasOverrides bool) []metav1.Condition { + overridenCondReason := condition.OverrideNotSpecifiedReason + if hasOverrides { + overridenCondReason = condition.OverriddenSucceededReason + } + return []metav1.Condition{ + { + Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: overridenCondReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Status: metav1.ConditionFalse, + Reason: condition.WorkNotSynchronizedYetReason, + ObservedGeneration: generation, + }, + } +} + +func crpOverrideFailedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementOverriddenConditionType), + Status: metav1.ConditionFalse, + Reason: condition.OverriddenFailedReason, + ObservedGeneration: generation, + }, + } +} + +func perClusterSyncPendingConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { Type: string(placementv1beta1.PerClusterScheduledConditionType), @@ -491,7 +668,7 @@ func resourcePlacementSyncPendingConditions(generation int64) []metav1.Condition } } -func resourcePlacementRolloutUnknownConditions(generation int64) []metav1.Condition { +func perClusterRolloutUnknownConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { Type: string(placementv1beta1.PerClusterScheduledConditionType), @@ -508,7 +685,7 @@ func resourcePlacementRolloutUnknownConditions(generation int64) []metav1.Condit } } -func resourcePlacementApplyFailedConditions(generation int64) []metav1.Condition { +func perClusterApplyFailedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { Type: string(placementv1beta1.PerClusterScheduledConditionType), @@ -543,7 +720,7 @@ func resourcePlacementApplyFailedConditions(generation int64) []metav1.Condition } } -func resourcePlacementDiffReportedConditions(generation int64) []metav1.Condition { +func perClusterDiffReportedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { Type: string(placementv1beta1.PerClusterScheduledConditionType), @@ -578,7 +755,7 @@ func resourcePlacementDiffReportedConditions(generation int64) []metav1.Conditio } } -func resourcePlacementDiffReportingFailedConditions(generation int64) []metav1.Condition { +func perClusterDiffReportingFailedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { Type: string(placementv1beta1.PerClusterScheduledConditionType), @@ -613,7 +790,7 @@ func 
resourcePlacementDiffReportingFailedConditions(generation int64) []metav1.C } } -func resourcePlacementRolloutCompletedConditions(generation int64, resourceIsTrackable bool, hasOverride bool) []metav1.Condition { +func perClusterRolloutCompletedConditions(generation int64, resourceIsTrackable bool, hasOverride bool) []metav1.Condition { availableConditionReason := condition.WorkNotAvailabilityTrackableReason if resourceIsTrackable { availableConditionReason = condition.AllWorkAvailableReason @@ -663,7 +840,7 @@ func resourcePlacementRolloutCompletedConditions(generation int64, resourceIsTra } } -func resourcePlacementScheduleFailedConditions(generation int64) []metav1.Condition { +func perClusterScheduleFailedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { Type: string(placementv1beta1.PerClusterScheduledConditionType), @@ -674,30 +851,7 @@ func resourcePlacementScheduleFailedConditions(generation int64) []metav1.Condit } } -func crpOverrideFailedConditions(generation int64) []metav1.Condition { - return []metav1.Condition{ - { - Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), - Status: metav1.ConditionTrue, - Reason: scheduler.FullyScheduledReason, - ObservedGeneration: generation, - }, - { - Type: string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType), - Status: metav1.ConditionTrue, - Reason: condition.RolloutStartedReason, - ObservedGeneration: generation, - }, - { - Type: string(placementv1beta1.ClusterResourcePlacementOverriddenConditionType), - Status: metav1.ConditionFalse, - Reason: condition.OverriddenFailedReason, - ObservedGeneration: generation, - }, - } -} - -func resourcePlacementOverrideFailedConditions(generation int64) []metav1.Condition { +func perClusterOverrideFailedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { Type: string(placementv1beta1.PerClusterScheduledConditionType), @@ -720,7 +874,7 @@ func resourcePlacementOverrideFailedConditions(generation int64) []metav1.Condit } } -func resourcePlacementWorkSynchronizedFailedConditions(generation int64, hasOverrides bool) []metav1.Condition { +func perClusterWorkSynchronizedFailedConditions(generation int64, hasOverrides bool) []metav1.Condition { overridenCondReason := condition.OverrideNotSpecifiedReason if hasOverrides { overridenCondReason = condition.OverriddenSucceededReason @@ -753,49 +907,40 @@ func resourcePlacementWorkSynchronizedFailedConditions(generation int64, hasOver } } -func crpWorkSynchronizedFailedConditions(generation int64, hasOverrides bool) []metav1.Condition { - overridenCondReason := condition.OverrideNotSpecifiedReason - if hasOverrides { - overridenCondReason = condition.OverriddenSucceededReason - } - return []metav1.Condition{ - { - Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), - Status: metav1.ConditionTrue, - Reason: scheduler.FullyScheduledReason, - ObservedGeneration: generation, - }, - { - Type: string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType), - Status: metav1.ConditionTrue, - Reason: condition.RolloutStartedReason, - ObservedGeneration: generation, - }, +func workResourceIdentifiers() []placementv1beta1.ResourceIdentifier { + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + + return []placementv1beta1.ResourceIdentifier{ { - Type: 
string(placementv1beta1.ClusterResourcePlacementOverriddenConditionType), - Status: metav1.ConditionTrue, - Reason: overridenCondReason, - ObservedGeneration: generation, + Kind: "Namespace", + Name: workNamespaceName, + Version: "v1", }, { - Type: string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), - Status: metav1.ConditionFalse, - Reason: condition.WorkNotSynchronizedYetReason, - ObservedGeneration: generation, + Kind: "ConfigMap", + Name: appConfigMapName, + Version: "v1", + Namespace: workNamespaceName, }, } } -func workResourceIdentifiers() []placementv1beta1.ResourceIdentifier { +func workNamespaceIdentifiers() []placementv1beta1.ResourceIdentifier { workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) - appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) - return []placementv1beta1.ResourceIdentifier{ { Kind: "Namespace", Name: workNamespaceName, Version: "v1", }, + } +} + +func appConfigMapIdentifiers() []placementv1beta1.ResourceIdentifier { + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + return []placementv1beta1.ResourceIdentifier{ { Kind: "ConfigMap", Name: appConfigMapName, @@ -822,7 +967,7 @@ func crpStatusWithOverrideUpdatedActual( for _, name := range wantSelectedClusters { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, hasOverride), + Conditions: perClusterRolloutCompletedConditions(crp.Generation, true, hasOverride), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, ObservedResourceIndex: wantObservedResourceIndex, @@ -834,7 +979,7 @@ func crpStatusWithOverrideUpdatedActual( SelectedResources: wantSelectedResourceIdentifiers, ObservedResourceIndex: wantObservedResourceIndex, } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -842,8 +987,13 @@ func crpStatusWithOverrideUpdatedActual( } func crpStatusUpdatedActual(wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, wantSelectedClusters, wantUnselectedClusters []string, wantObservedResourceIndex string) func() error { - crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) - return customizedCRPStatusUpdatedActual(crpName, wantSelectedResourceIdentifiers, wantSelectedClusters, wantUnselectedClusters, wantObservedResourceIndex, true) + crpKey := types.NamespacedName{Name: fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())} + return customizedPlacementStatusUpdatedActual(crpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, wantUnselectedClusters, wantObservedResourceIndex, true) +} + +func rpStatusUpdatedActual(wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, wantSelectedClusters, wantUnselectedClusters []string, wantObservedResourceIndex string) func() error { + rpKey := types.NamespacedName{Name: fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()), Namespace: appNamespace().Name} + return customizedPlacementStatusUpdatedActual(rpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, wantUnselectedClusters, wantObservedResourceIndex, 
true) } func crpStatusWithOverrideUpdatedFailedActual( @@ -864,7 +1014,7 @@ func crpStatusWithOverrideUpdatedFailedActual( for _, name := range wantSelectedClusters { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, - Conditions: resourcePlacementOverrideFailedConditions(crp.Generation), + Conditions: perClusterOverrideFailedConditions(crp.Generation), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, ObservedResourceIndex: wantObservedResourceIndex, @@ -877,7 +1027,7 @@ func crpStatusWithOverrideUpdatedFailedActual( SelectedResources: wantSelectedResourceIdentifiers, ObservedResourceIndex: wantObservedResourceIndex, } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -903,7 +1053,7 @@ func crpStatusWithWorkSynchronizedUpdatedFailedActual( for _, name := range wantSelectedClusters { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, - Conditions: resourcePlacementWorkSynchronizedFailedConditions(crp.Generation, hasOverrides), + Conditions: perClusterWorkSynchronizedFailedConditions(crp.Generation, hasOverrides), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, ObservedResourceIndex: wantObservedResourceIndex, @@ -916,7 +1066,7 @@ func crpStatusWithWorkSynchronizedUpdatedFailedActual( SelectedResources: wantSelectedResourceIdentifiers, ObservedResourceIndex: wantObservedResourceIndex, } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -952,7 +1102,7 @@ func crpStatusWithExternalStrategyActual( // No observed resource index for this cluster, assume rollout is still pending. 
wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, - Conditions: resourcePlacementRolloutUnknownConditions(crp.Generation), + Conditions: perClusterRolloutUnknownConditions(crp.Generation), ObservedResourceIndex: wantObservedResourceIndexPerCluster[i], }) } else { @@ -965,7 +1115,7 @@ func crpStatusWithExternalStrategyActual( if reportDiff { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, - Conditions: resourcePlacementDiffReportedConditions(crp.Generation), + Conditions: perClusterDiffReportedConditions(crp.Generation), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, ObservedResourceIndex: wantObservedResourceIndexPerCluster[i], @@ -1002,7 +1152,7 @@ func crpStatusWithExternalStrategyActual( } else { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, hasOverrides), + Conditions: perClusterRolloutCompletedConditions(crp.Generation, true, hasOverrides), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, ObservedResourceIndex: wantObservedResourceIndexPerCluster[i], @@ -1026,22 +1176,24 @@ func crpStatusWithExternalStrategyActual( wantStatus.Conditions = crpRolloutPendingDueToExternalStrategyConditions(crp.Generation) } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil } } -func customizedCRPStatusUpdatedActual(crpName string, +func customizedPlacementStatusUpdatedActual( + placementKey types.NamespacedName, wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, wantSelectedClusters, wantUnselectedClusters []string, wantObservedResourceIndex string, - resourceIsTrackable bool) func() error { + resourceIsTrackable bool, +) func() error { return func() error { - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { - return err + placement, err := retrievePlacement(placementKey) + if err != nil { + return fmt.Errorf("failed to get placement %s: %w", placementKey, err) } wantPlacementStatus := []placementv1beta1.PerClusterPlacementStatus{} @@ -1049,51 +1201,51 @@ func customizedCRPStatusUpdatedActual(crpName string, wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, ObservedResourceIndex: wantObservedResourceIndex, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, resourceIsTrackable, false), + Conditions: perClusterRolloutCompletedConditions(placement.GetGeneration(), resourceIsTrackable, false), }) } for i := 0; i < len(wantUnselectedClusters); i++ { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ - Conditions: resourcePlacementScheduleFailedConditions(crp.Generation), + Conditions: perClusterScheduleFailedConditions(placement.GetGeneration()), }) } - var wantCRPConditions []metav1.Condition + var wantPlacementConditions []metav1.Condition if len(wantSelectedClusters) > 0 { - wantCRPConditions = crpRolloutCompletedConditions(crp.Generation, false) + 
wantPlacementConditions = placementRolloutCompletedConditions(placementKey, placement.GetGeneration(), false) } else { - wantCRPConditions = []metav1.Condition{ + wantPlacementConditions = []metav1.Condition{ // we don't set the remaining resource conditions. { Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), Status: metav1.ConditionTrue, Reason: scheduler.FullyScheduledReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, } } if len(wantUnselectedClusters) > 0 { if len(wantSelectedClusters) > 0 { - wantCRPConditions = crpSchedulePartiallyFailedConditions(crp.Generation) + wantPlacementConditions = placementSchedulePartiallyFailedConditions(placementKey, placement.GetGeneration()) } else { // we don't set the remaining resource conditions if there is no clusters to select - wantCRPConditions = crpScheduleFailedConditions(crp.Generation) + wantPlacementConditions = placementScheduleFailedConditions(placementKey, placement.GetGeneration()) } } - // Note that the CRP controller will only keep decisions regarding unselected clusters for a CRP if: + // Note that the placement controller will only keep decisions regarding unselected clusters for a placement if: // - // * The CRP is of the PickN placement type and the required N count cannot be fulfilled; or - // * The CRP is of the PickFixed placement type and the list of target clusters specified cannot be fulfilled. - wantStatus := placementv1beta1.PlacementStatus{ - Conditions: wantCRPConditions, + // * The placement is of the PickN placement type and the required N count cannot be fulfilled; or + // * The placement is of the PickFixed placement type and the list of target clusters specified cannot be fulfilled. + wantStatus := &placementv1beta1.PlacementStatus{ + Conditions: wantPlacementConditions, PerClusterPlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, ObservedResourceIndex: wantObservedResourceIndex, } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { - return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + if diff := cmp.Diff(placement.GetPlacementStatus(), wantStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("Placement status diff (-got, +want): %s", diff) } return nil } @@ -1254,10 +1406,20 @@ func workNamespaceRemovedFromClusterActual(cluster *framework.Cluster) func() er } } -func allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName string) func() error { +func namespacedResourcesRemovedFromClusterActual(cluster *framework.Cluster) func() error { + cm := appConfigMap() return func() error { - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + if err := cluster.KubeClient.Get(ctx, types.NamespacedName{Name: cm.Name, Namespace: cm.Namespace}, &cm); !errors.IsNotFound(err) { + return fmt.Errorf("ConfigMap %s/%s still exists on cluster %s or get encountered an error: %w", cm.Namespace, cm.Name, cluster.ClusterName, err) + } + return nil + } +} + +func allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(placementKey types.NamespacedName) func() error { + return func() error { + placement, err := retrievePlacement(placementKey) + if err != nil { if errors.IsNotFound(err) { return nil } @@ -1265,19 +1427,19 @@ func allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName str } wantFinalizers := 
[]string{customDeletionBlockerFinalizer} - finalizer := crp.Finalizers + finalizer := placement.GetFinalizers() if diff := cmp.Diff(finalizer, wantFinalizers); diff != "" { - return fmt.Errorf("CRP finalizers diff (-got, +want): %s", diff) + return fmt.Errorf("Placement finalizers diff (-got, +want): %s", diff) } return nil } } -func crpRemovedActual(crpName string) func() error { +func placementRemovedActual(placementKey types.NamespacedName) func() error { return func() error { - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, &placementv1beta1.ClusterResourcePlacement{}); !errors.IsNotFound(err) { - return fmt.Errorf("CRP still exists or an unexpected error occurred: %w", err) + if _, err := retrievePlacement(placementKey); !errors.IsNotFound(err) { + return fmt.Errorf("Placement %s still exists or an unexpected error occurred: %w", placementKey, err) } return nil diff --git a/test/e2e/enveloped_object_placement_test.go b/test/e2e/enveloped_object_placement_test.go index ff13cf054..dbc056753 100644 --- a/test/e2e/enveloped_object_placement_test.go +++ b/test/e2e/enveloped_object_placement_test.go @@ -131,7 +131,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { Namespace: workNamespaceName, }, } - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", true) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", true) Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -176,7 +176,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { }) It("should update CRP status as success again", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "2", true) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "2", true) Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -200,7 +200,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP") }) @@ -360,7 +360,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { return err } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -474,7 +474,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementWorkSynchronizedFailedConditions(crp.Generation, false), + Conditions: 
perClusterWorkSynchronizedFailedConditions(crp.Generation, false), }, }, SelectedResources: []placementv1beta1.ResourceIdentifier{ @@ -493,7 +493,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { }, ObservedResourceIndex: "0", } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -614,7 +614,7 @@ var _ = Describe("Process objects with generate name", Ordered, func() { }, }, }, - Conditions: resourcePlacementApplyFailedConditions(crp.Generation), + Conditions: perClusterApplyFailedConditions(crp.Generation), }, }, SelectedResources: []placementv1beta1.ResourceIdentifier{ @@ -633,7 +633,7 @@ var _ = Describe("Process objects with generate name", Ordered, func() { }, ObservedResourceIndex: "0", } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -700,11 +700,11 @@ func checkForRolloutStuckOnOneFailedClusterStatus(wantSelectedResources []placem return err } wantCRPConditions := crpRolloutStuckConditions(crp.Generation) - if diff := cmp.Diff(crp.Status.Conditions, wantCRPConditions, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status.Conditions, wantCRPConditions, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } // check the selected resources is still right - if diff := cmp.Diff(crp.Status.SelectedResources, wantSelectedResources, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status.SelectedResources, wantSelectedResources, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } // check the placement status has a failed placement @@ -720,19 +720,19 @@ func checkForRolloutStuckOnOneFailedClusterStatus(wantSelectedResources []placem for _, placementStatus := range crp.Status.PerClusterPlacementStatuses { // this is the cluster that got the new enveloped resource that was malformed if len(placementStatus.FailedPlacements) != 0 { - if diff := cmp.Diff(placementStatus.FailedPlacements, wantFailedResourcePlacement, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(placementStatus.FailedPlacements, wantFailedResourcePlacement, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } // check that the applied error message is correct if !strings.Contains(placementStatus.FailedPlacements[0].Condition.Message, "field is immutable") { return fmt.Errorf("CRP failed resource placement does not have unsupported scope message") } - if diff := cmp.Diff(placementStatus.Conditions, resourcePlacementApplyFailedConditions(crp.Generation), crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(placementStatus.Conditions, perClusterApplyFailedConditions(crp.Generation), placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } } else { // the cluster is stuck behind a rollout schedule since we now have 1 cluster that is not in applied ready status - if diff := cmp.Diff(placementStatus.Conditions, resourcePlacementSyncPendingConditions(crp.Generation), crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(placementStatus.Conditions, 
perClusterSyncPendingConditions(crp.Generation), placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } } diff --git a/test/e2e/join_and_leave_test.go b/test/e2e/join_and_leave_test.go index e31fe1cbd..b1922676c 100644 --- a/test/e2e/join_and_leave_test.go +++ b/test/e2e/join_and_leave_test.go @@ -117,7 +117,7 @@ var _ = Describe("Test member cluster join and leave flow", Label("joinleave"), }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", true) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", true) Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -214,7 +214,7 @@ var _ = Describe("Test member cluster join and leave flow", Label("joinleave"), It("Should update CRP status to not placing any resources since all clusters are left", func() { // resourceQuota is enveloped so it's not trackable yet - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, nil, nil, "0", false) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, nil, nil, "0", false) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -234,7 +234,7 @@ var _ = Describe("Test member cluster join and leave flow", Label("joinleave"), }) It("should update CRP status to applied to all clusters again automatically after rejoining", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", true) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", true) Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) }) diff --git a/test/e2e/placement_apply_strategy_test.go b/test/e2e/placement_apply_strategy_test.go index c87a263e3..8fa41b0d4 100644 --- a/test/e2e/placement_apply_strategy_test.go +++ b/test/e2e/placement_apply_strategy_test.go @@ -82,7 +82,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { AfterAll(func() { By(fmt.Sprintf("deleting placement %s", crpName)) - cleanupCRP(crpName) + cleanupPlacement(types.NamespacedName{Name: crpName}) By("deleting created work resources on member cluster") cleanWorkResourcesOnCluster(allMemberClusters[0]) @@ -116,7 +116,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) @@ -141,7 +141,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { AfterAll(func() { By(fmt.Sprintf("deleting placement %s", crpName)) - 
cleanupCRP(crpName) + cleanupPlacement(types.NamespacedName{Name: crpName}) }) It("should update CRP status as expected", func() { @@ -170,7 +170,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { It("should remove the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -194,7 +194,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { AfterAll(func() { By(fmt.Sprintf("deleting placement %s", crpName)) - cleanupCRP(crpName) + cleanupPlacement(types.NamespacedName{Name: crpName}) }) It("should update CRP status as expected", func() { @@ -223,7 +223,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { It("should remove the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -247,7 +247,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { AfterAll(func() { By(fmt.Sprintf("deleting placement %s", crpName)) - cleanupCRP(crpName) + cleanupPlacement(types.NamespacedName{Name: crpName}) By("deleting created work resources on member cluster") cleanWorkResourcesOnCluster(allMemberClusters[0]) @@ -283,17 +283,17 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { }, }, }, - Conditions: resourcePlacementApplyFailedConditions(crp.Generation), + Conditions: perClusterApplyFailedConditions(crp.Generation), }, { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), + Conditions: perClusterRolloutCompletedConditions(crp.Generation, true, false), }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), + Conditions: perClusterRolloutCompletedConditions(crp.Generation, true, false), }, }, SelectedResources: []placementv1beta1.ResourceIdentifier{ @@ -311,7 +311,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { }, ObservedResourceIndex: "0", } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -336,7 +336,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual 
:= allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) @@ -376,7 +376,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { AfterAll(func() { By(fmt.Sprintf("deleting placement %s", crpName)) - cleanupCRP(crpName) + cleanupPlacement(types.NamespacedName{Name: crpName}) By("deleting created work resources on member cluster") cleanWorkResourcesOnCluster(allMemberClusters[0]) @@ -412,7 +412,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) @@ -505,7 +505,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -546,7 +546,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { } wantCRPStatus := buildWantCRPStatus(conflictedCRP.Generation) - if diff := cmp.Diff(conflictedCRP.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(conflictedCRP.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -972,7 +972,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -991,7 +991,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -1005,7 +1005,7 @@ var _ = Describe("switching apply strategies", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1039,7 +1039,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "1", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: 
placementv1beta1.ResourceIdentifier{ @@ -1058,7 +1058,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "1", - Conditions: resourcePlacementSyncPendingConditions(crpGeneration), + Conditions: perClusterSyncPendingConditions(crpGeneration), }, }, ObservedResourceIndex: "1", @@ -1072,7 +1072,7 @@ var _ = Describe("switching apply strategies", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1110,12 +1110,12 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "1", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), }, { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "1", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1147,7 +1147,7 @@ var _ = Describe("switching apply strategies", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1221,7 +1221,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1256,7 +1256,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1299,7 +1299,7 @@ var _ = Describe("switching apply strategies", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1333,7 +1333,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "1", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1354,7 +1354,7 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "1", - Conditions: 
resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1397,7 +1397,7 @@ var _ = Describe("switching apply strategies", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1435,12 +1435,12 @@ var _ = Describe("switching apply strategies", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "1", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "1", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "1", @@ -1454,7 +1454,7 @@ var _ = Describe("switching apply strategies", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil diff --git a/test/e2e/placement_drift_diff_test.go b/test/e2e/placement_drift_diff_test.go index 002e34381..1c0b56448 100644 --- a/test/e2e/placement_drift_diff_test.go +++ b/test/e2e/placement_drift_diff_test.go @@ -186,7 +186,7 @@ var _ = Describe("take over existing resources", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -225,12 +225,12 @@ var _ = Describe("take over existing resources", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -244,7 +244,7 @@ var _ = Describe("take over existing resources", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -358,7 +358,7 @@ var _ = Describe("take over existing resources", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { 
ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -424,12 +424,12 @@ var _ = Describe("take over existing resources", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -443,7 +443,7 @@ var _ = Describe("take over existing resources", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -563,7 +563,7 @@ var _ = Describe("detect drifts on placed resources", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{ { @@ -585,12 +585,12 @@ var _ = Describe("detect drifts on placed resources", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -604,7 +604,7 @@ var _ = Describe("detect drifts on placed resources", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -702,7 +702,7 @@ var _ = Describe("detect drifts on placed resources", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -741,12 +741,12 @@ var _ = Describe("detect drifts on placed resources", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -760,7 +760,7 @@ var _ = Describe("detect drifts on placed 
resources", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -886,7 +886,7 @@ var _ = Describe("detect drifts on placed resources", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -952,12 +952,12 @@ var _ = Describe("detect drifts on placed resources", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -971,7 +971,7 @@ var _ = Describe("detect drifts on placed resources", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1091,7 +1091,7 @@ var _ = Describe("report diff mode", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { @@ -1129,7 +1129,7 @@ var _ = Describe("report diff mode", func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { @@ -1164,7 +1164,7 @@ var _ = Describe("report diff mode", func() { { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { @@ -1208,7 +1208,7 @@ var _ = Describe("report diff mode", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1300,14 +1300,14 @@ var _ = Describe("report diff mode", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: 
perClusterDiffReportedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{}, }, { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { @@ -1342,7 +1342,7 @@ var _ = Describe("report diff mode", func() { { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + Conditions: perClusterDiffReportedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { @@ -1386,7 +1386,7 @@ var _ = Describe("report diff mode", func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -1517,7 +1517,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1558,7 +1558,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1599,7 +1599,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1679,7 +1679,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } @@ -1764,7 +1764,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1805,7 +1805,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: 
resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1846,7 +1846,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1926,7 +1926,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } @@ -2001,21 +2001,21 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{}, }, { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{}, }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + Conditions: perClusterApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -2095,7 +2095,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { } wantCRPStatus := buildWantCRPStatus(crp.Generation) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -2155,21 +2155,21 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: observedResourceIndex, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{}, }, { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: observedResourceIndex, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{}, }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: 
observedResourceIndex, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + Conditions: perClusterRolloutCompletedConditions(crpGeneration, true, false), FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{}, DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{}, @@ -2189,7 +2189,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { // for comparison. wantCRPStatus := buildWantCRPStatus(crp.Generation, crp.Status.ObservedResourceIndex) - if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, *wantCRPStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil diff --git a/test/e2e/placement_negative_cases_test.go b/test/e2e/placement_negative_cases_test.go index b0c754441..389758bef 100644 --- a/test/e2e/placement_negative_cases_test.go +++ b/test/e2e/placement_negative_cases_test.go @@ -151,7 +151,7 @@ var _ = Describe("handling errors and failures gracefully", func() { }, }, }, - Conditions: resourcePlacementApplyFailedConditions(crp.Generation), + Conditions: perClusterApplyFailedConditions(crp.Generation), }, }, SelectedResources: []placementv1beta1.ResourceIdentifier{ @@ -170,7 +170,7 @@ var _ = Describe("handling errors and failures gracefully", func() { }, ObservedResourceIndex: "0", } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -299,7 +299,7 @@ var _ = Describe("handling errors and failures gracefully", func() { { ClusterName: memberCluster1EastProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementDiffReportingFailedConditions(crp.Generation), + Conditions: perClusterDiffReportingFailedConditions(crp.Generation), }, }, SelectedResources: []placementv1beta1.ResourceIdentifier{ @@ -318,7 +318,7 @@ var _ = Describe("handling errors and failures gracefully", func() { }, ObservedResourceIndex: "0", } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil diff --git a/test/e2e/placement_ro_test.go b/test/e2e/placement_ro_test.go index e2b614092..f6d2c5ab4 100644 --- a/test/e2e/placement_ro_test.go +++ b/test/e2e/placement_ro_test.go @@ -53,7 +53,7 @@ var _ = Context("creating resourceOverride (selecting all clusters) to override Placement: &placementv1beta1.PlacementRef{ Name: crpName, // assigned CRP name }, - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -119,7 +119,7 @@ var _ = Context("creating resourceOverride (selecting all clusters) to override Placement: &placementv1beta1.PlacementRef{ Name: crpName, }, - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -214,7 +214,7 @@ var _ = Context("creating resourceOverride with multiple jsonPatchOverrides to o Namespace: roNamespace, }, Spec: placementv1beta1.ResourceOverrideSpec{ - 
ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -317,7 +317,7 @@ var _ = Context("creating resourceOverride with different rules for each cluster Placement: &placementv1beta1.PlacementRef{ Name: crpName, // assigned CRP name }, - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -452,7 +452,7 @@ var _ = Context("creating resourceOverride and clusterResourceOverride, resource Namespace: roNamespace, }, Spec: placementv1beta1.ResourceOverrideSpec{ - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -543,7 +543,7 @@ var _ = Context("creating resourceOverride with incorrect path", Ordered, func() Placement: &placementv1beta1.PlacementRef{ Name: crpName, // assigned CRP name }, - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -614,7 +614,7 @@ var _ = Context("creating resourceOverride and resource becomes invalid after ov Placement: &placementv1beta1.PlacementRef{ Name: crpName, // assigned CRP name }, - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -674,7 +674,7 @@ var _ = Context("creating resourceOverride with a templated rules with cluster n Namespace: roNamespace, }, Spec: placementv1beta1.ResourceOverrideSpec{ - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -773,7 +773,7 @@ var _ = Context("creating resourceOverride with delete configMap", Ordered, func Namespace: roNamespace, }, Spec: placementv1beta1.ResourceOverrideSpec{ - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -893,7 +893,7 @@ var _ = Context("creating resourceOverride with a templated rules with cluster l Placement: &placementv1beta1.PlacementRef{ Name: crpName, // assigned CRP name }, - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { @@ -1005,7 +1005,7 @@ var _ = Context("creating resourceOverride with a templated rules with cluster l ObservedGeneration: crp.Generation, }, } - if diff := cmp.Diff(crp.Status.Conditions, wantCondition, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status.Conditions, wantCondition, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP condition diff (-got, +want): %s", diff) } return nil @@ -1050,7 +1050,7 @@ var _ = Context("creating resourceOverride with non-exist label", Ordered, func( Placement: &placementv1beta1.PlacementRef{ Name: crpName, // assigned CRP name }, - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { diff --git 
a/test/e2e/placement_selecting_resources_test.go b/test/e2e/placement_selecting_resources_test.go index c0f1e716a..35ff3cee3 100644 --- a/test/e2e/placement_selecting_resources_test.go +++ b/test/e2e/placement_selecting_resources_test.go @@ -93,7 +93,7 @@ var _ = Describe("creating CRP and selecting resources by name", Ordered, func() It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -157,7 +157,7 @@ var _ = Describe("creating CRP and selecting resources by label", Ordered, func( It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -243,7 +243,7 @@ var _ = Describe("validating CRP when cluster-scoped resources become selected a It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -328,7 +328,7 @@ var _ = Describe("validating CRP when cluster-scoped resources become unselected }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -420,7 +420,7 @@ var _ = Describe("validating CRP when cluster-scoped and namespace-scoped resour It("should remove the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -508,7 +508,7 @@ var _ = Describe("validating CRP when adding 
resources in a matching namespace", It("should remove the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -601,7 +601,7 @@ var _ = Describe("validating CRP when deleting resources in a matching namespace It("should remove the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -641,7 +641,7 @@ var _ = Describe("validating CRP when selecting a reserved resource", Ordered, f AfterAll(func() { By(fmt.Sprintf("deleting placement %s", crpName)) - cleanupCRP(crpName) + cleanupPlacement(types.NamespacedName{Name: crpName}) }) It("should update CRP status as expected", func() { @@ -661,7 +661,7 @@ var _ = Describe("validating CRP when selecting a reserved resource", Ordered, f }, }, } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -680,7 +680,7 @@ var _ = Describe("validating CRP when selecting a reserved resource", Ordered, f }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -743,7 +743,7 @@ var _ = Describe("When creating a pickN ClusterResourcePlacement with duplicated }, }, } - if diff := cmp.Diff(gotCRP.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(gotCRP.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -846,17 +846,17 @@ var _ = Describe("validating CRP when failed to apply resources", Ordered, func( }, }, }, - Conditions: resourcePlacementApplyFailedConditions(crp.Generation), + Conditions: perClusterApplyFailedConditions(crp.Generation), }, { ClusterName: memberCluster2EastCanaryName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), + Conditions: perClusterRolloutCompletedConditions(crp.Generation, true, false), }, { ClusterName: memberCluster3WestProdName, ObservedResourceIndex: "0", - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), + Conditions: 
perClusterRolloutCompletedConditions(crp.Generation, true, false), }, }, SelectedResources: []placementv1beta1.ResourceIdentifier{ @@ -874,7 +874,7 @@ var _ = Describe("validating CRP when failed to apply resources", Ordered, func( }, ObservedResourceIndex: "0", } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) } return nil @@ -899,7 +899,7 @@ var _ = Describe("validating CRP when failed to apply resources", Ordered, func( }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) @@ -1020,7 +1020,7 @@ var _ = Describe("validating CRP when placing cluster scope resource (other than }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -1114,7 +1114,7 @@ var _ = Describe("validating CRP revision history allowing single revision when It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -1207,7 +1207,7 @@ var _ = Describe("validating CRP revision history allowing multiple revisions wh It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -1282,7 +1282,7 @@ var _ = Describe("validating CRP when selected resources cross the 1MB limit", O }) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, largeEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) diff 
--git a/test/e2e/placement_with_custom_config_test.go b/test/e2e/placement_with_custom_config_test.go index 22b15bb2a..b8f9aa2db 100644 --- a/test/e2e/placement_with_custom_config_test.go +++ b/test/e2e/placement_with_custom_config_test.go @@ -137,7 +137,7 @@ var _ = Describe("validating CRP when using customized resourceSnapshotCreationM It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) @@ -257,7 +257,7 @@ var _ = Describe("validating that CRP status can be updated after updating the r It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) }) }) diff --git a/test/e2e/resource_placement_pickall_test.go b/test/e2e/resource_placement_pickall_test.go new file mode 100644 index 000000000..34cc4d308 --- /dev/null +++ b/test/e2e/resource_placement_pickall_test.go @@ -0,0 +1,103 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + + placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" +) + +var _ = Describe("placing namespaced scoped resources using a RP with PickAll policy", func() { + Context("with no affinities specified", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Create the resources. + createWorkResources() + + // Create the CRP with Namespace-only selector. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: namespaceOnlySelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on all member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + AfterAll(func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, allMemberClusters) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + }) +}) diff --git a/test/e2e/resources_test.go b/test/e2e/resources_test.go index 7b61238db..8077903cf 100644 --- a/test/e2e/resources_test.go +++ b/test/e2e/resources_test.go @@ -39,6 +39,7 @@ const ( appDeploymentNameTemplate = "app-deploy-%d" appSecretNameTemplate = "app-secret-%d" // #nosec G101 crpNameTemplate = "crp-%d" + rpNameTemplate = "rp-%d" crpNameWithSubIndexTemplate = "crp-%d-%d" croNameTemplate = "cro-%d" roNameTemplate = "ro-%d" @@ -54,6 +55,18 @@ const ( workNamespaceLabelName = "process" ) +func namespaceOnlySelector() []placementv1beta1.ResourceSelectorTerm { + return []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "Namespace", + Version: "v1", + Name: fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()), + SelectionScope: placementv1beta1.NamespaceOnly, + }, + } +} + func workResourceSelector() []placementv1beta1.ResourceSelectorTerm { return []placementv1beta1.ResourceSelectorTerm{ { @@ -65,7 +78,18 @@ func workResourceSelector() []placementv1beta1.ResourceSelectorTerm { } } -func configMapSelector() []placementv1beta1.ResourceSelector { +func configMapSelector() []placementv1beta1.ResourceSelectorTerm { + return []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "ConfigMap", + Version: "v1", + Name: 
fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()), + }, + } +} + +func configMapOverrideSelector() []placementv1beta1.ResourceSelector { return []placementv1beta1.ResourceSelector{ { Group: "", diff --git a/test/e2e/rollout_test.go b/test/e2e/rollout_test.go index 82b9a19fc..31714bc4b 100644 --- a/test/e2e/rollout_test.go +++ b/test/e2e/rollout_test.go @@ -286,7 +286,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", true) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", true) Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -370,7 +370,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", true) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", true) Eventually(crpStatusUpdatedActual, 2*workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -693,7 +693,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", false) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", false) Eventually(crpStatusUpdatedActual, 2*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -718,7 +718,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { }) // job is not trackable, so we need to wait for a bit longer for each roll out It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "1", false) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "1", false) Eventually(crpStatusUpdatedActual, 5*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -807,7 +807,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { return nil }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to select all the expected resources") - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, []string{memberCluster1EastProdName}, nil, observedResourceIdx, false) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, []string{memberCluster1EastProdName}, nil, observedResourceIdx, false) Eventually(crpStatusUpdatedActual, 
2*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -863,7 +863,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { return nil }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to select all the expected resources") - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, []string{memberCluster1EastProdName}, nil, observedResourceIdx, false) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, []string{memberCluster1EastProdName}, nil, observedResourceIdx, false) Eventually(crpStatusUpdatedActual, 4*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -929,7 +929,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", false) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", false) Eventually(crpStatusUpdatedActual, 2*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) @@ -992,7 +992,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "1", false) + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "1", false) Eventually(crpStatusUpdatedActual, 4*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) diff --git a/test/e2e/setup_test.go b/test/e2e/setup_test.go index e8c7455ac..8eda2dad9 100644 --- a/test/e2e/setup_test.go +++ b/test/e2e/setup_test.go @@ -219,11 +219,11 @@ var ( // disappear from the status of the MemberCluster object. 
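The rollout hunks above switch the e2e helpers from CRP-name arguments to placement keys, e.g. customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, ...). The convention, which the retrievePlacement helper added later in utils_test.go makes explicit, is that a key with an empty Namespace addresses a cluster-scoped ClusterResourcePlacement while a namespaced key addresses a ResourcePlacement. The sketch below only restates that convention for readers skimming the renames; it is not part of the patch, and the key values are made up.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"

	placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1"
)

// emptyPlacementFor returns the empty object a placement key refers to: no
// namespace means a cluster-scoped ClusterResourcePlacement, a namespace
// means a ResourcePlacement in that namespace.
func emptyPlacementFor(key types.NamespacedName) placementv1beta1.PlacementObj {
	if key.Namespace == "" {
		return &placementv1beta1.ClusterResourcePlacement{}
	}
	return &placementv1beta1.ResourcePlacement{}
}

func main() {
	crpKey := types.NamespacedName{Name: "crp-1"}                           // hypothetical CRP key
	rpKey := types.NamespacedName{Name: "rp-1", Namespace: "application-1"} // hypothetical RP key
	fmt.Printf("%s -> %T\n", crpKey, emptyPlacementFor(crpKey))
	fmt.Printf("%s -> %T\n", rpKey, emptyPlacementFor(rpKey))
}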
c.Type == string(clusterv1beta1.ConditionTypeClusterPropertyProviderStarted) }) - ignoreTimeTypeFields = cmpopts.IgnoreTypes(time.Time{}, metav1.Time{}) - ignoreCRPStatusDriftedPlacementsTimestampFields = cmpopts.IgnoreFields(placementv1beta1.DriftedResourcePlacement{}, "ObservationTime", "FirstDriftedObservedTime") - ignoreCRPStatusDiffedPlacementsTimestampFields = cmpopts.IgnoreFields(placementv1beta1.DiffedResourcePlacement{}, "ObservationTime", "FirstDiffedObservedTime") + ignoreTimeTypeFields = cmpopts.IgnoreTypes(time.Time{}, metav1.Time{}) + ignorePlacementStatusDriftedPlacementsTimestampFields = cmpopts.IgnoreFields(placementv1beta1.DriftedResourcePlacement{}, "ObservationTime", "FirstDriftedObservedTime") + ignorePlacementStatusDiffedPlacementsTimestampFields = cmpopts.IgnoreFields(placementv1beta1.DiffedResourcePlacement{}, "ObservationTime", "FirstDiffedObservedTime") - crpStatusCmpOptions = cmp.Options{ + placementStatusCmpOptions = cmp.Options{ cmpopts.SortSlices(lessFuncCondition), cmpopts.SortSlices(lessFuncPlacementStatus), cmpopts.SortSlices(utils.LessFuncResourceIdentifier), @@ -231,8 +231,8 @@ var ( cmpopts.SortSlices(utils.LessFuncDiffedResourcePlacements), cmpopts.SortSlices(utils.LessFuncDriftedResourcePlacements), utils.IgnoreConditionLTTAndMessageFields, - ignoreCRPStatusDriftedPlacementsTimestampFields, - ignoreCRPStatusDiffedPlacementsTimestampFields, + ignorePlacementStatusDriftedPlacementsTimestampFields, + ignorePlacementStatusDiffedPlacementsTimestampFields, cmpopts.EquateEmpty(), } diff --git a/test/e2e/updaterun_test.go b/test/e2e/updaterun_test.go index c36280006..a98aadf22 100644 --- a/test/e2e/updaterun_test.go +++ b/test/e2e/updaterun_test.go @@ -698,7 +698,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { Namespace: roNamespace, }, Spec: placementv1beta1.ResourceOverrideSpec{ - ResourceSelectors: configMapSelector(), + ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ { diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index 1728a07d5..3633e11b1 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -849,12 +849,11 @@ func checkNamespaceExistsWithOwnerRefOnMemberCluster(nsName, crpName string) { }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Namespace which is not owned by the CRP should not be deleted") } -// cleanupCRP deletes the CRP and waits until the resources are not found. -func cleanupCRP(name string) { +// cleanupPlacement deletes the placement and waits until the resources are not found. +func cleanupPlacement(placementKey types.NamespacedName) { // TODO(Arvindthiru): There is a conflict which requires the Eventually block, not sure of series of operations that leads to it yet. Eventually(func() error { - crp := &placementv1beta1.ClusterResourcePlacement{} - err := hubClient.Get(ctx, types.NamespacedName{Name: name}, crp) + placement, err := retrievePlacement(placementKey) if k8serrors.IsNotFound(err) { return nil } @@ -862,19 +861,19 @@ func cleanupCRP(name string) { return err } - // Delete the CRP (again, if applicable). + // Delete the placement (again, if applicable). // This helps the After All node to run successfully even if the steps above fail early. 
- if err = hubClient.Delete(ctx, crp); err != nil { + if err = hubClient.Delete(ctx, placement); err != nil { return err } - crp.Finalizers = []string{} - return hubClient.Update(ctx, crp) - }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to delete CRP %s", name) + placement.SetFinalizers([]string{}) + return hubClient.Update(ctx, placement) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to delete placement %s", placementKey) - // Wait until the CRP is removed. - removedActual := crpRemovedActual(name) - Eventually(removedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove CRP %s", name) + // Wait until the placement is removed. + removedActual := placementRemovedActual(placementKey) + Eventually(removedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove placement %s", placementKey) // Check if work is deleted. Needed to ensure that the Work resource is cleaned up before the next CRP is created. // This is because the Work resource is created with a finalizer that blocks deletion until the all applied work @@ -882,14 +881,18 @@ func cleanupCRP(name string) { // and flakiness in subsequent tests. By("Check if work is deleted") var workNS string + workName := fmt.Sprintf("%s-work", placementKey.Name) + if placementKey.Namespace != "" { + workName = fmt.Sprintf("%s.%s", placementKey.Namespace, workName) + } work := &placementv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-work", name), + Name: workName, }, } Eventually(func() bool { for i := range allMemberClusters { - workNS = fmt.Sprintf("fleet-member-%s", allMemberClusterNames[i]) + workNS := fmt.Sprintf("fleet-member-%s", allMemberClusterNames[i]) if err := hubClient.Get(ctx, types.NamespacedName{Name: work.Name, Namespace: workNS}, work); err != nil && k8serrors.IsNotFound(err) { // Work resource is not found, which is expected. continue @@ -1024,11 +1027,11 @@ func ensureCRPAndRelatedResourcesDeleted(crpName string, memberClusters []*frame } // Verify that related finalizers have been removed from the CRP. - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: crpName}) Eventually(finalizerRemovedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP") // Remove the custom deletion blocker finalizer from the CRP. - cleanupCRP(crpName) + cleanupPlacement(types.NamespacedName{Name: crpName}) // Delete the created resources. cleanupWorkResources() @@ -1427,3 +1430,42 @@ func ensureUpdateRunStrategyDeletion(strategyName string) { removedActual := updateRunStrategyRemovedActual(strategyName) Eventually(removedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "ClusterStagedUpdateStrategy still exists") } + +func ensureRPAndRelatedResourcesDeleted(rpKey types.NamespacedName, memberClusters []*framework.Cluster) { + // Delete the ResourcePlacement. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpKey.Name, + Namespace: rpKey.Namespace, + }, + } + Expect(hubClient.Delete(ctx, rp)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{}), "Failed to delete ResourcePlacement") + + // Verify that all resources placed have been removed from specified member clusters. 
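The reworked cleanupPlacement above derives the hub-side Work object name from the placement key: cluster-scoped placements map to "<name>-work" and namespaced placements to "<namespace>.<name>-work", looked up in the reserved fleet-member-<cluster> namespace. A small standalone sketch of that derivation follows, with made-up key and cluster values, for readers tracing the cleanup flow; it is not part of the patch.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

// expectedWorkKey mirrors the naming rule used in cleanupPlacement: the Work
// name is "<name>-work" for a cluster-scoped placement and
// "<namespace>.<name>-work" for a namespaced one, and the object lives in the
// member cluster's reserved "fleet-member-<cluster>" namespace on the hub.
func expectedWorkKey(placementKey types.NamespacedName, memberClusterName string) types.NamespacedName {
	workName := fmt.Sprintf("%s-work", placementKey.Name)
	if placementKey.Namespace != "" {
		workName = fmt.Sprintf("%s.%s", placementKey.Namespace, workName)
	}
	return types.NamespacedName{
		Namespace: fmt.Sprintf("fleet-member-%s", memberClusterName),
		Name:      workName,
	}
}

func main() {
	// Both placement keys and the member cluster name are placeholders.
	fmt.Println(expectedWorkKey(types.NamespacedName{Name: "crp-1"}, "cluster-1"))
	// fleet-member-cluster-1/crp-1-work
	fmt.Println(expectedWorkKey(types.NamespacedName{Name: "rp-1", Namespace: "application-1"}, "cluster-1"))
	// fleet-member-cluster-1/application-1.rp-1-work
}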
+ for idx := range memberClusters { + memberCluster := memberClusters[idx] + + workResourcesRemovedActual := namespacedResourcesRemovedFromClusterActual(memberCluster) + Eventually(workResourcesRemovedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove work resources from member cluster %s", memberCluster.ClusterName) + } + + // Verify that related finalizers have been removed from the ResourcePlacement. + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(rpKey) + Eventually(finalizerRemovedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from ResourcePlacement") + + // Remove the custom deletion blocker finalizer from the ResourcePlacement. + cleanupPlacement(rpKey) +} + +func retrievePlacement(placementKey types.NamespacedName) (placementv1beta1.PlacementObj, error) { + var placement placementv1beta1.PlacementObj + if placementKey.Namespace == "" { + placement = &placementv1beta1.ClusterResourcePlacement{} + } else { + placement = &placementv1beta1.ResourcePlacement{} + } + if err := hubClient.Get(ctx, placementKey, placement); err != nil { + return nil, err + } + return placement, nil +} From 6b62318cd4d9d205ac4778337d1296edb4d45b11 Mon Sep 17 00:00:00 2001 From: Zhiying Lin <54013513+zhiying-lin@users.noreply.github.com> Date: Tue, 19 Aug 2025 18:20:48 +0800 Subject: [PATCH 19/38] fix: add cel for override placement (#190) --------- Signed-off-by: Zhiying Lin --- apis/cluster/v1beta1/zz_generated.deepcopy.go | 2 +- apis/placement/v1/override_types.go | 23 + .../v1alpha1/zz_generated.deepcopy.go | 2 +- apis/placement/v1beta1/override_types.go | 2 + .../v1beta1/zz_generated.deepcopy.go | 2 +- apis/v1alpha1/zz_generated.deepcopy.go | 2 +- ...tes-fleet.io_clusterresourceoverrides.yaml | 23 +- ...t.io_clusterresourceoverridesnapshots.yaml | 23 +- ...kubernetes-fleet.io_resourceoverrides.yaml | 20 +- ...es-fleet.io_resourceoverridesnapshots.yaml | 20 +- .../rollout/controller_integration_test.go | 12 +- .../api_validation_integration_test.go | 528 +++++++++--------- test/apis/v1alpha1/zz_generated.deepcopy.go | 2 +- test/e2e/placement_cro_test.go | 26 +- test/e2e/placement_ro_test.go | 26 +- 15 files changed, 395 insertions(+), 318 deletions(-) diff --git a/apis/cluster/v1beta1/zz_generated.deepcopy.go b/apis/cluster/v1beta1/zz_generated.deepcopy.go index 17e71a1a2..7bb7f501c 100644 --- a/apis/cluster/v1beta1/zz_generated.deepcopy.go +++ b/apis/cluster/v1beta1/zz_generated.deepcopy.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/apis/placement/v1/override_types.go b/apis/placement/v1/override_types.go index e96c3ddad..cf3054f91 100644 --- a/apis/placement/v1/override_types.go +++ b/apis/placement/v1/override_types.go @@ -42,11 +42,13 @@ type ClusterResourceOverride struct { // The ClusterResourceOverride create or update will fail when the resource has been selected by the existing ClusterResourceOverride. // If the resource is selected by both ClusterResourceOverride and ResourceOverride, ResourceOverride will win when resolving // conflicts. 
+// +kubebuilder:validation:XValidation:rule="(has(oldSelf.placement) && has(self.placement) && oldSelf.placement == self.placement) || (!has(oldSelf.placement) && !has(self.placement))",message="The placement field is immutable" type ClusterResourceOverrideSpec struct { // Placement defines whether the override is applied to a specific placement or not. // If set, the override will trigger the placement rollout immediately when the rollout strategy type is RollingUpdate. // Otherwise, it will be applied to the next rollout. // The recommended way is to set the placement so that the override can be rolled out immediately. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="The placement field is immutable" // +optional Placement *PlacementRef `json:"placement,omitempty"` @@ -66,12 +68,32 @@ type ClusterResourceOverrideSpec struct { Policy *OverridePolicy `json:"policy"` } +// ResourceScope defines the scope of placement reference. +type ResourceScope string + +const ( + // ClusterScoped indicates placement is cluster-scoped. + ClusterScoped ResourceScope = "Cluster" + + // NamespaceScoped indicates placement is namespace-scoped. + NamespaceScoped ResourceScope = "Namespaced" +) + // PlacementRef is the reference to a placement. // For now, we only support ClusterResourcePlacement. type PlacementRef struct { // Name is the reference to the name of placement. // +required + Name string `json:"name"` + // Scope defines the scope of the placement. + // A clusterResourceOverride can only reference a clusterResourcePlacement (cluster-scoped), + // and a resourceOverride can reference either a clusterResourcePlacement or resourcePlacement (namespaced). + // The referenced resourcePlacement must be in the same namespace as the resourceOverride. + // +kubebuilder:validation:Enum=Cluster;Namespaced + // +kubebuilder:default=Cluster + // +optional + Scope ResourceScope `json:"scope,omitempty"` } // OverridePolicy defines how to override the selected resources on the target clusters. @@ -144,6 +166,7 @@ type ResourceOverride struct { // The ResourceOverride create or update will fail when the resource has been selected by the existing ResourceOverride. // If the resource is selected by both ClusterResourceOverride and ResourceOverride, ResourceOverride will win when resolving // conflicts. +// +kubebuilder:validation:XValidation:rule="(has(oldSelf.placement) && has(self.placement) && oldSelf.placement == self.placement) || (!has(oldSelf.placement) && !has(self.placement))",message="The placement field is immutable" type ResourceOverrideSpec struct { // Placement defines whether the override is applied to a specific placement or not. // If set, the override will trigger the placement rollout immediately when the rollout strategy type is RollingUpdate. 
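The new Scope field and the CEL rules above tie an override to one placement flavor: a clusterResourceOverride may only point at a clusterResourcePlacement, while a resourceOverride may also point at a resourcePlacement in its own namespace, and the placement reference becomes immutable once the override exists. Below is a minimal sketch of filling in the new field, assuming the v1 API shown above is importable as placementv1; the import alias and the placement name are illustrative, not taken from the patch.

package main

import (
	"fmt"

	placementv1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1" // assumed import path and alias
)

func main() {
	// A resourceOverride referencing a namespaced resourcePlacement; leaving
	// Scope empty falls back to the "Cluster" default, i.e. a
	// clusterResourcePlacement reference.
	ref := &placementv1.PlacementRef{
		Name:  "example-rp",                // hypothetical placement name
		Scope: placementv1.NamespaceScoped, // serialized as "Namespaced"
	}
	// Per the added XValidation rules, this reference cannot be changed after
	// the override is created; switching placements means recreating it.
	fmt.Printf("placement=%q scope=%q\n", ref.Name, ref.Scope)
}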
diff --git a/apis/placement/v1alpha1/zz_generated.deepcopy.go b/apis/placement/v1alpha1/zz_generated.deepcopy.go index df9f5e6d7..6d1656d18 100644 --- a/apis/placement/v1alpha1/zz_generated.deepcopy.go +++ b/apis/placement/v1alpha1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1alpha1 import ( "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) diff --git a/apis/placement/v1beta1/override_types.go b/apis/placement/v1beta1/override_types.go index 193ca790d..9c5c79495 100644 --- a/apis/placement/v1beta1/override_types.go +++ b/apis/placement/v1beta1/override_types.go @@ -44,6 +44,7 @@ type ClusterResourceOverride struct { // The ClusterResourceOverride create or update will fail when the resource has been selected by the existing ClusterResourceOverride. // If the resource is selected by both ClusterResourceOverride and ResourceOverride, ResourceOverride will win when resolving // conflicts. +// +kubebuilder:validation:XValidation:rule="(has(oldSelf.placement) && has(self.placement) && oldSelf.placement == self.placement) || (!has(oldSelf.placement) && !has(self.placement))",message="The placement field is immutable" type ClusterResourceOverrideSpec struct { // Placement defines whether the override is applied to a specific placement or not. // If set, the override will trigger the placement rollout immediately when the rollout strategy type is RollingUpdate. @@ -167,6 +168,7 @@ type ResourceOverride struct { // The ResourceOverride create or update will fail when the resource has been selected by the existing ResourceOverride. // If the resource is selected by both ClusterResourceOverride and ResourceOverride, ResourceOverride will win when resolving // conflicts. +// +kubebuilder:validation:XValidation:rule="(has(oldSelf.placement) && has(self.placement) && oldSelf.placement == self.placement) || (!has(oldSelf.placement) && !has(self.placement))",message="The placement field is immutable" type ResourceOverrideSpec struct { // Placement defines whether the override is applied to a specific placement or not. // If set, the override will trigger the placement rollout immediately when the rollout strategy type is RollingUpdate. diff --git a/apis/placement/v1beta1/zz_generated.deepcopy.go b/apis/placement/v1beta1/zz_generated.deepcopy.go index 45163b157..acd0306ac 100644 --- a/apis/placement/v1beta1/zz_generated.deepcopy.go +++ b/apis/placement/v1beta1/zz_generated.deepcopy.go @@ -21,7 +21,7 @@ limitations under the License. 
package v1beta1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index 85550ca19..27a862c43 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml index cefcd7c12..5c92a300d 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml @@ -137,11 +137,24 @@ spec: The recommended way is to set the placement so that the override can be rolled out immediately. properties: name: - description: Name is the reference to the name of placement. + type: string + scope: + default: Cluster + description: |- + Scope defines the scope of the placement. + A clusterResourceOverride can only reference a clusterResourcePlacement (cluster-scoped), + and a resourceOverride can reference either a clusterResourcePlacement or resourcePlacement (namespaced). + The referenced resourcePlacement must be in the same namespace as the resourceOverride. + enum: + - Cluster + - Namespaced type: string required: - name type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: self == oldSelf policy: description: Policy defines how to override the selected resources on the target clusters. @@ -363,6 +376,10 @@ spec: - clusterResourceSelectors - policy type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - spec type: object @@ -1105,6 +1122,10 @@ spec: - clusterResourceSelectors - policy type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - spec type: object diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml index eb8e96b83..d8289eec7 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml @@ -151,11 +151,24 @@ spec: The recommended way is to set the placement so that the override can be rolled out immediately. properties: name: - description: Name is the reference to the name of placement. + type: string + scope: + default: Cluster + description: |- + Scope defines the scope of the placement. + A clusterResourceOverride can only reference a clusterResourcePlacement (cluster-scoped), + and a resourceOverride can reference either a clusterResourcePlacement or resourcePlacement (namespaced). + The referenced resourcePlacement must be in the same namespace as the resourceOverride. 
+ enum: + - Cluster + - Namespaced type: string required: - name type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: self == oldSelf policy: description: Policy defines how to override the selected resources on the target clusters. @@ -377,6 +390,10 @@ spec: - clusterResourceSelectors - policy type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - overrideHash - overrideSpec @@ -1151,6 +1168,10 @@ spec: - clusterResourceSelectors - policy type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - overrideHash - overrideSpec diff --git a/config/crd/bases/placement.kubernetes-fleet.io_resourceoverrides.yaml b/config/crd/bases/placement.kubernetes-fleet.io_resourceoverrides.yaml index ffd438cc0..dc538218c 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_resourceoverrides.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_resourceoverrides.yaml @@ -52,7 +52,17 @@ spec: The recommended way is to set the placement so that the override can be rolled out immediately. properties: name: - description: Name is the reference to the name of placement. + type: string + scope: + default: Cluster + description: |- + Scope defines the scope of the placement. + A clusterResourceOverride can only reference a clusterResourcePlacement (cluster-scoped), + and a resourceOverride can reference either a clusterResourcePlacement or resourcePlacement (namespaced). + The referenced resourcePlacement must be in the same namespace as the resourceOverride. + enum: + - Cluster + - Namespaced type: string required: - name @@ -311,6 +321,10 @@ spec: - policy - resourceSelectors type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - spec type: object @@ -931,6 +945,10 @@ spec: - policy - resourceSelectors type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - spec type: object diff --git a/config/crd/bases/placement.kubernetes-fleet.io_resourceoverridesnapshots.yaml b/config/crd/bases/placement.kubernetes-fleet.io_resourceoverridesnapshots.yaml index dc4e56c14..0d5279618 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_resourceoverridesnapshots.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_resourceoverridesnapshots.yaml @@ -66,7 +66,17 @@ spec: The recommended way is to set the placement so that the override can be rolled out immediately. properties: name: - description: Name is the reference to the name of placement. + type: string + scope: + default: Cluster + description: |- + Scope defines the scope of the placement. + A clusterResourceOverride can only reference a clusterResourcePlacement (cluster-scoped), + and a resourceOverride can reference either a clusterResourcePlacement or resourcePlacement (namespaced). 
+ The referenced resourcePlacement must be in the same namespace as the resourceOverride. + enum: + - Cluster + - Namespaced type: string required: - name @@ -325,6 +335,10 @@ spec: - policy - resourceSelectors type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - overrideHash - overrideSpec @@ -981,6 +995,10 @@ spec: - policy - resourceSelectors type: object + x-kubernetes-validations: + - message: The placement field is immutable + rule: (has(oldSelf.placement) && has(self.placement) && oldSelf.placement + == self.placement) || (!has(oldSelf.placement) && !has(self.placement)) required: - overrideHash - overrideSpec diff --git a/pkg/controllers/rollout/controller_integration_test.go b/pkg/controllers/rollout/controller_integration_test.go index 46bddf8fc..3013226b5 100644 --- a/pkg/controllers/rollout/controller_integration_test.go +++ b/pkg/controllers/rollout/controller_integration_test.go @@ -359,14 +359,6 @@ var _ = Describe("Test the rollout Controller", func() { // Verify bindings are NOT updated (rollout not triggered) by resourceOverrideSnapshot. verifyBindingsNotUpdatedWithOverridesConsistently(controller.ConvertCRBArrayToBindingObjs(bindings), nil, nil) - By(fmt.Sprintf("Updating resourceOverrideSnapshot %s to refer the clusterResourcePlacement instead", resourceOverrideSnapshot1.Name)) - resourceOverrideSnapshot1.Spec.OverrideSpec.Placement.Scope = placementv1beta1.ClusterScoped - Expect(k8sClient.Update(ctx, resourceOverrideSnapshot1)).Should(Succeed(), "Failed to update resource override snapshot") - - // Verify bindings are NOT updated (rollout not triggered) by resourceOverrideSnapshot. - // This is because rollout controller is not triggered by overrideSnapshot update events. - verifyBindingsNotUpdatedWithOverridesConsistently(controller.ConvertCRBArrayToBindingObjs(bindings), nil, nil) - // Create a clusterResourceOverrideSnapshot and verify it triggers rollout. testCROName := "cro" + utils.RandStr() clusterResourceOverrideSnapshot := generateClusterResourceOverrideSnapshot(testCROName, testCRPName) @@ -374,8 +366,7 @@ var _ = Describe("Test the rollout Controller", func() { Expect(k8sClient.Create(ctx, clusterResourceOverrideSnapshot)).Should(Succeed(), "Failed to create cluster resource override snapshot") // Verify bindings are updated, note that both clusterResourceOverrideSnapshot and resourceOverrideSnapshot are set in the bindings. - waitUntilRolloutCompleted(controller.ConvertCRBArrayToBindingObjs(bindings), []string{clusterResourceOverrideSnapshot.Name}, - []placementv1beta1.NamespacedName{{Name: resourceOverrideSnapshot1.Name, Namespace: resourceOverrideSnapshot1.Namespace}}) + waitUntilRolloutCompleted(controller.ConvertCRBArrayToBindingObjs(bindings), []string{clusterResourceOverrideSnapshot.Name}, nil) // Create another resourceOverrideSnapshot referencing the same CRP and verify bindings are updated again. testROName2 := "ro" + utils.RandStr() @@ -386,7 +377,6 @@ var _ = Describe("Test the rollout Controller", func() { // Verify bindings are updated, note that both clusterResourceOverrideSnapshot and resourceOverrideSnapshot are set in the bindings. 
waitUntilRolloutCompleted(controller.ConvertCRBArrayToBindingObjs(bindings), []string{clusterResourceOverrideSnapshot.Name}, []placementv1beta1.NamespacedName{ - {Name: resourceOverrideSnapshot1.Name, Namespace: resourceOverrideSnapshot1.Namespace}, {Name: resourceOverrideSnapshot2.Name, Namespace: resourceOverrideSnapshot2.Namespace}, }, ) diff --git a/test/apis/placement/v1beta1/api_validation_integration_test.go b/test/apis/placement/v1beta1/api_validation_integration_test.go index 0ea1323dc..0569be968 100644 --- a/test/apis/placement/v1beta1/api_validation_integration_test.go +++ b/test/apis/placement/v1beta1/api_validation_integration_test.go @@ -47,6 +47,77 @@ const ( testNamespace = "test-ns" ) +// createValidClusterResourceOverride creates a valid ClusterResourceOverride for testing purposes. +// The placement parameter is optional - pass nil for no placement reference. +func createValidClusterResourceOverride(name string, placement *placementv1beta1.PlacementRef) placementv1beta1.ClusterResourceOverride { + return placementv1beta1.ClusterResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: placementv1beta1.ClusterResourceOverrideSpec{ + Placement: placement, + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + }, + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + OverrideType: placementv1beta1.JSONPatchOverrideType, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/labels/test", + Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, + }, + }, + }, + }, + }, + }, + } +} + +// createValidResourceOverride creates a valid ResourceOverride for testing purposes. +// The placement parameter is optional - pass nil for no placement reference. 
+func createValidResourceOverride(namespace, name string, placement *placementv1beta1.PlacementRef) placementv1beta1.ResourceOverride { + return placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: placement, + ResourceSelectors: []placementv1beta1.ResourceSelector{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + }, + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + OverrideType: placementv1beta1.JSONPatchOverrideType, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/labels/test", + Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, + }, + }, + }, + }, + }, + }, + } +} + var _ = Describe("Test placement v1beta1 API validation", func() { Context("Test ClusterResourcePlacement API validation - invalid cases", func() { var crp placementv1beta1.ClusterResourcePlacement @@ -1036,110 +1107,33 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Context("Test ClusterResourceOverride API validation - valid cases", func() { It("should allow creation of ClusterResourceOverride without placement reference", func() { - cro := placementv1beta1.ClusterResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), - }, - Spec: placementv1beta1.ClusterResourceOverrideSpec{ - ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, - }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: []placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - }, - }, - }, - }, - }, - } + cro := createValidClusterResourceOverride( + fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), + nil, + ) Expect(hubClient.Create(ctx, &cro)).Should(Succeed()) Expect(hubClient.Delete(ctx, &cro)).Should(Succeed()) }) It("should allow creation of ClusterResourceOverride with cluster-scoped placement reference", func() { - cro := placementv1beta1.ClusterResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), - }, - Spec: placementv1beta1.ClusterResourceOverrideSpec{ - Placement: &placementv1beta1.PlacementRef{ - Name: "test-placement", - Scope: placementv1beta1.ClusterScoped, - }, - ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, - }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: []placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - }, - }, - }, - }, + cro := createValidClusterResourceOverride( + fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), + &placementv1beta1.PlacementRef{ + Name: "test-placement", + Scope: placementv1beta1.ClusterScoped, }, - } + ) Expect(hubClient.Create(ctx, &cro)).Should(Succeed()) 
Expect(hubClient.Delete(ctx, &cro)).Should(Succeed()) }) It("should allow creation of ClusterResourceOverride without specifying scope in placement reference", func() { - cro := placementv1beta1.ClusterResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), + cro := createValidClusterResourceOverride( + fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), + &placementv1beta1.PlacementRef{ + Name: "test-placement", }, - Spec: placementv1beta1.ClusterResourceOverrideSpec{ - Placement: &placementv1beta1.PlacementRef{ - Name: "test-placement", - }, - ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, - }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: []placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - }, - }, - }, - }, - }, - } + ) Expect(hubClient.Create(ctx, &cro)).Should(Succeed()) Expect(hubClient.Delete(ctx, &cro)).Should(Succeed()) }) @@ -1147,196 +1141,224 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Context("Test ClusterResourceOverride API validation - invalid cases", func() { It("should deny creation of ClusterResourceOverride with namespaced placement reference", func() { - cro := placementv1beta1.ClusterResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), + cro := createValidClusterResourceOverride( + fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()), + &placementv1beta1.PlacementRef{ + Name: "test-placement", + Scope: placementv1beta1.NamespaceScoped, }, - Spec: placementv1beta1.ClusterResourceOverrideSpec{ - Placement: &placementv1beta1.PlacementRef{ - Name: "test-placement", - Scope: placementv1beta1.NamespaceScoped, - }, - ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, - }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: []placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - }, - }, - }, - }, - }, - } + ) err := hubClient.Create(ctx, &cro) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create ClusterResourceOverride call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("clusterResourceOverride placement reference cannot be Namespaced scope")) }) + + Context("Test ClusterResourceOverride API validation - placement update invalid cases", func() { + var cro placementv1beta1.ClusterResourceOverride + croName := fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()) + + BeforeEach(func() { + cro = createValidClusterResourceOverride( + croName, + &placementv1beta1.PlacementRef{ + Name: "test-placement", + Scope: placementv1beta1.ClusterScoped, + }, + ) + Expect(hubClient.Create(ctx, &cro)).Should(Succeed()) + }) + + AfterEach(func() { + Expect(hubClient.Delete(ctx, &cro)).Should(Succeed()) + }) + + It("should deny update of ClusterResourceOverride placement name", func() { + Expect(hubClient.Get(ctx, types.NamespacedName{Name: croName}, &cro)).Should(Succeed()) + cro.Spec.Placement.Name = "different-placement" + err := hubClient.Update(ctx, &cro) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + }) + + It("should deny update of ClusterResourceOverride placement scope", func() { + Expect(hubClient.Get(ctx, types.NamespacedName{Name: croName}, &cro)).Should(Succeed()) + cro.Spec.Placement.Scope = placementv1beta1.NamespaceScoped + err := hubClient.Update(ctx, &cro) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(ContainSubstring("placement reference cannot be Namespaced scope")) + }) + + It("should deny update of ClusterResourceOverride placement from nil to non-nil", func() { + croWithoutPlacement := createValidClusterResourceOverride( + fmt.Sprintf(croNameTemplate, GinkgoParallelProcess())+"-nil", + nil, + ) + Expect(hubClient.Create(ctx, &croWithoutPlacement)).Should(Succeed()) + + croWithoutPlacement.Spec.Placement = &placementv1beta1.PlacementRef{ + Name: "new-placement", + Scope: placementv1beta1.ClusterScoped, + } + err := hubClient.Update(ctx, &croWithoutPlacement) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + + Expect(hubClient.Delete(ctx, &croWithoutPlacement)).Should(Succeed()) + }) + + It("should deny update of ClusterResourceOverride placement from non-nil to nil", func() { + Expect(hubClient.Get(ctx, types.NamespacedName{Name: croName}, &cro)).Should(Succeed()) + cro.Spec.Placement = nil + err := hubClient.Update(ctx, &cro) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterResourceOverride call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + }) + }) }) Context("Test ResourceOverride API validation - valid cases", func() { It("should allow creation of ResourceOverride without placement reference", func() { - ro := placementv1beta1.ResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: testNamespace, - Name: fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), - }, - Spec: placementv1beta1.ResourceOverrideSpec{ - ResourceSelectors: []placementv1beta1.ResourceSelector{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, - }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: []placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - }, - }, - }, - }, - }, - } + ro := createValidResourceOverride( + testNamespace, + fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), + nil, + ) Expect(hubClient.Create(ctx, &ro)).Should(Succeed()) Expect(hubClient.Delete(ctx, &ro)).Should(Succeed()) }) It("should allow creation of ResourceOverride with cluster-scoped placement reference", func() { - ro := placementv1beta1.ResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: testNamespace, - Name: fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), - }, - Spec: placementv1beta1.ResourceOverrideSpec{ - Placement: &placementv1beta1.PlacementRef{ - Name: "test-placement", - Scope: placementv1beta1.ClusterScoped, - }, - ResourceSelectors: []placementv1beta1.ResourceSelector{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, - }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: []placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - }, - }, - }, - }, - }, - } + ro := createValidResourceOverride( + testNamespace, + fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), + &placementv1beta1.PlacementRef{ + Name: "test-placement", + Scope: placementv1beta1.ClusterScoped, + }, + ) Expect(hubClient.Create(ctx, &ro)).Should(Succeed()) Expect(hubClient.Delete(ctx, &ro)).Should(Succeed()) }) It("should allow creation of ResourceOverride without specifying scope in placement reference", func() { - ro := placementv1beta1.ResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: testNamespace, - Name: fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), - }, - Spec: placementv1beta1.ResourceOverrideSpec{ - Placement: &placementv1beta1.PlacementRef{ - Name: "test-placement", - }, - ResourceSelectors: []placementv1beta1.ResourceSelector{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, - }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: []placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - 
}, - }, - }, - }, + ro := createValidResourceOverride( + testNamespace, + fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), + &placementv1beta1.PlacementRef{ + Name: "test-placement", }, - } + ) Expect(hubClient.Create(ctx, &ro)).Should(Succeed()) Expect(hubClient.Delete(ctx, &ro)).Should(Succeed()) }) It("should allow creation of ResourceOverride with namespace-scoped placement reference", func() { - ro := placementv1beta1.ResourceOverride{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: testNamespace, - Name: fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), - }, - Spec: placementv1beta1.ResourceOverrideSpec{ - Placement: &placementv1beta1.PlacementRef{ + ro := createValidResourceOverride( + testNamespace, + fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()), + &placementv1beta1.PlacementRef{ + Name: "test-placement", + Scope: placementv1beta1.NamespaceScoped, + }, + ) + Expect(hubClient.Create(ctx, &ro)).Should(Succeed()) + Expect(hubClient.Delete(ctx, &ro)).Should(Succeed()) + }) + }) + + Context("Test ResourceOverride API validation - invalid cases", func() { + + Context("Test ResourceOverride API validation - placement update invalid cases", func() { + var ro placementv1beta1.ResourceOverride + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + + BeforeEach(func() { + ro = createValidResourceOverride( + testNamespace, + roName, + &placementv1beta1.PlacementRef{ Name: "test-placement", - Scope: placementv1beta1.NamespaceScoped, - }, - ResourceSelectors: []placementv1beta1.ResourceSelector{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-cm", - }, + Scope: placementv1beta1.ClusterScoped, }, - Policy: &placementv1beta1.OverridePolicy{ - OverrideRules: []placementv1beta1.OverrideRule{ - { - OverrideType: placementv1beta1.JSONPatchOverrideType, - JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ - { - Operator: placementv1beta1.JSONPatchOverrideOpAdd, - Path: "/metadata/labels/test", - Value: apiextensionsv1.JSON{Raw: []byte(`"test-value"`)}, - }, - }, - }, - }, + ) + Expect(hubClient.Create(ctx, &ro)).Should(Succeed()) + }) + + AfterEach(func() { + Expect(hubClient.Delete(ctx, &ro)).Should(Succeed()) + }) + + It("should deny update of ResourceOverride placement name", func() { + Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: testNamespace, Name: roName}, &ro)).Should(Succeed()) + ro.Spec.Placement.Name = "different-placement" + err := hubClient.Update(ctx, &ro) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + }) + + It("should deny update of ResourceOverride placement from nil to non-nil", func() { + roWithoutPlacement := createValidResourceOverride( + testNamespace, + fmt.Sprintf(roNameTemplate, GinkgoParallelProcess())+"-nil", + nil, + ) + Expect(hubClient.Create(ctx, &roWithoutPlacement)).Should(Succeed()) + + roWithoutPlacement.Spec.Placement = &placementv1beta1.PlacementRef{ + Name: "new-placement", + Scope: placementv1beta1.ClusterScoped, + } + err := hubClient.Update(ctx, &roWithoutPlacement) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + + Expect(hubClient.Delete(ctx, &roWithoutPlacement)).Should(Succeed()) + }) + + It("should deny update of ResourceOverride placement from non-nil to nil", func() { + Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: testNamespace, Name: roName}, &ro)).Should(Succeed()) + ro.Spec.Placement = nil + err := hubClient.Update(ctx, &ro) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + }) + + It("should deny update of ResourceOverride placement from cluster-scoped to namespace-scoped", func() { + Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: testNamespace, Name: roName}, &ro)).Should(Succeed()) + ro.Spec.Placement.Scope = placementv1beta1.NamespaceScoped + err := hubClient.Update(ctx, &ro) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + }) + + It("should deny update of ResourceOverride placement from namespace-scoped to cluster-scoped", func() { + roWithNamespaceScope := createValidResourceOverride( + testNamespace, + fmt.Sprintf(roNameTemplate, GinkgoParallelProcess())+"-ns", + &placementv1beta1.PlacementRef{ + Name: "test-placement", + Scope: placementv1beta1.NamespaceScoped, }, - }, - } - Expect(hubClient.Create(ctx, &ro)).Should(Succeed()) - Expect(hubClient.Delete(ctx, &ro)).Should(Succeed()) + ) + Expect(hubClient.Create(ctx, &roWithNamespaceScope)).Should(Succeed()) + + roWithNamespaceScope.Spec.Placement.Scope = placementv1beta1.ClusterScoped + err := hubClient.Update(ctx, &roWithNamespaceScope) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) + + Expect(hubClient.Delete(ctx, &roWithNamespaceScope)).Should(Succeed()) + }) }) }) }) diff --git a/test/apis/v1alpha1/zz_generated.deepcopy.go b/test/apis/v1alpha1/zz_generated.deepcopy.go index 143bdee7b..081bec913 100644 --- a/test/apis/v1alpha1/zz_generated.deepcopy.go +++ b/test/apis/v1alpha1/zz_generated.deepcopy.go @@ -21,7 +21,7 @@ limitations under the License. 
package v1alpha1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/test/e2e/placement_cro_test.go b/test/e2e/placement_cro_test.go index b8b60d18f..8b9822224 100644 --- a/test/e2e/placement_cro_test.go +++ b/test/e2e/placement_cro_test.go @@ -48,6 +48,9 @@ var _ = Context("creating clusterResourceOverride (selecting all clusters) to ov Name: croName, }, Spec: placementv1beta1.ClusterResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: crpName, // assigned CRP name + }, ClusterResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.OverridePolicy{ OverrideRules: []placementv1beta1.OverrideRule{ @@ -101,7 +104,7 @@ var _ = Context("creating clusterResourceOverride (selecting all clusters) to ov checkIfOverrideAnnotationsOnAllMemberClusters(true, want) }) - It("update cro attached to this CRP only and change annotation value", func() { + It("update cro and change annotation value", func() { Eventually(func() error { cro := &placementv1beta1.ClusterResourceOverride{} if err := hubClient.Get(ctx, types.NamespacedName{Name: croName}, cro); err != nil { @@ -148,7 +151,7 @@ var _ = Context("creating clusterResourceOverride (selecting all clusters) to ov checkIfOverrideAnnotationsOnAllMemberClusters(true, want) }) - It("update cro attached to this CRP only and no updates on the namespace", func() { + It("update cro and no updates on the namespace", func() { Eventually(func() error { cro := &placementv1beta1.ClusterResourceOverride{} if err := hubClient.Get(ctx, types.NamespacedName{Name: croName}, cro); err != nil { @@ -276,25 +279,6 @@ var _ = Context("creating clusterResourceOverride with multiple jsonPatchOverrid wantAnnotations := map[string]string{croTestAnnotationKey: croTestAnnotationValue, croTestAnnotationKey1: croTestAnnotationValue1} checkIfOverrideAnnotationsOnAllMemberClusters(true, wantAnnotations) }) - - It("update cro attached to an invalid CRP", func() { - Eventually(func() error { - cro := &placementv1beta1.ClusterResourceOverride{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: croName}, cro); err != nil { - return err - } - cro.Spec.Placement = &placementv1beta1.PlacementRef{ - Name: "invalid-crp", // assigned CRP name - } - return hubClient.Update(ctx, cro) - }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cro as expected", crpName) - }) - - It("CRP status should not be changed", func() { - wantCRONames := []string{fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, croName, 0)} - crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", wantCRONames, nil) - Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "CRP %s status has been changed", crpName) - }) }) var _ = Context("creating clusterResourceOverride with different rules for each cluster", Ordered, func() { diff --git a/test/e2e/placement_ro_test.go b/test/e2e/placement_ro_test.go index f6d2c5ab4..f3775a64d 100644 --- a/test/e2e/placement_ro_test.go +++ b/test/e2e/placement_ro_test.go @@ -109,7 +109,7 @@ var _ = Context("creating resourceOverride (selecting all clusters) to override checkIfOverrideAnnotationsOnAllMemberClusters(false, want) }) - It("update ro attached to this CRP only and change annotation value", func() { + It("update ro and change annotation value", func() { Eventually(func() error { ro := 
&placementv1beta1.ResourceOverride{} if err := hubClient.Get(ctx, types.NamespacedName{Name: roName, Namespace: roNamespace}, ro); err != nil { @@ -157,7 +157,7 @@ var _ = Context("creating resourceOverride (selecting all clusters) to override checkIfOverrideAnnotationsOnAllMemberClusters(false, want) }) - It("update ro attached to this CRP only and no update on the configmap itself", func() { + It("update ro and no update on the configmap itself", func() { Eventually(func() error { ro := &placementv1beta1.ResourceOverride{} if err := hubClient.Get(ctx, types.NamespacedName{Name: roName, Namespace: roNamespace}, ro); err != nil { @@ -273,28 +273,6 @@ var _ = Context("creating resourceOverride with multiple jsonPatchOverrides to o wantAnnotations := map[string]string{roTestAnnotationKey: roTestAnnotationValue, roTestAnnotationKey1: roTestAnnotationValue1} checkIfOverrideAnnotationsOnAllMemberClusters(false, wantAnnotations) }) - - It("update ro attached to an invalid CRP", func() { - Eventually(func() error { - ro := &placementv1beta1.ResourceOverride{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: roName, Namespace: roNamespace}, ro); err != nil { - return err - } - ro.Spec.Placement = &placementv1beta1.PlacementRef{ - Name: "invalid-crp", // assigned CRP name - } - return hubClient.Update(ctx, ro) - }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", crpName) - }) - - It("CRP status should not be changed", func() { - wantRONames := []placementv1beta1.NamespacedName{ - {Namespace: roNamespace, Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0)}, - } - crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) - Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "CRP %s status has been changed", crpName) - }) - }) var _ = Context("creating resourceOverride with different rules for each cluster to override configMap", Ordered, func() { From be79070fa6cfbf016a7297fdde54b7daef3e86fa Mon Sep 17 00:00:00 2001 From: Wantong Date: Wed, 20 Aug 2025 07:31:20 -0700 Subject: [PATCH 20/38] test: fix override api integration test flakiness (#203) Signed-off-by: Wantong Jiang --- .../api_validation_integration_test.go | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/test/apis/placement/v1beta1/api_validation_integration_test.go b/test/apis/placement/v1beta1/api_validation_integration_test.go index 0569be968..776a689ec 100644 --- a/test/apis/placement/v1beta1/api_validation_integration_test.go +++ b/test/apis/placement/v1beta1/api_validation_integration_test.go @@ -1174,18 +1174,18 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of ClusterResourceOverride placement name", func() { - Expect(hubClient.Get(ctx, types.NamespacedName{Name: croName}, &cro)).Should(Succeed()) - cro.Spec.Placement.Name = "different-placement" - err := hubClient.Update(ctx, &cro) + updatedCRO := cro.DeepCopy() + updatedCRO.Spec.Placement.Name = "different-placement" + err := hubClient.Update(ctx, updatedCRO) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterResourceOverride call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) }) It("should deny update of ClusterResourceOverride placement scope", func() { - Expect(hubClient.Get(ctx, types.NamespacedName{Name: croName}, &cro)).Should(Succeed()) - cro.Spec.Placement.Scope = placementv1beta1.NamespaceScoped - err := hubClient.Update(ctx, &cro) + updatedCRO := cro.DeepCopy() + updatedCRO.Spec.Placement.Scope = placementv1beta1.NamespaceScoped + err := hubClient.Update(ctx, updatedCRO) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(ContainSubstring("placement reference cannot be Namespaced scope")) @@ -1211,9 +1211,9 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of ClusterResourceOverride placement from non-nil to nil", func() { - Expect(hubClient.Get(ctx, types.NamespacedName{Name: croName}, &cro)).Should(Succeed()) - cro.Spec.Placement = nil - err := hubClient.Update(ctx, &cro) + updatedCRO := cro.DeepCopy() + updatedCRO.Spec.Placement = nil + err := hubClient.Update(ctx, updatedCRO) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) @@ -1294,9 +1294,9 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of ResourceOverride placement name", func() { - Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: testNamespace, Name: roName}, &ro)).Should(Succeed()) - ro.Spec.Placement.Name = "different-placement" - err := hubClient.Update(ctx, &ro) + updatedRO := ro.DeepCopy() + updatedRO.Spec.Placement.Name = "different-placement" + err := hubClient.Update(ctx, updatedRO) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) @@ -1323,18 +1323,18 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of ResourceOverride placement from non-nil to nil", func() { - Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: testNamespace, Name: roName}, &ro)).Should(Succeed()) - ro.Spec.Placement = nil - err := hubClient.Update(ctx, &ro) + updatedRO := ro.DeepCopy() + updatedRO.Spec.Placement = nil + err := hubClient.Update(ctx, updatedRO) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) }) It("should deny update of ResourceOverride placement from cluster-scoped to namespace-scoped", func() { - Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: testNamespace, Name: roName}, &ro)).Should(Succeed()) - ro.Spec.Placement.Scope = placementv1beta1.NamespaceScoped - err := hubClient.Update(ctx, &ro) + updatedRO := ro.DeepCopy() + updatedRO.Spec.Placement.Scope = placementv1beta1.NamespaceScoped + err := hubClient.Update(ctx, updatedRO) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) From 6cbfa8f5664eae2d62c4992dfa686fc4489736e5 Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes <145056127+britaniar@users.noreply.github.com> Date: Wed, 20 Aug 2025 13:05:38 -0500 Subject: [PATCH 21/38] fix: update scheduler watchers package name (#204) --- cmd/hubagent/workload/setup.go | 18 +++++++++--------- .../controller_integration_test.go | 2 +- .../suite_test.go | 4 ++-- .../watcher.go | 2 +- .../controller_integration_test.go | 2 +- .../suite_test.go | 4 ++-- .../watcher.go | 4 ++-- .../controller_integration_test.go | 2 +- .../suite_test.go | 4 ++-- .../watcher.go | 4 ++-- test/apis/v1alpha1/zz_generated.deepcopy.go | 2 +- test/scheduler/suite_test.go | 18 +++++++++--------- 12 files changed, 33 insertions(+), 33 deletions(-) rename pkg/scheduler/watchers/{clusterresourcebinding => binding}/controller_integration_test.go (99%) rename pkg/scheduler/watchers/{clusterresourcebinding => binding}/suite_test.go (97%) rename pkg/scheduler/watchers/{clusterresourcebinding => binding}/watcher.go (99%) rename pkg/scheduler/watchers/{clusterresourceplacement => placement}/controller_integration_test.go (99%) rename pkg/scheduler/watchers/{clusterresourceplacement => placement}/suite_test.go (97%) rename pkg/scheduler/watchers/{clusterresourceplacement => placement}/watcher.go (97%) rename pkg/scheduler/watchers/{clusterschedulingpolicysnapshot => schedulingpolicysnapshot}/controller_integration_test.go (99%) rename pkg/scheduler/watchers/{clusterschedulingpolicysnapshot => schedulingpolicysnapshot}/suite_test.go (96%) rename pkg/scheduler/watchers/{clusterschedulingpolicysnapshot => schedulingpolicysnapshot}/watcher.go (98%) diff --git a/cmd/hubagent/workload/setup.go b/cmd/hubagent/workload/setup.go index 9049c4523..82394bf7e 100644 --- a/cmd/hubagent/workload/setup.go +++ b/cmd/hubagent/workload/setup.go @@ -53,10 +53,10 @@ import ( "github.com/kubefleet-dev/kubefleet/pkg/scheduler/framework" "github.com/kubefleet-dev/kubefleet/pkg/scheduler/profile" "github.com/kubefleet-dev/kubefleet/pkg/scheduler/queue" - schedulercrbwatcher "github.com/kubefleet-dev/kubefleet/pkg/scheduler/watchers/clusterresourcebinding" - schedulercrpwatcher "github.com/kubefleet-dev/kubefleet/pkg/scheduler/watchers/clusterresourceplacement" - schedulercspswatcher "github.com/kubefleet-dev/kubefleet/pkg/scheduler/watchers/clusterschedulingpolicysnapshot" + schedulerbindingwatcher "github.com/kubefleet-dev/kubefleet/pkg/scheduler/watchers/binding" "github.com/kubefleet-dev/kubefleet/pkg/scheduler/watchers/membercluster" + schedulerplacementwatcher 
"github.com/kubefleet-dev/kubefleet/pkg/scheduler/watchers/placement" + schedulerspswatcher "github.com/kubefleet-dev/kubefleet/pkg/scheduler/watchers/schedulingpolicysnapshot" "github.com/kubefleet-dev/kubefleet/pkg/utils" "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" "github.com/kubefleet-dev/kubefleet/pkg/utils/informer" @@ -371,7 +371,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, // Set up the watchers for the controller klog.Info("Setting up the clusterResourcePlacement watcher for scheduler") - if err := (&schedulercrpwatcher.Reconciler{ + if err := (&schedulerplacementwatcher.Reconciler{ Client: mgr.GetClient(), SchedulerWorkQueue: defaultSchedulingQueue, }).SetupWithManagerForClusterResourcePlacement(mgr); err != nil { @@ -380,7 +380,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } klog.Info("Setting up the clusterSchedulingPolicySnapshot watcher for scheduler") - if err := (&schedulercspswatcher.Reconciler{ + if err := (&schedulerspswatcher.Reconciler{ Client: mgr.GetClient(), SchedulerWorkQueue: defaultSchedulingQueue, }).SetupWithManagerForClusterSchedulingPolicySnapshot(mgr); err != nil { @@ -389,7 +389,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } klog.Info("Setting up the clusterResourceBinding watcher for scheduler") - if err := (&schedulercrbwatcher.Reconciler{ + if err := (&schedulerbindingwatcher.Reconciler{ Client: mgr.GetClient(), SchedulerWorkQueue: defaultSchedulingQueue, }).SetupWithManagerForClusterResourceBinding(mgr); err != nil { @@ -399,7 +399,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, if opts.EnableResourcePlacement { klog.Info("Setting up the resourcePlacement watcher for scheduler") - if err := (&schedulercrpwatcher.Reconciler{ + if err := (&schedulerplacementwatcher.Reconciler{ Client: mgr.GetClient(), SchedulerWorkQueue: defaultSchedulingQueue, }).SetupWithManagerForResourcePlacement(mgr); err != nil { @@ -408,7 +408,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } klog.Info("Setting up the schedulingPolicySnapshot watcher for scheduler") - if err := (&schedulercspswatcher.Reconciler{ + if err := (&schedulerspswatcher.Reconciler{ Client: mgr.GetClient(), SchedulerWorkQueue: defaultSchedulingQueue, }).SetupWithManagerForSchedulingPolicySnapshot(mgr); err != nil { @@ -417,7 +417,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } klog.Info("Setting up the resourceBinding watcher for scheduler") - if err := (&schedulercrbwatcher.Reconciler{ + if err := (&schedulerbindingwatcher.Reconciler{ Client: mgr.GetClient(), SchedulerWorkQueue: defaultSchedulingQueue, }).SetupWithManagerForResourceBinding(mgr); err != nil { diff --git a/pkg/scheduler/watchers/clusterresourcebinding/controller_integration_test.go b/pkg/scheduler/watchers/binding/controller_integration_test.go similarity index 99% rename from pkg/scheduler/watchers/clusterresourcebinding/controller_integration_test.go rename to pkg/scheduler/watchers/binding/controller_integration_test.go index 33463f4ee..bad0d1a99 100644 --- a/pkg/scheduler/watchers/clusterresourcebinding/controller_integration_test.go +++ b/pkg/scheduler/watchers/binding/controller_integration_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package clusterresourcebinding +package binding import ( "fmt" diff --git a/pkg/scheduler/watchers/clusterresourcebinding/suite_test.go b/pkg/scheduler/watchers/binding/suite_test.go similarity index 97% rename from pkg/scheduler/watchers/clusterresourcebinding/suite_test.go rename to pkg/scheduler/watchers/binding/suite_test.go index 5161247b3..d2400ac61 100644 --- a/pkg/scheduler/watchers/clusterresourcebinding/suite_test.go +++ b/pkg/scheduler/watchers/binding/suite_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourcebinding +package binding import ( "context" @@ -49,7 +49,7 @@ var ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Scheduler Source Cluster Resource Binding Controller Suite") + RunSpecs(t, "Scheduler Source Binding Controller Suite") } var _ = BeforeSuite(func() { diff --git a/pkg/scheduler/watchers/clusterresourcebinding/watcher.go b/pkg/scheduler/watchers/binding/watcher.go similarity index 99% rename from pkg/scheduler/watchers/clusterresourcebinding/watcher.go rename to pkg/scheduler/watchers/binding/watcher.go index 54ffa8b40..27d4b588e 100644 --- a/pkg/scheduler/watchers/clusterresourcebinding/watcher.go +++ b/pkg/scheduler/watchers/binding/watcher.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourcebinding +package binding import ( "context" diff --git a/pkg/scheduler/watchers/clusterresourceplacement/controller_integration_test.go b/pkg/scheduler/watchers/placement/controller_integration_test.go similarity index 99% rename from pkg/scheduler/watchers/clusterresourceplacement/controller_integration_test.go rename to pkg/scheduler/watchers/placement/controller_integration_test.go index b5113286d..367f6ec4f 100644 --- a/pkg/scheduler/watchers/clusterresourceplacement/controller_integration_test.go +++ b/pkg/scheduler/watchers/placement/controller_integration_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( "fmt" diff --git a/pkg/scheduler/watchers/clusterresourceplacement/suite_test.go b/pkg/scheduler/watchers/placement/suite_test.go similarity index 97% rename from pkg/scheduler/watchers/clusterresourceplacement/suite_test.go rename to pkg/scheduler/watchers/placement/suite_test.go index 8ca3befeb..cdd898377 100644 --- a/pkg/scheduler/watchers/clusterresourceplacement/suite_test.go +++ b/pkg/scheduler/watchers/placement/suite_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package clusterresourceplacement +package placement import ( "context" @@ -49,7 +49,7 @@ var ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Scheduler Source Cluster Resource Placement Controller Suite") + RunSpecs(t, "Scheduler Source Placement Controller Suite") } var _ = BeforeSuite(func() { diff --git a/pkg/scheduler/watchers/clusterresourceplacement/watcher.go b/pkg/scheduler/watchers/placement/watcher.go similarity index 97% rename from pkg/scheduler/watchers/clusterresourceplacement/watcher.go rename to pkg/scheduler/watchers/placement/watcher.go index b6110fa91..e42631d57 100644 --- a/pkg/scheduler/watchers/clusterresourceplacement/watcher.go +++ b/pkg/scheduler/watchers/placement/watcher.go @@ -14,9 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package clusterresourceplacement features a controller that enqueues placement objects for the +// Package placement features a controller that enqueues placement objects for the // scheduler to process where the placement object is marked for deletion. -package clusterresourceplacement +package placement import ( "context" diff --git a/pkg/scheduler/watchers/clusterschedulingpolicysnapshot/controller_integration_test.go b/pkg/scheduler/watchers/schedulingpolicysnapshot/controller_integration_test.go similarity index 99% rename from pkg/scheduler/watchers/clusterschedulingpolicysnapshot/controller_integration_test.go rename to pkg/scheduler/watchers/schedulingpolicysnapshot/controller_integration_test.go index f0c450d20..5ac1d92b7 100644 --- a/pkg/scheduler/watchers/clusterschedulingpolicysnapshot/controller_integration_test.go +++ b/pkg/scheduler/watchers/schedulingpolicysnapshot/controller_integration_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterschedulingpolicysnapshot +package schedulingpolicysnapshot import ( "fmt" diff --git a/pkg/scheduler/watchers/clusterschedulingpolicysnapshot/suite_test.go b/pkg/scheduler/watchers/schedulingpolicysnapshot/suite_test.go similarity index 96% rename from pkg/scheduler/watchers/clusterschedulingpolicysnapshot/suite_test.go rename to pkg/scheduler/watchers/schedulingpolicysnapshot/suite_test.go index 57e4b555b..4ffbbbc2b 100644 --- a/pkg/scheduler/watchers/clusterschedulingpolicysnapshot/suite_test.go +++ b/pkg/scheduler/watchers/schedulingpolicysnapshot/suite_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package clusterschedulingpolicysnapshot +package schedulingpolicysnapshot import ( "context" @@ -49,7 +49,7 @@ var ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Scheduler Source Cluster Scheduling Policy Snapshot Controller Suite") + RunSpecs(t, "Scheduler Source Scheduling Policy Snapshot Controller Suite") } var _ = BeforeSuite(func() { diff --git a/pkg/scheduler/watchers/clusterschedulingpolicysnapshot/watcher.go b/pkg/scheduler/watchers/schedulingpolicysnapshot/watcher.go similarity index 98% rename from pkg/scheduler/watchers/clusterschedulingpolicysnapshot/watcher.go rename to pkg/scheduler/watchers/schedulingpolicysnapshot/watcher.go index 0645260c1..79d9e5a96 100644 --- a/pkg/scheduler/watchers/clusterschedulingpolicysnapshot/watcher.go +++ b/pkg/scheduler/watchers/schedulingpolicysnapshot/watcher.go @@ -14,9 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package clusterschedulingpolicysnapshot features a controller that enqueues placement objects for the +// Package schedulingpolicysnapshot features a controller that enqueues placement objects for the // scheduler to process where there is a change in their scheduling policy snapshots. -package clusterschedulingpolicysnapshot +package schedulingpolicysnapshot import ( "context" diff --git a/test/apis/v1alpha1/zz_generated.deepcopy.go b/test/apis/v1alpha1/zz_generated.deepcopy.go index 081bec913..143bdee7b 100644 --- a/test/apis/v1alpha1/zz_generated.deepcopy.go +++ b/test/apis/v1alpha1/zz_generated.deepcopy.go @@ -21,7 +21,7 @@ limitations under the License. package v1alpha1 import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/test/scheduler/suite_test.go b/test/scheduler/suite_test.go index c813595a5..144998168 100644 --- a/test/scheduler/suite_test.go +++ b/test/scheduler/suite_test.go @@ -49,10 +49,10 @@ import ( "github.com/kubefleet-dev/kubefleet/pkg/scheduler" "github.com/kubefleet-dev/kubefleet/pkg/scheduler/clustereligibilitychecker" "github.com/kubefleet-dev/kubefleet/pkg/scheduler/queue" - "github.com/kubefleet-dev/kubefleet/pkg/scheduler/watchers/clusterresourcebinding" - "github.com/kubefleet-dev/kubefleet/pkg/scheduler/watchers/clusterresourceplacement" - "github.com/kubefleet-dev/kubefleet/pkg/scheduler/watchers/clusterschedulingpolicysnapshot" + "github.com/kubefleet-dev/kubefleet/pkg/scheduler/watchers/binding" "github.com/kubefleet-dev/kubefleet/pkg/scheduler/watchers/membercluster" + "github.com/kubefleet-dev/kubefleet/pkg/scheduler/watchers/placement" + "github.com/kubefleet-dev/kubefleet/pkg/scheduler/watchers/schedulingpolicysnapshot" ) const ( @@ -583,28 +583,28 @@ func beforeSuiteForProcess1() []byte { ) // Register the watchers. 
- crpReconciler := clusterresourceplacement.Reconciler{ + crpReconciler := placement.Reconciler{ Client: hubClient, SchedulerWorkQueue: schedulerWorkQueue, } err = crpReconciler.SetupWithManagerForClusterResourcePlacement(ctrlMgr) Expect(err).NotTo(HaveOccurred(), "Failed to set up CRP watcher with controller manager") - rpReconciler := clusterresourceplacement.Reconciler{ + rpReconciler := placement.Reconciler{ Client: hubClient, SchedulerWorkQueue: schedulerWorkQueue, } err = rpReconciler.SetupWithManagerForResourcePlacement(ctrlMgr) Expect(err).NotTo(HaveOccurred(), "Failed to set up RP watcher with controller manager") - clusterPolicySnapshotWatcher := clusterschedulingpolicysnapshot.Reconciler{ + clusterPolicySnapshotWatcher := schedulingpolicysnapshot.Reconciler{ Client: hubClient, SchedulerWorkQueue: schedulerWorkQueue, } err = clusterPolicySnapshotWatcher.SetupWithManagerForClusterSchedulingPolicySnapshot(ctrlMgr) Expect(err).NotTo(HaveOccurred(), "Failed to set up cluster policy snapshot watcher with controller manager") - policySnapshotWatcher := clusterschedulingpolicysnapshot.Reconciler{ + policySnapshotWatcher := schedulingpolicysnapshot.Reconciler{ Client: hubClient, SchedulerWorkQueue: schedulerWorkQueue, } @@ -620,14 +620,14 @@ func beforeSuiteForProcess1() []byte { err = memberClusterWatcher.SetupWithManager(ctrlMgr) Expect(err).NotTo(HaveOccurred(), "Failed to set up member cluster watcher with controller manager") - clusterResourceBindingWatcher := clusterresourcebinding.Reconciler{ + clusterResourceBindingWatcher := binding.Reconciler{ Client: hubClient, SchedulerWorkQueue: schedulerWorkQueue, } err = clusterResourceBindingWatcher.SetupWithManagerForClusterResourceBinding(ctrlMgr) Expect(err).NotTo(HaveOccurred(), "Failed to set up cluster resource binding watcher with controller manager") - resourceBindingWatcher := clusterresourcebinding.Reconciler{ + resourceBindingWatcher := binding.Reconciler{ Client: hubClient, SchedulerWorkQueue: schedulerWorkQueue, } From f85e0eb7fb2f4b56f5095412acfd4d743b8d1c15 Mon Sep 17 00:00:00 2001 From: Wantong Date: Thu, 21 Aug 2025 05:58:42 -0700 Subject: [PATCH 22/38] test: enable pickN e2e tests for RP (#205) Signed-off-by: Wantong Jiang --- test/e2e/actuals_test.go | 41 +- test/e2e/resource_placement_pickn_test.go | 889 ++++++++++++++++++++++ test/e2e/utils_test.go | 10 + 3 files changed, 931 insertions(+), 9 deletions(-) create mode 100644 test/e2e/resource_placement_pickn_test.go diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index b5ad36c8a..23aed8944 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -193,6 +193,14 @@ func placementRolloutCompletedConditions(placementKey types.NamespacedName, gene } } +func placementScheduledConditions(placementKey types.NamespacedName, generation int64) []metav1.Condition { + if placementKey.Namespace == "" { + return crpScheduledConditions(generation) + } else { + return rpScheduledConditions(generation) + } +} + func placementSchedulePartiallyFailedConditions(placementKey types.NamespacedName, generation int64) []metav1.Condition { if placementKey.Namespace == "" { return crpSchedulePartiallyFailedConditions(generation) @@ -306,6 +314,28 @@ func rpScheduleFailedConditions(generation int64) []metav1.Condition { } } +func rpScheduledConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: generation, + 
Reason: scheduler.FullyScheduledReason, + }, + } +} + +func crpScheduledConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: generation, + Reason: scheduler.FullyScheduledReason, + }, + } +} + func crpScheduleFailedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { @@ -1214,15 +1244,8 @@ func customizedPlacementStatusUpdatedActual( if len(wantSelectedClusters) > 0 { wantPlacementConditions = placementRolloutCompletedConditions(placementKey, placement.GetGeneration(), false) } else { - wantPlacementConditions = []metav1.Condition{ - // we don't set the remaining resource conditions. - { - Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), - Status: metav1.ConditionTrue, - Reason: scheduler.FullyScheduledReason, - ObservedGeneration: placement.GetGeneration(), - }, - } + // We don't set the remaining resource conditions. + wantPlacementConditions = placementScheduledConditions(placementKey, placement.GetGeneration()) } if len(wantUnselectedClusters) > 0 { diff --git a/test/e2e/resource_placement_pickn_test.go b/test/e2e/resource_placement_pickn_test.go new file mode 100644 index 000000000..a7732bfbd --- /dev/null +++ b/test/e2e/resource_placement_pickn_test.go @@ -0,0 +1,889 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + + placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/propertyprovider" + "github.com/kubefleet-dev/kubefleet/pkg/propertyprovider/azure" + "github.com/kubefleet-dev/kubefleet/test/e2e/framework" +) + +var _ = Describe("placing namespaced scoped resources using a RP with PickN policy", func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Name: rpName, Namespace: appNamespace().Name} + + BeforeEach(OncePerOrdered, func() { + // Create the resources. + createWorkResources() + + // Create the CRP with Namespace-only selector. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: namespaceOnlySelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterEach(OncePerOrdered, func() { + ensureRPAndRelatedResourcesDeleted(rpKey, allMemberClusters) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Context("picking N clusters with no affinities/topology spread constraints (pick by cluster names in alphanumeric order)", Ordered, func() { + It("should create rp with pickN policy successfully", func() { + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(1)), + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster3WestProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster3WestProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + }) + }) + + Context("upscaling", Ordered, func() { + It("should create rp with pickN policy for upscaling test", func() { + // Create the RP in the same namespace selecting namespaced resources. 
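All of the RP specs in this file share the same policy and rollout shape; the contexts differ only in the value of NumberOfClusters and the optional affinity/topology spread fields. A minimal sketch of that shared shape, assuming the module's placementv1beta1 API is importable as shown in the test imports above (the helper names pickNPolicy and fastRollout are illustrative only, not part of the suite):

package main

import (
	"fmt"

	"k8s.io/utils/ptr"

	placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1"
)

// pickNPolicy builds the PickN policy shape reused by these specs; only the
// target count differs between contexts. (Illustrative helper, not part of the suite.)
func pickNPolicy(n int32) *placementv1beta1.PlacementPolicy {
	return &placementv1beta1.PlacementPolicy{
		PlacementType:    placementv1beta1.PickNPlacementType,
		NumberOfClusters: ptr.To(n),
	}
}

// fastRollout mirrors the rolling-update strategy attached to every RP in this
// file so that rollouts settle quickly in the e2e environment.
func fastRollout() placementv1beta1.RolloutStrategy {
	return placementv1beta1.RolloutStrategy{
		Type: placementv1beta1.RollingUpdateRolloutStrategyType,
		RollingUpdate: &placementv1beta1.RollingUpdateConfig{
			UnavailablePeriodSeconds: ptr.To(2),
		},
	}
}

func main() {
	fmt.Println(*pickNPolicy(2).NumberOfClusters, fastRollout().Type)
}

In the upscaling and downscaling contexts that follow, the only change between steps is the count passed for NumberOfClusters on the fetched RP before calling Update.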
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(1)), + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should place resources on the picked clusters", func() { + // Verify that resources have been placed on the picked clusters. + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster3WestProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + }) + + It("can upscale", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return err + } + + rp.Spec.Policy.NumberOfClusters = ptr.To(int32(2)) + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to upscale") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster3WestProdName, memberCluster2EastCanaryName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the newly picked clusters", func() { + targetClusters := []*framework.Cluster{memberCluster3WestProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("downscaling", Ordered, func() { + It("should create rp with pickN policy for downscaling test", func() { + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should place resources on the picked clusters", func() { + targetClusters := []*framework.Cluster{memberCluster3WestProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + + It("can downscale", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return err + } + + rp.Spec.Policy.NumberOfClusters = ptr.To(int32(1)) + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to downscale") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster3WestProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the newly picked clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster3WestProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + }) + + It("should remove resources from the downscaled clusters", func() { + checkIfRemovedConfigMapFromMemberClusters([]*framework.Cluster{memberCluster2EastCanary}) + }) + }) + + Context("picking N clusters with affinities and topology spread constraints", Ordered, func() { + It("should create rp with pickN policy and constraints successfully", func() { + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + // Note that due to limitations in the E2E environment, specifically the limited + // number of clusters available, the affinity and topology spread constraints + // specified here are validated only on a very superficial level, i.e., the flow + // functions. For further evaluations, specifically the correctness check + // of the affinity and topology spread constraint logic, see the scheduler + // integration tests. 
+ Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + }, + }, + }, + }, + }, + }, + TopologySpreadConstraints: []placementv1beta1.TopologySpreadConstraint{ + { + MaxSkew: ptr.To(int32(1)), + TopologyKey: envLabelName, + WhenUnsatisfiable: placementv1beta1.DoNotSchedule, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName, memberCluster2EastCanaryName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + targetClusters := []*framework.Cluster{memberCluster1EastProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("affinities and topology spread constraints updated", Ordered, func() { + It("should create rp with initial constraints", func() { + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + }, + }, + }, + }, + }, + }, + TopologySpreadConstraints: []placementv1beta1.TopologySpreadConstraint{ + { + MaxSkew: ptr.To(int32(1)), + TopologyKey: envLabelName, + WhenUnsatisfiable: placementv1beta1.DoNotSchedule, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should place resources on the picked clusters", func() { + targetClusters := []*framework.Cluster{memberCluster1EastProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + + It("can update the RP", func() { + // Specify new affinity and topology spread constraints. 
+ Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return err + } + + rp.Spec.Policy.Affinity = &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []placementv1beta1.PreferredClusterSelector{ + { + Weight: 20, + Preference: placementv1beta1.ClusterSelectorTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabelName: envProd, + }, + }, + }, + }, + }, + }, + } + rp.Spec.Policy.TopologySpreadConstraints = []placementv1beta1.TopologySpreadConstraint{ + { + MaxSkew: ptr.To(int32(1)), + TopologyKey: regionLabelName, + WhenUnsatisfiable: placementv1beta1.ScheduleAnyway, + }, + } + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP with new affinity and topology spread constraints") + }) + + // topology spread constraints takes a bit longer to be applied + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName, memberCluster3WestProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the newly picked clusters", func() { + targetClusters := []*framework.Cluster{memberCluster1EastProd, memberCluster3WestProd} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + + It("should remove resources from the unpicked clusters", func() { + checkIfRemovedConfigMapFromMemberClusters([]*framework.Cluster{memberCluster2EastCanary}) + }) + }) + + Context("not enough clusters to pick", Ordered, func() { + It("should create rp with pickN policy requesting more clusters than available", func() { + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + // This spec uses an RP of the PickN placement type with the number of + // target clusters equal to that of all clusters present in the environment. + // + // This is necessary as the RP controller reports status for unselected clusters + // only in a partial manner; specifically, for an RP of the PickN placement with + // N target clusters but only M matching clusters, only N - M decisions for + // unselected clusters will be reported in the RP status. To avoid + // undeterministic behaviors, here this value is set to make sure that all + // unselected clusters will be included in the status. 
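To make the arithmetic in the preceding comment concrete for this environment (five member clusters in total, two of which satisfy the east-region requirement): with N = 5 and M = 2, the status lists the two matching clusters as selected and N - M = 3 decisions for unselected clusters, which is exactly enough to cover every remaining cluster. A throwaway sketch of that bookkeeping (the cluster counts are the assumed e2e fixture, not derived from the controller):

package main

import "fmt"

func main() {
	// Assumed e2e fixture: five member clusters, two in the east region.
	totalClusters := 5
	matchingClusters := 2 // M: clusters that satisfy the required affinity.

	// N is deliberately set to the total cluster count in the spec below.
	targetClusters := totalClusters // N

	// Per the comment above, the RP status reports N - M decisions for
	// unselected clusters; choosing N = total makes that cover every
	// non-matching cluster, keeping the expected status deterministic.
	unselectedReported := targetClusters - matchingClusters
	fmt.Printf("selected: %d, unselected decisions reported: %d (out of %d non-matching)\n",
		matchingClusters, unselectedReported, totalClusters-matchingClusters)
}

By the comment's rule, a fixture with one more non-matching cluster (and N left unchanged) would report one fewer unselected decision than there are non-matching clusters, and which cluster is omitted would be unspecified; that is the nondeterminism the chosen value of N avoids.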
+ NumberOfClusters: ptr.To(int32(5)), + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName, memberCluster2EastCanaryName}, []string{memberCluster3WestProdName, memberCluster4UnhealthyName, memberCluster5LeftName}, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + targetClusters := []*framework.Cluster{memberCluster1EastProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("downscaling to zero", Ordered, func() { + It("should create rp with pickN policy for downscaling to zero test", func() { + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should place resources on the picked clusters", func() { + targetClusters := []*framework.Cluster{memberCluster3WestProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + + It("can downscale", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return err + } + + rp.Spec.Policy.NumberOfClusters = ptr.To(int32(0)) + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to downscale") + }) + + It("should remove resources from the downscaled clusters", func() { + downscaledClusters := []*framework.Cluster{memberCluster3WestProd, memberCluster2EastCanary} + checkIfRemovedConfigMapFromMemberClusters(downscaledClusters) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), nil, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + }) + + Context("picking N clusters with single property sorter", Ordered, func() { + It("should create rp with pickN policy and single property sorter", func() { + // Have to add this check in each It() spec, instead of using BeforeAll(). + // Otherwise, the AfterEach() would be skipped too and the namespace does not get cleaned up. + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + // Note that due to limitations in the E2E environment, specifically the limited + // number of clusters available, the affinity and topology spread constraints + // specified here are validated only on a very superficial level, i.e., the flow + // functions. For further evaluations, specifically the correctness check + // of the affinity and topology spread constraint logic, see the scheduler + // integration tests. 
+ Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []placementv1beta1.PreferredClusterSelector{ + { + Weight: 20, + Preference: placementv1beta1.ClusterSelectorTerm{ + PropertySorter: &placementv1beta1.PropertySorter{ + Name: propertyprovider.NodeCountProperty, + SortOrder: placementv1beta1.Ascending, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName, memberCluster2EastCanaryName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + targetClusters := []*framework.Cluster{memberCluster1EastProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("picking N clusters with multiple property sorters", Ordered, func() { + It("should create rp with pickN policy and multiple property sorters", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + // Note that due to limitations in the E2E environment, specifically the limited + // number of clusters available, the affinity and topology spread constraints + // specified here are validated only on a very superficial level, i.e., the flow + // functions. For further evaluations, specifically the correctness check + // of the affinity and topology spread constraint logic, see the scheduler + // integration tests. 
+ Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []placementv1beta1.PreferredClusterSelector{ + { + Weight: 20, + Preference: placementv1beta1.ClusterSelectorTerm{ + PropertySorter: &placementv1beta1.PropertySorter{ + Name: propertyprovider.NodeCountProperty, + SortOrder: placementv1beta1.Ascending, + }, + }, + }, + { + Weight: 20, + Preference: placementv1beta1.ClusterSelectorTerm{ + PropertySorter: &placementv1beta1.PropertySorter{ + Name: propertyprovider.AvailableMemoryCapacityProperty, + SortOrder: placementv1beta1.Descending, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster3WestProdName, memberCluster2EastCanaryName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + targetClusters := []*framework.Cluster{memberCluster3WestProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("picking N clusters with label selector and property sorter", Ordered, func() { + It("should create rp with pickN policy, label selector and property sorter", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + // Note that due to limitations in the E2E environment, specifically the limited + // number of clusters available, the affinity and topology spread constraints + // specified here are validated only on a very superficial level, i.e., the flow + // functions. For further evaluations, specifically the correctness check + // of the affinity and topology spread constraint logic, see the scheduler + // integration tests. 
+ Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []placementv1beta1.PreferredClusterSelector{ + { + Weight: 20, + Preference: placementv1beta1.ClusterSelectorTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + }, + PropertySorter: &placementv1beta1.PropertySorter{ + Name: propertyprovider.NodeCountProperty, + SortOrder: placementv1beta1.Ascending, + }, + }, + }, + { + Weight: 20, + Preference: placementv1beta1.ClusterSelectorTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabelName: envCanary, + }, + }, + PropertySorter: &placementv1beta1.PropertySorter{ + Name: propertyprovider.AvailableMemoryCapacityProperty, + SortOrder: placementv1beta1.Descending, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName, memberCluster1EastProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + targetClusters := []*framework.Cluster{memberCluster2EastCanary, memberCluster1EastProd} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("picking N clusters with required and preferred affinity terms", Ordered, func() { + It("should create rp with pickN policy, required and preferred affinity terms", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(1)), + // Note that due to limitations in the E2E environment, specifically the limited + // number of clusters available, the affinity and topology spread constraints + // specified here are validated only on a very superficial level, i.e., the flow + // functions. For further evaluations, specifically the correctness check + // of the affinity and topology spread constraint logic, see the scheduler + // integration tests. 
+ Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + envLabelName: envProd, + }, + }, + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: azure.PerCPUCoreCostProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "0", + }, + }, + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorNotEqualTo, + Values: []string{ + "3", + }, + }, + }, + }, + }, + }, + }, + PreferredDuringSchedulingIgnoredDuringExecution: []placementv1beta1.PreferredClusterSelector{ + { + Weight: 30, + Preference: placementv1beta1.ClusterSelectorTerm{ + PropertySorter: &placementv1beta1.PropertySorter{ + Name: propertyprovider.NodeCountProperty, + SortOrder: placementv1beta1.Ascending, + }, + }, + }, + { + Weight: 40, + Preference: placementv1beta1.ClusterSelectorTerm{ + PropertySorter: &placementv1beta1.PropertySorter{ + Name: propertyprovider.AvailableMemoryCapacityProperty, + SortOrder: placementv1beta1.Descending, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster3WestProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + targetClusters := []*framework.Cluster{memberCluster3WestProd} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) +}) diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index 3633e11b1..e3e926aa2 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -802,6 +802,16 @@ func checkIfPlacedNamespaceResourceOnAllMemberClusters() { } } +// checkIfRemovedConfigMapFromMemberCluster verifies that the ConfigMap has been removed from the specified member cluster. 
+func checkIfRemovedConfigMapFromMemberClusters(clusters []*framework.Cluster) { + for idx := range clusters { + memberCluster := clusters[idx] + + configMapRemovedActual := namespacedResourcesRemovedFromClusterActual(memberCluster) + Eventually(configMapRemovedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove config map from member cluster %s", memberCluster.ClusterName) + } +} + func checkIfRemovedWorkResourcesFromAllMemberClusters() { checkIfRemovedWorkResourcesFromMemberClusters(allMemberClusters) } From 24aee3272af3e647caa740a1c71e459f96b168e5 Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes <145056127+britaniar@users.noreply.github.com> Date: Thu, 21 Aug 2025 09:21:01 -0500 Subject: [PATCH 23/38] fix: Update controller package names (#206) --- cmd/hubagent/workload/setup.go | 33 ++++++++++--------- .../suite_test.go | 4 +-- .../watcher.go | 4 +-- .../watcher_integration_test.go | 2 +- .../cluster_selector.go | 2 +- .../cluster_selector_test.go | 2 +- .../controller.go | 4 +-- .../controller_integration_test.go | 2 +- .../controller_test.go | 2 +- .../placement_controllerv1alpha1.go | 2 +- .../placement_status.go | 2 +- .../placement_status_test.go | 2 +- .../resource_selector.go | 2 +- .../resource_selector_test.go | 2 +- .../suite_test.go | 16 ++++----- .../work_propagation.go | 2 +- .../suite_test.go | 4 +-- .../watcher.go | 4 +-- .../watcher_integration_test.go | 2 +- .../controller.go | 4 +-- .../controller_integration_test.go | 2 +- .../suite_test.go | 4 +-- 22 files changed, 52 insertions(+), 51 deletions(-) rename pkg/controllers/{clusterresourcebindingwatcher => bindingwatcher}/suite_test.go (97%) rename pkg/controllers/{clusterresourcebindingwatcher => bindingwatcher}/watcher.go (97%) rename pkg/controllers/{clusterresourcebindingwatcher => bindingwatcher}/watcher_integration_test.go (99%) rename pkg/controllers/{clusterresourceplacement => placement}/cluster_selector.go (99%) rename pkg/controllers/{clusterresourceplacement => placement}/cluster_selector_test.go (99%) rename pkg/controllers/{clusterresourceplacement => placement}/controller.go (99%) rename pkg/controllers/{clusterresourceplacement => placement}/controller_integration_test.go (99%) rename pkg/controllers/{clusterresourceplacement => placement}/controller_test.go (99%) rename pkg/controllers/{clusterresourceplacement => placement}/placement_controllerv1alpha1.go (99%) rename pkg/controllers/{clusterresourceplacement => placement}/placement_status.go (99%) rename pkg/controllers/{clusterresourceplacement => placement}/placement_status_test.go (99%) rename pkg/controllers/{clusterresourceplacement => placement}/resource_selector.go (99%) rename pkg/controllers/{clusterresourceplacement => placement}/resource_selector_test.go (99%) rename pkg/controllers/{clusterresourceplacement => placement}/suite_test.go (91%) rename pkg/controllers/{clusterresourceplacement => placement}/work_propagation.go (99%) rename pkg/controllers/{clusterresourceplacementwatcher => placementwatcher}/suite_test.go (97%) rename pkg/controllers/{clusterresourceplacementwatcher => placementwatcher}/watcher.go (93%) rename pkg/controllers/{clusterresourceplacementwatcher => placementwatcher}/watcher_integration_test.go (99%) rename pkg/controllers/{clusterschedulingpolicysnapshot => schedulingpolicysnapshot}/controller.go (96%) rename pkg/controllers/{clusterschedulingpolicysnapshot => schedulingpolicysnapshot}/controller_integration_test.go (99%) rename 
pkg/controllers/{clusterschedulingpolicysnapshot => schedulingpolicysnapshot}/suite_test.go (97%) diff --git a/cmd/hubagent/workload/setup.go b/cmd/hubagent/workload/setup.go index 82394bf7e..6c722c7ec 100644 --- a/cmd/hubagent/workload/setup.go +++ b/cmd/hubagent/workload/setup.go @@ -35,16 +35,16 @@ import ( placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" fleetv1alpha1 "github.com/kubefleet-dev/kubefleet/apis/v1alpha1" "github.com/kubefleet-dev/kubefleet/cmd/hubagent/options" + "github.com/kubefleet-dev/kubefleet/pkg/controllers/bindingwatcher" "github.com/kubefleet-dev/kubefleet/pkg/controllers/clusterinventory/clusterprofile" - "github.com/kubefleet-dev/kubefleet/pkg/controllers/clusterresourcebindingwatcher" - "github.com/kubefleet-dev/kubefleet/pkg/controllers/clusterresourceplacement" "github.com/kubefleet-dev/kubefleet/pkg/controllers/clusterresourceplacementeviction" - "github.com/kubefleet-dev/kubefleet/pkg/controllers/clusterresourceplacementwatcher" - "github.com/kubefleet-dev/kubefleet/pkg/controllers/clusterschedulingpolicysnapshot" "github.com/kubefleet-dev/kubefleet/pkg/controllers/memberclusterplacement" "github.com/kubefleet-dev/kubefleet/pkg/controllers/overrider" + "github.com/kubefleet-dev/kubefleet/pkg/controllers/placement" + "github.com/kubefleet-dev/kubefleet/pkg/controllers/placementwatcher" "github.com/kubefleet-dev/kubefleet/pkg/controllers/resourcechange" "github.com/kubefleet-dev/kubefleet/pkg/controllers/rollout" + "github.com/kubefleet-dev/kubefleet/pkg/controllers/schedulingpolicysnapshot" "github.com/kubefleet-dev/kubefleet/pkg/controllers/updaterun" "github.com/kubefleet-dev/kubefleet/pkg/controllers/workgenerator" "github.com/kubefleet-dev/kubefleet/pkg/resourcewatcher" @@ -68,6 +68,7 @@ const ( crpControllerV1Alpha1Name = crpControllerName + "-v1alpha1" crpControllerV1Beta1Name = crpControllerName + "-v1beta1" rpControllerName = "resource-placement-controller" + placementControllerName = "placement-controller" resourceChangeControllerName = "resource-change-controller" mcPlacementControllerName = "memberCluster-placement-controller" @@ -159,10 +160,10 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, validator.ResourceInformer = dynamicInformerManager // webhook needs this to check resource scope validator.RestMapper = mgr.GetRESTMapper() // webhook needs this to validate GVK of resource selector - // Set up a custom controller to reconcile cluster resource placement - crpc := &clusterresourceplacement.Reconciler{ + // Set up a custom controller to reconcile placement objects + pc := &placement.Reconciler{ Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor(crpControllerName), + Recorder: mgr.GetEventRecorderFor(placementControllerName), RestMapper: mgr.GetRESTMapper(), InformerManager: dynamicInformerManager, ResourceConfig: resourceConfig, @@ -186,7 +187,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } } klog.Info("Setting up clusterResourcePlacement v1alpha1 controller") - clusterResourcePlacementControllerV1Alpha1 = controller.NewController(crpControllerV1Alpha1Name, controller.NamespaceKeyFunc, crpc.ReconcileV1Alpha1, rateLimiter) + clusterResourcePlacementControllerV1Alpha1 = controller.NewController(crpControllerV1Alpha1Name, controller.NamespaceKeyFunc, pc.ReconcileV1Alpha1, rateLimiter) klog.Info("Setting up member cluster change controller") mcp := &memberclusterplacement.Reconciler{ InformerManager: dynamicInformerManager, @@ -203,9 
+204,9 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } } klog.Info("Setting up clusterResourcePlacement v1beta1 controller") - clusterResourcePlacementControllerV1Beta1 = controller.NewController(crpControllerV1Beta1Name, controller.NamespaceKeyFunc, crpc.Reconcile, rateLimiter) + clusterResourcePlacementControllerV1Beta1 = controller.NewController(crpControllerV1Beta1Name, controller.NamespaceKeyFunc, pc.Reconcile, rateLimiter) klog.Info("Setting up clusterResourcePlacement watcher") - if err := (&clusterresourceplacementwatcher.Reconciler{ + if err := (&placementwatcher.Reconciler{ PlacementController: clusterResourcePlacementControllerV1Beta1, }).SetupWithManagerForClusterResourcePlacement(mgr); err != nil { klog.ErrorS(err, "Unable to set up the clusterResourcePlacement watcher") @@ -213,7 +214,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } klog.Info("Setting up clusterResourceBinding watcher") - if err := (&clusterresourcebindingwatcher.Reconciler{ + if err := (&bindingwatcher.Reconciler{ PlacementController: clusterResourcePlacementControllerV1Beta1, Client: mgr.GetClient(), }).SetupWithManagerForClusterResourceBinding(mgr); err != nil { @@ -222,7 +223,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } klog.Info("Setting up clusterSchedulingPolicySnapshot watcher") - if err := (&clusterschedulingpolicysnapshot.Reconciler{ + if err := (&schedulingpolicysnapshot.Reconciler{ Client: mgr.GetClient(), PlacementController: clusterResourcePlacementControllerV1Beta1, }).SetupWithManagerForClusterSchedulingPolicySnapshot(mgr); err != nil { @@ -238,9 +239,9 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } } klog.Info("Setting up resourcePlacement controller") - resourcePlacementController = controller.NewController(rpControllerName, controller.NamespaceKeyFunc, crpc.Reconcile, rateLimiter) + resourcePlacementController = controller.NewController(rpControllerName, controller.NamespaceKeyFunc, pc.Reconcile, rateLimiter) klog.Info("Setting up resourcePlacement watcher") - if err := (&clusterresourceplacementwatcher.Reconciler{ + if err := (&placementwatcher.Reconciler{ PlacementController: resourcePlacementController, }).SetupWithManagerForResourcePlacement(mgr); err != nil { klog.ErrorS(err, "Unable to set up the resourcePlacement watcher") @@ -248,7 +249,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } klog.Info("Setting up resourceBinding watcher") - if err := (&clusterresourcebindingwatcher.Reconciler{ + if err := (&bindingwatcher.Reconciler{ PlacementController: resourcePlacementController, Client: mgr.GetClient(), }).SetupWithManagerForResourceBinding(mgr); err != nil { @@ -257,7 +258,7 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, } klog.Info("Setting up schedulingPolicySnapshot watcher") - if err := (&clusterschedulingpolicysnapshot.Reconciler{ + if err := (&schedulingpolicysnapshot.Reconciler{ Client: mgr.GetClient(), PlacementController: resourcePlacementController, }).SetupWithManagerForSchedulingPolicySnapshot(mgr); err != nil { diff --git a/pkg/controllers/clusterresourcebindingwatcher/suite_test.go b/pkg/controllers/bindingwatcher/suite_test.go similarity index 97% rename from pkg/controllers/clusterresourcebindingwatcher/suite_test.go rename to pkg/controllers/bindingwatcher/suite_test.go index b1f38c7a4..592f1cccf 100644 --- 
a/pkg/controllers/clusterresourcebindingwatcher/suite_test.go +++ b/pkg/controllers/bindingwatcher/suite_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourcebindingwatcher +package bindingwatcher import ( "context" @@ -52,7 +52,7 @@ var ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "ClusterResourceBinding Watcher Suite") + RunSpecs(t, "Binding Watcher Suite") } var _ = BeforeSuite(func() { diff --git a/pkg/controllers/clusterresourcebindingwatcher/watcher.go b/pkg/controllers/bindingwatcher/watcher.go similarity index 97% rename from pkg/controllers/clusterresourcebindingwatcher/watcher.go rename to pkg/controllers/bindingwatcher/watcher.go index 32f848bf5..bc2486275 100644 --- a/pkg/controllers/clusterresourcebindingwatcher/watcher.go +++ b/pkg/controllers/bindingwatcher/watcher.go @@ -14,8 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package clusterresourcebindingwatcher features a controller to watch the clusterResourceBinding and resourceBinding changes. -package clusterresourcebindingwatcher +// Package bindingwatcher features a controller to watch the clusterResourceBinding and resourceBinding changes. +package bindingwatcher import ( "context" diff --git a/pkg/controllers/clusterresourcebindingwatcher/watcher_integration_test.go b/pkg/controllers/bindingwatcher/watcher_integration_test.go similarity index 99% rename from pkg/controllers/clusterresourcebindingwatcher/watcher_integration_test.go rename to pkg/controllers/bindingwatcher/watcher_integration_test.go index f24db8198..db169ce42 100644 --- a/pkg/controllers/clusterresourcebindingwatcher/watcher_integration_test.go +++ b/pkg/controllers/bindingwatcher/watcher_integration_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourcebindingwatcher +package bindingwatcher import ( "fmt" diff --git a/pkg/controllers/clusterresourceplacement/cluster_selector.go b/pkg/controllers/placement/cluster_selector.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/cluster_selector.go rename to pkg/controllers/placement/cluster_selector.go index 453bf86ab..c42e47e8d 100644 --- a/pkg/controllers/clusterresourceplacement/cluster_selector.go +++ b/pkg/controllers/placement/cluster_selector.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package clusterresourceplacement +package placement import ( "fmt" diff --git a/pkg/controllers/clusterresourceplacement/cluster_selector_test.go b/pkg/controllers/placement/cluster_selector_test.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/cluster_selector_test.go rename to pkg/controllers/placement/cluster_selector_test.go index 6c17890e4..ce62dc72d 100644 --- a/pkg/controllers/clusterresourceplacement/cluster_selector_test.go +++ b/pkg/controllers/placement/cluster_selector_test.go @@ -1,4 +1,4 @@ -package clusterresourceplacement +package placement import ( "testing" diff --git a/pkg/controllers/clusterresourceplacement/controller.go b/pkg/controllers/placement/controller.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/controller.go rename to pkg/controllers/placement/controller.go index 63729682c..7f31fdb7c 100644 --- a/pkg/controllers/clusterresourceplacement/controller.go +++ b/pkg/controllers/placement/controller.go @@ -14,8 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package clusterresourceplacement features a controller to reconcile the clusterResourcePlacement changes. -package clusterresourceplacement +// Package placement features a controller to reconcile the clusterResourcePlacement or resourcePlacement changes. +package placement import ( "context" diff --git a/pkg/controllers/clusterresourceplacement/controller_integration_test.go b/pkg/controllers/placement/controller_integration_test.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/controller_integration_test.go rename to pkg/controllers/placement/controller_integration_test.go index 58993421d..b96fc80ae 100644 --- a/pkg/controllers/clusterresourceplacement/controller_integration_test.go +++ b/pkg/controllers/placement/controller_integration_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( "fmt" diff --git a/pkg/controllers/clusterresourceplacement/controller_test.go b/pkg/controllers/placement/controller_test.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/controller_test.go rename to pkg/controllers/placement/controller_test.go index f86ff540a..3bbfef28d 100644 --- a/pkg/controllers/clusterresourceplacement/controller_test.go +++ b/pkg/controllers/placement/controller_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( "context" diff --git a/pkg/controllers/clusterresourceplacement/placement_controllerv1alpha1.go b/pkg/controllers/placement/placement_controllerv1alpha1.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/placement_controllerv1alpha1.go rename to pkg/controllers/placement/placement_controllerv1alpha1.go index 1c48fe9a3..17cfc1c6b 100644 --- a/pkg/controllers/clusterresourceplacement/placement_controllerv1alpha1.go +++ b/pkg/controllers/placement/placement_controllerv1alpha1.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package clusterresourceplacement +package placement import ( "context" diff --git a/pkg/controllers/clusterresourceplacement/placement_status.go b/pkg/controllers/placement/placement_status.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/placement_status.go rename to pkg/controllers/placement/placement_status.go index 95c89abbc..c2ea5642a 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status.go +++ b/pkg/controllers/placement/placement_status.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( "context" diff --git a/pkg/controllers/clusterresourceplacement/placement_status_test.go b/pkg/controllers/placement/placement_status_test.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/placement_status_test.go rename to pkg/controllers/placement/placement_status_test.go index fc7078dc5..361af9a1f 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status_test.go +++ b/pkg/controllers/placement/placement_status_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( "context" diff --git a/pkg/controllers/clusterresourceplacement/resource_selector.go b/pkg/controllers/placement/resource_selector.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/resource_selector.go rename to pkg/controllers/placement/resource_selector.go index a6fdd8de9..7feb7a516 100644 --- a/pkg/controllers/clusterresourceplacement/resource_selector.go +++ b/pkg/controllers/placement/resource_selector.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( "fmt" diff --git a/pkg/controllers/clusterresourceplacement/resource_selector_test.go b/pkg/controllers/placement/resource_selector_test.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/resource_selector_test.go rename to pkg/controllers/placement/resource_selector_test.go index 428d7b58f..bc1628f40 100644 --- a/pkg/controllers/clusterresourceplacement/resource_selector_test.go +++ b/pkg/controllers/placement/resource_selector_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( "errors" diff --git a/pkg/controllers/clusterresourceplacement/suite_test.go b/pkg/controllers/placement/suite_test.go similarity index 91% rename from pkg/controllers/clusterresourceplacement/suite_test.go rename to pkg/controllers/placement/suite_test.go index 50c2af260..a3e50afc9 100644 --- a/pkg/controllers/clusterresourceplacement/suite_test.go +++ b/pkg/controllers/placement/suite_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -package clusterresourceplacement +package placement import ( "context" @@ -40,9 +40,9 @@ import ( placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" "github.com/kubefleet-dev/kubefleet/cmd/hubagent/options" - "github.com/kubefleet-dev/kubefleet/pkg/controllers/clusterresourcebindingwatcher" - "github.com/kubefleet-dev/kubefleet/pkg/controllers/clusterresourceplacementwatcher" - "github.com/kubefleet-dev/kubefleet/pkg/controllers/clusterschedulingpolicysnapshot" + "github.com/kubefleet-dev/kubefleet/pkg/controllers/bindingwatcher" + "github.com/kubefleet-dev/kubefleet/pkg/controllers/placementwatcher" + "github.com/kubefleet-dev/kubefleet/pkg/controllers/schedulingpolicysnapshot" "github.com/kubefleet-dev/kubefleet/pkg/metrics" "github.com/kubefleet-dev/kubefleet/pkg/utils" "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" @@ -65,7 +65,7 @@ const ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "ClusterResourcePlacement Controller Suite") + RunSpecs(t, "Placement Controller Suite") } var _ = BeforeSuite(func() { @@ -135,18 +135,18 @@ var _ = BeforeSuite(func() { crpController := controller.NewController(controllerName, controller.NamespaceKeyFunc, reconciler.Reconcile, rateLimiter) // Set up the watchers - err = (&clusterschedulingpolicysnapshot.Reconciler{ + err = (&schedulingpolicysnapshot.Reconciler{ Client: mgr.GetClient(), PlacementController: crpController, }).SetupWithManagerForClusterSchedulingPolicySnapshot(mgr) Expect(err).Should(Succeed(), "failed to create clusterSchedulingPolicySnapshot watcher") - err = (&clusterresourceplacementwatcher.Reconciler{ + err = (&placementwatcher.Reconciler{ PlacementController: crpController, }).SetupWithManagerForClusterResourcePlacement(mgr) Expect(err).Should(Succeed(), "failed to create clusterResourcePlacement watcher") - err = (&clusterresourcebindingwatcher.Reconciler{ + err = (&bindingwatcher.Reconciler{ Client: mgr.GetClient(), PlacementController: crpController, }).SetupWithManagerForClusterResourceBinding(mgr) diff --git a/pkg/controllers/clusterresourceplacement/work_propagation.go b/pkg/controllers/placement/work_propagation.go similarity index 99% rename from pkg/controllers/clusterresourceplacement/work_propagation.go rename to pkg/controllers/placement/work_propagation.go index eee3a53cf..e6d51598a 100644 --- a/pkg/controllers/clusterresourceplacement/work_propagation.go +++ b/pkg/controllers/placement/work_propagation.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacement +package placement import ( "context" diff --git a/pkg/controllers/clusterresourceplacementwatcher/suite_test.go b/pkg/controllers/placementwatcher/suite_test.go similarity index 97% rename from pkg/controllers/clusterresourceplacementwatcher/suite_test.go rename to pkg/controllers/placementwatcher/suite_test.go index 0a22bfcb4..f3481344f 100644 --- a/pkg/controllers/clusterresourceplacementwatcher/suite_test.go +++ b/pkg/controllers/placementwatcher/suite_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -package clusterresourceplacementwatcher +package placementwatcher import ( "context" @@ -53,7 +53,7 @@ var ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "ClusterResourcePlacement Watcher Suite") + RunSpecs(t, "Placement Watcher Suite") } var _ = BeforeSuite(func() { diff --git a/pkg/controllers/clusterresourceplacementwatcher/watcher.go b/pkg/controllers/placementwatcher/watcher.go similarity index 93% rename from pkg/controllers/clusterresourceplacementwatcher/watcher.go rename to pkg/controllers/placementwatcher/watcher.go index 5bdc2a8c7..61e5c90ed 100644 --- a/pkg/controllers/clusterresourceplacementwatcher/watcher.go +++ b/pkg/controllers/placementwatcher/watcher.go @@ -14,8 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package clusterresourceplacementwatcher features a controller to watch the clusterResourcePlacement and resourcePlacement changes. -package clusterresourceplacementwatcher +// Package placementwatcher features a controller to watch the clusterResourcePlacement and resourcePlacement changes. +package placementwatcher import ( "context" diff --git a/pkg/controllers/clusterresourceplacementwatcher/watcher_integration_test.go b/pkg/controllers/placementwatcher/watcher_integration_test.go similarity index 99% rename from pkg/controllers/clusterresourceplacementwatcher/watcher_integration_test.go rename to pkg/controllers/placementwatcher/watcher_integration_test.go index 01bda4f34..3cd97ce7d 100644 --- a/pkg/controllers/clusterresourceplacementwatcher/watcher_integration_test.go +++ b/pkg/controllers/placementwatcher/watcher_integration_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package clusterresourceplacementwatcher +package placementwatcher import ( "time" diff --git a/pkg/controllers/clusterschedulingpolicysnapshot/controller.go b/pkg/controllers/schedulingpolicysnapshot/controller.go similarity index 96% rename from pkg/controllers/clusterschedulingpolicysnapshot/controller.go rename to pkg/controllers/schedulingpolicysnapshot/controller.go index fb49824ee..1833aa6df 100644 --- a/pkg/controllers/clusterschedulingpolicysnapshot/controller.go +++ b/pkg/controllers/schedulingpolicysnapshot/controller.go @@ -14,8 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package clusterschedulingpolicysnapshot features a controller to reconcile the clusterSchedulingPolicySnapshot object. -package clusterschedulingpolicysnapshot +// Package schedulingpolicysnapshot features a controller to reconcile the clusterSchedulingPolicySnapshot or the schedulingPolicySnapshot objects. 
+package schedulingpolicysnapshot import ( "context" diff --git a/pkg/controllers/clusterschedulingpolicysnapshot/controller_integration_test.go b/pkg/controllers/schedulingpolicysnapshot/controller_integration_test.go similarity index 99% rename from pkg/controllers/clusterschedulingpolicysnapshot/controller_integration_test.go rename to pkg/controllers/schedulingpolicysnapshot/controller_integration_test.go index defa98aa6..6e59c68ba 100644 --- a/pkg/controllers/clusterschedulingpolicysnapshot/controller_integration_test.go +++ b/pkg/controllers/schedulingpolicysnapshot/controller_integration_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterschedulingpolicysnapshot +package schedulingpolicysnapshot import ( "time" diff --git a/pkg/controllers/clusterschedulingpolicysnapshot/suite_test.go b/pkg/controllers/schedulingpolicysnapshot/suite_test.go similarity index 97% rename from pkg/controllers/clusterschedulingpolicysnapshot/suite_test.go rename to pkg/controllers/schedulingpolicysnapshot/suite_test.go index 8c20182fb..84cb73b04 100644 --- a/pkg/controllers/clusterschedulingpolicysnapshot/suite_test.go +++ b/pkg/controllers/schedulingpolicysnapshot/suite_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clusterschedulingpolicysnapshot +package schedulingpolicysnapshot import ( "context" @@ -54,7 +54,7 @@ var ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "ClusterSchedulingPolicySnapshot Controller Suite") + RunSpecs(t, "SchedulingPolicySnapshot Controller Suite") } var _ = BeforeSuite(func() { From f6245daf06c0b43f5f852ece38a69b1447272234 Mon Sep 17 00:00:00 2001 From: Zhiying Lin <54013513+zhiying-lin@users.noreply.github.com> Date: Fri, 22 Aug 2025 04:24:30 +0800 Subject: [PATCH 24/38] test: add rp pickAll tests (#201) --- test/e2e/resource_placement_pickall_test.go | 806 +++++++++++++++++++- test/e2e/utils_test.go | 4 + 2 files changed, 789 insertions(+), 21 deletions(-) diff --git a/test/e2e/resource_placement_pickall_test.go b/test/e2e/resource_placement_pickall_test.go index 34cc4d308..a035f71ac 100644 --- a/test/e2e/resource_placement_pickall_test.go +++ b/test/e2e/resource_placement_pickall_test.go @@ -26,29 +26,152 @@ import ( "k8s.io/utils/ptr" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/propertyprovider" + "github.com/kubefleet-dev/kubefleet/pkg/propertyprovider/azure" + "github.com/kubefleet-dev/kubefleet/test/e2e/framework" ) var _ = Describe("placing namespaced scoped resources using a RP with PickAll policy", func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + + BeforeEach(OncePerOrdered, func() { + // Create the resources. + createWorkResources() + + // Create the CRP with Namespace-only selector. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: namespaceOnlySelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + By("should update CRP status as expected") + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterEach(OncePerOrdered, func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, allMemberClusters) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Context("with no placement policy specified", Ordered, func() { + It("creating the RP should succeed", func() { + // Create the RP in the same namespace selecting namespaced resources with no placement policy. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on all member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + }) + Context("with no affinities specified", Ordered, func() { - crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) - rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + It("creating the RP should succeed", func() { + // Create the RP in the same namespace selecting namespaced resources. 
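Every RP (and the CRP) in this suite repeats the same rolling-update strategy with a 2-second unavailable period. A small helper could remove that duplication; this is a hypothetical sketch, not part of the patch, and the name defaultTestRolloutStrategy is assumed:

func defaultTestRolloutStrategy() placementv1beta1.RolloutStrategy {
	// Mirrors the strategy used throughout these specs: rolling updates with a
	// short unavailable period so rollouts settle quickly in e2e runs.
	return placementv1beta1.RolloutStrategy{
		Type: placementv1beta1.RollingUpdateRolloutStrategyType,
		RollingUpdate: &placementv1beta1.RollingUpdateConfig{
			UnavailablePeriodSeconds: ptr.To(2),
		},
	}
}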
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) - BeforeAll(func() { - // Create the resources. - createWorkResources() + It("should place the resources on all member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + }) - // Create the CRP with Namespace-only selector. - crp := &placementv1beta1.ClusterResourcePlacement{ + Context("with affinities, label selector only, updated", Ordered, func() { + It("creating the RP should succeed", func() { + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers. + Name: rpName, + Namespace: appNamespace().Name, Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: namespaceOnlySelector(), + ResourceSelectors: configMapSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabelName, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + envProd, + }, + }, + }, + }, + }, + }, + }, + }, + }, }, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -58,8 +181,67 @@ var _ = Describe("placing namespaced scoped resources using a RP with PickAll po }, }, } - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should place resources on matching clusters", func() { + // Verify that resources have been placed on the matching clusters. 
+ resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1EastProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on matching clusters") + }) + + It("can update the RP", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, rp); err != nil { + return err + } + rp.Spec.Policy.Affinity = &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionWest, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabelName, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + envProd, + }, + }, + }, + }, + }, + }, + }, + }, + } + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster3WestProdName}, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on matching clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster3WestProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on matching clusters") + }) + + It("should remove resources on previously matched clusters", func() { + checkIfRemovedConfigMapFromMemberClusters([]*framework.Cluster{memberCluster1EastProd}) + }) + }) + + Context("with affinities, label selector only, no matching clusters", Ordered, func() { + It("creating the RP should succeed", func() { // Create the RP in the same namespace selecting namespaced resources. 
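As an aside on what these label-only terms select: a required ClusterSelectorTerm that carries only a LabelSelector is satisfied by any member cluster whose labels match it, which is why the placement above moves from the east-prod cluster to the west-prod cluster after the update. A minimal sketch of that check using the standard apimachinery helpers; the actual scheduler code may differ, and clusterLabels stands in for one member cluster's labels:

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// labelSelectorTermMatches reports whether one ClusterSelectorTerm's label selector
// (MatchLabels plus MatchExpressions) matches the given cluster label set.
func labelSelectorTermMatches(term placementv1beta1.ClusterSelectorTerm, clusterLabels map[string]string) (bool, error) {
	sel, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
	if err != nil {
		return false, err
	}
	return sel.Matches(labels.Set(clusterLabels)), nil
}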
rp := &placementv1beta1.ResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ @@ -71,6 +253,30 @@ var _ = Describe("placing namespaced scoped resources using a RP with PickAll po ResourceSelectors: configMapSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionWest, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabelName, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + envCanary, + }, + }, + }, + }, + }, + }, + }, + }, + }, }, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -83,21 +289,579 @@ var _ = Describe("placing namespaced scoped resources using a RP with PickAll po Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") }) - It("should update CRP status as expected", func() { - crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") - Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), nil, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should not place resources on any cluster", checkIfRemovedConfigMapFromAllMemberClusters) + }) + + Context("with affinities, metric selector only", Ordered, func() { + It("creating the RP should succeed", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "3", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") }) It("should update RP status as expected", func() { - rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") - Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName, memberCluster3WestProdName}, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") }) - It("should place the resources on all member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + It("should place resources on matching clusters", func() { + targetClusters := []*framework.Cluster{memberCluster2EastCanary, memberCluster3WestProd} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("with affinities, metric selector only, updated", Ordered, func() { + It("creating the RP should succeed", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "3", + }, + }, + { + Name: propertyprovider.TotalCPUCapacityProperty, + Operator: placementv1beta1.PropertySelectorLessThan, + Values: []string{ + "10000", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) - AfterAll(func() { - ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, allMemberClusters) - ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName, memberCluster3WestProdName}, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") }) + + It("should place resources on matching clusters", func() { + targetClusters := []*framework.Cluster{memberCluster2EastCanary, memberCluster3WestProd} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + + It("can update the RP", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, rp); err != nil { + return err + } + + rp.Spec.Policy.Affinity = &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "3", + }, + }, + { + Name: propertyprovider.TotalCPUCapacityProperty, + Operator: placementv1beta1.PropertySelectorLessThan, + Values: []string{ + "10000", + }, + }, + }, + }, + }, + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorEqualTo, + 
Values: []string{ + "4", + }, + }, + { + Name: propertyprovider.AvailableMemoryCapacityProperty, + Operator: placementv1beta1.PropertySelectorNotEqualTo, + Values: []string{ + "20000Gi", + }, + }, + }, + }, + }, + }, + }, + }, + } + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName, memberCluster3WestProdName}, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on matching clusters", func() { + targetClusters := []*framework.Cluster{memberCluster2EastCanary, memberCluster3WestProd} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + }) + + Context("with affinities, metric selector only, no matching clusters", Ordered, func() { + It("creating the RP should succeed", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: azure.PerCPUCoreCostProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "0.01", + }, + }, + { + Name: propertyprovider.AllocatableCPUCapacityProperty, + Operator: placementv1beta1.PropertySelectorGreaterThan, + Values: []string{ + "10000", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), nil, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should not place resources on any cluster", checkIfRemovedConfigMapFromAllMemberClusters) + }) + + Context("with affinities, label and metric selectors", Ordered, func() { + It("creating the RP should succeed", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // 
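The property-based terms above compare observed cluster properties (node count, CPU and memory capacity, Azure cost) against threshold strings. For context, a sketch of how a single PropertySelectorRequirement could be evaluated with apimachinery quantities; this is illustrative only, assumes one value per requirement as in these specs, and the real scheduler logic may differ:

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// requirementSatisfied compares an observed property value against one requirement.
// Both sides are quantity strings (e.g. "3", "10000", "20000Gi").
func requirementSatisfied(observed string, req placementv1beta1.PropertySelectorRequirement) (bool, error) {
	got, err := resource.ParseQuantity(observed)
	if err != nil {
		return false, err
	}
	want, err := resource.ParseQuantity(req.Values[0])
	if err != nil {
		return false, err
	}
	cmp := got.Cmp(want) // -1 if got < want, 0 if equal, 1 if got > want
	switch req.Operator {
	case placementv1beta1.PropertySelectorGreaterThanOrEqualTo:
		return cmp >= 0, nil
	case placementv1beta1.PropertySelectorGreaterThan:
		return cmp > 0, nil
	case placementv1beta1.PropertySelectorLessThanOrEqualTo:
		return cmp <= 0, nil
	case placementv1beta1.PropertySelectorLessThan:
		return cmp < 0, nil
	case placementv1beta1.PropertySelectorEqualTo:
		return cmp == 0, nil
	case placementv1beta1.PropertySelectorNotEqualTo:
		return cmp != 0, nil
	default:
		return false, fmt.Errorf("unsupported operator %q", req.Operator)
	}
}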
Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + }, + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "3", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName}, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on matching clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster2EastCanary) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on matching clusters") + }) + }) + + Context("with affinities, label and metric selectors, updated", Ordered, func() { + It("creating the RP should succeed", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + }, + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.AllocatableCPUCapacityProperty, + Operator: placementv1beta1.PropertySelectorLessThanOrEqualTo, + Values: []string{ + "10000", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName, memberCluster2EastCanaryName}, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on matching clusters", func() { + targetClusters := []*framework.Cluster{memberCluster1EastProd, memberCluster2EastCanary} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + } + }) + + It("can update the RP", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, rp); err != nil { + return err + } + + rp.Spec.Policy.Affinity = &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: envLabelName, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + envCanary, + }, + }, + }, + }, + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.AllocatableMemoryCapacityProperty, + Operator: placementv1beta1.PropertySelectorLessThan, + Values: []string{ + "1Ki", + }, + }, + }, + }, + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabelName, + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{ + regionWest, + }, + }, + { + Key: envLabelName, + Operator: metav1.LabelSelectorOpIn, + Values: []string{ + envProd, + }, + }, + }, + }, 
+ PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: propertyprovider.NodeCountProperty, + Operator: placementv1beta1.PropertySelectorEqualTo, + Values: []string{ + "2", + }, + }, + { + Name: propertyprovider.TotalMemoryCapacityProperty, + Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo, + Values: []string{ + "1Ki", + }, + }, + }, + }, + }, + }, + }, + }, + } + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName}, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on matching clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1EastProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on matching clusters") + }) + + It("should remove resources on previously matched clusters", func() { + checkIfRemovedConfigMapFromMemberClusters([]*framework.Cluster{memberCluster2EastCanary}) + }) + }) + + Context("with affinities, label and metric selectors, no matching clusters", Ordered, func() { + It("creating the RP should succeed", func() { + if !isAzurePropertyProviderEnabled { + Skip("Skipping this test spec as Azure property provider is not enabled in the test environment") + } + + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + }, + PropertySelector: &placementv1beta1.PropertySelector{ + MatchExpressions: []placementv1beta1.PropertySelectorRequirement{ + { + Name: azure.PerGBMemoryCostProperty, + Operator: placementv1beta1.PropertySelectorEqualTo, + Values: []string{ + "0", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), nil, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should not place resources on any cluster", checkIfRemovedConfigMapFromAllMemberClusters) }) }) diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index 
e3e926aa2..e03a97c24 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -825,6 +825,10 @@ func checkIfRemovedWorkResourcesFromMemberClusters(clusters []*framework.Cluster } } +func checkIfRemovedConfigMapFromAllMemberClusters() { + checkIfRemovedConfigMapFromMemberClusters(allMemberClusters) +} + func checkIfRemovedWorkResourcesFromAllMemberClustersConsistently() { checkIfRemovedWorkResourcesFromMemberClustersConsistently(allMemberClusters) } From ca9b195d29aa629b8ebd82fe53db7c71d136799b Mon Sep 17 00:00:00 2001 From: Audra Stump <71152145+audrastump@users.noreply.github.com> Date: Thu, 21 Aug 2025 17:01:18 -0700 Subject: [PATCH 25/38] feat: Loosening webhook to allow for kubernetes-fleet.io/ labels to be modified (#210) --- apis/placement/v1beta1/binding_types.go | 2 +- .../v1beta1/clusterresourceplacement_types.go | 6 +- apis/placement/v1beta1/commons.go | 50 ++++----- .../v1beta1/overridesnapshot_types.go | 6 +- .../placement/v1beta1/policysnapshot_types.go | 4 +- .../v1beta1/resourcesnapshot_types.go | 12 +-- apis/placement/v1beta1/work_types.go | 4 +- pkg/webhook/validation/uservalidation.go | 37 +++++-- pkg/webhook/validation/uservalidation_test.go | 100 ++++++++++++++++++ 9 files changed, 171 insertions(+), 50 deletions(-) diff --git a/apis/placement/v1beta1/binding_types.go b/apis/placement/v1beta1/binding_types.go index ca1e7a5af..b3d76d7aa 100644 --- a/apis/placement/v1beta1/binding_types.go +++ b/apis/placement/v1beta1/binding_types.go @@ -28,7 +28,7 @@ const ( // SchedulerBindingCleanupFinalizer is a finalizer added to bindings to ensure we can look up the // corresponding CRP name for deleting bindings to trigger a new scheduling cycle. // TODO: migrate the finalizer to the new name "scheduler-binding-cleanup" in the future. - SchedulerBindingCleanupFinalizer = fleetPrefix + "scheduler-crb-cleanup" + SchedulerBindingCleanupFinalizer = FleetPrefix + "scheduler-crb-cleanup" ) // make sure the BindingObj and BindingObjList interfaces are implemented by the diff --git a/apis/placement/v1beta1/clusterresourceplacement_types.go b/apis/placement/v1beta1/clusterresourceplacement_types.go index 86bbdeb0c..8181aae11 100644 --- a/apis/placement/v1beta1/clusterresourceplacement_types.go +++ b/apis/placement/v1beta1/clusterresourceplacement_types.go @@ -29,11 +29,11 @@ import ( const ( // PlacementCleanupFinalizer is a finalizer added by the placement controller to all placement objects, to make sure // that the placement controller can react to placement object deletions if necessary. - PlacementCleanupFinalizer = fleetPrefix + "crp-cleanup" + PlacementCleanupFinalizer = FleetPrefix + "crp-cleanup" // SchedulerCleanupFinalizer is a finalizer added by the scheduler to placement objects, to make sure // that all bindings derived from a placement object can be cleaned up after the placement object is deleted. - SchedulerCleanupFinalizer = fleetPrefix + "scheduler-cleanup" + SchedulerCleanupFinalizer = FleetPrefix + "scheduler-cleanup" ) // make sure the PlacementObj and PlacementObjList interfaces are implemented by the @@ -1521,7 +1521,7 @@ func (m *ClusterResourcePlacement) SetPlacementStatus(status PlacementStatus) { const ( // ResourcePlacementCleanupFinalizer is a finalizer added by the RP controller to all RPs, to make sure // that the RP controller can react to RP deletions if necessary. 
- ResourcePlacementCleanupFinalizer = fleetPrefix + "rp-cleanup" + ResourcePlacementCleanupFinalizer = FleetPrefix + "rp-cleanup" ) // +genclient diff --git a/apis/placement/v1beta1/commons.go b/apis/placement/v1beta1/commons.go index 49bc1683a..479217dcb 100644 --- a/apis/placement/v1beta1/commons.go +++ b/apis/placement/v1beta1/commons.go @@ -58,32 +58,32 @@ const ( ) const ( - // fleetPrefix is the prefix used for official fleet labels/annotations. + // FleetPrefix is the prefix used for official fleet labels/annotations. // Unprefixed labels/annotations are reserved for end-users // See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#label-selector-and-annotation-conventions - fleetPrefix = "kubernetes-fleet.io/" + FleetPrefix = "kubernetes-fleet.io/" // MemberClusterFinalizer is used to make sure that we handle gc of all the member cluster resources on the hub cluster. - MemberClusterFinalizer = fleetPrefix + "membercluster-finalizer" + MemberClusterFinalizer = FleetPrefix + "membercluster-finalizer" // WorkFinalizer is used by the work generator to make sure that the binding is not deleted until the work objects // it generates are all deleted, or used by the work controller to make sure the work has been deleted in the member // cluster. - WorkFinalizer = fleetPrefix + "work-cleanup" + WorkFinalizer = FleetPrefix + "work-cleanup" // ClusterResourcePlacementStatusCleanupFinalizer is a finalizer added by the controller to all ClusterResourcePlacementStatus objects, to make sure // that the controller can react to ClusterResourcePlacementStatus deletions if necessary. - ClusterResourcePlacementStatusCleanupFinalizer = fleetPrefix + "cluster-resource-placement-status-cleanup" + ClusterResourcePlacementStatusCleanupFinalizer = FleetPrefix + "cluster-resource-placement-status-cleanup" // PlacementTrackingLabel points to the placement that creates this resource binding. // TODO: migrate the label content to "parent-placement" to work with both the PR and CRP - PlacementTrackingLabel = fleetPrefix + "parent-CRP" + PlacementTrackingLabel = FleetPrefix + "parent-CRP" // IsLatestSnapshotLabel indicates if the snapshot is the latest one. - IsLatestSnapshotLabel = fleetPrefix + "is-latest-snapshot" + IsLatestSnapshotLabel = FleetPrefix + "is-latest-snapshot" // FleetResourceLabelKey indicates that the resource is a fleet resource. - FleetResourceLabelKey = fleetPrefix + "is-fleet-resource" + FleetResourceLabelKey = FleetPrefix + "is-fleet-resource" // FirstWorkNameFmt is the format of the name of the work generated with the first resource snapshot. // The name of the first work is {crpName}-work. @@ -105,59 +105,59 @@ const ( WorkNameWithEnvelopeCRFmt = "%s-envelope-%s" // ParentClusterResourceOverrideSnapshotHashAnnotation is the annotation to work that contains the hash of the parent cluster resource override snapshot list. - ParentClusterResourceOverrideSnapshotHashAnnotation = fleetPrefix + "parent-cluster-resource-override-snapshot-hash" + ParentClusterResourceOverrideSnapshotHashAnnotation = FleetPrefix + "parent-cluster-resource-override-snapshot-hash" // ParentResourceOverrideSnapshotHashAnnotation is the annotation to work that contains the hash of the parent resource override snapshot list. 
- ParentResourceOverrideSnapshotHashAnnotation = fleetPrefix + "parent-resource-override-snapshot-hash" + ParentResourceOverrideSnapshotHashAnnotation = FleetPrefix + "parent-resource-override-snapshot-hash" // ParentResourceSnapshotNameAnnotation is the annotation applied to work that contains the name of the master resource snapshot that generates the work. - ParentResourceSnapshotNameAnnotation = fleetPrefix + "parent-resource-snapshot-name" + ParentResourceSnapshotNameAnnotation = FleetPrefix + "parent-resource-snapshot-name" // ParentResourceSnapshotIndexLabel is the label applied to work that contains the index of the resource snapshot that generates the work. - ParentResourceSnapshotIndexLabel = fleetPrefix + "parent-resource-snapshot-index" + ParentResourceSnapshotIndexLabel = FleetPrefix + "parent-resource-snapshot-index" // ParentBindingLabel is the label applied to work that contains the name of the binding that generates the work. - ParentBindingLabel = fleetPrefix + "parent-resource-binding" + ParentBindingLabel = FleetPrefix + "parent-resource-binding" // ParentNamespaceLabel is the label applied to work that contains the namespace of the binding that generates the work. - ParentNamespaceLabel = fleetPrefix + "parent-placement-namespace" + ParentNamespaceLabel = FleetPrefix + "parent-placement-namespace" // CRPGenerationAnnotation indicates the generation of the placement from which an object is derived or last updated. // TODO: rename this variable - CRPGenerationAnnotation = fleetPrefix + "CRP-generation" + CRPGenerationAnnotation = FleetPrefix + "CRP-generation" // EnvelopeConfigMapAnnotation indicates the configmap is an envelope configmap containing resources we need to apply to the member cluster instead of the configMap itself. - EnvelopeConfigMapAnnotation = fleetPrefix + "envelope-configmap" + EnvelopeConfigMapAnnotation = FleetPrefix + "envelope-configmap" // EnvelopeTypeLabel marks the work object as generated from an envelope object. // The value of the annotation is the type of the envelope object. - EnvelopeTypeLabel = fleetPrefix + "envelope-work" + EnvelopeTypeLabel = FleetPrefix + "envelope-work" // EnvelopeNamespaceLabel contains the namespace of the envelope object that the work is generated from. - EnvelopeNamespaceLabel = fleetPrefix + "envelope-namespace" + EnvelopeNamespaceLabel = FleetPrefix + "envelope-namespace" // EnvelopeNameLabel contains the name of the envelope object that the work is generated from. - EnvelopeNameLabel = fleetPrefix + "envelope-name" + EnvelopeNameLabel = FleetPrefix + "envelope-name" // PreviousBindingStateAnnotation records the previous state of a binding. // This is used to remember if an "unscheduled" binding was moved from a "bound" state or a "scheduled" state. - PreviousBindingStateAnnotation = fleetPrefix + "previous-binding-state" + PreviousBindingStateAnnotation = FleetPrefix + "previous-binding-state" // ClusterStagedUpdateRunFinalizer is used by the ClusterStagedUpdateRun controller to make sure that the ClusterStagedUpdateRun // object is not deleted until all its dependent resources are deleted. - ClusterStagedUpdateRunFinalizer = fleetPrefix + "stagedupdaterun-finalizer" + ClusterStagedUpdateRunFinalizer = FleetPrefix + "stagedupdaterun-finalizer" // TargetUpdateRunLabel indicates the target update run on a staged run related object. 
- TargetUpdateRunLabel = fleetPrefix + "targetupdaterun" + TargetUpdateRunLabel = FleetPrefix + "targetupdaterun" // UpdateRunDeleteStageName is the name of delete stage in the staged update run. - UpdateRunDeleteStageName = fleetPrefix + "deleteStage" + UpdateRunDeleteStageName = FleetPrefix + "deleteStage" // IsLatestUpdateRunApprovalLabel indicates if the approval is the latest approval on a staged run. - IsLatestUpdateRunApprovalLabel = fleetPrefix + "isLatestUpdateRunApproval" + IsLatestUpdateRunApprovalLabel = FleetPrefix + "isLatestUpdateRunApproval" // TargetUpdatingStageNameLabel indicates the updating stage name on a staged run related object. - TargetUpdatingStageNameLabel = fleetPrefix + "targetUpdatingStage" + TargetUpdatingStageNameLabel = FleetPrefix + "targetUpdatingStage" // ApprovalTaskNameFmt is the format of the approval task name. ApprovalTaskNameFmt = "%s-%s" diff --git a/apis/placement/v1beta1/overridesnapshot_types.go b/apis/placement/v1beta1/overridesnapshot_types.go index aec163412..00dc8b470 100644 --- a/apis/placement/v1beta1/overridesnapshot_types.go +++ b/apis/placement/v1beta1/overridesnapshot_types.go @@ -21,17 +21,17 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" const ( // OverrideIndexLabel is the label that indicate the policy snapshot index of a cluster policy. - OverrideIndexLabel = fleetPrefix + "override-index" + OverrideIndexLabel = FleetPrefix + "override-index" // OverrideSnapshotNameFmt is clusterResourceOverrideSnapshot name format: {CROName}-{OverrideSnapshotIndex}. OverrideSnapshotNameFmt = "%s-%d" // OverrideTrackingLabel is the label that points to the cluster resource override that creates a resource snapshot. - OverrideTrackingLabel = fleetPrefix + "parent-resource-override" + OverrideTrackingLabel = FleetPrefix + "parent-resource-override" // OverrideFinalizer is a finalizer added by the override controllers to all override, to make sure // that the override controller can react to override deletions if necessary. - OverrideFinalizer = fleetPrefix + "override-cleanup" + OverrideFinalizer = FleetPrefix + "override-cleanup" ) // +genclient diff --git a/apis/placement/v1beta1/policysnapshot_types.go b/apis/placement/v1beta1/policysnapshot_types.go index 0d33112c7..5bfaff5f2 100644 --- a/apis/placement/v1beta1/policysnapshot_types.go +++ b/apis/placement/v1beta1/policysnapshot_types.go @@ -26,13 +26,13 @@ import ( const ( // PolicyIndexLabel is the label that indicate the policy snapshot index of a cluster policy. - PolicyIndexLabel = fleetPrefix + "policy-index" + PolicyIndexLabel = FleetPrefix + "policy-index" // PolicySnapshotNameFmt is clusterPolicySnapshot name format: {CRPName}-{PolicySnapshotIndex}. PolicySnapshotNameFmt = "%s-%d" // NumberOfClustersAnnotation is the annotation that indicates how many clusters should be selected for selectN placement type. - NumberOfClustersAnnotation = fleetPrefix + "number-of-clusters" + NumberOfClustersAnnotation = FleetPrefix + "number-of-clusters" ) // make sure the PolicySnapshotObj and PolicySnapshotList interfaces are implemented by the diff --git a/apis/placement/v1beta1/resourcesnapshot_types.go b/apis/placement/v1beta1/resourcesnapshot_types.go index 3e39894de..4820e63fd 100644 --- a/apis/placement/v1beta1/resourcesnapshot_types.go +++ b/apis/placement/v1beta1/resourcesnapshot_types.go @@ -27,23 +27,23 @@ import ( const ( // ResourceIndexLabel is the label that indicate the resource snapshot index of a cluster resource snapshot. 
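The net effect of this rename is that the official "kubernetes-fleet.io/" prefix is now exported from the API package, so code outside it can ask whether a label key is fleet-owned. A small sketch of that check, mirroring what the webhook change below does; the helper name isFleetLabelKey is assumed:

import (
	"strings"

	placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1"
)

// isFleetLabelKey reports whether a label key carries the official fleet prefix and
// is therefore managed by fleet rather than reserved for end users.
func isFleetLabelKey(key string) bool {
	return strings.HasPrefix(key, placementv1beta1.FleetPrefix)
}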
- ResourceIndexLabel = fleetPrefix + "resource-index" + ResourceIndexLabel = FleetPrefix + "resource-index" // ResourceGroupHashAnnotation is the annotation that contains the value of the sha-256 hash // value of all the snapshots belong to the same snapshot index. - ResourceGroupHashAnnotation = fleetPrefix + "resource-hash" + ResourceGroupHashAnnotation = FleetPrefix + "resource-hash" // NumberOfEnvelopedObjectsAnnotation is the annotation that contains the number of the enveloped objects in the resource snapshot group. - NumberOfEnvelopedObjectsAnnotation = fleetPrefix + "number-of-enveloped-object" + NumberOfEnvelopedObjectsAnnotation = FleetPrefix + "number-of-enveloped-object" // NumberOfResourceSnapshotsAnnotation is the annotation that contains the total number of resource snapshots. - NumberOfResourceSnapshotsAnnotation = fleetPrefix + "number-of-resource-snapshots" + NumberOfResourceSnapshotsAnnotation = FleetPrefix + "number-of-resource-snapshots" // SubindexOfResourceSnapshotAnnotation is the annotation to store the subindex of resource snapshot in the group. - SubindexOfResourceSnapshotAnnotation = fleetPrefix + "subindex-of-resource-snapshot" + SubindexOfResourceSnapshotAnnotation = FleetPrefix + "subindex-of-resource-snapshot" // NextResourceSnapshotCandidateDetectionTimeAnnotation is the annotation to store the time of next resourceSnapshot candidate detected by the controller. - NextResourceSnapshotCandidateDetectionTimeAnnotation = fleetPrefix + "next-resource-snapshot-candidate-detection-time" + NextResourceSnapshotCandidateDetectionTimeAnnotation = FleetPrefix + "next-resource-snapshot-candidate-detection-time" // ResourceSnapshotNameFmt is resourcePolicySnapshot name format: {CRPName}-{resourceIndex}-snapshot. ResourceSnapshotNameFmt = "%s-%d-snapshot" diff --git a/apis/placement/v1beta1/work_types.go b/apis/placement/v1beta1/work_types.go index d1339ac7b..4781e72ff 100644 --- a/apis/placement/v1beta1/work_types.go +++ b/apis/placement/v1beta1/work_types.go @@ -40,10 +40,10 @@ import ( // The following definitions are originally declared in the controllers/workv1alpha1/manager.go file. const ( // ManifestHashAnnotation is the annotation that indicates whether the spec of the object has been changed or not. - ManifestHashAnnotation = fleetPrefix + "spec-hash" + ManifestHashAnnotation = FleetPrefix + "spec-hash" // LastAppliedConfigAnnotation is to record the last applied configuration on the object. - LastAppliedConfigAnnotation = fleetPrefix + "last-applied-configuration" + LastAppliedConfigAnnotation = FleetPrefix + "last-applied-configuration" // WorkConditionTypeApplied represents workload in Work is applied successfully on the spoke cluster. 
WorkConditionTypeApplied = "Applied" diff --git a/pkg/webhook/validation/uservalidation.go b/pkg/webhook/validation/uservalidation.go index 334d9b716..2bf7af72f 100644 --- a/pkg/webhook/validation/uservalidation.go +++ b/pkg/webhook/validation/uservalidation.go @@ -18,6 +18,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" + placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" fleetv1alpha1 "github.com/kubefleet-dev/kubefleet/apis/v1alpha1" "github.com/kubefleet-dev/kubefleet/pkg/utils" ) @@ -109,14 +110,11 @@ func ValidateFleetMemberClusterUpdate(currentMC, oldMC clusterv1beta1.MemberClus return admission.Denied(err.Error()) } - // Users are no longer allowed to modify labels of fleet member cluster through webhook. - // This will be disabled until member labels are accessible through CLI - if denyModifyMemberClusterLabels { - isLabelUpdated := isMapFieldUpdated(currentMC.GetLabels(), oldMC.GetLabels()) - if isLabelUpdated && !isUserInGroup(userInfo, mastersGroup) { - klog.V(2).InfoS(DeniedModifyMemberClusterLabels, "user", userInfo.Username, "groups", userInfo.Groups, "operation", req.Operation, "GVK", req.RequestKind, "subResource", req.SubResource, "namespacedName", namespacedName) - return admission.Denied(DeniedModifyMemberClusterLabels) - } + isLabelUpdated := isMapFieldUpdated(currentMC.GetLabels(), oldMC.GetLabels()) + if isLabelUpdated && !isUserInGroup(userInfo, mastersGroup) && shouldDenyLabelModification(currentMC.GetLabels(), oldMC.GetLabels(), denyModifyMemberClusterLabels) { + // allow any user to modify kubernetes-fleet.io/* labels, but restricts other label modifications given denyModifyMemberClusterLabels is true. + klog.V(2).InfoS(DeniedModifyMemberClusterLabels, "user", userInfo.Username, "groups", userInfo.Groups, "operation", req.Operation, "GVK", req.RequestKind, "subResource", req.SubResource, "namespacedName", namespacedName) + return admission.Denied(DeniedModifyMemberClusterLabels) } isAnnotationUpdated := isFleetAnnotationUpdated(currentMC.Annotations, oldMC.Annotations) @@ -179,6 +177,29 @@ func isUserInGroup(userInfo authenticationv1.UserInfo, groupName string) bool { return slices.Contains(userInfo.Groups, groupName) } +// shouldDenyLabelModification returns true if any labels (besides kubernetes-fleet.io/* labels) are being modified and denyModifyMemberClusterLabels is true. +func shouldDenyLabelModification(currentLabels, oldLabels map[string]string, denyModifyMemberClusterLabels bool) bool { + if !denyModifyMemberClusterLabels { + return false + } + for k, v := range currentLabels { + oldV, exists := oldLabels[k] + if !exists || oldV != v { + if !strings.HasPrefix(k, placementv1beta1.FleetPrefix) { + return true + } + } + } + for k := range oldLabels { + if _, exists := currentLabels[k]; !exists { + if !strings.HasPrefix(k, placementv1beta1.FleetPrefix) { + return true + } + } + } + return false +} + // isMemberClusterMapFieldUpdated return true if member cluster label is updated. 
func isMapFieldUpdated(currentMap, oldMap map[string]string) bool { return !reflect.DeepEqual(currentMap, oldMap) diff --git a/pkg/webhook/validation/uservalidation_test.go b/pkg/webhook/validation/uservalidation_test.go index 7bcfa3919..6afecc742 100644 --- a/pkg/webhook/validation/uservalidation_test.go +++ b/pkg/webhook/validation/uservalidation_test.go @@ -328,6 +328,106 @@ func TestValidateFleetMemberClusterUpdate(t *testing.T) { }, wantResponse: admission.Allowed(fmt.Sprintf(ResourceAllowedFormat, "nonSystemMastersUser", utils.GenerateGroupString([]string{"system:authenticated"}), admissionv1.Update, &utils.MCMetaGVK, "", types.NamespacedName{Name: "test-mc"})), }, + "allow label modification by any user for kubernetes-fleet.io/* labels": { + denyModifyMemberClusterLabels: true, + oldMC: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mc", + Labels: map[string]string{"kubernetes-fleet.io/some-label": "old-value"}, + Annotations: map[string]string{ + "fleet.azure.com/cluster-resource-id": "test-cluster-resource-id", + }, + }, + }, + newMC: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mc", + Labels: map[string]string{"kubernetes-fleet.io/some-label": "new-value"}, + Annotations: map[string]string{ + "fleet.azure.com/cluster-resource-id": "test-cluster-resource-id", + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Name: "test-mc", + UserInfo: authenticationv1.UserInfo{ + Username: "nonSystemMastersUser", + Groups: []string{"someGroup"}, + }, + RequestKind: &utils.MCMetaGVK, + Operation: admissionv1.Update, + }, + }, + wantResponse: admission.Allowed(fmt.Sprintf(ResourceAllowedFormat, "nonSystemMastersUser", utils.GenerateGroupString([]string{"someGroup"}), + admissionv1.Update, &utils.MCMetaGVK, "", types.NamespacedName{Name: "test-mc"})), + }, + "allow label creation by any user for kubernetes-fleet.io/* labels": { + denyModifyMemberClusterLabels: true, + oldMC: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mc", + Annotations: map[string]string{ + "fleet.azure.com/cluster-resource-id": "test-cluster-resource-id", + }, + }, + }, + newMC: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mc", + Labels: map[string]string{"kubernetes-fleet.io/some-label": "new-value"}, + Annotations: map[string]string{ + "fleet.azure.com/cluster-resource-id": "test-cluster-resource-id", + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Name: "test-mc", + UserInfo: authenticationv1.UserInfo{ + Username: "nonSystemMastersUser", + Groups: []string{"someGroup"}, + }, + RequestKind: &utils.MCMetaGVK, + Operation: admissionv1.Update, + }, + }, + wantResponse: admission.Allowed(fmt.Sprintf(ResourceAllowedFormat, "nonSystemMastersUser", utils.GenerateGroupString([]string{"someGroup"}), + admissionv1.Update, &utils.MCMetaGVK, "", types.NamespacedName{Name: "test-mc"})), + }, + "allow label deletion by any user for kubernetes-fleet.io/* labels": { + denyModifyMemberClusterLabels: true, + oldMC: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mc", + Labels: map[string]string{"kubernetes-fleet.io/some-label": "old-value"}, + Annotations: map[string]string{ + "fleet.azure.com/cluster-resource-id": "test-cluster-resource-id", + }, + }, + }, + newMC: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mc", + Annotations: map[string]string{ + 
"fleet.azure.com/cluster-resource-id": "test-cluster-resource-id", + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Name: "test-mc", + UserInfo: authenticationv1.UserInfo{ + Username: "nonSystemMastersUser", + Groups: []string{"someGroup"}, + }, + RequestKind: &utils.MCMetaGVK, + Operation: admissionv1.Update, + }, + }, + wantResponse: admission.Allowed(fmt.Sprintf(ResourceAllowedFormat, "nonSystemMastersUser", utils.GenerateGroupString([]string{"someGroup"}), + admissionv1.Update, &utils.MCMetaGVK, "", types.NamespacedName{Name: "test-mc"})), + }, } for testName, testCase := range testCases { From ced8ea65fb253ef31eea229b23b2fee30babfb83 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Fri, 22 Aug 2025 10:55:06 +1000 Subject: [PATCH 26/38] feat: a minor naming change to keep things consistent in the work applier (#211) Minor fixes Signed-off-by: michaelawyu --- apis/cluster/v1beta1/zz_generated.deepcopy.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 2 +- .../v1beta1/zz_generated.deepcopy.go | 2 +- apis/v1alpha1/zz_generated.deepcopy.go | 2 +- .../workapplier/availability_tracker.go | 50 +-- .../workapplier/availability_tracker_test.go | 382 +++++++++--------- pkg/controllers/workapplier/backoff_test.go | 22 +- pkg/controllers/workapplier/controller.go | 23 +- .../controller_integration_migrated_test.go | 2 +- .../controller_integration_test.go | 74 ++-- pkg/controllers/workapplier/metrics_test.go | 8 +- pkg/controllers/workapplier/status.go | 28 +- pkg/controllers/workapplier/status_test.go | 66 +-- .../controller_integration_test.go | 16 +- test/e2e/actuals_test.go | 2 +- test/e2e/enveloped_object_placement_test.go | 2 +- test/e2e/utils_test.go | 2 +- test/upgrade/before/actuals_test.go | 4 +- 18 files changed, 345 insertions(+), 344 deletions(-) diff --git a/apis/cluster/v1beta1/zz_generated.deepcopy.go b/apis/cluster/v1beta1/zz_generated.deepcopy.go index 7bb7f501c..17e71a1a2 100644 --- a/apis/cluster/v1beta1/zz_generated.deepcopy.go +++ b/apis/cluster/v1beta1/zz_generated.deepcopy.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/apis/placement/v1alpha1/zz_generated.deepcopy.go b/apis/placement/v1alpha1/zz_generated.deepcopy.go index 6d1656d18..df9f5e6d7 100644 --- a/apis/placement/v1alpha1/zz_generated.deepcopy.go +++ b/apis/placement/v1alpha1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1alpha1 import ( "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) diff --git a/apis/placement/v1beta1/zz_generated.deepcopy.go b/apis/placement/v1beta1/zz_generated.deepcopy.go index acd0306ac..45163b157 100644 --- a/apis/placement/v1beta1/zz_generated.deepcopy.go +++ b/apis/placement/v1beta1/zz_generated.deepcopy.go @@ -21,7 +21,7 @@ limitations under the License. 
package v1beta1 import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index 27a862c43..85550ca19 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/pkg/controllers/workapplier/availability_tracker.go b/pkg/controllers/workapplier/availability_tracker.go index 944145134..021c7babd 100644 --- a/pkg/controllers/workapplier/availability_tracker.go +++ b/pkg/controllers/workapplier/availability_tracker.go @@ -51,7 +51,7 @@ func (r *Reconciler) trackInMemberClusterObjAvailability(ctx context.Context, bu if !isManifestObjectApplied(bundle.applyOrReportDiffResTyp) { // The manifest object in the bundle has not been applied yet. No availability check // is needed. - bundle.availabilityResTyp = ManifestProcessingAvailabilityResultTypeSkipped + bundle.availabilityResTyp = AvailabilityResultTypeSkipped // Note that some of the objects might have failed the pre-processing stage and do not // even have a GVR or a manifest object. @@ -69,7 +69,7 @@ func (r *Reconciler) trackInMemberClusterObjAvailability(ctx context.Context, bu if err != nil { // An unexpected error has occurred during the availability check. bundle.availabilityErr = err - bundle.availabilityResTyp = ManifestProcessingAvailabilityResultTypeFailed + bundle.availabilityResTyp = AvailabilityResultTypeFailed klog.ErrorS(err, "Failed to track the availability of the applied object in the member cluster", "work", workRef, "GVR", *bundle.gvr, "inMemberClusterObj", klog.KObj(bundle.inMemberClusterObj)) @@ -108,11 +108,11 @@ func trackInMemberClusterObjAvailabilityByGVR( if isDataResource(*gvr) { klog.V(2).InfoS("The object from the member cluster is a data object, consider it to be immediately available", "gvr", *gvr, "inMemberClusterObj", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Cannot determine the availability of the object from the member cluster; untrack its availability", "gvr", *gvr, "resource", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeNotTrackable, nil + return AvailabilityResultTypeNotTrackable, nil } } @@ -123,7 +123,7 @@ func trackDeploymentAvailability(inMemberClusterObj *unstructured.Unstructured) // Normally this branch should never run. wrappedErr := fmt.Errorf("failed to convert the unstructured object to a deployment: %w", err) _ = controller.NewUnexpectedBehaviorError(wrappedErr) - return ManifestProcessingAvailabilityResultTypeFailed, wrappedErr + return AvailabilityResultTypeFailed, wrappedErr } // Check if the deployment is available. 
@@ -136,10 +136,10 @@ func trackDeploymentAvailability(inMemberClusterObj *unstructured.Unstructured) requiredReplicas == deploy.Status.UpdatedReplicas && deploy.Status.UnavailableReplicas == 0 { klog.V(2).InfoS("Deployment is available", "deployment", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Deployment is not ready yet, will check later to see if it becomes available", "deployment", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeNotYetAvailable, nil + return AvailabilityResultTypeNotYetAvailable, nil } // trackStatefulSetAvailability tracks the availability of a stateful set in the member cluster. @@ -149,7 +149,7 @@ func trackStatefulSetAvailability(inMemberClusterObj *unstructured.Unstructured) // Normally this branch should never run. wrappedErr := fmt.Errorf("failed to convert the unstructured object to a stateful set: %w", err) _ = controller.NewUnexpectedBehaviorError(wrappedErr) - return ManifestProcessingAvailabilityResultTypeFailed, wrappedErr + return AvailabilityResultTypeFailed, wrappedErr } // Check if the stateful set is available. @@ -165,10 +165,10 @@ func trackStatefulSetAvailability(inMemberClusterObj *unstructured.Unstructured) statefulSet.Status.CurrentReplicas == statefulSet.Status.UpdatedReplicas && statefulSet.Status.CurrentRevision == statefulSet.Status.UpdateRevision { klog.V(2).InfoS("StatefulSet is available", "statefulSet", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Stateful set is not ready yet, will check later to see if it becomes available", "statefulSet", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeNotYetAvailable, nil + return AvailabilityResultTypeNotYetAvailable, nil } // trackDaemonSetAvailability tracks the availability of a daemon set in the member cluster. @@ -178,7 +178,7 @@ func trackDaemonSetAvailability(inMemberClusterObj *unstructured.Unstructured) ( wrappedErr := fmt.Errorf("failed to convert the unstructured object to a daemon set: %w", err) _ = controller.NewUnexpectedBehaviorError(wrappedErr) // Normally this branch should never run. - return ManifestProcessingAvailabilityResultTypeFailed, wrappedErr + return AvailabilityResultTypeFailed, wrappedErr } // Check if the daemonSet is available. @@ -190,10 +190,10 @@ func trackDaemonSetAvailability(inMemberClusterObj *unstructured.Unstructured) ( daemonSet.Status.NumberAvailable == daemonSet.Status.DesiredNumberScheduled && daemonSet.Status.CurrentNumberScheduled == daemonSet.Status.UpdatedNumberScheduled { klog.V(2).InfoS("DaemonSet is available", "daemonSet", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Daemon set is not ready yet, will check later to see if it becomes available", "daemonSet", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeNotYetAvailable, nil + return AvailabilityResultTypeNotYetAvailable, nil } // trackServiceAvailability tracks the availability of a service in the member cluster. 
@@ -202,7 +202,7 @@ func trackServiceAvailability(inMemberClusterObj *unstructured.Unstructured) (Ma if err := runtime.DefaultUnstructuredConverter.FromUnstructured(inMemberClusterObj.Object, &svc); err != nil { wrappedErr := fmt.Errorf("failed to convert the unstructured object to a service: %w", err) _ = controller.NewUnexpectedBehaviorError(wrappedErr) - return ManifestProcessingAvailabilityResultTypeFailed, wrappedErr + return AvailabilityResultTypeFailed, wrappedErr } switch svc.Spec.Type { case "": @@ -214,25 +214,25 @@ func trackServiceAvailability(inMemberClusterObj *unstructured.Unstructured) (Ma // IP assigned. if len(svc.Spec.ClusterIPs) > 0 && len(svc.Spec.ClusterIPs[0]) > 0 { klog.V(2).InfoS("Service is available", "service", klog.KObj(inMemberClusterObj), "serviceType", svc.Spec.Type) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Service is not ready yet, will check later to see if it becomes available", "service", klog.KObj(inMemberClusterObj), "serviceType", svc.Spec.Type) - return ManifestProcessingAvailabilityResultTypeNotYetAvailable, nil + return AvailabilityResultTypeNotYetAvailable, nil case corev1.ServiceTypeLoadBalancer: // Fleet considers a loadBalancer service to be available if it has at least one load // balancer IP or hostname assigned. if len(svc.Status.LoadBalancer.Ingress) > 0 && (len(svc.Status.LoadBalancer.Ingress[0].IP) > 0 || len(svc.Status.LoadBalancer.Ingress[0].Hostname) > 0) { klog.V(2).InfoS("Service is available", "service", klog.KObj(inMemberClusterObj), "serviceType", svc.Spec.Type) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Service is not ready yet, will check later to see if it becomes available", "service", klog.KObj(inMemberClusterObj), "serviceType", svc.Spec.Type) - return ManifestProcessingAvailabilityResultTypeNotYetAvailable, nil + return AvailabilityResultTypeNotYetAvailable, nil } // we don't know how to track the availability of when the service type is externalName klog.V(2).InfoS("Cannot determine the availability of external name services; untrack its availability", "service", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeNotTrackable, nil + return AvailabilityResultTypeNotTrackable, nil } // trackCRDAvailability tracks the availability of a custom resource definition in the member cluster. @@ -241,32 +241,32 @@ func trackCRDAvailability(inMemberClusterObj *unstructured.Unstructured) (Manife if err := runtime.DefaultUnstructuredConverter.FromUnstructured(inMemberClusterObj.Object, &crd); err != nil { wrappedErr := fmt.Errorf("failed to convert the unstructured object to a custom resource definition: %w", err) _ = controller.NewUnexpectedBehaviorError(wrappedErr) - return ManifestProcessingAvailabilityResultTypeFailed, wrappedErr + return AvailabilityResultTypeFailed, wrappedErr } // If both conditions are True, the CRD has become available. 
if apiextensionshelpers.IsCRDConditionTrue(&crd, apiextensionsv1.Established) && apiextensionshelpers.IsCRDConditionTrue(&crd, apiextensionsv1.NamesAccepted) { klog.V(2).InfoS("CustomResourceDefinition is available", "customResourceDefinition", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Custom resource definition is not ready yet, will check later to see if it becomes available", klog.KObj(inMemberClusterObj)) - return ManifestProcessingAvailabilityResultTypeNotYetAvailable, nil + return AvailabilityResultTypeNotYetAvailable, nil } // trackPDBAvailability tracks the availability of a pod disruption budget in the member cluster func trackPDBAvailability(curObj *unstructured.Unstructured) (ManifestProcessingAvailabilityResultType, error) { var pdb policyv1.PodDisruptionBudget if err := runtime.DefaultUnstructuredConverter.FromUnstructured(curObj.Object, &pdb); err != nil { - return ManifestProcessingAvailabilityResultTypeFailed, controller.NewUnexpectedBehaviorError(err) + return AvailabilityResultTypeFailed, controller.NewUnexpectedBehaviorError(err) } // Check if conditions are up-to-date if poddisruptionbudget.ConditionsAreUpToDate(&pdb) { klog.V(2).InfoS("PodDisruptionBudget is available", "pdb", klog.KObj(curObj)) - return ManifestProcessingAvailabilityResultTypeAvailable, nil + return AvailabilityResultTypeAvailable, nil } klog.V(2).InfoS("Still need to wait for PodDisruptionBudget to be available", "pdb", klog.KObj(curObj)) - return ManifestProcessingAvailabilityResultTypeNotYetAvailable, nil + return AvailabilityResultTypeNotYetAvailable, nil } // isDataResource checks if the resource is a data resource; such resources are diff --git a/pkg/controllers/workapplier/availability_tracker_test.go b/pkg/controllers/workapplier/availability_tracker_test.go index 77cf4379c..1127cf612 100644 --- a/pkg/controllers/workapplier/availability_tracker_test.go +++ b/pkg/controllers/workapplier/availability_tracker_test.go @@ -224,39 +224,39 @@ func TestTrackDeploymentAvailability(t *testing.T) { } testCases := []struct { - name string - deploy *appsv1.Deployment - wantManifestProcessingAvailabilityResultType ManifestProcessingAvailabilityResultType + name string + deploy *appsv1.Deployment + wantAvailabilityResultType ManifestProcessingAvailabilityResultType }{ { - name: "available deployment (w/ fixed replica count)", - deploy: availableDeployWithFixedReplicaCount, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available deployment (w/ fixed replica count)", + deploy: availableDeployWithFixedReplicaCount, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available deployment (w/ default replica count)", - deploy: availableDeployWithDefaultReplicaCount, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available deployment (w/ default replica count)", + deploy: availableDeployWithDefaultReplicaCount, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "unavailable deployment with stale status", - deploy: unavailableDeployWithStaleStatus, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable deployment with stale status", + deploy: unavailableDeployWithStaleStatus, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: 
"unavailable deployment with not enough available replicas", - deploy: unavailableDeployWithNotEnoughAvailableReplicas, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable deployment with not enough available replicas", + deploy: unavailableDeployWithNotEnoughAvailableReplicas, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable deployment with not enough updated replicas", - deploy: unavailableDeployWithNotEnoughUpdatedReplicas, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable deployment with not enough updated replicas", + deploy: unavailableDeployWithNotEnoughUpdatedReplicas, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable deployment with unavailable replicas", - deploy: unavailableDeployWithMoreReplicasThanRequired, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable deployment with unavailable replicas", + deploy: unavailableDeployWithMoreReplicasThanRequired, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, } @@ -266,8 +266,8 @@ func TestTrackDeploymentAvailability(t *testing.T) { if err != nil { t.Fatalf("trackDeploymentAvailability() = %v, want no error", err) } - if gotResTyp != tc.wantManifestProcessingAvailabilityResultType { - t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantManifestProcessingAvailabilityResultType) + if gotResTyp != tc.wantAvailabilityResultType { + t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantAvailabilityResultType) } }) } @@ -341,39 +341,39 @@ func TestTrackStatefulSetAvailability(t *testing.T) { } testCases := []struct { - name string - statefulSet *appsv1.StatefulSet - wantManifestProcessingAvailabilityResultType ManifestProcessingAvailabilityResultType + name string + statefulSet *appsv1.StatefulSet + wantAvailabilityResultType ManifestProcessingAvailabilityResultType }{ { - name: "available stateful set (w/ fixed replica count)", - statefulSet: availableStatefulSetWithFixedReplicaCount, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available stateful set (w/ fixed replica count)", + statefulSet: availableStatefulSetWithFixedReplicaCount, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available stateful set (w/ default replica count)", - statefulSet: availableStatefulSetWithDefaultReplicaCount, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available stateful set (w/ default replica count)", + statefulSet: availableStatefulSetWithDefaultReplicaCount, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "unavailable stateful set with stale status", - statefulSet: unavailableStatefulSetWithStaleStatus, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable stateful set with stale status", + statefulSet: unavailableStatefulSetWithStaleStatus, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable stateful set with not enough available replicas", - statefulSet: unavailableStatefulSetWithNotEnoughAvailableReplicas, - wantManifestProcessingAvailabilityResultType: 
ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable stateful set with not enough available replicas", + statefulSet: unavailableStatefulSetWithNotEnoughAvailableReplicas, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable stateful set with not enough current replicas", - statefulSet: unavailableStatefulSetWithNotEnoughCurrentReplicas, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable stateful set with not enough current replicas", + statefulSet: unavailableStatefulSetWithNotEnoughCurrentReplicas, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable stateful set with not latest revision", - statefulSet: unavailableStatefulSetWithNotLatestRevision, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable stateful set with not latest revision", + statefulSet: unavailableStatefulSetWithNotLatestRevision, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, } @@ -383,8 +383,8 @@ func TestTrackStatefulSetAvailability(t *testing.T) { if err != nil { t.Fatalf("trackStatefulSetAvailability() = %v, want no error", err) } - if gotResTyp != tc.wantManifestProcessingAvailabilityResultType { - t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantManifestProcessingAvailabilityResultType) + if gotResTyp != tc.wantAvailabilityResultType { + t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantAvailabilityResultType) } }) } @@ -427,29 +427,29 @@ func TestTrackDaemonSetAvailability(t *testing.T) { } testCases := []struct { - name string - daemonSet *appsv1.DaemonSet - wantManifestProcessingAvailabilityResultType ManifestProcessingAvailabilityResultType + name string + daemonSet *appsv1.DaemonSet + wantAvailabilityResultType ManifestProcessingAvailabilityResultType }{ { - name: "available daemon set", - daemonSet: availableDaemonSet, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available daemon set", + daemonSet: availableDaemonSet, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "unavailable daemon set with stale status", - daemonSet: unavailableDaemonSetWithStaleStatus, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable daemon set with stale status", + daemonSet: unavailableDaemonSetWithStaleStatus, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable daemon set with not enough available pods", - daemonSet: unavailableDaemonSetWithNotEnoughAvailablePods, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable daemon set with not enough available pods", + daemonSet: unavailableDaemonSetWithNotEnoughAvailablePods, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable daemon set with not enough updated pods", - daemonSet: unavailableDaemonSetWithNotEnoughUpdatedPods, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable daemon set with not enough updated pods", + daemonSet: unavailableDaemonSetWithNotEnoughUpdatedPods, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, } 
@@ -459,8 +459,8 @@ func TestTrackDaemonSetAvailability(t *testing.T) { if err != nil { t.Fatalf("trackDaemonSetAvailability() = %v, want no error", err) } - if gotResTyp != tc.wantManifestProcessingAvailabilityResultType { - t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantManifestProcessingAvailabilityResultType) + if gotResTyp != tc.wantAvailabilityResultType { + t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantAvailabilityResultType) } }) } @@ -469,9 +469,9 @@ func TestTrackDaemonSetAvailability(t *testing.T) { // TestTrackServiceAvailability tests the trackServiceAvailability function. func TestTrackServiceAvailability(t *testing.T) { testCases := []struct { - name string - service *corev1.Service - wantManifestProcessingAvailabilityResultType ManifestProcessingAvailabilityResultType + name string + service *corev1.Service + wantAvailabilityResultType ManifestProcessingAvailabilityResultType }{ { name: "untrackable service (external name type)", @@ -485,7 +485,7 @@ func TestTrackServiceAvailability(t *testing.T) { ClusterIPs: []string{"192.168.1.1"}, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotTrackable, + wantAvailabilityResultType: AvailabilityResultTypeNotTrackable, }, { name: "available default typed service (IP assigned)", @@ -499,7 +499,7 @@ func TestTrackServiceAvailability(t *testing.T) { ClusterIPs: []string{"192.168.1.1"}, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { name: "available ClusterIP service (IP assigned)", @@ -514,7 +514,7 @@ func TestTrackServiceAvailability(t *testing.T) { ClusterIPs: []string{"192.168.1.1"}, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { name: "available headless service", @@ -528,7 +528,7 @@ func TestTrackServiceAvailability(t *testing.T) { ClusterIPs: []string{"None"}, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { name: "available node port service (IP assigned)", @@ -543,7 +543,7 @@ func TestTrackServiceAvailability(t *testing.T) { ClusterIPs: []string{"192.168.1.1"}, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { name: "unavailable ClusterIP service (no IP assigned)", @@ -557,7 +557,7 @@ func TestTrackServiceAvailability(t *testing.T) { ClusterIP: "13.6.2.2", }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { name: "available LoadBalancer service (IP assigned)", @@ -579,7 +579,7 @@ func TestTrackServiceAvailability(t *testing.T) { }, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { name: "available LoadBalancer service (hostname assigned)", @@ -601,7 +601,7 @@ func TestTrackServiceAvailability(t *testing.T) { }, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + wantAvailabilityResultType: 
AvailabilityResultTypeAvailable, }, { name: "unavailable LoadBalancer service (ingress not ready)", @@ -619,7 +619,7 @@ func TestTrackServiceAvailability(t *testing.T) { }, }, }, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, } @@ -629,8 +629,8 @@ func TestTrackServiceAvailability(t *testing.T) { if err != nil { t.Errorf("trackServiceAvailability() = %v, want no error", err) } - if gotResTyp != tc.wantManifestProcessingAvailabilityResultType { - t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantManifestProcessingAvailabilityResultType) + if gotResTyp != tc.wantAvailabilityResultType { + t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantAvailabilityResultType) } }) } @@ -681,24 +681,24 @@ func TestTrackCRDAvailability(t *testing.T) { } testCases := []struct { - name string - crd *apiextensionsv1.CustomResourceDefinition - wantManifestProcessingAvailabilityResultType ManifestProcessingAvailabilityResultType + name string + crd *apiextensionsv1.CustomResourceDefinition + wantAvailabilityResultType ManifestProcessingAvailabilityResultType }{ { - name: "available CRD", - crd: availableCRD, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available CRD", + crd: availableCRD, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "unavailable CRD (not established)", - crd: unavailableCRDNotEstablished, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable CRD (not established)", + crd: unavailableCRDNotEstablished, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable CRD (name not accepted)", - crd: unavailableCRDNameNotAccepted, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable CRD (name not accepted)", + crd: unavailableCRDNameNotAccepted, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, } @@ -708,8 +708,8 @@ func TestTrackCRDAvailability(t *testing.T) { if err != nil { t.Fatalf("trackCRDAvailability() = %v, want no error", err) } - if gotResTyp != tc.wantManifestProcessingAvailabilityResultType { - t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantManifestProcessingAvailabilityResultType) + if gotResTyp != tc.wantAvailabilityResultType { + t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantAvailabilityResultType) } }) } @@ -768,24 +768,24 @@ func TestTrackPDBAvailability(t *testing.T) { } testCases := []struct { - name string - pdb *policyv1.PodDisruptionBudget - wantManifestProcessingAvailabilityResultType ManifestProcessingAvailabilityResultType + name string + pdb *policyv1.PodDisruptionBudget + wantAvailabilityResultType ManifestProcessingAvailabilityResultType }{ { - name: "available PDB", - pdb: availablePDB, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available PDB", + pdb: availablePDB, + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "unavailable PDB (insufficient pods)", - pdb: unavailablePDBInsufficientPods, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable PDB 
(insufficient pods)", + pdb: unavailablePDBInsufficientPods, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, { - name: "unavailable PDB (stale condition)", - pdb: unavailablePDBStaleCondition, - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + name: "unavailable PDB (stale condition)", + pdb: unavailablePDBStaleCondition, + wantAvailabilityResultType: AvailabilityResultTypeNotYetAvailable, }, } @@ -795,8 +795,8 @@ func TestTrackPDBAvailability(t *testing.T) { if err != nil { t.Fatalf("trackPDBAvailability() = %v, want no error", err) } - if gotResTyp != tc.wantManifestProcessingAvailabilityResultType { - t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantManifestProcessingAvailabilityResultType) + if gotResTyp != tc.wantAvailabilityResultType { + t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantAvailabilityResultType) } }) } @@ -876,124 +876,124 @@ func TestTrackInMemberClusterObjAvailabilityByGVR(t *testing.T) { } testCases := []struct { - name string - gvr schema.GroupVersionResource - inMemberClusterObj *unstructured.Unstructured - wantManifestProcessingAvailabilityResultType ManifestProcessingAvailabilityResultType + name string + gvr schema.GroupVersionResource + inMemberClusterObj *unstructured.Unstructured + wantAvailabilityResultType ManifestProcessingAvailabilityResultType }{ { - name: "available deployment", - gvr: utils.DeploymentGVR, - inMemberClusterObj: toUnstructured(t, availableDeploy), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available deployment", + gvr: utils.DeploymentGVR, + inMemberClusterObj: toUnstructured(t, availableDeploy), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available stateful set", - gvr: utils.StatefulSetGVR, - inMemberClusterObj: toUnstructured(t, availableStatefulSet), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available stateful set", + gvr: utils.StatefulSetGVR, + inMemberClusterObj: toUnstructured(t, availableStatefulSet), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available service", - gvr: utils.ServiceGVR, - inMemberClusterObj: toUnstructured(t, availableSvc), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available service", + gvr: utils.ServiceGVR, + inMemberClusterObj: toUnstructured(t, availableSvc), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available daemon set", - gvr: utils.DaemonSetGVR, - inMemberClusterObj: toUnstructured(t, availableDaemonSet), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available daemon set", + gvr: utils.DaemonSetGVR, + inMemberClusterObj: toUnstructured(t, availableDaemonSet), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available custom resource definition", - gvr: utils.CustomResourceDefinitionGVR, - inMemberClusterObj: toUnstructured(t, availableCRD), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available custom resource definition", + gvr: utils.CustomResourceDefinitionGVR, + inMemberClusterObj: toUnstructured(t, availableCRD), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "data object 
(namespace)", - gvr: utils.NamespaceGVR, - inMemberClusterObj: toUnstructured(t, ns.DeepCopy()), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "data object (namespace)", + gvr: utils.NamespaceGVR, + inMemberClusterObj: toUnstructured(t, ns.DeepCopy()), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "data object (config map)", - gvr: utils.ConfigMapGVR, - inMemberClusterObj: toUnstructured(t, cm), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "data object (config map)", + gvr: utils.ConfigMapGVR, + inMemberClusterObj: toUnstructured(t, cm), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "untrackable object (job)", - gvr: utils.JobGVR, - inMemberClusterObj: toUnstructured(t, untrackableJob), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeNotTrackable, + name: "untrackable object (job)", + gvr: utils.JobGVR, + inMemberClusterObj: toUnstructured(t, untrackableJob), + wantAvailabilityResultType: AvailabilityResultTypeNotTrackable, }, { - name: "available service account", - gvr: utils.ServiceAccountGVR, - inMemberClusterObj: toUnstructured(t, &corev1.ServiceAccount{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available service account", + gvr: utils.ServiceAccountGVR, + inMemberClusterObj: toUnstructured(t, &corev1.ServiceAccount{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available network policy", - gvr: utils.NetworkPolicyGVR, - inMemberClusterObj: toUnstructured(t, &networkingv1.NetworkPolicy{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available network policy", + gvr: utils.NetworkPolicyGVR, + inMemberClusterObj: toUnstructured(t, &networkingv1.NetworkPolicy{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available csi driver", - gvr: utils.CSIDriverGVR, - inMemberClusterObj: toUnstructured(t, &storagev1.CSIDriver{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available csi driver", + gvr: utils.CSIDriverGVR, + inMemberClusterObj: toUnstructured(t, &storagev1.CSIDriver{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available csi node", - gvr: utils.CSINodeGVR, - inMemberClusterObj: toUnstructured(t, &storagev1.CSINode{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available csi node", + gvr: utils.CSINodeGVR, + inMemberClusterObj: toUnstructured(t, &storagev1.CSINode{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available storage class", - gvr: utils.StorageClassGVR, - inMemberClusterObj: toUnstructured(t, &storagev1.StorageClass{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available storage class", + gvr: utils.StorageClassGVR, + inMemberClusterObj: toUnstructured(t, &storagev1.StorageClass{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available csi storage capacity", - gvr: utils.CSIStorageCapacityGVR, - inMemberClusterObj: toUnstructured(t, &storagev1.CSIStorageCapacity{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + 
name: "available csi storage capacity", + gvr: utils.CSIStorageCapacityGVR, + inMemberClusterObj: toUnstructured(t, &storagev1.CSIStorageCapacity{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available controller revision", - gvr: utils.ControllerRevisionGVR, - inMemberClusterObj: toUnstructured(t, &appsv1.ControllerRevision{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available controller revision", + gvr: utils.ControllerRevisionGVR, + inMemberClusterObj: toUnstructured(t, &appsv1.ControllerRevision{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available ingress class", - gvr: utils.IngressClassGVR, - inMemberClusterObj: toUnstructured(t, &networkingv1.IngressClass{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available ingress class", + gvr: utils.IngressClassGVR, + inMemberClusterObj: toUnstructured(t, &networkingv1.IngressClass{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available limit range", - gvr: utils.LimitRangeGVR, - inMemberClusterObj: toUnstructured(t, &corev1.LimitRange{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available limit range", + gvr: utils.LimitRangeGVR, + inMemberClusterObj: toUnstructured(t, &corev1.LimitRange{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available resource quota", - gvr: utils.ResourceQuotaGVR, - inMemberClusterObj: toUnstructured(t, &corev1.ResourceQuota{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available resource quota", + gvr: utils.ResourceQuotaGVR, + inMemberClusterObj: toUnstructured(t, &corev1.ResourceQuota{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, { - name: "available priority class", - gvr: utils.PriorityClassGVR, - inMemberClusterObj: toUnstructured(t, &schedulingv1.PriorityClass{}), - wantManifestProcessingAvailabilityResultType: ManifestProcessingAvailabilityResultTypeAvailable, + name: "available priority class", + gvr: utils.PriorityClassGVR, + inMemberClusterObj: toUnstructured(t, &schedulingv1.PriorityClass{}), + wantAvailabilityResultType: AvailabilityResultTypeAvailable, }, } @@ -1003,8 +1003,8 @@ func TestTrackInMemberClusterObjAvailabilityByGVR(t *testing.T) { if err != nil { t.Fatalf("trackInMemberClusterObjAvailabilityByGVR() = %v, want no error", err) } - if gotResTyp != tc.wantManifestProcessingAvailabilityResultType { - t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantManifestProcessingAvailabilityResultType) + if gotResTyp != tc.wantAvailabilityResultType { + t.Errorf("manifestProcessingAvailabilityResultType = %v, want %v", gotResTyp, tc.wantAvailabilityResultType) } }) } @@ -1087,7 +1087,7 @@ func TestTrackInMemberClusterObjAvailability(t *testing.T) { gvr: &utils.DeploymentGVR, inMemberClusterObj: toUnstructured(t, availableDeploy), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + availabilityResTyp: AvailabilityResultTypeAvailable, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -1096,7 +1096,7 @@ func TestTrackInMemberClusterObjAvailability(t *testing.T) { gvr: &utils.ServiceGVR, inMemberClusterObj: nil, applyOrReportDiffResTyp: 
ApplyOrReportDiffResTypeFailedToApply, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + availabilityResTyp: AvailabilityResultTypeSkipped, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -1105,7 +1105,7 @@ func TestTrackInMemberClusterObjAvailability(t *testing.T) { gvr: &utils.DaemonSetGVR, inMemberClusterObj: toUnstructured(t, unavailableDaemonSet), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + availabilityResTyp: AvailabilityResultTypeNotYetAvailable, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -1114,7 +1114,7 @@ func TestTrackInMemberClusterObjAvailability(t *testing.T) { gvr: &utils.JobGVR, inMemberClusterObj: toUnstructured(t, untrackableJob), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, + availabilityResTyp: AvailabilityResultTypeNotTrackable, }, }, }, diff --git a/pkg/controllers/workapplier/backoff_test.go b/pkg/controllers/workapplier/backoff_test.go index d46ee5d2f..fac61b8e6 100644 --- a/pkg/controllers/workapplier/backoff_test.go +++ b/pkg/controllers/workapplier/backoff_test.go @@ -977,7 +977,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { bundles: []*manifestProcessingBundle{ { applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + availabilityResTyp: AvailabilityResultTypeNotYetAvailable, }, }, wantRequeueDelaySeconds: 5, @@ -1001,7 +1001,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { bundles: []*manifestProcessingBundle{ { applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + availabilityResTyp: AvailabilityResultTypeAvailable, }, }, wantRequeueDelaySeconds: 5, // Use fixed delay, since the processing result has changed. @@ -1025,7 +1025,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { bundles: []*manifestProcessingBundle{ { applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + availabilityResTyp: AvailabilityResultTypeAvailable, }, }, wantRequeueDelaySeconds: 10, // Start the slow backoff. @@ -1049,7 +1049,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { bundles: []*manifestProcessingBundle{ { applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + availabilityResTyp: AvailabilityResultTypeAvailable, }, }, wantRequeueDelaySeconds: 50, // Skip to fast back off. @@ -1073,7 +1073,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { bundles: []*manifestProcessingBundle{ { applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + availabilityResTyp: AvailabilityResultTypeAvailable, }, }, wantRequeueDelaySeconds: 200, // Reached the max. cap. 
@@ -1510,7 +1510,7 @@ func TestComputeProcessingResultHash(t *testing.T) { bundles: []*manifestProcessingBundle{ { applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + availabilityResTyp: AvailabilityResultTypeNotYetAvailable, }, }, wantHash: "339954d2619310502c70300409bdf65fd6f14d81c12cfade84879e713ea850ea", @@ -1520,7 +1520,7 @@ func TestComputeProcessingResultHash(t *testing.T) { bundles: []*manifestProcessingBundle{ { applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + availabilityResTyp: AvailabilityResultTypeAvailable, }, }, wantHash: "708387dadaf07f43d46b032c3afb5d984868107b297dad9c99c2d258584d2377", @@ -1552,11 +1552,11 @@ func TestComputeProcessingResultHash(t *testing.T) { }, { applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + availabilityResTyp: AvailabilityResultTypeAvailable, }, { applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, + availabilityResTyp: AvailabilityResultTypeNotTrackable, }, }, wantHash: "1a001803829ef5509d24d60806593cb5fbfb0445d32b9ab1301e5faea57bbaa9", @@ -1566,7 +1566,7 @@ func TestComputeProcessingResultHash(t *testing.T) { bundles: []*manifestProcessingBundle{ { applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + availabilityResTyp: AvailabilityResultTypeAvailable, }, { applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToApply, @@ -1574,7 +1574,7 @@ func TestComputeProcessingResultHash(t *testing.T) { }, { applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, + availabilityResTyp: AvailabilityResultTypeNotTrackable, }, }, // Note that different orders of the manifests result in different hashes. diff --git a/pkg/controllers/workapplier/controller.go b/pkg/controllers/workapplier/controller.go index 9bf469d43..b61ce7316 100644 --- a/pkg/controllers/workapplier/controller.go +++ b/pkg/controllers/workapplier/controller.go @@ -329,25 +329,26 @@ type ManifestProcessingAvailabilityResultType string const ( // The result type for availability check being skipped. - ManifestProcessingAvailabilityResultTypeSkipped ManifestProcessingAvailabilityResultType = "Skipped" + AvailabilityResultTypeSkipped ManifestProcessingAvailabilityResultType = "Skipped" // The result type for availability check failures. - ManifestProcessingAvailabilityResultTypeFailed ManifestProcessingAvailabilityResultType = "Failed" - - // The description for availability check failures. - ManifestProcessingAvailabilityResultTypeFailedDescription = "Failed to track the availability of the applied manifest (error = %s)" + AvailabilityResultTypeFailed ManifestProcessingAvailabilityResultType = "Failed" // The result types for completed availability checks. - ManifestProcessingAvailabilityResultTypeAvailable ManifestProcessingAvailabilityResultType = "Available" + AvailabilityResultTypeAvailable ManifestProcessingAvailabilityResultType = "Available" // Note that the reason string below uses the same value as kept in the old work applier. 
- ManifestProcessingAvailabilityResultTypeNotYetAvailable ManifestProcessingAvailabilityResultType = "ManifestNotAvailableYet" + AvailabilityResultTypeNotYetAvailable ManifestProcessingAvailabilityResultType = "ManifestNotAvailableYet" + AvailabilityResultTypeNotTrackable ManifestProcessingAvailabilityResultType = "NotTrackable" +) - ManifestProcessingAvailabilityResultTypeNotTrackable ManifestProcessingAvailabilityResultType = "NotTrackable" +const ( + // The description for availability check failures. + AvailabilityResultTypeFailedDescription = "Failed to track the availability of the applied manifest (error = %s)" // The descriptions for completed availability checks. - ManifestProcessingAvailabilityResultTypeAvailableDescription = "Manifest is available" - ManifestProcessingAvailabilityResultTypeNotYetAvailableDescription = "Manifest is not yet available; Fleet will check again later" - ManifestProcessingAvailabilityResultTypeNotTrackableDescription = "Manifest's availability is not trackable; Fleet assumes that the applied manifest is available" + AvailabilityResultTypeAvailableDescription = "Manifest is available" + AvailabilityResultTypeNotYetAvailableDescription = "Manifest is not yet available; Fleet will check again later" + AvailabilityResultTypeNotTrackableDescription = "Manifest's availability is not trackable; Fleet assumes that the applied manifest is available" ) type manifestProcessingBundle struct { diff --git a/pkg/controllers/workapplier/controller_integration_migrated_test.go b/pkg/controllers/workapplier/controller_integration_migrated_test.go index 20c27bf6d..1f25be53c 100644 --- a/pkg/controllers/workapplier/controller_integration_migrated_test.go +++ b/pkg/controllers/workapplier/controller_integration_migrated_test.go @@ -88,7 +88,7 @@ var _ = Describe("Work Controller", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), }, } Expect(controller.CompareConditions(expected, resultWork.Status.ManifestConditions[0].Conditions)).Should(BeEmpty()) diff --git a/pkg/controllers/workapplier/controller_integration_test.go b/pkg/controllers/workapplier/controller_integration_test.go index 3cdebfc1a..9aacfe71a 100644 --- a/pkg/controllers/workapplier/controller_integration_test.go +++ b/pkg/controllers/workapplier/controller_integration_test.go @@ -720,7 +720,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -745,7 +745,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -902,7 +902,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -927,7 +927,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: 
string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -1020,7 +1020,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -1199,7 +1199,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -1358,7 +1358,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -1398,7 +1398,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), }, }, }, @@ -1552,7 +1552,7 @@ var _ = Describe("applying manifests", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -1717,7 +1717,7 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -1742,7 +1742,7 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -1988,7 +1988,7 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -2013,7 +2013,7 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -2037,7 +2037,7 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -2307,7 +2307,7 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -2332,7 +2332,7 @@ var _ = Describe("work applier garbage collection", 
func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -2356,7 +2356,7 @@ var _ = Describe("work applier garbage collection", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -2623,7 +2623,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), }, }, }, @@ -2647,7 +2647,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 2, }, }, @@ -2900,7 +2900,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -3352,7 +3352,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -3377,7 +3377,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -3592,7 +3592,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -3755,7 +3755,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -3995,7 +3995,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -4121,7 +4121,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -4252,7 +4252,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: 
string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -4482,7 +4482,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -4613,7 +4613,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -4941,7 +4941,7 @@ var _ = Describe("drift detection and takeover", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -6056,7 +6056,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -6081,7 +6081,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(AvailabilityResultTypeNotYetAvailable), ObservedGeneration: 2, }, }, @@ -6270,7 +6270,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -6295,7 +6295,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(AvailabilityResultTypeNotYetAvailable), ObservedGeneration: 1, }, }, @@ -6710,7 +6710,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -6950,7 +6950,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, @@ -6975,7 +6975,7 @@ var _ = Describe("handling different apply strategies", func() { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 0, }, }, diff --git a/pkg/controllers/workapplier/metrics_test.go b/pkg/controllers/workapplier/metrics_test.go index d4f63016f..802fb47a5 100644 --- a/pkg/controllers/workapplier/metrics_test.go +++ b/pkg/controllers/workapplier/metrics_test.go @@ -76,7 +76,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { }, { 
Type: placementv1beta1.WorkConditionTypeAvailable, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), Status: metav1.ConditionTrue, }, }, @@ -160,7 +160,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { }, { Type: placementv1beta1.WorkConditionTypeAvailable, - Reason: string(ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(AvailabilityResultTypeNotYetAvailable), Status: metav1.ConditionFalse, }, }, @@ -302,7 +302,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), }, }, }, @@ -482,7 +482,7 @@ func TestTrackWorkAndManifestProcessingRequestMetrics(t *testing.T) { { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeFailed), + Reason: string(AvailabilityResultTypeFailed), }, { Type: placementv1beta1.WorkConditionTypeDiffReported, diff --git a/pkg/controllers/workapplier/status.go b/pkg/controllers/workapplier/status.go index 5ace70115..d095c21d7 100644 --- a/pkg/controllers/workapplier/status.go +++ b/pkg/controllers/workapplier/status.go @@ -158,7 +158,7 @@ func (r *Reconciler) refreshWorkStatus( if isAppliedObjectAvailable(bundle.availabilityResTyp) { availableAppliedObjectsCount++ } - if bundle.availabilityResTyp == ManifestProcessingAvailabilityResultTypeNotTrackable { + if bundle.availabilityResTyp == AvailabilityResultTypeNotTrackable { untrackableAppliedObjectsCount++ } if isManifestObjectDiffReported(bundle.applyOrReportDiffResTyp) { @@ -233,7 +233,7 @@ func (r *Reconciler) refreshAppliedWorkStatus( // isManifestObjectAvailable returns if an availability result type indicates that a manifest // object in a bundle is available. func isAppliedObjectAvailable(availabilityResTyp ManifestProcessingAvailabilityResultType) bool { - return availabilityResTyp == ManifestProcessingAvailabilityResultTypeAvailable || availabilityResTyp == ManifestProcessingAvailabilityResultTypeNotTrackable + return availabilityResTyp == AvailabilityResultTypeAvailable || availabilityResTyp == AvailabilityResultTypeNotTrackable } // isManifestObjectDiffReported returns if a diff report result type indicates that a manifest @@ -331,35 +331,35 @@ func setManifestAvailableCondition( ) { var availableCond *metav1.Condition switch availabilityResTyp { - case ManifestProcessingAvailabilityResultTypeSkipped: + case AvailabilityResultTypeSkipped: // Availability check has been skipped for the manifest as it has not been applied yet. // // In this case, no availability condition is set. - case ManifestProcessingAvailabilityResultTypeFailed: + case AvailabilityResultTypeFailed: // Availability check has failed. 
availableCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeFailed), - Message: fmt.Sprintf(ManifestProcessingAvailabilityResultTypeFailedDescription, availabilityError), + Reason: string(AvailabilityResultTypeFailed), + Message: fmt.Sprintf(AvailabilityResultTypeFailedDescription, availabilityError), ObservedGeneration: inMemberClusterObjGeneration, } - case ManifestProcessingAvailabilityResultTypeNotYetAvailable: + case AvailabilityResultTypeNotYetAvailable: // The manifest is not yet available. availableCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeNotYetAvailable), - Message: ManifestProcessingAvailabilityResultTypeNotYetAvailableDescription, + Reason: string(AvailabilityResultTypeNotYetAvailable), + Message: AvailabilityResultTypeNotYetAvailableDescription, ObservedGeneration: inMemberClusterObjGeneration, } - case ManifestProcessingAvailabilityResultTypeNotTrackable: + case AvailabilityResultTypeNotTrackable: // Fleet cannot track the availability of the manifest. availableCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeNotTrackable), - Message: ManifestProcessingAvailabilityResultTypeNotTrackableDescription, + Reason: string(AvailabilityResultTypeNotTrackable), + Message: AvailabilityResultTypeNotTrackableDescription, ObservedGeneration: inMemberClusterObjGeneration, } default: @@ -367,8 +367,8 @@ func setManifestAvailableCondition( availableCond = &metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), - Message: ManifestProcessingAvailabilityResultTypeAvailableDescription, + Reason: string(AvailabilityResultTypeAvailable), + Message: AvailabilityResultTypeAvailableDescription, ObservedGeneration: inMemberClusterObjGeneration, } } diff --git a/pkg/controllers/workapplier/status_test.go b/pkg/controllers/workapplier/status_test.go index 423080118..d9555927b 100644 --- a/pkg/controllers/workapplier/status_test.go +++ b/pkg/controllers/workapplier/status_test.go @@ -96,7 +96,7 @@ func TestRefreshWorkStatus(t *testing.T) { }, inMemberClusterObj: toUnstructured(t, deploy1.DeepCopy()), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + availabilityResTyp: AvailabilityResultTypeAvailable, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -135,7 +135,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 2, }, }, @@ -165,7 +165,7 @@ func TestRefreshWorkStatus(t *testing.T) { }, inMemberClusterObj: toUnstructured(t, deploy2.DeepCopy()), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeAppliedWithFailedDriftDetection, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + availabilityResTyp: AvailabilityResultTypeSkipped, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -179,7 +179,7 @@ func TestRefreshWorkStatus(t *testing.T) { }, inMemberClusterObj: toUnstructured(t, deploy3.DeepCopy()), applyOrReportDiffResTyp: 
ApplyOrReportDiffResTypeFailedToTakeOver, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + availabilityResTyp: AvailabilityResultTypeSkipped, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -252,7 +252,7 @@ func TestRefreshWorkStatus(t *testing.T) { }, inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeFailed, + availabilityResTyp: AvailabilityResultTypeFailed, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -266,7 +266,7 @@ func TestRefreshWorkStatus(t *testing.T) { }, inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + availabilityResTyp: AvailabilityResultTypeNotYetAvailable, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -280,7 +280,7 @@ func TestRefreshWorkStatus(t *testing.T) { }, inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeApplied, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, + availabilityResTyp: AvailabilityResultTypeNotTrackable, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -316,7 +316,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeFailed), + Reason: string(AvailabilityResultTypeFailed), }, }, }, @@ -339,7 +339,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(AvailabilityResultTypeNotYetAvailable), }, }, }, @@ -362,7 +362,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeNotTrackable), + Reason: string(AvailabilityResultTypeNotTrackable), }, }, }, @@ -446,7 +446,7 @@ func TestRefreshWorkStatus(t *testing.T) { }, inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDrifts, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + availabilityResTyp: AvailabilityResultTypeSkipped, drifts: []fleetv1beta1.PatchDetail{ { Path: "/spec/replicas", @@ -466,7 +466,7 @@ func TestRefreshWorkStatus(t *testing.T) { }, inMemberClusterObj: toUnstructured(t, deploy2.DeepCopy()), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToTakeOver, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + availabilityResTyp: AvailabilityResultTypeSkipped, diffs: []fleetv1beta1.PatchDetail{ { Path: "/spec/replicas", @@ -605,7 +605,7 @@ func TestRefreshWorkStatus(t *testing.T) { }, inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + availabilityResTyp: AvailabilityResultTypeSkipped, diffs: []fleetv1beta1.PatchDetail{ { Path: "/x", @@ -697,7 +697,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), }, }, }, @@ -717,7 +717,7 @@ func TestRefreshWorkStatus(t 
*testing.T) { }, inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + availabilityResTyp: AvailabilityResultTypeSkipped, diffs: []fleetv1beta1.PatchDetail{}, }, }, @@ -815,7 +815,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), }, }, }, @@ -835,7 +835,7 @@ func TestRefreshWorkStatus(t *testing.T) { }, inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDiff, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + availabilityResTyp: AvailabilityResultTypeSkipped, diffs: []fleetv1beta1.PatchDetail{ { @@ -855,7 +855,7 @@ func TestRefreshWorkStatus(t *testing.T) { }, inMemberClusterObj: toUnstructured(t, ns.DeepCopy()), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + availabilityResTyp: AvailabilityResultTypeSkipped, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -978,7 +978,7 @@ func TestRefreshWorkStatus(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), }, }, }, @@ -998,7 +998,7 @@ func TestRefreshWorkStatus(t *testing.T) { }, inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToReportDiff, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + availabilityResTyp: AvailabilityResultTypeSkipped, }, { id: &fleetv1beta1.WorkResourceIdentifier{ @@ -1011,7 +1011,7 @@ func TestRefreshWorkStatus(t *testing.T) { }, inMemberClusterObj: toUnstructured(t, ns.DeepCopy()), applyOrReportDiffResTyp: ApplyOrReportDiffResTypeNoDiffFound, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + availabilityResTyp: AvailabilityResultTypeSkipped, }, }, wantWorkStatus: &fleetv1beta1.WorkStatus{ @@ -1390,14 +1390,14 @@ func TestSetManifestAvailableCondition(t *testing.T) { { name: "available", manifestCond: &fleetv1beta1.ManifestCondition{}, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeAvailable, + availabilityResTyp: AvailabilityResultTypeAvailable, inMemberClusterObjGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, @@ -1410,19 +1410,19 @@ func TestSetManifestAvailableCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, }, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeFailed, + availabilityResTyp: AvailabilityResultTypeFailed, inMemberClusterObjGeneration: 2, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: 
string(ManifestProcessingAvailabilityResultTypeFailed), + Reason: string(AvailabilityResultTypeFailed), ObservedGeneration: 2, }, }, @@ -1435,19 +1435,19 @@ func TestSetManifestAvailableCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(AvailabilityResultTypeAvailable), ObservedGeneration: 1, }, }, }, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotYetAvailable, + availabilityResTyp: AvailabilityResultTypeNotYetAvailable, inMemberClusterObjGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(AvailabilityResultTypeNotYetAvailable), ObservedGeneration: 1, }, }, @@ -1456,14 +1456,14 @@ func TestSetManifestAvailableCondition(t *testing.T) { { name: "untrackable", manifestCond: &fleetv1beta1.ManifestCondition{}, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeNotTrackable, + availabilityResTyp: AvailabilityResultTypeNotTrackable, inMemberClusterObjGeneration: 1, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(ManifestProcessingAvailabilityResultTypeNotTrackable), + Reason: string(AvailabilityResultTypeNotTrackable), ObservedGeneration: 1, }, }, @@ -1476,12 +1476,12 @@ func TestSetManifestAvailableCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(ManifestProcessingAvailabilityResultTypeFailed), + Reason: string(AvailabilityResultTypeFailed), ObservedGeneration: 1, }, }, }, - availabilityResTyp: ManifestProcessingAvailabilityResultTypeSkipped, + availabilityResTyp: AvailabilityResultTypeSkipped, inMemberClusterObjGeneration: 2, wantManifestCond: &fleetv1beta1.ManifestCondition{ Conditions: []metav1.Condition{}, diff --git a/pkg/controllers/workgenerator/controller_integration_test.go b/pkg/controllers/workgenerator/controller_integration_test.go index 5fdd7cbae..e388dd05f 100644 --- a/pkg/controllers/workgenerator/controller_integration_test.go +++ b/pkg/controllers/workgenerator/controller_integration_test.go @@ -2726,7 +2726,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(workapplier.AvailabilityResultTypeAvailable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -2942,7 +2942,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", Status: metav1.ConditionTrue, // As explained earlier, for this spec the ConfigMap object is // considered to be untrackable in availability check. 
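Editor's note: the spec above relies on the fact that a ConfigMap exposes no readiness signal, so the work applier reports it with the NotTrackable reason and, per the description constant earlier in this patch, assumes it is available once applied. The sketch below only illustrates that classification idea; the helper name and the set of trackable kinds are assumptions for illustration, not Fleet's actual availability-tracking code.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// availabilityResult mirrors the idea of the result types above; the real
// tracker is not shown in this patch, so this helper is purely hypothetical.
type availabilityResult string

const (
	resultTrackable    availabilityResult = "Trackable"
	resultNotTrackable availabilityResult = "NotTrackable"
)

// classifyTrackability illustrates why a ConfigMap ends up with the
// NotTrackable reason: it has no status to observe, so availability is
// assumed once the manifest is applied. The trackable set is an assumption.
func classifyTrackability(gvk schema.GroupVersionKind) availabilityResult {
	switch gvk.Kind {
	case "Deployment", "StatefulSet", "DaemonSet", "Job":
		return resultTrackable
	default:
		return resultNotTrackable
	}
}

func main() {
	cm := schema.GroupVersionKind{Version: "v1", Kind: "ConfigMap"}
	fmt.Printf("%s => %s\n", cm.Kind, classifyTrackability(cm))
}
```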
- Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeNotTrackable), + Reason: string(workapplier.AvailabilityResultTypeNotTrackable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3068,7 +3068,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(workapplier.AvailabilityResultTypeAvailable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3097,7 +3097,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(workapplier.AvailabilityResultTypeNotYetAvailable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3146,7 +3146,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", Condition: metav1.Condition{ Type: string(placementv1beta1.WorkConditionTypeAvailable), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(workapplier.AvailabilityResultTypeNotYetAvailable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3221,7 +3221,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(workapplier.AvailabilityResultTypeAvailable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3250,7 +3250,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(workapplier.AvailabilityResultTypeAvailable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, @@ -3357,7 +3357,7 @@ var _ = Describe("Test Work Generator Controller for clusterResourcePlacement", { Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(workapplier.AvailabilityResultTypeAvailable), ObservedGeneration: 1, Message: "", LastTransitionTime: now, diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index 23aed8944..d0d91cb6f 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -1330,7 +1330,7 @@ func safeRolloutWorkloadCRPStatusUpdatedActual(wantSelectedResourceIdentifiers [ Condition: metav1.Condition{ Type: string(placementv1beta1.PerClusterAvailableConditionType), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(workapplier.AvailabilityResultTypeNotYetAvailable), ObservedGeneration: failedResourceObservedGeneration, }, }, diff --git a/test/e2e/enveloped_object_placement_test.go b/test/e2e/enveloped_object_placement_test.go index dbc056753..8825fb3da 100644 --- a/test/e2e/enveloped_object_placement_test.go +++ b/test/e2e/enveloped_object_placement_test.go @@ -325,7 +325,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { Condition: 
metav1.Condition{ Type: string(placementv1beta1.PerClusterAvailableConditionType), Status: metav1.ConditionFalse, - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(workapplier.AvailabilityResultTypeNotYetAvailable), ObservedGeneration: 1, }, }, diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index e03a97c24..e0f038a15 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -1122,7 +1122,7 @@ func verifyWorkPropagationAndMarkAsAvailable(memberClusterName, crpName string, Type: placementv1beta1.WorkConditionTypeAvailable, Status: metav1.ConditionTrue, LastTransitionTime: metav1.Now(), - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeAvailable), + Reason: string(workapplier.AvailabilityResultTypeAvailable), Message: "Set to be available", ObservedGeneration: w.Generation, }) diff --git a/test/upgrade/before/actuals_test.go b/test/upgrade/before/actuals_test.go index 287a72f02..553266fcc 100644 --- a/test/upgrade/before/actuals_test.go +++ b/test/upgrade/before/actuals_test.go @@ -428,7 +428,7 @@ func crpWithOneFailedAvailabilityCheckStatusUpdatedActual( Status: metav1.ConditionFalse, // The new and old applier uses the same reason string to make things // a bit easier. - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(workapplier.AvailabilityResultTypeNotYetAvailable), ObservedGeneration: wantFailedResourceObservedGeneration, }, }, @@ -578,7 +578,7 @@ func crpWithStuckRolloutDueToOneFailedAvailabilityCheckStatusUpdatedActual( Status: metav1.ConditionFalse, // The new and old applier uses the same reason string to make things // a bit easier. - Reason: string(workapplier.ManifestProcessingAvailabilityResultTypeNotYetAvailable), + Reason: string(workapplier.AvailabilityResultTypeNotYetAvailable), ObservedGeneration: failedResourceObservedGeneration, }, }, From cb52d4f3cbb9b0ccae31964a1d3fba76b919d577 Mon Sep 17 00:00:00 2001 From: Wantong Date: Thu, 21 Aug 2025 22:42:44 -0700 Subject: [PATCH 27/38] chore: run resourcePlacement e2e in a separate pipeline (#218) --------- Signed-off-by: Wantong Jiang --- .github/workflows/ci.yml | 10 ++++++++-- test/e2e/README.md | 9 +++++++-- test/e2e/resource_placement_pickall_test.go | 2 +- test/e2e/resource_placement_pickn_test.go | 2 +- 4 files changed, 17 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 136e608be..f9697cf00 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -89,12 +89,16 @@ jobs: strategy: fail-fast: false matrix: - customized-settings: [default, joinleave, custom] + customized-settings: [default, resourceplacement, joinleave, custom] include: - customized-settings: default # to shorten the test duration, set the resource snapshot creation interval to 0 resource-snapshot-creation-minimum-interval: 0m resource-changes-collection-duration: 0m + - customized-settings: resourceplacement + # to shorten the test duration, set the resource snapshot creation interval to 0 + resource-snapshot-creation-minimum-interval: 0m + resource-changes-collection-duration: 0m - customized-settings: joinleave # to shorten the test duration, set the resource snapshot creation interval to 0 resource-snapshot-creation-minimum-interval: 0m @@ -136,7 +140,9 @@ jobs: - name: Run e2e tests run: | if [ "${{ matrix.customized-settings }}" = "default" ]; then - make e2e-tests LABEL_FILTER="!custom && !joinleave" + make e2e-tests LABEL_FILTER="!custom 
&& !joinleave && !resourceplacement" + elif [ "${{ matrix.customized-settings }}" = "resourceplacement" ]; then + make e2e-tests LABEL_FILTER="!custom && resourceplacement" elif [ "${{ matrix.customized-settings }}" = "joinleave" ]; then make e2e-tests LABEL_FILTER="!custom && joinleave" else diff --git a/test/e2e/README.md b/test/e2e/README.md index 01e204a58..883185a2f 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -35,16 +35,21 @@ test suites, follow the steps below: ginkgo --label-filter="!custom" -v -p . ``` - or run the custom configuration e2e tests with the following command + or run the custom configuration e2e tests with the following command: ```sh ginkgo --label-filter="custom" -v -p . ``` - or run tests involving member cluster join/leave scenarios with the following command (serially) + or run tests involving member cluster join/leave scenarios with the following command (serially): ```sh ginkgo --label-filter="joinleave" -v . ``` + or run tests related to resourcePlacement (rp) only with the following command: + ```sh + ginkgo --label-filter="resourceplacement" -v -p . + ``` + or create a launch.json in your vscode workspace. ```yaml { diff --git a/test/e2e/resource_placement_pickall_test.go b/test/e2e/resource_placement_pickall_test.go index a035f71ac..5b3df61e8 100644 --- a/test/e2e/resource_placement_pickall_test.go +++ b/test/e2e/resource_placement_pickall_test.go @@ -31,7 +31,7 @@ import ( "github.com/kubefleet-dev/kubefleet/test/e2e/framework" ) -var _ = Describe("placing namespaced scoped resources using a RP with PickAll policy", func() { +var _ = Describe("placing namespaced scoped resources using a RP with PickAll policy", Label("resourceplacement"), func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) diff --git a/test/e2e/resource_placement_pickn_test.go b/test/e2e/resource_placement_pickn_test.go index a7732bfbd..78d8e4e53 100644 --- a/test/e2e/resource_placement_pickn_test.go +++ b/test/e2e/resource_placement_pickn_test.go @@ -31,7 +31,7 @@ import ( "github.com/kubefleet-dev/kubefleet/test/e2e/framework" ) -var _ = Describe("placing namespaced scoped resources using a RP with PickN policy", func() { +var _ = Describe("placing namespaced scoped resources using a RP with PickN policy", Label("resourceplacement"), func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) rpKey := types.NamespacedName{Name: rpName, Namespace: appNamespace().Name} From ff268e6589729eff82568c669bb86f46394761ba Mon Sep 17 00:00:00 2001 From: Wantong Date: Fri, 22 Aug 2025 01:16:16 -0700 Subject: [PATCH 28/38] test: enable pickFixed tests for RP (#202) Signed-off-by: Wantong Jiang --- test/e2e/resource_placement_pickfixed_test.go | 309 ++++++++++++++++++ 1 file changed, 309 insertions(+) create mode 100644 test/e2e/resource_placement_pickfixed_test.go diff --git a/test/e2e/resource_placement_pickfixed_test.go b/test/e2e/resource_placement_pickfixed_test.go new file mode 100644 index 000000000..21f29225a --- /dev/null +++ b/test/e2e/resource_placement_pickfixed_test.go @@ -0,0 +1,309 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/test/e2e/framework" +) + +var _ = Describe("placing namespaced scoped resources using an RP with PickFixed policy", Label("resourceplacement"), func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Name: rpName, Namespace: appNamespace().Name} + + BeforeEach(OncePerOrdered, func() { + // Create the resources. + createWorkResources() + + // Create the CRP with Namespace-only selector. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: namespaceOnlySelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterEach(OncePerOrdered, func() { + ensureRPAndRelatedResourcesDeleted(rpKey, allMemberClusters) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Context("pick some clusters", Ordered, func() { + It("should create rp with pickFixed policy successfully", func() { + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on the picked clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1EastProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the picked clusters") + }) + }) + + Context("refreshing target clusters", Ordered, func() { + It("should should create an RP with pickFixed policy successfully", func() { + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should place resources on the specified clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1EastProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters") + }) + + It("update RP to pick a different cluster", func() { + rp := &placementv1beta1.ResourcePlacement{} + Eventually(func() error { + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return err + } + rp.Spec.Policy.ClusterNames = []string{memberCluster2EastCanaryName} + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on newly specified clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster2EastCanary) + 
Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters") + }) + + It("should remove resources from previously specified clusters", func() { + checkIfRemovedConfigMapFromMemberClusters([]*framework.Cluster{memberCluster1EastProd}) + }) + }) + + Context("pick unhealthy and non-existent clusters", Ordered, func() { + It("should create RP with pickFixed policy targeting unhealthy and non-existent clusters", func() { + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster4UnhealthyName, + memberCluster5LeftName, + memberCluster6NonExistentName, + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), nil, []string{memberCluster4UnhealthyName, memberCluster5LeftName, memberCluster6NonExistentName}, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + }) + + Context("switch to another cluster to simulate stuck deleting works", Ordered, func() { + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + var currentConfigMap corev1.ConfigMap + + It("should create RP with pickFixed policy successfully", func() { + // Create the RP in the same namespace selecting namespaced resources. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place resources on specified clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1EastProd) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters") + }) + + It("should add finalizer to work resources on the specified clusters", func() { + Eventually(func() error { + if err := memberCluster1EastProd.KubeClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: appConfigMapName}, ¤tConfigMap); err != nil { + return err + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to get configmap") + // Add finalizer to block deletion to simulate work stuck + controllerutil.AddFinalizer(¤tConfigMap, "example.com/finalizer") + Expect(memberCluster1EastProd.KubeClient.Update(ctx, ¤tConfigMap)).To(Succeed(), "Failed to update configmap with finalizer") + }) + + It("update RP to pick another cluster", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return err + } + rp.Spec.Policy.ClusterNames = []string{memberCluster2EastCanaryName} + return hubClient.Update(ctx, rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP") + }) + + It("should update RP status as expected", func() { + // should successfully apply to the new cluster + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should have a deletion timestamp on work objects", func() { + work := &placementv1beta1.Work{} + workName := fmt.Sprintf("%s.%s-work", rpKey.Namespace, rpName) + Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: fmt.Sprintf("fleet-member-%s", memberCluster1EastProdName), Name: workName}, work)).Should(Succeed(), "Failed to get work") + Expect(work.DeletionTimestamp).ShouldNot(BeNil(), "Work should have a deletion timestamp") + }) + + It("configmap should still exists on previously specified cluster and be in deleting state", func() { + configMap := &corev1.ConfigMap{} + Expect(memberCluster1EastProd.KubeClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: appConfigMapName}, configMap)).Should(Succeed(), "Failed to get 
configmap") + Expect(configMap.DeletionTimestamp).ShouldNot(BeNil(), "ConfigMap should have a deletion timestamp") + }) + + It("should remove finalizer from work resources on the specified clusters", func() { + configMap := &corev1.ConfigMap{} + Expect(memberCluster1EastProd.KubeClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: appConfigMapName}, configMap)).Should(Succeed(), "Failed to get configmap") + controllerutil.RemoveFinalizer(configMap, "example.com/finalizer") + Expect(memberCluster1EastProd.KubeClient.Update(ctx, configMap)).To(Succeed(), "Failed to update configmap with finalizer") + }) + + It("should remove resources from previously specified clusters", func() { + checkIfRemovedConfigMapFromMemberClusters([]*framework.Cluster{memberCluster1EastProd}) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster2EastCanaryName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + }) +}) From 7fc654ce5353c2d778c157bf08303eb2e5a4b53a Mon Sep 17 00:00:00 2001 From: Zhiying Lin <54013513+zhiying-lin@users.noreply.github.com> Date: Sat, 23 Aug 2025 07:02:47 +0800 Subject: [PATCH 29/38] test: add rp tests with custom config (#221) --- ...ource_placement_with_custom_config_test.go | 209 ++++++++++++++++++ 1 file changed, 209 insertions(+) create mode 100644 test/e2e/resource_placement_with_custom_config_test.go diff --git a/test/e2e/resource_placement_with_custom_config_test.go b/test/e2e/resource_placement_with_custom_config_test.go new file mode 100644 index 000000000..b9d713bcc --- /dev/null +++ b/test/e2e/resource_placement_with_custom_config_test.go @@ -0,0 +1,209 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package e2e + +import ( + "fmt" + "math" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" +) + +var _ = Describe("validating RP when using customized resourceSnapshotCreationMinimumInterval and resourceChangesCollectionDuration", Label("custom"), Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + + // skip entire suite if interval is zero + BeforeEach(OncePerOrdered, func() { + if resourceSnapshotCreationMinimumInterval == 0 && resourceChangesCollectionDuration == 0 { + Skip("Skipping customized-config placement test when RESOURCE_SNAPSHOT_CREATION_MINIMUM_INTERVAL=0m and RESOURCE_CHANGES_COLLECTION_DURATION=0m") + } + + // Create the resources. + createWorkResources() + + // Create the CRP with Namespace-only selector. 
+ crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: namespaceOnlySelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + By("should update CRP status as expected") + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + + By("creating RP") + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "ConfigMap", + Version: "v1", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + workNamespaceLabelName: fmt.Sprintf("test-%d", GinkgoParallelProcess()), + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + + }) + + AfterEach(OncePerOrdered, func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, allMemberClusters) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Context("validating RP status and should not update immediately", func() { + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual([]placementv1beta1.ResourceIdentifier{}, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should not place work resources on member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + + It("updating the resources on the hub", func() { + configMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + configMap := &corev1.ConfigMap{} + Expect(hubClient.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: appNamespace().Name}, configMap)).Should(Succeed(), "Failed to get the configMap %s", configMapName) + configMap.Labels = map[string]string{ + workNamespaceLabelName: fmt.Sprintf("test-%d", GinkgoParallelProcess()), + } + Expect(hubClient.Update(ctx, configMap)).Should(Succeed(), "Failed to update configMap %s", configMapName) + + }) + + It("should not update RP status immediately", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual([]placementv1beta1.ResourceIdentifier{}, allMemberClusterNames, nil, "0") + Consistently(rpStatusUpdatedActual, resourceSnapshotDelayDuration-3*time.Second, consistentlyInterval).Should(Succeed(), "RP %s status should be 
unchanged", rpName) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("validating the resourceSnapshots are created", func() { + var resourceSnapshotList placementv1beta1.ResourceSnapshotList + masterResourceSnapshotLabels := client.MatchingLabels{ + placementv1beta1.PlacementTrackingLabel: rpName, + } + Expect(hubClient.List(ctx, &resourceSnapshotList, masterResourceSnapshotLabels, client.InNamespace(appNamespace().Name))).Should(Succeed(), "Failed to list ResourceSnapshots for RP %s", rpName) + Expect(len(resourceSnapshotList.Items)).Should(Equal(2), "Expected 2 ResourceSnapshots for RP %s, got %d", rpName, len(resourceSnapshotList.Items)) + // Use math.Abs to get the absolute value of the time difference in seconds. + snapshotDiffInSeconds := resourceSnapshotList.Items[0].CreationTimestamp.Time.Sub(resourceSnapshotList.Items[1].CreationTimestamp.Time).Seconds() + diff := math.Abs(snapshotDiffInSeconds) + Expect(time.Duration(diff)*time.Second >= resourceSnapshotDelayDuration).To(BeTrue(), "The time difference between ResourceSnapshots should be more than resourceSnapshotDelayDuration") + }) + }) + + Context("validating that RP status can be updated after updating the resources", func() { + It("validating the resourceSnapshots are created", func() { + Eventually(func() error { + var resourceSnapshotList placementv1beta1.ResourceSnapshotList + masterResourceSnapshotLabels := client.MatchingLabels{ + placementv1beta1.PlacementTrackingLabel: rpName, + } + if err := hubClient.List(ctx, &resourceSnapshotList, masterResourceSnapshotLabels, client.InNamespace(appNamespace().Name)); err != nil { + return fmt.Errorf("failed to list ResourceSnapshots for RP %s: %w", rpName, err) + } + if len(resourceSnapshotList.Items) != 1 { + return fmt.Errorf("got %d ResourceSnapshot for RP %s, want 1", len(resourceSnapshotList.Items), rpName) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to wait for ResourceSnapshots to be created") + }) + + It("updating the resources on the hub", func() { + configMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + configMap := &corev1.ConfigMap{} + Expect(hubClient.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: appNamespace().Name}, configMap)).Should(Succeed(), "Failed to get the configMap %s", configMapName) + configMap.Labels = map[string]string{ + workNamespaceLabelName: fmt.Sprintf("test-%d", GinkgoParallelProcess()), + } + Expect(hubClient.Update(ctx, configMap)).Should(Succeed(), "Failed to update configMap %s", configMapName) + + }) + + It("should update RP status for snapshot 0 as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual([]placementv1beta1.ResourceIdentifier{}, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should update RP status for snapshot 1 as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(rpStatusUpdatedActual, 
resourceSnapshotDelayDuration+eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("validating the resourceSnapshots are created", func() { + var resourceSnapshotList placementv1beta1.ResourceSnapshotList + masterResourceSnapshotLabels := client.MatchingLabels{ + placementv1beta1.PlacementTrackingLabel: rpName, + } + Expect(hubClient.List(ctx, &resourceSnapshotList, masterResourceSnapshotLabels, client.InNamespace(appNamespace().Name))).Should(Succeed(), "Failed to list ResourceSnapshots for RP %s", rpName) + Expect(len(resourceSnapshotList.Items)).Should(Equal(2), "Expected 2 ResourceSnapshots for RP %s, got %d", rpName, len(resourceSnapshotList.Items)) + // Use math.Abs to get the absolute value of the time difference in seconds. + snapshotDiffInSeconds := resourceSnapshotList.Items[0].CreationTimestamp.Time.Sub(resourceSnapshotList.Items[1].CreationTimestamp.Time).Seconds() + diff := math.Abs(snapshotDiffInSeconds) + Expect(time.Duration(diff)*time.Second >= resourceSnapshotDelayDuration).To(BeTrue(), "The time difference between ResourceSnapshots should be more than resourceSnapshotDelayDuration") + }) + }) +}) From 6313b8d15d086cccac147f6a36af902fd81dabf8 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Mon, 25 Aug 2025 16:06:44 +1000 Subject: [PATCH 30/38] feat: minor improvement to the pod watcher (azure property provider) for better performance in larger clusters (#156) --- pkg/propertyprovider/azure/controllers/pod.go | 11 +++- pkg/propertyprovider/azure/provider.go | 58 +++++++++++++++++++ 2 files changed, 67 insertions(+), 2 deletions(-) diff --git a/pkg/propertyprovider/azure/controllers/pod.go b/pkg/propertyprovider/azure/controllers/pod.go index 98640b974..540032654 100644 --- a/pkg/propertyprovider/azure/controllers/pod.go +++ b/pkg/propertyprovider/azure/controllers/pod.go @@ -52,7 +52,10 @@ func (p *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R klog.V(2).InfoS("Reconciliation ends for pod objects in the Azure property provider", "pod", podRef, "latency", latency) }() - // Retrieve the pod object. + // Retrieve the pod object from cache. + // + // Note that the transform func has removed fields that are irrelevant to the pod watcher + // from the retrieved objects at this moment. pod := &corev1.Pod{} if err := p.Client.Get(ctx, req.NamespacedName, pod); err != nil { // Failed to get the pod object. @@ -86,8 +89,12 @@ func (p *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R // This behavior is consistent with how the Kubernetes CLI tool reports requested capacity // on a specific node (`kubectl describe node` command). // - // Note that the tracker will attempt to track the pod even if it has been marked for deletion. + // The tracker will attempt to track the pod even if it has been marked for deletion (when it + // is actually gone, the pod will be untracked). if len(pod.Spec.NodeName) > 0 && pod.Status.Phase != corev1.PodSucceeded && pod.Status.Phase != corev1.PodFailed { + // The pod watcher has field selectors enabled, which will not see pods that should not + // be tracked (e.g., pods that are not assigned to a node, or pods that are in terminal states). + // The check is added here for completeness reasons. 
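+		// The condition on the enclosing if statement mirrors the field selectors configured for
+		// the pod cache in provider.go (spec.nodeName is set, and status.phase is neither
+		// Succeeded nor Failed).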
klog.V(2).InfoS("Attempt to track the pod", "pod", podRef) p.PT.AddOrUpdate(pod) } else { diff --git a/pkg/propertyprovider/azure/provider.go b/pkg/propertyprovider/azure/provider.go index 72bd7a2f9..cf86d8c67 100644 --- a/pkg/propertyprovider/azure/provider.go +++ b/pkg/propertyprovider/azure/provider.go @@ -24,12 +24,14 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" @@ -88,8 +90,64 @@ var _ propertyprovider.PropertyProvider = &PropertyProvider{} func (p *PropertyProvider) Start(ctx context.Context, config *rest.Config) error { klog.V(2).Info("Starting Azure property provider") + podObj := client.Object(&corev1.Pod{}) mgr, err := ctrl.NewManager(config, ctrl.Options{ Scheme: scheme.Scheme, + Cache: cache.Options{ + ByObject: map[client.Object]cache.ByObject{ + podObj: { + // Set up field selectors so that API server will not send out watch events that + // are not relevant to the pod watcher. This is essentially a trade-off between + // in-memory check overhead and encoding/transmission overhead; for large clusters + // with frequent pod creation/deletion ops, the trade-off seems to be worth it based + // on current experimentation results. + Field: fields.AndSelectors( + fields.OneTermNotEqualSelector("spec.nodeName", ""), + fields.OneTermNotEqualSelector("status.phase", string(corev1.PodSucceeded)), + fields.OneTermNotEqualSelector("status.phase", string(corev1.PodFailed)), + ), + // Drop irrelevant fields from the pod object; this can significantly reduce the + // CPU and memory usage of the pod watcher, as less data is stored in cache. + Transform: func(obj interface{}) (interface{}, error) { + pod, ok := obj.(*corev1.Pod) + if !ok { + return nil, fmt.Errorf("failed to cast object to a pod object") + } + + // The pod watcher only cares about a very limited set of pod fields, + // specifically the pod's current phase, node name, and resource requests. + + // Drop unused metadata fields. + pod.ObjectMeta.Labels = nil + pod.ObjectMeta.Annotations = nil + pod.ObjectMeta.OwnerReferences = nil + pod.ObjectMeta.ManagedFields = nil + + // Drop the rest of the pod status as they are irrelevant to the pod watcher. + pod.Status = corev1.PodStatus{ + Phase: pod.Status.Phase, + } + + // Drop the unwanted pod spec fields. + rebuiltedContainers := make([]corev1.Container, 0, len(pod.Spec.Containers)) + for idx := range pod.Spec.Containers { + c := pod.Spec.Containers[idx] + rebuiltedContainers = append(rebuiltedContainers, corev1.Container{ + Name: c.Name, + Image: c.Image, + Resources: c.Resources, + ResizePolicy: c.ResizePolicy, + }) + } + pod.Spec = corev1.PodSpec{ + NodeName: pod.Spec.NodeName, + Containers: rebuiltedContainers, + } + return pod, nil + }, + }, + }, + }, // Disable metric serving for the Azure property provider controller manager. 
// // Note that this will not stop the metrics from being collected and exported; as they From c901571e82b55aed605dfb74eeff9225225740be Mon Sep 17 00:00:00 2001 From: Arvind Thirumurugan Date: Mon, 25 Aug 2025 14:02:38 -0700 Subject: [PATCH 31/38] interface: update ClusterResourcePlacementStatus API (#222) --- .../v1beta1/clusterresourceplacement_types.go | 32 +- .../v1beta1/zz_generated.deepcopy.go | 3 +- ...t.io_clusterresourceplacementstatuses.yaml | 1 + ...t.io_clusterresourceplacementstatuses.yaml | 370 +----------------- 4 files changed, 33 insertions(+), 373 deletions(-) create mode 120000 charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml diff --git a/apis/placement/v1beta1/clusterresourceplacement_types.go b/apis/placement/v1beta1/clusterresourceplacement_types.go index 8181aae11..e8ea458a0 100644 --- a/apis/placement/v1beta1/clusterresourceplacement_types.go +++ b/apis/placement/v1beta1/clusterresourceplacement_types.go @@ -1612,14 +1612,15 @@ func (rpl *ResourcePlacementList) GetPlacementObjs() []PlacementObj { // +genclient:Namespaced // +kubebuilder:object:root=true // +kubebuilder:resource:scope="Namespaced",shortName=crps,categories={fleet,fleet-placement} -// +kubebuilder:subresource:status // +kubebuilder:storageversion -// +kubebuilder:printcolumn:JSONPath=`.status.observedResourceIndex`,name="Resource-Index",type=string +// +kubebuilder:printcolumn:JSONPath=`.sourceStatus.observedResourceIndex`,name="Resource-Index",type=string +// +kubebuilder:printcolumn:JSONPath=`.lastUpdatedTime`,name="Last-Updated",type=string // +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterResourcePlacementStatus is a namespaced resource that mirrors the PlacementStatus of a corresponding // ClusterResourcePlacement object. This allows namespace-scoped access to cluster-scoped placement status. +// The LastUpdatedTime field is updated whenever the CRPS object is updated. // // This object will be created within the target namespace that contains resources being managed by the CRP. // When multiple ClusterResourcePlacements target the same namespace, each ClusterResourcePlacementStatus within that @@ -1631,11 +1632,16 @@ type ClusterResourcePlacementStatus struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - // The observed status of ClusterResourcePlacementStatus which mirrors the PlacementStatus of the corresponding ClusterResourcePlacement. - // This includes information about the namespace and resources within that namespace that are being managed by the placement. - // The status will show placement details for resources selected by the ClusterResourcePlacement's ResourceSelectors. - // +kubebuilder:validation:Optional - Status PlacementStatus `json:"status,omitempty"` + // Source status copied from the corresponding ClusterResourcePlacement. + // +kubebuilder:validation:Required + PlacementStatus `json:"sourceStatus,omitempty"` + + // LastUpdatedTime is the timestamp when this CRPS object was last updated. + // This field is set to the current time whenever the CRPS object is created or modified. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=date-time + LastUpdatedTime metav1.Time `json:"lastUpdatedTime,omitempty"` } // ClusterResourcePlacementStatusList contains a list of ClusterResourcePlacementStatus. 
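
For orientation, below is a minimal sketch of how the reshaped object can be populated under the new layout, where the embedded PlacementStatus is serialized under the sourceStatus key and lastUpdatedTime is refreshed on every write. The package, helper name, and naming convention are illustrative assumptions only, not anything defined by this change.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1"
)

// newCRPS sketches how a ClusterResourcePlacementStatus mirror object is assembled:
// the embedded PlacementStatus carries the copied status (serialized as sourceStatus),
// and LastUpdatedTime records when the mirror object was last written.
func newCRPS(crpName, namespace string, source placementv1beta1.PlacementStatus) *placementv1beta1.ClusterResourcePlacementStatus {
	return &placementv1beta1.ClusterResourcePlacementStatus{
		ObjectMeta: metav1.ObjectMeta{
			Name:      crpName,   // illustrative: a per-CRP name for the mirror object
			Namespace: namespace, // the namespace whose resources the CRP manages
		},
		PlacementStatus: source,
		LastUpdatedTime: metav1.Now(),
	}
}
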
@@ -1647,18 +1653,6 @@ type ClusterResourcePlacementStatusList struct { Items []ClusterResourcePlacementStatus `json:"items"` } -// SetConditions sets the conditions of the ClusterResourcePlacementStatus. -func (m *ClusterResourcePlacementStatus) SetConditions(conditions ...metav1.Condition) { - for _, c := range conditions { - meta.SetStatusCondition(&m.Status.Conditions, c) - } -} - -// GetCondition returns the condition of the ClusterResourcePlacementStatus objects. -func (m *ClusterResourcePlacementStatus) GetCondition(conditionType string) *metav1.Condition { - return meta.FindStatusCondition(m.Status.Conditions, conditionType) -} - func init() { SchemeBuilder.Register(&ClusterResourcePlacement{}, &ClusterResourcePlacementList{}, &ResourcePlacement{}, &ResourcePlacementList{}, &ClusterResourcePlacementStatus{}, &ClusterResourcePlacementStatusList{}) } diff --git a/apis/placement/v1beta1/zz_generated.deepcopy.go b/apis/placement/v1beta1/zz_generated.deepcopy.go index 45163b157..05fef4c3d 100644 --- a/apis/placement/v1beta1/zz_generated.deepcopy.go +++ b/apis/placement/v1beta1/zz_generated.deepcopy.go @@ -834,7 +834,8 @@ func (in *ClusterResourcePlacementStatus) DeepCopyInto(out *ClusterResourcePlace *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Status.DeepCopyInto(&out.Status) + in.PlacementStatus.DeepCopyInto(&out.PlacementStatus) + in.LastUpdatedTime.DeepCopyInto(&out.LastUpdatedTime) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourcePlacementStatus. diff --git a/charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml b/charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml new file mode 120000 index 000000000..967a3a706 --- /dev/null +++ b/charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml @@ -0,0 +1 @@ +../../../../config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml \ No newline at end of file diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml index f881f466c..b49d38120 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml @@ -19,355 +19,13 @@ spec: singular: clusterresourceplacementstatus scope: Namespaced versions: - - name: v1 - schema: - openAPIV3Schema: - description: ClusterResourcePlacementStatus defines the observed state of - the ClusterResourcePlacement object. - properties: - conditions: - description: Conditions is an array of current observed conditions for - ClusterResourcePlacement. - items: - description: Condition contains details for one aspect of the current - state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. 
- maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedResourceIndex: - description: |- - Resource index logically represents the generation of the selected resources. - We take a new snapshot of the selected resources whenever the selection or their content change. - Each snapshot has a different resource index. - One resource snapshot can contain multiple clusterResourceSnapshots CRs in order to store large amount of resources. - To get clusterResourceSnapshot of a given resource index, use the following command: - `kubectl get ClusterResourceSnapshot --selector=kubernetes-fleet.io/resource-index=$ObservedResourceIndex ` - ObservedResourceIndex is the resource index that the conditions in the ClusterResourcePlacementStatus observe. - For example, a condition of `ClusterResourcePlacementWorkSynchronized` type - is observing the synchronization status of the resource snapshot with the resource index $ObservedResourceIndex. - type: string - placementStatuses: - description: |- - PlacementStatuses contains a list of placement status on the clusters that are selected by PlacementPolicy. - Each selected cluster according to the latest resource placement is guaranteed to have a corresponding placementStatuses. - In the pickN case, there are N placement statuses where N = NumberOfClusters; Or in the pickFixed case, there are - N placement statuses where N = ClusterNames. - In these cases, some of them may not have assigned clusters when we cannot fill the required number of clusters. - items: - description: ResourcePlacementStatus represents the placement status - of selected resources for one target cluster. - properties: - applicableClusterResourceOverrides: - description: |- - ApplicableClusterResourceOverrides contains a list of applicable ClusterResourceOverride snapshots associated with - the selected resources. - - This field is alpha-level and is for the override policy feature. - items: - type: string - type: array - applicableResourceOverrides: - description: |- - ApplicableResourceOverrides contains a list of applicable ResourceOverride snapshots associated with the selected - resources. 
- - This field is alpha-level and is for the override policy feature. - items: - description: NamespacedName comprises a resource name, with a - mandatory namespace. - properties: - name: - description: Name is the name of the namespaced scope resource. - type: string - namespace: - description: Namespace is namespace of the namespaced scope - resource. - type: string - required: - - name - - namespace - type: object - type: array - clusterName: - description: |- - ClusterName is the name of the cluster this resource is assigned to. - If it is not empty, its value should be unique cross all placement decisions for the Placement. - type: string - conditions: - description: Conditions is an array of current observed conditions - for ResourcePlacementStatus. - items: - description: Condition contains details for one aspect of the - current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - failedPlacements: - description: |- - FailedPlacements is a list of all the resources failed to be placed to the given cluster or the resource is unavailable. - Note that we only include 100 failed resource placements even if there are more than 100. - This field is only meaningful if the `ClusterName` is not empty. - items: - description: FailedResourcePlacement contains the failure details - of a failed resource placement. - properties: - condition: - description: The failed condition status. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
- format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - envelope: - description: Envelope identifies the envelope object that - contains this resource. - properties: - name: - description: Name of the envelope object. - type: string - namespace: - description: Namespace is the namespace of the envelope - object. Empty if the envelope object is cluster scoped. - type: string - type: - default: ConfigMap - description: Type of the envelope object. - enum: - - ConfigMap - type: string - required: - - name - type: object - group: - description: Group is the group name of the selected resource. - type: string - kind: - description: Kind represents the Kind of the selected resources. - type: string - name: - description: Name of the target resource. - type: string - namespace: - description: Namespace is the namespace of the resource. Empty - if the resource is cluster scoped. - type: string - version: - description: Version is the version of the selected resource. - type: string - required: - - condition - - kind - - name - - version - type: object - maxItems: 100 - type: array - type: object - type: array - selectedResources: - description: SelectedResources contains a list of resources selected by - ResourceSelectors. - items: - description: ResourceIdentifier identifies one Kubernetes resource. - properties: - envelope: - description: Envelope identifies the envelope object that contains - this resource. - properties: - name: - description: Name of the envelope object. - type: string - namespace: - description: Namespace is the namespace of the envelope object. - Empty if the envelope object is cluster scoped. - type: string - type: - default: ConfigMap - description: Type of the envelope object. - enum: - - ConfigMap - type: string - required: - - name - type: object - group: - description: Group is the group name of the selected resource. - type: string - kind: - description: Kind represents the Kind of the selected resources. - type: string - name: - description: Name of the target resource. 
- type: string - namespace: - description: Namespace is the namespace of the resource. Empty if - the resource is cluster scoped. - type: string - version: - description: Version is the version of the selected resource. - type: string - required: - - kind - - name - - version - type: object - type: array - type: object - served: true - storage: false - additionalPrinterColumns: - - jsonPath: .status.observedResourceIndex + - jsonPath: .sourceStatus.observedResourceIndex name: Resource-Index type: string + - jsonPath: .lastUpdatedTime + name: Last-Updated + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date @@ -377,6 +35,7 @@ spec: description: |- ClusterResourcePlacementStatus is a namespaced resource that mirrors the PlacementStatus of a corresponding ClusterResourcePlacement object. This allows namespace-scoped access to cluster-scoped placement status. + The LastUpdatedTime field is updated whenever the CRPS object is updated. This object will be created within the target namespace that contains resources being managed by the CRP. When multiple ClusterResourcePlacements target the same namespace, each ClusterResourcePlacementStatus within that @@ -400,13 +59,16 @@ spec: In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string + lastUpdatedTime: + description: |- + LastUpdatedTime is the timestamp when this CRPS object was last updated. + This field is set to the current time whenever the CRPS object is created or modified. + format: date-time + type: string metadata: type: object - status: - description: |- - The observed status of ClusterResourcePlacementStatus which mirrors the PlacementStatus of the corresponding ClusterResourcePlacement. - This includes information about the namespace and resources within that namespace that are being managed by the placement. - The status will show placement details for resources selected by the ClusterResourcePlacement's ResourceSelectors. + sourceStatus: + description: Source status copied from the corresponding ClusterResourcePlacement. properties: conditions: description: |- @@ -1005,8 +667,10 @@ spec: type: object type: array type: object + required: + - lastUpdatedTime + - sourceStatus type: object served: true storage: true - subresources: - status: {} + subresources: {} From f6c14d1d4ffcc8f104b2f977f6c9df6c14d92c39 Mon Sep 17 00:00:00 2001 From: Zhiying Lin <54013513+zhiying-lin@users.noreply.github.com> Date: Tue, 26 Aug 2025 11:03:48 +0800 Subject: [PATCH 32/38] fix: fix the resource change bug (#220) --------- Signed-off-by: Zhiying Lin --- .../resourcechange_controller.go | 23 +- .../resourcechange_controller_test.go | 250 +++++++++++++++++- 2 files changed, 248 insertions(+), 25 deletions(-) diff --git a/pkg/controllers/resourcechange/resourcechange_controller.go b/pkg/controllers/resourcechange/resourcechange_controller.go index 3738bb8dc..237a74beb 100644 --- a/pkg/controllers/resourcechange/resourcechange_controller.go +++ b/pkg/controllers/resourcechange/resourcechange_controller.go @@ -292,7 +292,7 @@ func (r *Reconciler) getUnstructuredObject(objectKey keys.ClusterWideKey) (runti } // triggerAffectedPlacementsForUpdatedRes find the affected placements for a given updated cluster scoped or namespace scoped resources. -// If the key is namespace scoped, res will be the namespace object. +// If the key is namespace scoped, res will be the namespace object for the clusterResourcePlacement. 
// If triggerCRP is true, it will trigger the cluster resource placement controller, otherwise it will trigger the resource placement controller. func (r *Reconciler) triggerAffectedPlacementsForUpdatedRes(key keys.ClusterWideKey, res *unstructured.Unstructured, triggerCRP bool) error { if triggerCRP { @@ -326,7 +326,7 @@ func (r *Reconciler) triggerAffectedPlacementsForUpdatedRes(key keys.ClusterWide } // Find all matching CRPs. - matchedCRPs := collectAllAffectedPlacementsV1Beta1(key.Namespace == "", res, convertToClusterResourcePlacements(crpList)) + matchedCRPs := collectAllAffectedPlacementsV1Beta1(key, res, convertToClusterResourcePlacements(crpList)) if len(matchedCRPs) == 0 { klog.V(2).InfoS("Change in object does not affect any v1beta1 cluster resource placement", "obj", key) return nil @@ -350,7 +350,7 @@ func (r *Reconciler) triggerAffectedPlacementsForUpdatedRes(key keys.ClusterWide } // Find all matching ResourcePlacements. - matchedRPs := collectAllAffectedPlacementsV1Beta1(key.Namespace == "", res, convertToResourcePlacements(rpList)) + matchedRPs := collectAllAffectedPlacementsV1Beta1(key, res, convertToResourcePlacements(rpList)) if len(matchedRPs) == 0 { klog.V(2).InfoS("Change in object does not affect any resource placement", "obj", key) return nil @@ -408,17 +408,18 @@ func isSelectNamespaceOnly(selector placementv1beta1.ResourceSelectorTerm) bool return selector.Group == "" && selector.Version == "v1" && selector.Kind == "Namespace" && selector.SelectionScope == placementv1beta1.NamespaceOnly } -// collectAllAffectedPlacementsV1Beta1 goes through all v1beta1 placements and collect the ones whose resource selector matches the object given its gvk -func collectAllAffectedPlacementsV1Beta1(isClusterScoped bool, res *unstructured.Unstructured, placementList []placementv1beta1.PlacementObj) map[string]bool { +// collectAllAffectedPlacementsV1Beta1 goes through all v1beta1 placements and collect the ones whose resource selector matches the object given its gvk. +// If the key is namespace scoped, res will be the namespace object for the clusterResourcePlacement. +func collectAllAffectedPlacementsV1Beta1(key keys.ClusterWideKey, res *unstructured.Unstructured, placementList []placementv1beta1.PlacementObj) map[string]bool { placements := make(map[string]bool) for _, placement := range placementList { match := false // find the placements selected this resource (before this change) - // For the resource placement, we do not compare the namespace in the selectedResources status. - // We assume the namespace is the same as the resource placement's namespace. + // If the namespaced scope resource is in the clusterResourcePlacement status and placement is namespaceOnly, + // the placement should be triggered to create a new resourceSnapshot. 
for _, selectedRes := range placement.GetPlacementStatus().SelectedResources { - if selectedRes.Group == res.GroupVersionKind().Group && selectedRes.Version == res.GroupVersionKind().Version && - selectedRes.Kind == res.GroupVersionKind().Kind && selectedRes.Name == res.GetName() { + if selectedRes.Group == key.GroupVersionKind().Group && selectedRes.Version == key.GroupVersionKind().Version && + selectedRes.Kind == key.GroupVersionKind().Kind && selectedRes.Name == key.Name && selectedRes.Namespace == key.Namespace { placements[placement.GetName()] = true match = true break @@ -433,9 +434,9 @@ func collectAllAffectedPlacementsV1Beta1(isClusterScoped bool, res *unstructured // will validate the resource placement's namespace matches the resource's namespace. for _, selector := range placement.GetPlacementSpec().ResourceSelectors { // For the clusterResourcePlacement, we skip the namespace scoped resources if the placement is cluster scoped. - if !isClusterScoped && isSelectNamespaceOnly(selector) && placement.GetNamespace() == "" { + if key.Namespace != "" && isSelectNamespaceOnly(selector) && placement.GetNamespace() == "" { // If the selector is namespace only, we skip the namespace scoped resources. - klog.V(2).InfoS("Skipping namespace scoped resource for namespace only selector", "obj", klog.KRef(res.GetNamespace(), res.GetName()), "selector", selector, "placement", klog.KObj(placement)) + klog.V(2).InfoS("Skipping namespace scoped resource for namespace only selector", "key", key, "obj", klog.KRef(res.GetNamespace(), res.GetName()), "selector", selector, "placement", klog.KObj(placement)) continue } diff --git a/pkg/controllers/resourcechange/resourcechange_controller_test.go b/pkg/controllers/resourcechange/resourcechange_controller_test.go index 7f31f548c..02a2d41f9 100644 --- a/pkg/controllers/resourcechange/resourcechange_controller_test.go +++ b/pkg/controllers/resourcechange/resourcechange_controller_test.go @@ -671,20 +671,41 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: "test-nameSpace", + Name: "test-namespace", Labels: map[string]string{ "region": rand.String(10), "version": rand.String(4), }, }, } + + // Common ResourceIdentifier for Namespace tests (cluster-scoped) + namespaceResourceIdentifier := fleetv1alpha1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-namespace", + } + + // Common ResourceIdentifier for namespace-scoped resource tests + namespaceScopedResourceIdentifier := fleetv1alpha1.ResourceIdentifier{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "test-namespace", + } + tests := map[string]struct { - isClusterScoped bool - res *corev1.Namespace - crpList []*placementv1beta1.ClusterResourcePlacement - wantCRP map[string]bool + key keys.ClusterWideKey + res *corev1.Namespace + crpList []*placementv1beta1.ClusterResourcePlacement + wantCRP map[string]bool }{ "match a place with the matching label": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -708,6 +729,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "Skip a placement with selecting ns only": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceScopedResourceIdentifier, + }, res: matchRes, crpList: 
[]*placementv1beta1.ClusterResourcePlacement{ { @@ -732,8 +756,10 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: make(map[string]bool), }, "does not match a place with no selector": { - isClusterScoped: true, - res: matchRes, + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, + res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { ObjectMeta: metav1.ObjectMeta{ @@ -747,8 +773,10 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: make(map[string]bool), }, "match a place with the name selector": { - isClusterScoped: true, - res: matchRes, + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, + res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { ObjectMeta: metav1.ObjectMeta{ @@ -769,6 +797,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "match a place with a match Expressions label": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -797,6 +828,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "match a place with a single matching label": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -820,6 +854,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "does not match a place with a miss matching label": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -847,6 +884,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: make(map[string]bool), }, "match a place with multiple matching resource selectors": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -883,6 +923,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "match a place with only one matching resource selectors": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -920,6 +963,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "match a place with a miss matching label but was selected": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -964,6 +1010,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "does not match a place with a miss matching label and was not selected": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -1000,6 +1049,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t 
*testing wantCRP: make(map[string]bool), }, "don't select placement with name, nil label selector for namespace with different name": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -1021,6 +1073,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: make(map[string]bool), }, "select placement with empty name, nil label selector for namespace": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -1041,6 +1096,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"resource-selected": true}, }, "match placement through status SelectedResources when selector does not match": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -1079,6 +1137,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing wantCRP: map[string]bool{"status-matched-placement": true}, }, "does not match placement with different GVK selector": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceResourceIdentifier, + }, res: matchRes, crpList: []*placementv1beta1.ClusterResourcePlacement{ { @@ -1101,6 +1162,124 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing }, wantCRP: make(map[string]bool), }, + "match ClusterResourcePlacement with previously selected resource": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceScopedResourceIdentifier, + }, + res: matchRes, + crpList: []*placementv1beta1.ClusterResourcePlacement{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "crp-with-selected-resource", + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: corev1.GroupName, + Version: "v1", + Kind: matchRes.Kind, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "nonexistent": "label", + }, + }, + }, + }, + }, + Status: placementv1beta1.PlacementStatus{ + SelectedResources: []placementv1beta1.ResourceIdentifier{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "test-namespace", + }, + }, + }, + }, + }, + wantCRP: map[string]bool{"crp-with-selected-resource": true}, + }, + "match ClusterResourcePlacement (even with namespace only) with previously selected resource": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceScopedResourceIdentifier, + }, + res: matchRes, + crpList: []*placementv1beta1.ClusterResourcePlacement{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "crp-with-selected-resource", + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: corev1.GroupName, + Version: "v1", + Kind: matchRes.Kind, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "nonexistent": "label", + }, + }, + SelectionScope: placementv1beta1.NamespaceOnly, + }, + }, + }, + Status: placementv1beta1.PlacementStatus{ + SelectedResources: []placementv1beta1.ResourceIdentifier{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "test-namespace", + }, + }, + }, + }, + }, + wantCRP: map[string]bool{"crp-with-selected-resource": true}, + }, + "does not match 
ClusterResourcePlacement with previously selected resource when namespace is different": { + key: keys.ClusterWideKey{ + ResourceIdentifier: namespaceScopedResourceIdentifier, + }, + res: matchRes, + crpList: []*placementv1beta1.ClusterResourcePlacement{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "crp-with-selected-resource", + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: corev1.GroupName, + Version: "v1", + Kind: matchRes.Kind, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "nonexistent": "label", + }, + }, + }, + }, + }, + Status: placementv1beta1.PlacementStatus{ + SelectedResources: []placementv1beta1.ResourceIdentifier{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "different-namespace", + }, + }, + }, + }, + }, + wantCRP: make(map[string]bool), + }, } for name, tt := range tests { t.Run(name, func(t *testing.T) { @@ -1111,7 +1290,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ClusterResourcePlacement(t *testing } uRes, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(tt.res) clusterPlacements := convertToClusterResourcePlacements(crpList) - got := collectAllAffectedPlacementsV1Beta1(tt.isClusterScoped, &unstructured.Unstructured{Object: uRes}, clusterPlacements) + got := collectAllAffectedPlacementsV1Beta1(tt.key, &unstructured.Unstructured{Object: uRes}, clusterPlacements) if !reflect.DeepEqual(got, tt.wantCRP) { t.Errorf("test case `%s` got = %v, wantResult %v", name, got, tt.wantCRP) } @@ -1142,12 +1321,25 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Kind: "Deployment", }) + // Common ResourceIdentifier for Deployment tests + deploymentResourceIdentifier := fleetv1alpha1.ResourceIdentifier{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "test-namespace", + } + tests := map[string]struct { + key keys.ClusterWideKey res *unstructured.Unstructured rpList []*placementv1beta1.ResourcePlacement wantRP map[string]bool }{ "match ResourcePlacement with the matching label": { + key: keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1174,6 +1366,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { wantRP: map[string]bool{"resource-selected": true}, }, "does not match ResourcePlacement with no selector": { + key: keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1189,6 +1384,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { wantRP: make(map[string]bool), }, "match ResourcePlacement with the name selector": { + key: keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1212,6 +1410,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { wantRP: map[string]bool{"resource-selected": true}, }, "does not match ResourcePlacement with different name": { + key: keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1234,6 +1435,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { wantRP: make(map[string]bool), }, "match ResourcePlacement with previously 
selected resource": { + key: keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1242,6 +1446,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Namespace: "test-namespace", }, Spec: placementv1beta1.PlacementSpec{ + // Selector that does not match the resource ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "apps", @@ -1256,6 +1461,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { }, }, Status: placementv1beta1.PlacementStatus{ + // But the resource is in the selected resources status SelectedResources: []placementv1beta1.ResourceIdentifier{ { Group: "apps", @@ -1271,6 +1477,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { wantRP: map[string]bool{"resource-selected": true}, }, "select ResourcePlacement with empty name, nil label selector for deployment": { + key: keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1292,6 +1501,9 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { wantRP: map[string]bool{"resource-selected": true}, }, "does not match ResourcePlacement with different GVK selector": { + key: keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1318,7 +1530,10 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { }, wantRP: make(map[string]bool), }, - "match ResourcePlacement through status SelectedResources when selector does not match": { + "does not match ResourcePlacement through status SelectedResources when name is different": { + key: keys.ClusterWideKey{ + ResourceIdentifier: deploymentResourceIdentifier, + }, res: matchRes, rpList: []*placementv1beta1.ResourcePlacement{ { @@ -1348,14 +1563,14 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { Group: "apps", Version: "v1", Kind: "Deployment", - Name: "test-deployment", + Name: "different-deployment", Namespace: "test-namespace", }, }, }, }, }, - wantRP: map[string]bool{"status-matched-rp": true}, + wantRP: make(map[string]bool), }, } @@ -1367,7 +1582,7 @@ func TestCollectAllAffectedPlacementsV1Beta1_ResourcePlacement(t *testing.T) { rpList = append(rpList, &unstructured.Unstructured{Object: uMap}) } resourcePlacements := convertToResourcePlacements(rpList) - got := collectAllAffectedPlacementsV1Beta1(false, tt.res, resourcePlacements) + got := collectAllAffectedPlacementsV1Beta1(tt.key, tt.res, resourcePlacements) if !reflect.DeepEqual(got, tt.wantRP) { t.Errorf("test case `%s` got = %v, wantResult %v", name, got, tt.wantRP) } @@ -2487,6 +2702,13 @@ func TestHandleDeletedResource(t *testing.T) { Name: "test-namespace", Namespace: "", }, + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "test-namespace", + }, }, }, } From 4d3904fab2124436ef8bc1acb97e00763bc4f6fd Mon Sep 17 00:00:00 2001 From: Arvind Thirumurugan Date: Tue, 26 Aug 2025 11:50:16 -0700 Subject: [PATCH 33/38] feat: make StatusReportingScope immutable (#207) --- .../v1beta1/clusterresourceplacement_types.go | 3 +- ...es-fleet.io_clusterresourceplacements.yaml | 3 + ...ubernetes-fleet.io_resourceplacements.yaml | 5 - .../api_validation_integration_test.go | 376 +++++++++++++++--- 4 files changed, 332 insertions(+), 55 
deletions(-) diff --git a/apis/placement/v1beta1/clusterresourceplacement_types.go b/apis/placement/v1beta1/clusterresourceplacement_types.go index e8ea458a0..a76a73758 100644 --- a/apis/placement/v1beta1/clusterresourceplacement_types.go +++ b/apis/placement/v1beta1/clusterresourceplacement_types.go @@ -114,6 +114,8 @@ type ClusterResourcePlacement struct { // The desired state of ClusterResourcePlacement. // +kubebuilder:validation:Required // +kubebuilder:validation:XValidation:rule="!((has(oldSelf.policy) && !has(self.policy)) || (has(oldSelf.policy) && has(self.policy) && has(self.policy.placementType) && has(oldSelf.policy.placementType) && self.policy.placementType != oldSelf.policy.placementType))",message="placement type is immutable" + // +kubebuilder:validation:XValidation:rule="!(self.statusReportingScope == 'NamespaceAccessible' && size(self.resourceSelectors.filter(x, x.kind == 'Namespace')) != 1)",message="when statusReportingScope is NamespaceAccessible, exactly one resourceSelector with kind 'Namespace' is required" + // +kubebuilder:validation:XValidation:rule="!has(oldSelf.statusReportingScope) || self.statusReportingScope == oldSelf.statusReportingScope",message="statusReportingScope is immutable" Spec PlacementSpec `json:"spec"` // The observed status of ClusterResourcePlacement. @@ -122,7 +124,6 @@ type ClusterResourcePlacement struct { } // PlacementSpec defines the desired state of ClusterResourcePlacement and ResourcePlacement. -// +kubebuilder:validation:XValidation:rule="!(self.statusReportingScope == 'NamespaceAccessible' && size(self.resourceSelectors.filter(x, x.kind == 'Namespace')) != 1)",message="when statusReportingScope is NamespaceAccessible, exactly one resourceSelector with kind 'Namespace' is required" type PlacementSpec struct { // ResourceSelectors is an array of selectors used to select cluster scoped resources. The selectors are `ORed`. // You can have 1-100 selectors. diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml index d7cb2bd4e..186898038 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml @@ -2022,6 +2022,9 @@ spec: resourceSelector with kind 'Namespace' is required rule: '!(self.statusReportingScope == ''NamespaceAccessible'' && size(self.resourceSelectors.filter(x, x.kind == ''Namespace'')) != 1)' + - message: statusReportingScope is immutable + rule: '!has(oldSelf.statusReportingScope) || self.statusReportingScope + == oldSelf.statusReportingScope' status: description: The observed status of ClusterResourcePlacement. properties: diff --git a/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml b/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml index 0583faa71..743ad2355 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml @@ -948,11 +948,6 @@ spec: required: - resourceSelectors type: object - x-kubernetes-validations: - - message: when statusReportingScope is NamespaceAccessible, exactly one - resourceSelector with kind 'Namespace' is required - rule: '!(self.statusReportingScope == ''NamespaceAccessible'' && size(self.resourceSelectors.filter(x, - x.kind == ''Namespace'')) != 1)' status: description: The observed status of ResourcePlacement. 
properties: diff --git a/test/apis/placement/v1beta1/api_validation_integration_test.go b/test/apis/placement/v1beta1/api_validation_integration_test.go index 776a689ec..57f60e4bf 100644 --- a/test/apis/placement/v1beta1/api_validation_integration_test.go +++ b/test/apis/placement/v1beta1/api_validation_integration_test.go @@ -27,7 +27,6 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" @@ -42,9 +41,11 @@ const ( invalidupdateRunStageNameTemplate = "stage012345678901234567890123456789012345678901234567890123456789%d%d" approveRequestNameTemplate = "test-approve-request-%d" crpNameTemplate = "test-crp-%d" + rpNameTemplate = "test-rp-%d" croNameTemplate = "test-cro-%d" roNameTemplate = "test-ro-%d" testNamespace = "test-ns" + unknownScope = "UnknownScope" ) // createValidClusterResourceOverride creates a valid ClusterResourceOverride for testing purposes. @@ -151,7 +152,6 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of ClusterResourcePlacement with nil policy", func() { - Expect(hubClient.Get(ctx, types.NamespacedName{Name: crpName}, &crp)).Should(Succeed(), "Get CRP call failed") crp.Spec.Policy = nil err := hubClient.Update(ctx, &crp) var statusErr *k8sErrors.StatusError @@ -160,7 +160,6 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of ClusterResourcePlacement with different placement type", func() { - Expect(hubClient.Get(ctx, types.NamespacedName{Name: crpName}, &crp)).Should(Succeed(), "Get CRP call failed") crp.Spec.Policy.PlacementType = placementv1beta1.PickAllPlacementType err := hubClient.Update(ctx, &crp) var statusErr *k8sErrors.StatusError @@ -304,13 +303,51 @@ var _ = Describe("Test placement v1beta1 API validation", func() { } Expect(hubClient.Create(ctx, &crp)).Should(Succeed()) }) + + It("should allow creation of ClusterResourcePlacement with empty string as StatusReportingScope and multiple namespace selectors plus other cluster-scoped resources", func() { + crp = placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns-1", + }, + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns-2", + }, + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + Name: "test-cluster-role", + }, + { + Group: "", + Version: "v1", + Kind: "PersistentVolume", + Name: "test-pv", + }, + }, + StatusReportingScope: "", // defaults to ClusterScopeOnly. 
+ }, + } + Expect(hubClient.Create(ctx, &crp)).Should(Succeed()) + }) }) Context("Test ClusterResourcePlacement StatusReportingScope validation - create, deny cases", func() { var crp placementv1beta1.ClusterResourcePlacement crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) - It("should deny creation of ClusterResourcePlacement with StatusReportingScope NamespaceAccessible and multiple namespace selectors", func() { + It("should deny creation of ClusterResourcePlacement with Unknown StatusReportingScope and multiple namespace selectors", func() { crp = placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, @@ -323,17 +360,34 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Kind: "Namespace", Name: "test-ns-1", }, + }, + StatusReportingScope: unknownScope, // Invalid scope + }, + } + err := hubClient.Create(ctx, &crp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create CRP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("supported values: \"ClusterScopeOnly\", \"NamespaceAccessible\"")) + }) + + It("should deny creation of ClusterResourcePlacement with StatusReportingScope NamespaceAccessible and multiple namespace selectors", func() { + crp = placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ { Group: "", Version: "v1", Kind: "Namespace", - Name: "test-ns-2", + Name: "test-ns-1", }, { - Group: "rbac.authorization.k8s.io", + Group: "", Version: "v1", - Kind: "ClusterRole", - Name: "test-cluster-role", + Kind: "Namespace", + Name: "test-ns-2", }, }, StatusReportingScope: placementv1beta1.NamespaceAccessible, @@ -375,7 +429,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) }) - Context("Test ClusterResourcePlacement StatusReportingScope validation - update cases", func() { + Context("Test ClusterResourcePlacement ClusterScopeOnly StatusReportingScope validation - update cases", func() { var crp placementv1beta1.ClusterResourcePlacement crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) @@ -393,6 +447,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: "test-ns-1", }, }, + // By default, StatusReportingScope is ClusterScopeOnly }, } Expect(hubClient.Create(ctx, &crp)).Should(Succeed()) @@ -402,31 +457,26 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Expect(hubClient.Delete(ctx, &crp)).Should(Succeed()) }) - It("should allow update of ClusterResourcePlacement with StatusReportingScope NamespaceAccessible, one namespace selector", func() { - crp.Spec.StatusReportingScope = placementv1beta1.NamespaceAccessible + It("should allow empty string for StatusReportingScope in a ClusterResourcePlacement when StatusReportingScope is not set", func() { + Expect(crp.Spec.StatusReportingScope).To(Equal(placementv1beta1.ClusterScopeOnly), "CRP should have default StatusReportingScope ClusterScopeOnly") + crp.Spec.StatusReportingScope = "" // Empty string should default to ClusterScopeOnly Expect(hubClient.Update(ctx, &crp)).Should(Succeed()) + Expect(crp.Spec.StatusReportingScope).To(Equal(placementv1beta1.ClusterScopeOnly), "CRP should have default StatusReportingScope ClusterScopeOnly") }) - It("should allow update of 
ClusterResourcePlacement with StatusReportingScope NamespaceAccessible, one namespace plus other cluster-scoped resources", func() { + It("should allow update of ClusterResourcePlacement which has default StatusReportingScope, multiple namespace resource selectors", func() { crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ResourceSelectorTerm{ - { - Group: "rbac.authorization.k8s.io", - Version: "v1", - Kind: "ClusterRole", - Name: "test-cluster-role", - }, { Group: "", Version: "v1", - Kind: "PersistentVolume", - Name: "test-pv", + Kind: "Namespace", + Name: "test-ns-2", }, }...) - crp.Spec.StatusReportingScope = placementv1beta1.NamespaceAccessible Expect(hubClient.Update(ctx, &crp)).Should(Succeed()) }) - It("should allow update of ClusterResourcePlacement with StatusReportingScope ClusterScopeOnly, multiple namespace selectors", func() { + It("should allow update of ClusterResourcePlacement with StatusReportingScope ClusterScopeOnly, multiple namespace resource selectors", func() { crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ResourceSelectorTerm{ { Group: "", @@ -434,31 +484,58 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Kind: "Namespace", Name: "test-ns-2", }, - { - Group: "rbac.authorization.k8s.io", - Version: "v1", - Kind: "ClusterRole", - Name: "test-cluster-role", - }, - { - Group: "", - Version: "v1", - Kind: "PersistentVolume", - Name: "test-pv", - }, }...) crp.Spec.StatusReportingScope = placementv1beta1.ClusterScopeOnly Expect(hubClient.Update(ctx, &crp)).Should(Succeed()) }) - It("should allow update of ClusterResourcePlacement with default StatusReportingScope, multiple namespace selectors", func() { - crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - Name: "test-ns-2", + It("should deny update of ClusterResourcePlacement StatusReportingScope to NamespaceAccessible due to immutability", func() { + crp.Spec.StatusReportingScope = placementv1beta1.NamespaceAccessible + err := hubClient.Update(ctx, &crp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("statusReportingScope is immutable")) + }) + + It("should deny update of ClusterResourcePlacement StatusReportingScope to unknown scope", func() { + crp.Spec.StatusReportingScope = unknownScope // Invalid scope + err := hubClient.Update(ctx, &crp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("supported values: \"ClusterScopeOnly\", \"NamespaceAccessible\"")) + }) + }) + + Context("Test ClusterResourcePlacement NamespaceAccessible StatusReportingScope validation - update cases", func() { + var crp placementv1beta1.ClusterResourcePlacement + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + + BeforeEach(func() { + crp = placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns-1", + }, + }, + StatusReportingScope: placementv1beta1.NamespaceAccessible, + }, + } + Expect(hubClient.Create(ctx, &crp)).Should(Succeed()) + }) + + AfterEach(func() { + Expect(hubClient.Delete(ctx, &crp)).Should(Succeed()) + }) + + It("should allow update of ClusterResourcePlacement with StatusReportingScope NamespaceAccessible, one namespace plus other cluster-scoped resources", func() { + crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, []placementv1beta1.ResourceSelectorTerm{ { Group: "rbac.authorization.k8s.io", Version: "v1", @@ -483,14 +560,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Kind: "Namespace", Name: "test-ns-2", }, - { - Group: "rbac.authorization.k8s.io", - Version: "v1", - Kind: "ClusterRole", - Name: "test-cluster-role", - }, }...) - crp.Spec.StatusReportingScope = placementv1beta1.NamespaceAccessible err := hubClient.Update(ctx, &crp) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) @@ -512,12 +582,220 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: "test-pv", }, } - crp.Spec.StatusReportingScope = placementv1beta1.NamespaceAccessible err := hubClient.Update(ctx, &crp) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("when statusReportingScope is NamespaceAccessible, exactly one resourceSelector with kind 'Namespace' is required")) }) + + It("should deny update of ClusterResourcePlacement StatusReportingScope to ClusterScopeOnly due to immutability", func() { + crp.Spec.StatusReportingScope = placementv1beta1.ClusterScopeOnly + err := hubClient.Update(ctx, &crp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("statusReportingScope is immutable")) + }) + + It("should deny update of ClusterResourcePlacement StatusReportingScope to empty string", func() { + crp.Spec.StatusReportingScope = "" + err := hubClient.Update(ctx, &crp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("statusReportingScope is immutable")) + }) + + It("should deny update of ClusterResourcePlacement StatusReportingScope to unknown scope", func() { + crp.Spec.StatusReportingScope = unknownScope // Invalid scope + err := hubClient.Update(ctx, &crp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("supported values: \"ClusterScopeOnly\", \"NamespaceAccessible\"")) + }) + }) + + Context("Test ResourcePlacement StatusReportingScope validation, allow cases", func() { + var rp placementv1beta1.ResourcePlacement + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + + AfterEach(func() { + Expect(hubClient.Delete(ctx, &rp)).Should(Succeed()) + }) + + It("should allow creation of ResourcePlacement with StatusReportingScope NamespaceAccessible, with no namespace resource selected", func() { + rp = placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm-1", + }, + { + Group: "", + Version: "v1", + Kind: "Secret", + Name: "test-secret", + }, + }, + StatusReportingScope: placementv1beta1.NamespaceAccessible, + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed()) + }) + + It("should allow creation of ResourcePlacement with StatusReportingScope ClusterScopeOnly", func() { + rp = placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + { + Group: "", + Version: "v1", + Kind: "Secret", + Name: "test-secret", + }, + }, + StatusReportingScope: placementv1beta1.ClusterScopeOnly, + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed()) + }) + + It("should allow creation of ResourcePlacement with StatusReportingScope set to empty string", func() { + rp = placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + }, + StatusReportingScope: "", + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed()) + }) + + It("should allow creation of ResourcePlacement with StatusReportingScope not specified", func() { + rp = placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + }, + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed()) + }) + + It("should allow update of ResourcePlacement StatusReportingScope, no immutability constraint", func() { + rp = placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: 
testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + }, + StatusReportingScope: placementv1beta1.ClusterScopeOnly, + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed()) + rp.Spec.StatusReportingScope = placementv1beta1.NamespaceAccessible + Expect(hubClient.Update(ctx, &rp)).Should(Succeed()) + }) + }) + + Context("Test ResourcePlacement StatusReportingScope validation, deny cases", func() { + var rp placementv1beta1.ResourcePlacement + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + + It("should deny creation of ResourcePlacement with Unknown StatusReportingScope value", func() { + rp = placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + }, + StatusReportingScope: unknownScope, // Invalid scope + }, + } + err := hubClient.Create(ctx, &rp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create RP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("supported values: \"ClusterScopeOnly\", \"NamespaceAccessible\"")) + }) + + It("should deny update of ResourcePlacement StatusReportingScope to unknown scope due to enum validation", func() { + rp = placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-cm", + }, + }, + StatusReportingScope: placementv1beta1.ClusterScopeOnly, + }, + } + Expect(hubClient.Create(ctx, &rp)).Should(Succeed()) + rp.Spec.StatusReportingScope = unknownScope // Invalid scope - should fail due to enum validation + err := hubClient.Update(ctx, &rp) + var statusErr *k8sErrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update RP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("supported values: \"ClusterScopeOnly\", \"NamespaceAccessible\"")) + + // Cleanup after the test. 
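+		// This Context has no AfterEach block, so the RP created in this spec is deleted here to avoid leaking it into other specs.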
+ Expect(hubClient.Delete(ctx, &rp)).Should(Succeed()) + }) }) Context("Test ClusterPlacementDisruptionBudget API validation - valid cases", func() { From 46e58012e76b59c3a059d1bf394b450bc6455684 Mon Sep 17 00:00:00 2001 From: Yoo Bin Shin <44984496+nibooy@users.noreply.github.com> Date: Tue, 26 Aug 2025 15:46:05 -0400 Subject: [PATCH 34/38] fix: fix azure msi client to not reuse tcp connection (#223) --- pkg/authtoken/providers/azure/azure_msi.go | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/pkg/authtoken/providers/azure/azure_msi.go b/pkg/authtoken/providers/azure/azure_msi.go index 4c8decaca..f36c8c1d5 100644 --- a/pkg/authtoken/providers/azure/azure_msi.go +++ b/pkg/authtoken/providers/azure/azure_msi.go @@ -18,6 +18,7 @@ package azure import ( "context" "fmt" + "net/http" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" @@ -50,7 +51,14 @@ func New(clientID, scope string) authtoken.Provider { // FetchToken gets a new token to make request to the associated fleet' hub cluster. func (a *AuthTokenProvider) FetchToken(ctx context.Context) (authtoken.AuthToken, error) { token := authtoken.AuthToken{} - opts := &azidentity.ManagedIdentityCredentialOptions{ID: azidentity.ClientID(a.ClientID)} + + httpClient := &http.Client{} + opts := &azidentity.ManagedIdentityCredentialOptions{ + ClientOptions: azcore.ClientOptions{ + Transport: httpClient, + }, + ID: azidentity.ClientID(a.ClientID), + } klog.V(2).InfoS("FetchToken", "client ID", a.ClientID) credential, err := azidentity.NewManagedIdentityCredential(opts) @@ -68,6 +76,16 @@ func (a *AuthTokenProvider) FetchToken(ctx context.Context) (authtoken.AuthToken }) if err != nil { klog.ErrorS(err, "Failed to GetToken", "scope", a.Scope) + // We may race at startup with a sidecar which inserts an iptables rule + // to intercept IMDS calls. If we get here before such an iptables rule + // is inserted, we will inadvertently connect to real IMDS, which won't + // be able to service our request. IMDS does not set 'Connection: + // close' on 400 errors. Default Go HTTP client behavior will keep the + // underlying TCP connection open for reuse, unaffected by iptables, + // causing all further requests to continue to be sent to real IMDS and + // fail. If an error is returned from the IMDS call, explicitly close the + // connection used by the HTTP client. 
+ httpClient.CloseIdleConnections() } return err }) From 96f4cb6978bd420c3a2aa62215e477db50332071 Mon Sep 17 00:00:00 2001 From: Wantong Date: Tue, 26 Aug 2025 19:17:11 -0700 Subject: [PATCH 35/38] test: support override e2e tests for RP (#224) --------- Signed-off-by: Wantong Jiang --- test/e2e/actuals_test.go | 198 +++- test/e2e/placement_cro_test.go | 243 +++++ test/e2e/placement_ro_test.go | 142 ++- test/e2e/resource_placement_ro_test.go | 1174 ++++++++++++++++++++++++ test/e2e/utils_test.go | 25 +- 5 files changed, 1749 insertions(+), 33 deletions(-) create mode 100644 test/e2e/resource_placement_ro_test.go diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index d0d91cb6f..9f8d18df9 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -217,6 +217,22 @@ func placementScheduleFailedConditions(placementKey types.NamespacedName, genera } } +func placementOverrideFailedConditions(placementKey types.NamespacedName, generation int64) []metav1.Condition { + if placementKey.Namespace == "" { + return crpOverrideFailedConditions(generation) + } else { + return rpOverrideFailedConditions(generation) + } +} + +func placementWorkSynchronizedFailedConditions(placementKey types.NamespacedName, generation int64, hasOverrides bool) []metav1.Condition { + if placementKey.Namespace == "" { + return crpWorkSynchronizedFailedConditions(generation, hasOverrides) + } else { + return rpWorkSynchronizedFailedConditions(generation, hasOverrides) + } +} + func rpRolloutCompletedConditions(generation int64, hasOverride bool) []metav1.Condition { overrideConditionReason := condition.OverrideNotSpecifiedReason if hasOverride { @@ -325,6 +341,62 @@ func rpScheduledConditions(generation int64) []metav1.Condition { } } +func rpOverrideFailedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementOverriddenConditionType), + Status: metav1.ConditionFalse, + Reason: condition.OverriddenFailedReason, + ObservedGeneration: generation, + }, + } +} + +func rpWorkSynchronizedFailedConditions(generation int64, hasOverrides bool) []metav1.Condition { + overridenCondReason := condition.OverrideNotSpecifiedReason + if hasOverrides { + overridenCondReason = condition.OverriddenSucceededReason + } + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: overridenCondReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementWorkSynchronizedConditionType), + Status: metav1.ConditionFalse, + Reason: condition.WorkNotSynchronizedYetReason, + ObservedGeneration: generation, + }, + } +} + func crpScheduledConditions(generation int64) 
[]metav1.Condition { return []metav1.Condition{ { @@ -986,31 +1058,54 @@ func crpStatusWithOverrideUpdatedActual( wantObservedResourceIndex string, wantClusterResourceOverrides []string, wantResourceOverrides []placementv1beta1.NamespacedName) func() error { - crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())} + return placementStatusWithOverrideUpdatedActual(crpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, + wantObservedResourceIndex, wantClusterResourceOverrides, wantResourceOverrides) +} + +func rpStatusWithOverrideUpdatedActual( + wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + wantSelectedClusters []string, + wantObservedResourceIndex string, + wantClusterResourceOverrides []string, + wantResourceOverrides []placementv1beta1.NamespacedName) func() error { + rpKey := types.NamespacedName{Name: fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()), Namespace: appNamespace().Name} + return placementStatusWithOverrideUpdatedActual(rpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, + wantObservedResourceIndex, wantClusterResourceOverrides, wantResourceOverrides) +} + +func placementStatusWithOverrideUpdatedActual( + placementKey types.NamespacedName, + wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + wantSelectedClusters []string, + wantObservedResourceIndex string, + wantClusterResourceOverrides []string, + wantResourceOverrides []placementv1beta1.NamespacedName, +) func() error { return func() error { - crp := &placementv1beta1.ClusterResourcePlacement{} - hasOverride := len(wantResourceOverrides) > 0 || len(wantClusterResourceOverrides) > 0 - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + placement, err := retrievePlacement(placementKey) + if err != nil { return err } + hasOverride := len(wantResourceOverrides) > 0 || len(wantClusterResourceOverrides) > 0 var wantPlacementStatus []placementv1beta1.PerClusterPlacementStatus for _, name := range wantSelectedClusters { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, - Conditions: perClusterRolloutCompletedConditions(crp.Generation, true, hasOverride), + Conditions: perClusterRolloutCompletedConditions(placement.GetGeneration(), true, hasOverride), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, ObservedResourceIndex: wantObservedResourceIndex, }) } - wantStatus := placementv1beta1.PlacementStatus{ - Conditions: crpRolloutCompletedConditions(crp.Generation, hasOverride), + wantStatus := &placementv1beta1.PlacementStatus{ + Conditions: placementRolloutCompletedConditions(placementKey, placement.GetGeneration(), hasOverride), PerClusterPlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, ObservedResourceIndex: wantObservedResourceIndex, } - if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { - return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + if diff := cmp.Diff(placement.GetPlacementStatus(), wantStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("Placement status diff (-got, +want): %s", diff) } return nil } @@ -1032,77 +1127,118 @@ func crpStatusWithOverrideUpdatedFailedActual( wantObservedResourceIndex string, wantClusterResourceOverrides []string, wantResourceOverrides 
[]placementv1beta1.NamespacedName) func() error { - crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpKey := types.NamespacedName{Name: fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())} + return placementStatusWithOverrideUpdatedFailedActual(crpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, + wantObservedResourceIndex, wantClusterResourceOverrides, wantResourceOverrides) +} +func rpStatusWithOverrideUpdatedFailedActual( + wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + wantSelectedClusters []string, + wantObservedResourceIndex string, + wantClusterResourceOverrides []string, + wantResourceOverrides []placementv1beta1.NamespacedName) func() error { + rpKey := types.NamespacedName{Name: fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()), Namespace: appNamespace().Name} + return placementStatusWithOverrideUpdatedFailedActual(rpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, + wantObservedResourceIndex, wantClusterResourceOverrides, wantResourceOverrides) +} + +func placementStatusWithOverrideUpdatedFailedActual( + placementKey types.NamespacedName, + wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + wantSelectedClusters []string, + wantObservedResourceIndex string, + wantClusterResourceOverrides []string, + wantResourceOverrides []placementv1beta1.NamespacedName, +) func() error { return func() error { - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + placement, err := retrievePlacement(placementKey) + if err != nil { return err } - var wantPlacementStatus []placementv1beta1.PerClusterPlacementStatus for _, name := range wantSelectedClusters { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, - Conditions: perClusterOverrideFailedConditions(crp.Generation), + Conditions: perClusterOverrideFailedConditions(placement.GetGeneration()), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, ObservedResourceIndex: wantObservedResourceIndex, }) } - - wantStatus := placementv1beta1.PlacementStatus{ - Conditions: crpOverrideFailedConditions(crp.Generation), + wantStatus := &placementv1beta1.PlacementStatus{ + Conditions: placementOverrideFailedConditions(placementKey, placement.GetGeneration()), PerClusterPlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, ObservedResourceIndex: wantObservedResourceIndex, } - if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { - return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + if diff := cmp.Diff(placement.GetPlacementStatus(), wantStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("Placement status diff (-got, +want): %s", diff) } return nil } } -func crpStatusWithWorkSynchronizedUpdatedFailedActual( +func rpStatusWithWorkSynchronizedUpdatedFailedActual( wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, wantSelectedClusters []string, wantObservedResourceIndex string, wantClusterResourceOverrides []string, - wantResourceOverrides []placementv1beta1.NamespacedName) func() error { - crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + wantResourceOverrides []placementv1beta1.NamespacedName, +) func() error { + rpKey := types.NamespacedName{Name: fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()), Namespace: 
appNamespace().Name} + return placementStatusWithWorkSynchronizedUpdatedFailedActual(rpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, + wantObservedResourceIndex, wantClusterResourceOverrides, wantResourceOverrides) +} +func placementStatusWithWorkSynchronizedUpdatedFailedActual( + placementKey types.NamespacedName, + wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + wantSelectedClusters []string, + wantObservedResourceIndex string, + wantClusterResourceOverrides []string, + wantResourceOverrides []placementv1beta1.NamespacedName, +) func() error { return func() error { - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + placement, err := retrievePlacement(placementKey) + if err != nil { return err } - var wantPlacementStatus []placementv1beta1.PerClusterPlacementStatus hasOverrides := len(wantResourceOverrides) > 0 || len(wantClusterResourceOverrides) > 0 for _, name := range wantSelectedClusters { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.PerClusterPlacementStatus{ ClusterName: name, - Conditions: perClusterWorkSynchronizedFailedConditions(crp.Generation, hasOverrides), + Conditions: perClusterWorkSynchronizedFailedConditions(placement.GetGeneration(), hasOverrides), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, ObservedResourceIndex: wantObservedResourceIndex, }) } - - wantStatus := placementv1beta1.PlacementStatus{ - Conditions: crpWorkSynchronizedFailedConditions(crp.Generation, hasOverrides), + wantStatus := &placementv1beta1.PlacementStatus{ + Conditions: placementWorkSynchronizedFailedConditions(placementKey, placement.GetGeneration(), hasOverrides), PerClusterPlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, ObservedResourceIndex: wantObservedResourceIndex, } - if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { - return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + if diff := cmp.Diff(placement.GetPlacementStatus(), wantStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("Placement status diff (-got, +want): %s", diff) } return nil } } +func crpStatusWithWorkSynchronizedUpdatedFailedActual( + wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + wantSelectedClusters []string, + wantObservedResourceIndex string, + wantClusterResourceOverrides []string, + wantResourceOverrides []placementv1beta1.NamespacedName) func() error { + crpKey := types.NamespacedName{Name: fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())} + return placementStatusWithWorkSynchronizedUpdatedFailedActual(crpKey, wantSelectedResourceIdentifiers, wantSelectedClusters, + wantObservedResourceIndex, wantClusterResourceOverrides, wantResourceOverrides) +} + func crpStatusWithExternalStrategyActual( wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, wantObservedResourceIndex string, diff --git a/test/e2e/placement_cro_test.go b/test/e2e/placement_cro_test.go index 8b9822224..fb08032d3 100644 --- a/test/e2e/placement_cro_test.go +++ b/test/e2e/placement_cro_test.go @@ -707,3 +707,246 @@ var _ = Context("creating clusterResourceOverride with delete rules for one clus }, consistentlyDuration, eventuallyInterval).Should(BeTrue(), "Failed to delete work resources on member cluster %s", memberCluster.ClusterName) }) }) + +var _ = Context("creating 
clusterResourceOverride with cluster-scoped placementRef", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + croName := fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()) + croSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, croName, 0) + + BeforeAll(func() { + By("creating work resources") + createWorkResources() + + // Create the working CRO with proper PlacementRef before CRP so that the observed resource index is predictable. + croWorking := &placementv1beta1.ClusterResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: croName, + }, + Spec: placementv1beta1.ClusterResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: crpName, // correct CRP name + Scope: placementv1beta1.ClusterScoped, + }, + ClusterResourceSelectors: workResourceSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, croTestAnnotationKey, croTestAnnotationValue))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating working clusterResourceOverride %s", croName)) + Expect(hubClient.Create(ctx, croWorking)).To(Succeed(), "Failed to create clusterResourceOverride %s", croName) + + // This is to make sure the working cro snapshot is created before the CRP + Eventually(func() error { + croSnap := &placementv1beta1.ClusterResourceOverrideSnapshot{} + return hubClient.Get(ctx, types.NamespacedName{Name: croSnapShotName}, croSnap) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cro snapshot as expected", croName) + + // Create the CRP. + createCRP(crpName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s and related resources", crpName)) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + + By(fmt.Sprintf("deleting clusterResourceOverride %s", croName)) + cleanupClusterResourceOverride(croName) + }) + + It("should update CRP status as expected", func() { + wantCRONames := []string{croSnapShotName} + crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", wantCRONames, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + // This check will ignore the annotation of resources. 
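+	// The CRO-added annotation itself is asserted separately in the next spec.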
+ It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have override annotations from working CRO on the placed resources", func() { + want := map[string]string{croTestAnnotationKey: croTestAnnotationValue} + checkIfOverrideAnnotationsOnAllMemberClusters(true, want) + }) +}) + +var _ = Context("creating clusterResourceOverride with cluster-scoped placementRef but pointing to a different crp", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + croNotWorkingName := fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()) + fakeCRPName := "fake-crp-name" + + BeforeAll(func() { + By("creating work resources") + createWorkResources() + + // Create the not working CRO with incorrect PlacementRef + croNotWorking := &placementv1beta1.ClusterResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: croNotWorkingName, + }, + Spec: placementv1beta1.ClusterResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: fakeCRPName, // fake CRP name + Scope: placementv1beta1.ClusterScoped, + }, + ClusterResourceSelectors: workResourceSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, croTestAnnotationKey1, croTestAnnotationValue1))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating not working clusterResourceOverride %s", croNotWorkingName)) + Expect(hubClient.Create(ctx, croNotWorking)).To(Succeed(), "Failed to create clusterResourceOverride %s", croNotWorkingName) + + // Create the CRP. + createCRP(crpName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s and related resources", crpName)) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + + By(fmt.Sprintf("deleting clusterResourceOverride %s", croNotWorkingName)) + cleanupClusterResourceOverride(croNotWorkingName) + }) + + It("should update CRP status with no overrides", func() { + crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + // This check will ignore the annotation of resources. 
+ It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should not have annotations from not working CRO on the placed resources", func() { + for _, memberCluster := range allMemberClusters { + Expect(validateNamespaceNoAnnotationOnCluster(memberCluster, croTestAnnotationKey1)).Should(Succeed(), "CRO pointing to a different CRP should not add annotations on %s", memberCluster.ClusterName) + Expect(validateConfigMapNoAnnotationKeyOnCluster(memberCluster, croTestAnnotationKey1)).Should(Succeed(), "CRO pointing to a different CRP should not add annotations on config map on %s", memberCluster.ClusterName) + } + }) +}) + +var _ = Context("creating clusterResourceOverride for a namespace-only CRP", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + croName := fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()) + croSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, croName, 0) + + BeforeAll(func() { + By("creating work resources") + createWorkResources() + + // Create the CRO before CRP so that the observed resource index is predictable. + cro := &placementv1beta1.ClusterResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: croName, + }, + Spec: placementv1beta1.ClusterResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: crpName, // assigned CRP name + Scope: placementv1beta1.ClusterScoped, + }, + ClusterResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "Namespace", + Version: "v1", + Name: fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()), + }, + }, + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, croTestAnnotationKey, croTestAnnotationValue))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating clusterResourceOverride %s", croName)) + Expect(hubClient.Create(ctx, cro)).To(Succeed(), "Failed to create clusterResourceOverride %s", croName) + + // This is to make sure the CRO snapshot is created before the CRP + Eventually(func() error { + croSnap := &placementv1beta1.ClusterResourceOverrideSnapshot{} + return hubClient.Get(ctx, types.NamespacedName{Name: croSnapShotName}, croSnap) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRO snapshot as expected", croName) + + // Create the namespace-only CRP. 
+ createNamespaceOnlyCRP(crpName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s and related resources", crpName)) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + + By(fmt.Sprintf("deleting clusterResourceOverride %s", croName)) + cleanupClusterResourceOverride(croName) + }) + + It("should update CRP status as expected", func() { + wantCRONames := []string{croSnapShotName} + crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, "0", wantCRONames, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + It("should place only the namespace on member clusters", func() { + for _, memberCluster := range allMemberClusters { + workNamespacePlacedActual := workNamespacePlacedOnClusterActual(memberCluster) + Eventually(workNamespacePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work namespace on member cluster %s", memberCluster.ClusterName) + } + }) + + It("should have override annotations on the namespace only", func() { + want := map[string]string{croTestAnnotationKey: croTestAnnotationValue} + for _, memberCluster := range allMemberClusters { + Expect(validateAnnotationOfWorkNamespaceOnCluster(memberCluster, want)).Should(Succeed(), "Failed to override the annotation of work namespace on %s", memberCluster.ClusterName) + } + }) + + It("should not place configmap or other resources on member clusters", func() { + for _, memberCluster := range allMemberClusters { + // Verify configmap is not placed + Consistently(func() bool { + configMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + configMap := &corev1.ConfigMap{} + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: configMapName}, configMap) + return errors.IsNotFound(err) + }, consistentlyDuration, eventuallyInterval).Should(BeTrue(), "ConfigMap should not be placed on member cluster %s", memberCluster.ClusterName) + } + }) +}) diff --git a/test/e2e/placement_ro_test.go b/test/e2e/placement_ro_test.go index f3775a64d..e4c662c0e 100644 --- a/test/e2e/placement_ro_test.go +++ b/test/e2e/placement_ro_test.go @@ -51,7 +51,8 @@ var _ = Context("creating resourceOverride (selecting all clusters) to override }, Spec: placementv1beta1.ResourceOverrideSpec{ Placement: &placementv1beta1.PlacementRef{ - Name: crpName, // assigned CRP name + Name: crpName, // assigned CRP name + Scope: placementv1beta1.ClusterScoped, }, ResourceSelectors: configMapOverrideSelector(), Policy: &placementv1beta1.OverridePolicy{ @@ -1097,3 +1098,142 @@ var _ = Context("creating resourceOverride with non-exist label", Ordered, func( // This check will ignore the annotation of resources. It("should not place the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) }) + +var _ = Context("creating resourceOverride with namespace scope should not apply override", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + roNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + By("creating work resources") + createWorkResources() + // Create the CRP. 
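+		// The CRP is cluster-scoped while the RO below uses a NamespaceScoped placementRef,
+		// so the override is not expected to be matched to this CRP or applied.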
+ createCRP(crpName) + // Create the ro with namespace scope. + ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: roNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: crpName, // assigned CRP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s and related resources", crpName)) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, roNamespace) + }) + + It("should update CRP status as expected without override", func() { + crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + // This check will ignore the annotation of resources. + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should not have override annotations on the configmap", func() { + for _, memberCluster := range allMemberClusters { + Expect(validateConfigMapNoAnnotationKeyOnCluster(memberCluster, roTestAnnotationKey)).Should(Succeed(), "Failed to validate no override annotation on config map on %s", memberCluster.ClusterName) + } + }) +}) + +var _ = Context("creating resourceOverride but namespace-only CRP should not apply override", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + roNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + By("creating work resources") + createWorkResources() + // Create the namespace-only CRP. + createNamespaceOnlyCRP(crpName) + // Create the ro with cluster scope referring to the namespace-only CRP. 
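+		// The RO selects the configMap, but the namespace-only CRP never places the
+		// configMap, so the override is expected to have no effect on member clusters.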
+ ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: roNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: crpName, // assigned CRP name + Scope: placementv1beta1.ClusterScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s and related resources", crpName)) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, roNamespace) + }) + + It("should update CRP status as expected without override", func() { + // Since the CRP is namespace-only, configMap is not placed, so no override should be applied. + crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, "0", nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + // This check will verify that only namespace is placed, not the configmap. + It("should place only the namespace on member clusters", checkIfPlacedNamespaceResourceOnAllMemberClusters) + + It("should not place the configmap on member clusters since CRP is namespace-only", func() { + for _, memberCluster := range allMemberClusters { + namespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + configMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + configMap := corev1.ConfigMap{} + err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: namespaceName}, &configMap) + Expect(errors.IsNotFound(err)).To(BeTrue(), "ConfigMap should not be placed on member cluster %s since CRP is namespace-only", memberCluster.ClusterName) + } + }) +}) diff --git a/test/e2e/resource_placement_ro_test.go b/test/e2e/resource_placement_ro_test.go new file mode 100644 index 000000000..32be49c67 --- /dev/null +++ b/test/e2e/resource_placement_ro_test.go @@ -0,0 +1,1174 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package e2e + +import ( + "fmt" + + "github.com/google/go-cmp/cmp" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + + placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + scheduler "github.com/kubefleet-dev/kubefleet/pkg/scheduler/framework" + "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" +) + +var _ = Describe("placing namespaced scoped resources using a RP with ResourceOverride", Label("resourceplacement"), func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + + BeforeEach(OncePerOrdered, func() { + By("creating work resources") + createWorkResources() + + // Create the CRP with Namespace-only selector. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: namespaceOnlySelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + By("should update CRP status as expected") + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterEach(OncePerOrdered, func() { + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Context("creating resourceOverride (selecting all clusters) to override configMap for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Create the RP in the same namespace selecting namespaced resources. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + + // Create the ro. 
+ ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: rpName, // assigned RP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + + By("should update RP status to not select any override") + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, nil) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s/%s status as expected", workNamespace, rpName) + + By("should not have annotations on the configmap") + for _, memberCluster := range allMemberClusters { + Expect(validateConfigMapNoAnnotationKeyOnCluster(memberCluster, roTestAnnotationKey)).Should(Succeed(), "Failed to remove the annotation of config map on %s", memberCluster.ClusterName) + } + + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + }) + + It("should update RP status as expected", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0)}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. 
+ It("should place the resources on all member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have override annotations on the configmap", func() { + want := map[string]string{roTestAnnotationKey: roTestAnnotationValue} + checkIfOverrideAnnotationsOnAllMemberClusters(false, want) + }) + + It("update ro and change annotation value", func() { + Eventually(func() error { + ro := &placementv1beta1.ResourceOverride{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: roName, Namespace: workNamespace}, ro); err != nil { + return err + } + ro.Spec = placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: rpName, // assigned RP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, roTestAnnotationKey, roTestAnnotationValue1))}, + }, + }, + }, + }, + }, + } + return hubClient.Update(ctx, ro) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) + }) + + It("should update RP status as expected after RO update", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 1)}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. 
+ It("should place the selected resources on member clusters after RO update", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have updated override annotations on the configmap", func() { + want := map[string]string{roTestAnnotationKey: roTestAnnotationValue1} + checkIfOverrideAnnotationsOnAllMemberClusters(false, want) + }) + + It("update ro and no update on the configmap itself", func() { + Eventually(func() error { + ro := &placementv1beta1.ResourceOverride{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: roName, Namespace: workNamespace}, ro); err != nil { + return err + } + ro.Spec.Policy.OverrideRules = append(ro.Spec.Policy.OverrideRules, placementv1beta1.OverrideRule{ + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "invalid-key": "invalid-value", + }, + }, + }, + }, + }, + OverrideType: placementv1beta1.DeleteOverrideType, + }) + return hubClient.Update(ctx, ro) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", crpName) + }) + + It("should refresh the RP status even as there is no change on the resources", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 2)}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have override annotations on the configmap", func() { + want := map[string]string{roTestAnnotationKey: roTestAnnotationValue1} + checkIfOverrideAnnotationsOnAllMemberClusters(false, want) + }) + }) + + Context("creating resourceOverride with multiple jsonPatchOverrides to override configMap for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + roSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0) + + BeforeAll(func() { + ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: fmt.Sprintf("/metadata/annotations/%s", roTestAnnotationKey1), + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s"`, roTestAnnotationValue1))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating 
resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + // wait until the snapshot is created so that the observed resource index is predictable. + Eventually(func() error { + roSnap := &placementv1beta1.ResourceOverrideSnapshot{} + return hubClient.Get(ctx, types.NamespacedName{Name: roSnapShotName, Namespace: workNamespace}, roSnap) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) + + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as expected", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: roSnapShotName}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have override annotations on the configmap", func() { + wantAnnotations := map[string]string{roTestAnnotationKey: roTestAnnotationValue, roTestAnnotationKey1: roTestAnnotationValue1} + checkIfOverrideAnnotationsOnAllMemberClusters(false, wantAnnotations) + }) + }) + + Context("creating resourceOverride with different rules for each cluster to override configMap for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + + // Create the ro. 
+ ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: rpName, // assigned RP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{regionLabelName: regionEast, envLabelName: envProd}, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s-0"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + }, + }, + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{regionLabelName: regionEast, envLabelName: envCanary}, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s-1"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + }, + }, + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{regionLabelName: regionWest, envLabelName: envProd}, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s-2"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as expected", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0)}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. 
+ It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have override annotations on the configmap", func() { + for i, cluster := range allMemberClusters { + wantAnnotations := map[string]string{roTestAnnotationKey: fmt.Sprintf("%s-%d", roTestAnnotationValue, i)} + Expect(validateOverrideAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) + } + }) + }) + + Context("creating resourceOverride with incorrect path for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + roSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0) + + BeforeAll(func() { + // Create the bad ro. + ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: rpName, // assigned RP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: fmt.Sprintf("/metadata/annotations/%s", roTestAnnotationKey), + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s"`, roTestAnnotationValue))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating the bad resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + // wait until the snapshot is created so that failed override won't block the rollout + Eventually(func() error { + roSnap := &placementv1beta1.ResourceOverrideSnapshot{} + return hubClient.Get(ctx, types.NamespacedName{Name: roSnapShotName, Namespace: workNamespace}, roSnap) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) + + // Create the RP later + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as failed to override", func() { + wantRONames := 
[]placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: roSnapShotName}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedFailedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. + It("should not place the selected resources on member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + }) + + Context("creating resourceOverride and resource becomes invalid after override for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + + // Create the ro. + ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: rpName, // assigned RP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{}, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s"`, roTestAnnotationValue))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as expected", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0)}, + } + rpStatusUpdatedActual := rpStatusWithWorkSynchronizedUpdatedFailedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of 
resources. + It("should not place the selected resources on member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + }) + + Context("creating resourceOverride with templated rules with cluster name to override configMap for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + roSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0) + + BeforeAll(func() { + // Create the ro before rp so that the observed resource index is predictable. + ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabelName, + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpReplace, + Path: "/data/data", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s"`, placementv1beta1.OverrideClusterNameVariable))}, + }, + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/data/newField", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"new-%s"`, placementv1beta1.OverrideClusterNameVariable))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + Eventually(func() error { + roSnap := &placementv1beta1.ResourceOverrideSnapshot{} + return hubClient.Get(ctx, types.NamespacedName{Name: roSnapShotName, Namespace: workNamespace}, roSnap) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) + + // Create the RP. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as expected", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: roSnapShotName}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should have override configMap on the member clusters", func() { + cmName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + cmNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + for _, cluster := range allMemberClusters { + wantConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + Namespace: cmNamespace, + }, + Data: map[string]string{ + "data": cluster.ClusterName, + "newField": fmt.Sprintf("new-%s", cluster.ClusterName), + }, + } + configMapActual := configMapPlacedOnClusterActual(cluster, wantConfigMap) + Eventually(configMapActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update configmap %s data as expected", cmName) + } + }) + }) + + Context("creating resourceOverride with delete configMap for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + roSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0) + + BeforeAll(func() { + // Create the ro before rp so that the observed resource index is predictable. 
+ ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{regionLabelName: regionEast}, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/metadata/annotations", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`{"%s": "%s"}`, roTestAnnotationKey, roTestAnnotationValue))}, + }, + }, + }, + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{regionLabelName: regionWest}, + }, + }, + }, + }, + OverrideType: placementv1beta1.DeleteOverrideType, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + Eventually(func() error { + roSnap := &placementv1beta1.ResourceOverrideSnapshot{} + return hubClient.Get(ctx, types.NamespacedName{Name: roSnapShotName, Namespace: workNamespace}, roSnap) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) + + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as expected", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: roSnapShotName}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should place the configmap on member clusters that are patched", func() { + for idx := 0; idx < 2; idx++ { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } 
+ }) + + It("should have override annotations on the configmap on the member clusters that are patched", func() { + for idx := 0; idx < 2; idx++ { + cluster := allMemberClusters[idx] + wantAnnotations := map[string]string{roTestAnnotationKey: roTestAnnotationValue} + Expect(validateOverrideAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) + } + }) + + It("should not place the configmap on the member clusters that are deleted", func() { + memberCluster := allMemberClusters[2] + Consistently(func() bool { + namespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + configMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + configMap := corev1.ConfigMap{} + err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: namespaceName}, &configMap) + return errors.IsNotFound(err) + }, consistentlyDuration, consistentlyInterval).Should(BeTrue(), "Failed to delete work resources on member cluster %s", memberCluster.ClusterName) + }) + }) + + Context("creating resourceOverride with templated rules with cluster label key replacement for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Create the ro before rp so that the observed resource index is predictable. + ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: rpName, // assigned RP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabelName, + Operator: metav1.LabelSelectorOpExists, + }, + { + Key: envLabelName, + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/data/region", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s%s}"`, placementv1beta1.OverrideClusterLabelKeyVariablePrefix, regionLabelName))}, + }, + { + Operator: placementv1beta1.JSONPatchOverrideOpReplace, + Path: "/data/data", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"newdata-%s%s}"`, placementv1beta1.OverrideClusterLabelKeyVariablePrefix, envLabelName))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + + // Create the RP. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as expected", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0)}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should replace the cluster label key in the configMap", func() { + cmName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + cmNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + for _, cluster := range allMemberClusters { + wantConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + Namespace: cmNamespace, + }, + Data: map[string]string{ + "data": fmt.Sprintf("newdata-%s", labelsByClusterName[cluster.ClusterName][envLabelName]), + "region": labelsByClusterName[cluster.ClusterName][regionLabelName], + }, + } + configMapActual := configMapPlacedOnClusterActual(cluster, wantConfigMap) + Eventually(configMapActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update configmap %s data as expected", cmName) + } + }) + + It("should handle non-existent cluster label key gracefully", func() { + By("Update the ResourceOverride to use a non-existent label key") + Eventually(func() error { + ro := &placementv1beta1.ResourceOverride{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: roName, Namespace: workNamespace}, ro); err != nil { + return err + } + ro.Spec.Policy.OverrideRules[0].JSONPatchOverrides[0].Value = apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%snon-existent-label}"`, placementv1beta1.OverrideClusterLabelKeyVariablePrefix))} + return hubClient.Update(ctx, ro) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update resourceOverride %s with non-existent label key", roName) + + By("Verify the RP status should have one cluster failed to override while the rest stuck in rollout") + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: workNamespace}, rp); err != nil { + return err + } + wantCondition := []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: 
scheduler.FullyScheduledReason, + ObservedGeneration: rp.Generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionFalse, + Reason: condition.RolloutNotStartedYetReason, + ObservedGeneration: rp.Generation, + }, + } + if diff := cmp.Diff(rp.Status.Conditions, wantCondition, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("RP condition diff (-got, +want): %s", diff) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "RP %s failed to show the failed override and the stuck rollout", rpName) + + By("Verify the configMap remains unchanged") + cmName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + cmNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + for _, cluster := range allMemberClusters { + wantConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + Namespace: cmNamespace, + }, + Data: map[string]string{ + "data": fmt.Sprintf("newdata-%s", labelsByClusterName[cluster.ClusterName][envLabelName]), + "region": labelsByClusterName[cluster.ClusterName][regionLabelName], + }, + } + configMapActual := configMapPlacedOnClusterActual(cluster, wantConfigMap) + Consistently(configMapActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "ConfigMap %s should remain unchanged", cmName) + } + }) + }) + + Context("creating resourceOverride with a non-existent label for ResourcePlacement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) + workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + roSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0) + + BeforeAll(func() { + // Create the bad ro. 
+ ro := &placementv1beta1.ResourceOverride{ + ObjectMeta: metav1.ObjectMeta{ + Name: roName, + Namespace: workNamespace, + }, + Spec: placementv1beta1.ResourceOverrideSpec{ + Placement: &placementv1beta1.PlacementRef{ + Name: rpName, // assigned RP name + Scope: placementv1beta1.NamespaceScoped, + }, + ResourceSelectors: configMapOverrideSelector(), + Policy: &placementv1beta1.OverridePolicy{ + OverrideRules: []placementv1beta1.OverrideRule{ + { + ClusterSelector: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: regionLabelName, + Operator: metav1.LabelSelectorOpExists, + }, + { + Key: envLabelName, + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + }, + }, + }, + JSONPatchOverrides: []placementv1beta1.JSONPatchOverride{ + { + Operator: placementv1beta1.JSONPatchOverrideOpAdd, + Path: "/data/region", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s%s}"`, placementv1beta1.OverrideClusterLabelKeyVariablePrefix, "non-existent-label"))}, + }, + { + Operator: placementv1beta1.JSONPatchOverrideOpReplace, + Path: "/data/data", + Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"newdata-%s%s}"`, placementv1beta1.OverrideClusterLabelKeyVariablePrefix, envLabelName))}, + }, + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating the bad resourceOverride %s", roName)) + Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + Eventually(func() error { + roSnap := &placementv1beta1.ResourceOverrideSnapshot{} + return hubClient.Get(ctx, types.NamespacedName{Name: roSnapShotName, Namespace: workNamespace}, roSnap) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) + + // Create the RP later so that failed override won't block the rollout + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s/%s and related resources", workNamespace, rpName)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + + By(fmt.Sprintf("deleting resourceOverride %s", roName)) + cleanupResourceOverride(roName, workNamespace) + }) + + It("should update RP status as failed to override", func() { + wantRONames := []placementv1beta1.NamespacedName{ + {Namespace: workNamespace, Name: roSnapShotName}, + } + rpStatusUpdatedActual := rpStatusWithOverrideUpdatedFailedActual(appConfigMapIdentifiers(), allMemberClusterNames, "0", nil, wantRONames) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. 
+ It("should not place the selected resources on member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + }) +}) diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index e0f038a15..6a8ec44b5 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -906,7 +906,7 @@ func cleanupPlacement(placementKey types.NamespacedName) { } Eventually(func() bool { for i := range allMemberClusters { - workNS := fmt.Sprintf("fleet-member-%s", allMemberClusterNames[i]) + workNS = fmt.Sprintf("fleet-member-%s", allMemberClusterNames[i]) if err := hubClient.Get(ctx, types.NamespacedName{Name: work.Name, Namespace: workNS}, work); err != nil && k8serrors.IsNotFound(err) { // Work resource is not found, which is expected. continue @@ -1420,6 +1420,29 @@ func createCRP(crpName string) { createCRPWithApplyStrategy(crpName, nil) } +// createNamespaceOnlyCRP creates a ClusterResourcePlacement with namespace-only selector. +func createNamespaceOnlyCRP(crpName string) { + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: namespaceOnlySelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + By(fmt.Sprintf("creating namespace-only placement %s", crpName)) + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create namespace-only CRP %s", crpName) +} + // ensureUpdateRunDeletion deletes the update run with the given name and checks all related approval requests are also deleted. func ensureUpdateRunDeletion(updateRunName string) { updateRun := &placementv1beta1.ClusterStagedUpdateRun{ From 43fdbf0b815853d9dfc85574fe432dfbc33a4e07 Mon Sep 17 00:00:00 2001 From: Wantong Date: Tue, 26 Aug 2025 22:49:11 -0700 Subject: [PATCH 36/38] test: enable rollout e2e tests for RP (#217) Signed-off-by: Wantong Jiang --- .../api_validation_integration_test.go | 30 +- test/e2e/actuals_test.go | 98 +- test/e2e/resource_placement_rollout_test.go | 1193 +++++++++++++++++ test/e2e/setup_test.go | 2 +- test/e2e/utils_test.go | 7 +- 5 files changed, 1276 insertions(+), 54 deletions(-) create mode 100644 test/e2e/resource_placement_rollout_test.go diff --git a/test/apis/placement/v1beta1/api_validation_integration_test.go b/test/apis/placement/v1beta1/api_validation_integration_test.go index 57f60e4bf..095152d25 100644 --- a/test/apis/placement/v1beta1/api_validation_integration_test.go +++ b/test/apis/placement/v1beta1/api_validation_integration_test.go @@ -1452,18 +1452,16 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of ClusterResourceOverride placement name", func() { - updatedCRO := cro.DeepCopy() - updatedCRO.Spec.Placement.Name = "different-placement" - err := hubClient.Update(ctx, updatedCRO) + cro.Spec.Placement.Name = "different-placement" + err := hubClient.Update(ctx, &cro) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterResourceOverride call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) }) It("should deny update of ClusterResourceOverride placement scope", func() { - updatedCRO := cro.DeepCopy() - updatedCRO.Spec.Placement.Scope = placementv1beta1.NamespaceScoped - err := hubClient.Update(ctx, updatedCRO) + cro.Spec.Placement.Scope = placementv1beta1.NamespaceScoped + err := hubClient.Update(ctx, &cro) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(ContainSubstring("placement reference cannot be Namespaced scope")) @@ -1489,9 +1487,8 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of ClusterResourceOverride placement from non-nil to nil", func() { - updatedCRO := cro.DeepCopy() - updatedCRO.Spec.Placement = nil - err := hubClient.Update(ctx, updatedCRO) + cro.Spec.Placement = nil + err := hubClient.Update(ctx, &cro) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) @@ -1572,9 +1569,8 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of ResourceOverride placement name", func() { - updatedRO := ro.DeepCopy() - updatedRO.Spec.Placement.Name = "different-placement" - err := hubClient.Update(ctx, updatedRO) + ro.Spec.Placement.Name = "different-placement" + err := hubClient.Update(ctx, &ro) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) @@ -1601,18 +1597,16 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should deny update of ResourceOverride placement from non-nil to nil", func() { - updatedRO := ro.DeepCopy() - updatedRO.Spec.Placement = nil - err := hubClient.Update(ctx, updatedRO) + ro.Spec.Placement = nil + err := hubClient.Update(ctx, &ro) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) }) It("should deny update of ResourceOverride placement from cluster-scoped to namespace-scoped", func() { - updatedRO := ro.DeepCopy() - updatedRO.Spec.Placement.Scope = placementv1beta1.NamespaceScoped - err := hubClient.Update(ctx, updatedRO) + ro.Spec.Placement.Scope = placementv1beta1.NamespaceScoped + err := hubClient.Update(ctx, &ro) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ResourceOverride call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("The placement field is immutable")) diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index 9f8d18df9..93ae85545 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -233,6 +233,14 @@ func placementWorkSynchronizedFailedConditions(placementKey types.NamespacedName } } +func placementRolloutStuckConditions(placementKey types.NamespacedName, generation int64) []metav1.Condition { + if placementKey.Namespace == "" { + return crpRolloutStuckConditions(generation) + } else { + return rpRolloutStuckConditions(generation) + } +} + func rpRolloutCompletedConditions(generation int64, hasOverride bool) []metav1.Condition { overrideConditionReason := condition.OverrideNotSpecifiedReason if hasOverride { @@ -331,6 +339,17 @@ func rpScheduleFailedConditions(generation int64) []metav1.Condition { } func rpScheduledConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + } +} + +func rpRolloutStuckConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), @@ -338,6 +357,12 @@ func rpScheduledConditions(generation int64) []metav1.Condition { ObservedGeneration: generation, Reason: scheduler.FullyScheduledReason, }, + { + Type: string(placementv1beta1.ResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionFalse, + Reason: condition.RolloutNotStartedYetReason, + ObservedGeneration: generation, + }, } } @@ -1411,10 +1436,26 @@ func customizedPlacementStatusUpdatedActual( } func safeRolloutWorkloadCRPStatusUpdatedActual(wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, failedWorkloadResourceIdentifier placementv1beta1.ResourceIdentifier, wantSelectedClusters []string, wantObservedResourceIndex string, failedResourceObservedGeneration int64) func() error { + crpKey := types.NamespacedName{Name: fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())} + return safeRolloutWorkloadPlacementStatusUpdatedActual(crpKey, wantSelectedResourceIdentifiers, failedWorkloadResourceIdentifier, wantSelectedClusters, wantObservedResourceIndex, failedResourceObservedGeneration) +} + +func safeRolloutWorkloadRPStatusUpdatedActual(wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, failedWorkloadResourceIdentifier placementv1beta1.ResourceIdentifier, wantSelectedClusters []string, wantObservedResourceIndex string, failedResourceObservedGeneration int64) func() error { + rpKey := types.NamespacedName{Name: fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()), Namespace: appNamespace().Name} + return safeRolloutWorkloadPlacementStatusUpdatedActual(rpKey, wantSelectedResourceIdentifiers, failedWorkloadResourceIdentifier, wantSelectedClusters, wantObservedResourceIndex, failedResourceObservedGeneration) +} + +func safeRolloutWorkloadPlacementStatusUpdatedActual( + placementKey types.NamespacedName, + wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + failedWorkloadResourceIdentifier placementv1beta1.ResourceIdentifier, + wantSelectedClusters []string, + wantObservedResourceIndex string, + failedResourceObservedGeneration int64, +) func() error { return func() error { - crpName 
:= fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + placement, err := retrievePlacement(placementKey) + if err != nil { return err } @@ -1427,37 +1468,37 @@ func safeRolloutWorkloadCRPStatusUpdatedActual(wantSelectedResourceIdentifiers [ Type: string(placementv1beta1.PerClusterScheduledConditionType), Status: metav1.ConditionTrue, Reason: condition.ScheduleSucceededReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, { Type: string(placementv1beta1.PerClusterRolloutStartedConditionType), Status: metav1.ConditionTrue, Reason: condition.RolloutStartedReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, { Type: string(placementv1beta1.PerClusterOverriddenConditionType), Status: metav1.ConditionTrue, Reason: condition.OverrideNotSpecifiedReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, { Type: string(placementv1beta1.PerClusterWorkSynchronizedConditionType), Status: metav1.ConditionTrue, Reason: condition.AllWorkSyncedReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, { Type: string(placementv1beta1.PerClusterAppliedConditionType), Status: metav1.ConditionTrue, Reason: condition.AllWorkAppliedReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, { Type: string(placementv1beta1.PerClusterAvailableConditionType), Status: metav1.ConditionFalse, Reason: condition.WorkNotAvailableReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, }, FailedPlacements: []placementv1beta1.FailedResourcePlacement{ @@ -1482,13 +1523,13 @@ func safeRolloutWorkloadCRPStatusUpdatedActual(wantSelectedResourceIdentifiers [ Type: string(placementv1beta1.PerClusterScheduledConditionType), Status: metav1.ConditionTrue, Reason: condition.ScheduleSucceededReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, { Type: string(placementv1beta1.PerClusterRolloutStartedConditionType), Status: metav1.ConditionFalse, Reason: condition.RolloutNotStartedYetReason, - ObservedGeneration: crp.Generation, + ObservedGeneration: placement.GetGeneration(), }, }, } @@ -1497,30 +1538,15 @@ func safeRolloutWorkloadCRPStatusUpdatedActual(wantSelectedResourceIdentifiers [ wantPlacementStatus = append(wantPlacementStatus, rolloutBlockedPlacementStatus) } - wantCRPConditions := []metav1.Condition{ - { - Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), - Status: metav1.ConditionTrue, - Reason: scheduler.FullyScheduledReason, - ObservedGeneration: crp.Generation, - }, - { - Type: string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType), - Status: metav1.ConditionFalse, - Reason: condition.RolloutNotStartedYetReason, - ObservedGeneration: crp.Generation, - }, - } - - wantStatus := placementv1beta1.PlacementStatus{ - Conditions: wantCRPConditions, + wantStatus := &placementv1beta1.PlacementStatus{ + Conditions: placementRolloutStuckConditions(placementKey, placement.GetGeneration()), PerClusterPlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, ObservedResourceIndex: wantObservedResourceIndex, } - if diff := cmp.Diff(crp.Status, wantStatus, safeRolloutCRPStatusCmpOptions...); diff != "" { - return 
fmt.Errorf("CRP status diff (-got, +want): %s", diff) + if diff := cmp.Diff(placement.GetPlacementStatus(), wantStatus, safeRolloutPlacementStatusCmpOptions...); diff != "" { + return fmt.Errorf("Placement status diff (-got, +want): %s", diff) } return nil } @@ -1565,11 +1591,17 @@ func workNamespaceRemovedFromClusterActual(cluster *framework.Cluster) func() er } } -func namespacedResourcesRemovedFromClusterActual(cluster *framework.Cluster) func() error { +// namespacedResourcesRemovedFromClusterActual checks that resources in the specified namespace have been removed from the cluster. +// It checks if the placed configMap is removed by default, as this is tested in most of the test cases. +// For tests with additional resources placed, e.g. deployments, daemonSets, add those to placedResources. +func namespacedResourcesRemovedFromClusterActual(cluster *framework.Cluster, placedResources ...client.Object) func() error { cm := appConfigMap() + placedResources = append(placedResources, &cm) return func() error { - if err := cluster.KubeClient.Get(ctx, types.NamespacedName{Name: cm.Name, Namespace: cm.Namespace}, &cm); !errors.IsNotFound(err) { - return fmt.Errorf("ConfigMap %s/%s still exists on cluster %s or get encountered an error: %w", cm.Namespace, cm.Name, cluster.ClusterName, err) + for _, resource := range placedResources { + if err := cluster.KubeClient.Get(ctx, types.NamespacedName{Name: resource.GetName(), Namespace: appNamespace().Name}, resource); !errors.IsNotFound(err) { + return fmt.Errorf("%s %s/%s still exists on cluster %s or get encountered an error: %w", resource.GetObjectKind().GroupVersionKind(), appNamespace().Name, resource.GetName(), cluster.ClusterName, err) + } } return nil } diff --git a/test/e2e/resource_placement_rollout_test.go b/test/e2e/resource_placement_rollout_test.go new file mode 100644 index 000000000..6548a2a15 --- /dev/null +++ b/test/e2e/resource_placement_rollout_test.go @@ -0,0 +1,1193 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + appv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils" + "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" + testv1alpha1 "github.com/kubefleet-dev/kubefleet/test/apis/v1alpha1" + "github.com/kubefleet-dev/kubefleet/test/utils/controller" +) + +const ( + valFoo1 = "foo1" + valBar1 = "bar1" +) + +var ( + testDaemonSet appv1.DaemonSet + testStatefulSet appv1.StatefulSet + testService corev1.Service + testJob batchv1.Job + testCustomResource testv1alpha1.TestResource +) + +var _ = Describe("placing namespaced scoped resources using a RP with rollout", Label("resourceplacement"), func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Name: rpName, Namespace: appNamespace().Name} + + BeforeEach(OncePerOrdered, func() { + testDeployment = appv1.Deployment{} + readDeploymentTestManifest(&testDeployment) + testDaemonSet = appv1.DaemonSet{} + readDaemonSetTestManifest(&testDaemonSet) + testStatefulSet = appv1.StatefulSet{} + readStatefulSetTestManifest(&testStatefulSet, false) + testService = corev1.Service{} + readServiceTestManifest(&testService) + testJob = batchv1.Job{} + readJobTestManifest(&testJob) + + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: namespaceOnlySelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + crpStatusUpdatedActual := crpStatusUpdatedActual(nil, allMemberClusterNames, nil, "0") // nil as no resources created yet + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterEach(OncePerOrdered, func() { + // Remove the custom deletion blocker finalizer from the RP and CRP. 
+ ensureRPAndRelatedResourcesDeleted(rpKey, allMemberClusters, &testDeployment, &testDaemonSet, &testStatefulSet, &testService, &testJob) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Context("Test an RP place enveloped objects successfully", Ordered, func() { + workNamespace := appNamespace() + var wantSelectedResources []placementv1beta1.ResourceIdentifier + var testDeploymentEnvelope placementv1beta1.ResourceEnvelope + + BeforeAll(func() { + readEnvelopeResourceTestManifest(&testDeploymentEnvelope) + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testDeploymentEnvelope.Name, + Namespace: workNamespace.Name, + }, + } + }) + + It("Create the wrapped deployment resources in the namespace", func() { + createWrappedResourcesForRollout(&testDeploymentEnvelope, &testDeployment, utils.DeploymentKind, workNamespace) + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("Create the RP that select the enveloped objects", func() { + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace.Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testDeploymentEnvelope.Name, + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(wantSelectedResources, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := waitForDeploymentPlacementToReady(memberCluster, &testDeployment) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("should mark the work as available", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + var works placementv1beta1.WorkList + listOpts := []client.ListOption{ + client.InNamespace(fmt.Sprintf(utils.NamespaceNameFormat, memberCluster.ClusterName)), + // This test spec runs in parallel with other suites; there might be unrelated + // Work objects in the namespace. 
+ client.MatchingLabels{ + placementv1beta1.PlacementTrackingLabel: rpName, + placementv1beta1.ParentNamespaceLabel: workNamespace.Name, + }, + } + Eventually(func() string { + if err := hubClient.List(ctx, &works, listOpts...); err != nil { + return err.Error() + } + for i := range works.Items { + work := works.Items[i] + wantConditions := []metav1.Condition{ + { + Type: placementv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: condition.WorkAllManifestsAppliedReason, + ObservedGeneration: 1, + }, + { + Type: placementv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: condition.WorkAllManifestsAvailableReason, + ObservedGeneration: 1, + }, + } + diff := controller.CompareConditions(wantConditions, work.Status.Conditions) + if len(diff) != 0 { + return diff + } + } + if len(works.Items) == 0 { + return "no available work found" + } + return "" + }, eventuallyDuration, eventuallyInterval).Should(BeEmpty(), + "work condition mismatch for work %s (-want, +got):", memberCluster.ClusterName) + } + }) + }) + + Context("Test an RP place workload objects successfully, block rollout based on deployment availability", Ordered, func() { + workNamespace := appNamespace() + var wantSelectedResources []placementv1beta1.ResourceIdentifier + + BeforeAll(func() { + // Create the test resources. + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Group: appv1.SchemeGroupVersion.Group, + Version: appv1.SchemeGroupVersion.Version, + Kind: utils.DeploymentKind, + Name: testDeployment.Name, + Namespace: workNamespace.Name, + }, + } + }) + + It("create the deployment resource in the namespace", func() { + Expect(hubClient.Create(ctx, &workNamespace)).To(Succeed(), "Failed to create namespace %s", workNamespace.Name) + testDeployment.Namespace = workNamespace.Name + Expect(hubClient.Create(ctx, &testDeployment)).To(Succeed(), "Failed to create test deployment %s", testDeployment.Name) + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("create the RP that select the deployment", func() { + rp := buildRPForSafeRollout(workNamespace.Name) + rp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ + { + Group: appv1.SchemeGroupVersion.Group, + Kind: utils.DeploymentKind, + Version: appv1.SchemeGroupVersion.Version, + Name: testDeployment.Name, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(wantSelectedResources, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := waitForDeploymentPlacementToReady(memberCluster, &testDeployment) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("change the image name in deployment, to make it unavailable", func() { + Eventually(func() error { + 
var dep appv1.Deployment + err := hubClient.Get(ctx, types.NamespacedName{Name: testDeployment.Name, Namespace: testDeployment.Namespace}, &dep) + if err != nil { + return err + } + dep.Spec.Template.Spec.Containers[0].Image = randomImageName + return hubClient.Update(ctx, &dep) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to change the image name in deployment") + }) + + It("should update RP status as expected", func() { + failedDeploymentResourceIdentifier := placementv1beta1.ResourceIdentifier{ + Group: appv1.SchemeGroupVersion.Group, + Version: appv1.SchemeGroupVersion.Version, + Kind: utils.DeploymentKind, + Name: testDeployment.Name, + Namespace: testDeployment.Namespace, + } + rpStatusActual := safeRolloutWorkloadRPStatusUpdatedActual(wantSelectedResources, failedDeploymentResourceIdentifier, allMemberClusterNames, "1", 2) + Eventually(rpStatusActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + }) + + Context("Test an RP place workload objects successfully, block rollout based on daemonset availability", Ordered, func() { + workNamespace := appNamespace() + var wantSelectedResources []placementv1beta1.ResourceIdentifier + var testDaemonSetEnvelope placementv1beta1.ResourceEnvelope + + BeforeAll(func() { + // Create the test resources. + readEnvelopeResourceTestManifest(&testDaemonSetEnvelope) + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testDaemonSetEnvelope.Name, + Namespace: workNamespace.Name, + }, + } + }) + + It("create the daemonset resource in the namespace", func() { + createWrappedResourcesForRollout(&testDaemonSetEnvelope, &testDaemonSet, utils.DaemonSetKind, workNamespace) + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("create the RP that select the enveloped daemonset", func() { + rp := buildRPForSafeRollout(workNamespace.Name) + rp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testDaemonSetEnvelope.Name, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(wantSelectedResources, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := waitForDaemonSetPlacementToReady(memberCluster, &testDaemonSet) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("change the image name in daemonset, to make it unavailable", func() { + Eventually(func() error { + 
testDaemonSet.Spec.Template.Spec.Containers[0].Image = randomImageName
+				daemonSetByte, err := json.Marshal(testDaemonSet)
+				if err != nil {
+					return err
+				}
+				testDaemonSetEnvelope.Data["daemonset.yaml"] = runtime.RawExtension{Raw: daemonSetByte}
+				return hubClient.Update(ctx, &testDaemonSetEnvelope)
+			}, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to change the image name of daemonset in envelope object")
+		})
+
+		It("should update RP status as expected", func() {
+			failedDaemonSetResourceIdentifier := placementv1beta1.ResourceIdentifier{
+				Group:     appv1.SchemeGroupVersion.Group,
+				Version:   appv1.SchemeGroupVersion.Version,
+				Kind:      utils.DaemonSetKind,
+				Name:      testDaemonSet.Name,
+				Namespace: testDaemonSet.Namespace,
+				Envelope: &placementv1beta1.EnvelopeIdentifier{
+					Name:      testDaemonSetEnvelope.Name,
+					Namespace: testDaemonSetEnvelope.Namespace,
+					Type:      placementv1beta1.ResourceEnvelopeType,
+				},
+			}
+			rpStatusActual := safeRolloutWorkloadRPStatusUpdatedActual(wantSelectedResources, failedDaemonSetResourceIdentifier, allMemberClusterNames, "1", 2)
+			Eventually(rpStatusActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected")
+		})
+	})
+
+	Context("Test an RP place workload objects successfully, block rollout based on statefulset availability", Ordered, func() {
+		workNamespace := appNamespace()
+		var wantSelectedResources []placementv1beta1.ResourceIdentifier
+		var testStatefulSetEnvelope placementv1beta1.ResourceEnvelope
+
+		BeforeAll(func() {
+			// Create the test resources.
+			readEnvelopeResourceTestManifest(&testStatefulSetEnvelope)
+			wantSelectedResources = []placementv1beta1.ResourceIdentifier{
+				{
+					Group:     placementv1beta1.GroupVersion.Group,
+					Kind:      placementv1beta1.ResourceEnvelopeKind,
+					Version:   placementv1beta1.GroupVersion.Version,
+					Name:      testStatefulSetEnvelope.Name,
+					Namespace: workNamespace.Name,
+				},
+			}
+		})
+
+		It("create the statefulset resource in the namespace", func() {
+			createWrappedResourcesForRollout(&testStatefulSetEnvelope, &testStatefulSet, utils.StatefulSetKind, workNamespace)
+		})
+
+		It("should update CRP status as expected", func() {
+			crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "1")
+			Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected")
+		})
+
+		It("create the RP that select the enveloped statefulset", func() {
+			rp := buildRPForSafeRollout(workNamespace.Name)
+			rp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{
+				{
+					Group:   placementv1beta1.GroupVersion.Group,
+					Kind:    placementv1beta1.ResourceEnvelopeKind,
+					Version: placementv1beta1.GroupVersion.Version,
+					Name:    testStatefulSetEnvelope.Name,
+				},
+			}
+			Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP")
+		})
+
+		It("should update RP status as expected", func() {
+			rpStatusUpdatedActual := rpStatusUpdatedActual(wantSelectedResources, allMemberClusterNames, nil, "0")
+			Eventually(rpStatusUpdatedActual, 2*workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected")
+		})
+
+		It("should place the resources on all member clusters", func() {
+			for idx := range allMemberClusters {
+				memberCluster := allMemberClusters[idx]
+				workResourcesPlacedActual := waitForStatefulSetPlacementToReady(memberCluster, &testStatefulSet)
+				Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName)
+			}
+		})
+
+		It("change the image name in statefulset, to make it unavailable", func() {
+			Eventually(func() error {
+				testStatefulSet.Spec.Template.Spec.Containers[0].Image = randomImageName
+				statefulSetByte, err := json.Marshal(testStatefulSet)
+				if err != nil {
+					return err
+				}
+				testStatefulSetEnvelope.Data["statefulset.yaml"] = runtime.RawExtension{Raw: statefulSetByte}
+				return hubClient.Update(ctx, &testStatefulSetEnvelope)
+			}, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to change the image name in statefulset")
+		})
+
+		It("should update RP status as expected", func() {
+			failedStatefulSetResourceIdentifier := placementv1beta1.ResourceIdentifier{
+				Group:     appv1.SchemeGroupVersion.Group,
+				Version:   appv1.SchemeGroupVersion.Version,
+				Kind:      utils.StatefulSetKind,
+				Name:      testStatefulSet.Name,
+				Namespace: testStatefulSet.Namespace,
+				Envelope: &placementv1beta1.EnvelopeIdentifier{
+					Name:      testStatefulSetEnvelope.Name,
+					Namespace: testStatefulSetEnvelope.Namespace,
+					Type:      placementv1beta1.ResourceEnvelopeType,
+				},
+			}
+			rpStatusActual := safeRolloutWorkloadRPStatusUpdatedActual(wantSelectedResources, failedStatefulSetResourceIdentifier, allMemberClusterNames, "1", 2)
+			Eventually(rpStatusActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected")
+		})
+	})
+
+	Context("Test an RP place workload objects successfully, block rollout based on service availability", Ordered, func() {
+		workNamespace := appNamespace()
+		var wantSelectedResources []placementv1beta1.ResourceIdentifier
+
+		BeforeAll(func() {
+			// Create the test resources.
+			wantSelectedResources = []placementv1beta1.ResourceIdentifier{
+				{
+					Kind:      utils.ServiceKind,
+					Name:      testService.Name,
+					Version:   corev1.SchemeGroupVersion.Version,
+					Namespace: workNamespace.Name,
+				},
+			}
+		})
+
+		It("create the service resource in the namespace", func() {
+			Expect(hubClient.Create(ctx, &workNamespace)).To(Succeed(), "Failed to create namespace %s", workNamespace.Name)
+			testService.Namespace = workNamespace.Name
+			Expect(hubClient.Create(ctx, &testService)).To(Succeed(), "Failed to create test service %s", testService.Name)
+		})
+
+		It("should update CRP status as expected", func() {
+			crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "1")
+			Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected")
+		})
+
+		It("create the RP that select the service", func() {
+			rp := buildRPForSafeRollout(workNamespace.Name)
+			rp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{
+				{
+					Kind:    utils.ServiceKind,
+					Version: corev1.SchemeGroupVersion.Version,
+					Name:    testService.Name,
+				},
+			}
+			Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP")
+		})
+
+		It("should update RP status as expected", func() {
+			rpStatusUpdatedActual := rpStatusUpdatedActual(wantSelectedResources, allMemberClusterNames, nil, "0")
+			Eventually(rpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected")
+		})
+
+		It("should place the resources on all member clusters", func() {
+			for idx := range allMemberClusters {
+				memberCluster := allMemberClusters[idx]
+				workResourcesPlacedActual := waitForServiceToReady(memberCluster, &testService)
+
Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("change service to LoadBalancer, to make it unavailable", func() { + Eventually(func() error { + var service corev1.Service + err := hubClient.Get(ctx, types.NamespacedName{Name: testService.Name, Namespace: testService.Namespace}, &service) + if err != nil { + return err + } + service.Spec.Type = corev1.ServiceTypeLoadBalancer + return hubClient.Update(ctx, &service) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to change the service type to LoadBalancer") + }) + + It("should update RP status as expected", func() { + failedServiceResourceIdentifier := placementv1beta1.ResourceIdentifier{ + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, + Kind: utils.ServiceKind, + Name: testService.Name, + Namespace: testService.Namespace, + } + // failedResourceObservedGeneration is set to 0 because generation is not populated for service. + rpStatusActual := safeRolloutWorkloadRPStatusUpdatedActual(wantSelectedResources, failedServiceResourceIdentifier, allMemberClusterNames, "1", 0) + Eventually(rpStatusActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + }) + + Context("Test an RP place workload successful and update it to be failed and then delete the resource snapshot,"+ + "rollout should eventually be successful after we correct the image", Ordered, func() { + workNamespace := appNamespace() + var wantSelectedResources []placementv1beta1.ResourceIdentifier + + BeforeAll(func() { + // Create the test resources. + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Group: appv1.SchemeGroupVersion.Group, + Version: appv1.SchemeGroupVersion.Version, + Kind: utils.DeploymentKind, + Name: testDeployment.Name, + Namespace: workNamespace.Name, + }, + } + }) + + It("create the deployment resource in the namespace", func() { + Expect(hubClient.Create(ctx, &workNamespace)).To(Succeed(), "Failed to create namespace %s", workNamespace.Name) + testDeployment.Namespace = workNamespace.Name + Expect(hubClient.Create(ctx, &testDeployment)).To(Succeed(), "Failed to create test deployment %s", testDeployment.Name) + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("create the RP that select the deployment", func() { + rp := buildRPForSafeRollout(workNamespace.Name) + rp.Spec.RevisionHistoryLimit = ptr.To(int32(1)) + rp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{ + { + Group: appv1.SchemeGroupVersion.Group, + Kind: utils.DeploymentKind, + Version: appv1.SchemeGroupVersion.Version, + Name: testDeployment.Name, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(wantSelectedResources, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on all member clusters", func() { + for idx := range 
allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := waitForDeploymentPlacementToReady(memberCluster, &testDeployment) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("change the image name in deployment, to make it unavailable", func() { + Eventually(func() error { + var dep appv1.Deployment + err := hubClient.Get(ctx, types.NamespacedName{Name: testDeployment.Name, Namespace: testDeployment.Namespace}, &dep) + if err != nil { + return err + } + dep.Spec.Template.Spec.Containers[0].Image = randomImageName + return hubClient.Update(ctx, &dep) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to change the image name in deployment") + }) + + It("should update RP status on deployment failed as expected", func() { + failedDeploymentResourceIdentifier := placementv1beta1.ResourceIdentifier{ + Group: appv1.SchemeGroupVersion.Group, + Version: appv1.SchemeGroupVersion.Version, + Kind: utils.DeploymentKind, + Name: testDeployment.Name, + Namespace: testDeployment.Namespace, + } + rpStatusActual := safeRolloutWorkloadRPStatusUpdatedActual(wantSelectedResources, failedDeploymentResourceIdentifier, allMemberClusterNames, "1", 2) + Eventually(rpStatusActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("update work to trigger a work generator reconcile", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx].ClusterName + namespaceName := fmt.Sprintf(utils.NamespaceNameFormat, memberCluster) + workName := fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, fmt.Sprintf(placementv1beta1.WorkNameBaseFmt, workNamespace.Name, rpName)) + work := placementv1beta1.Work{} + Expect(hubClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: namespaceName}, &work)).Should(Succeed(), "Failed to get the work") + if work.Status.ManifestConditions != nil { + work.Status.ManifestConditions = nil + } else { + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{ + Type: placementv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + Reason: "WorkNotAvailable", + }) + } + Expect(hubClient.Status().Update(ctx, &work)).Should(Succeed(), "Failed to update the work") + } + }) + + It("change the image name in deployment, to roll over the resourcesnapshot", func() { + rsList := &placementv1beta1.ResourceSnapshotList{} + listOptions := &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set{placementv1beta1.PlacementTrackingLabel: rpName}), + Namespace: workNamespace.Name, + } + Expect(hubClient.List(ctx, rsList, listOptions)).Should(Succeed(), "Failed to list the resourcesnapshot") + Expect(len(rsList.Items) == 1).Should(BeTrue()) + oldRS := rsList.Items[0].Name + Expect(hubClient.Get(ctx, types.NamespacedName{Name: testDeployment.Name, Namespace: testDeployment.Namespace}, &testDeployment)).Should(Succeed(), "Failed to get deployment") + testDeployment.Spec.Template.Spec.Containers[0].Image = "extra-snapshot" + Expect(hubClient.Update(ctx, &testDeployment)).Should(Succeed(), "Failed to change the image name in deployment") + // wait for the new resourcesnapshot to be created + Eventually(func() bool { + Expect(hubClient.List(ctx, rsList, listOptions)).Should(Succeed(), "Failed to list the resourcesnapshot") + Expect(len(rsList.Items) == 1).Should(BeTrue()) + 
return rsList.Items[0].Name != oldRS + }, eventuallyDuration, eventuallyInterval).Should(BeTrue(), "Failed to remove the old resourcesnapshot") + }) + + It("update work to trigger a work generator reconcile", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx].ClusterName + namespaceName := fmt.Sprintf(utils.NamespaceNameFormat, memberCluster) + workName := fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, fmt.Sprintf(placementv1beta1.WorkNameBaseFmt, workNamespace.Name, rpName)) + Eventually(func() error { + work := placementv1beta1.Work{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: namespaceName}, &work); err != nil { + return err + } + if work.Status.ManifestConditions != nil { + work.Status.ManifestConditions = nil + } else { + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{ + Type: placementv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + Reason: "WorkNotAvailable", + }) + } + return hubClient.Status().Update(ctx, &work) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the work") + } + }) + + It("change the image name in deployment, to make it available again", func() { + Eventually(func() error { + err := hubClient.Get(ctx, types.NamespacedName{Name: testDeployment.Name, Namespace: testDeployment.Namespace}, &testDeployment) + if err != nil { + return err + } + testDeployment.Spec.Template.Spec.Containers[0].Image = "nginx:1.26.2" + return hubClient.Update(ctx, &testDeployment) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to change the image name in deployment") + }) + + It("should place the resources on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := waitForDeploymentPlacementToReady(memberCluster, &testDeployment) + Eventually(workResourcesPlacedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + }) + + Context("Test an RP place workload objects successfully, don't block rollout based on job availability", Ordered, func() { + workNamespace := appNamespace() + var wantSelectedResources []placementv1beta1.ResourceIdentifier + unAvailablePeriodSeconds := 15 + + BeforeAll(func() { + // Create the test resources. + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Group: batchv1.SchemeGroupVersion.Group, + Version: batchv1.SchemeGroupVersion.Version, + Kind: utils.JobKind, + Name: testJob.Name, + Namespace: workNamespace.Name, + }, + } + }) + + It("create the job resource in the namespace", func() { + Expect(hubClient.Create(ctx, &workNamespace)).To(Succeed(), "Failed to create namespace %s", workNamespace.Name) + testJob.Namespace = workNamespace.Name + Expect(hubClient.Create(ctx, &testJob)).To(Succeed(), "Failed to create test job %s", testJob.Name) + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("create the RP that select the job", func() { + rp := buildRPForSafeRollout(workNamespace.Name) + // the job we are trying to propagate takes 10s to complete. MaxUnavailable is set to 1. 
So we set UnavailablePeriodSeconds to 15s
+			// so that after each rollout phase we only wait 15s before proceeding to the next one. Since a Job is not trackable,
+			// we want the rollout to finish in a reasonable time.
+			rp.Spec.Strategy.RollingUpdate.UnavailablePeriodSeconds = ptr.To(unAvailablePeriodSeconds)
+			rp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{
+				{
+					Group:   batchv1.SchemeGroupVersion.Group,
+					Kind:    utils.JobKind,
+					Version: batchv1.SchemeGroupVersion.Version,
+					Name:    testJob.Name,
+				},
+			}
+			Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP")
+		})
+
+		It("should update RP status as expected", func() {
+			rpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(rpKey, wantSelectedResources, allMemberClusterNames, nil, "0", false)
+			Eventually(rpStatusUpdatedActual, 2*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected")
+		})
+
+		It("should place the resources on all member clusters", func() {
+			for idx := range allMemberClusters {
+				memberCluster := allMemberClusters[idx]
+				workResourcesPlacedActual := waitForJobToBePlaced(memberCluster, &testJob)
+				Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName)
+			}
+		})
+
+		It("suspend job", func() {
+			Eventually(func() error {
+				var job batchv1.Job
+				err := hubClient.Get(ctx, types.NamespacedName{Name: testJob.Name, Namespace: testJob.Namespace}, &job)
+				if err != nil {
+					return err
+				}
+				job.Spec.Suspend = ptr.To(true)
+				return hubClient.Update(ctx, &job)
+			}, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to suspend job")
+		})
+
+		// A Job is not trackable, so we need to wait a bit longer for each rollout phase.
+		It("should update RP status as expected", func() {
+			rpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(rpKey, wantSelectedResources, allMemberClusterNames, nil, "1", false)
+			Eventually(rpStatusUpdatedActual, 5*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected")
+		})
+	})
+})
+
+// These two test cases need to run in an Ordered container because they place the same CRD;
+// if they ran in parallel, a resource conflict could occur.
+var _ = Describe("placing namespaced custom resources using a RP with rollout", Label("resourceplacement"), Ordered, func() {
+	crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+	rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
+	rpKey := types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}
+	testCustomResourceKind := ""
+
+	BeforeEach(OncePerOrdered, func() {
+		testCustomResource = testv1alpha1.TestResource{}
+		readTestCustomResource(&testCustomResource)
+		// Initialize the kind here because the object returned after creation has its Kind field emptied.
+		testCustomResourceKind = testCustomResource.Kind
+
+		// Create the test resources; the CRD is already installed in BeforeSuite.
+		workNamespace := appNamespace()
+		Expect(hubClient.Create(ctx, &workNamespace)).To(Succeed(), "Failed to create namespace %s", workNamespace.Name)
+		testCustomResource.Namespace = workNamespace.Name
+		// Create the custom resource at the very beginning because our resource detector runs every 30s to discover new resources,
+		// thus giving it some grace period.
+ Expect(hubClient.Create(ctx, &testCustomResource)).To(Succeed(), "Failed to create test custom resource %s", testCustomResource.GetName()) + + // Create a namespace-only CRP that selects both namespace and CRD for custom resource placement + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: utils.NamespaceKind, + Version: corev1.SchemeGroupVersion.Version, + Name: appNamespace().Name, + SelectionScope: placementv1beta1.NamespaceOnly, + }, + { + Group: utils.CRDMetaGVK.Group, + Kind: utils.CRDMetaGVK.Kind, + Version: utils.CRDMetaGVK.Version, + Name: testResourceCRDName, + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + crpStatusUpdatedActual := crpStatusUpdatedActual([]placementv1beta1.ResourceIdentifier{ + { + Kind: utils.NamespaceKind, + Name: appNamespace().Name, + Version: corev1.SchemeGroupVersion.Version, + }, + { + Group: utils.CRDMetaGVK.Group, + Kind: utils.CRDMetaGVK.Kind, + Name: testResourceCRDName, + Version: utils.CRDMetaGVK.Version, + }, + }, allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterEach(OncePerOrdered, func() { + // Remove the custom deletion blocker finalizer from the RP and CRP. 
+ ensureRPAndRelatedResourcesDeleted(rpKey, allMemberClusters, &testCustomResource) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Context("Test an RP place custom resource successfully, should wait to update resource", Ordered, func() { + var wantSelectedResources []placementv1beta1.ResourceIdentifier + var rp *placementv1beta1.ResourcePlacement + var observedResourceIdx string + unAvailablePeriodSeconds := 30 + workNamespace := appNamespace() + + BeforeAll(func() { + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Group: testv1alpha1.GroupVersion.Group, + Kind: testCustomResourceKind, + Name: testCustomResource.Name, + Version: testv1alpha1.GroupVersion.Version, + Namespace: workNamespace.Name, + }, + } + }) + + It("create the RP that select the custom resource", func() { + rp = &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace.Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: testv1alpha1.GroupVersion.Group, + Kind: testCustomResourceKind, + Version: testv1alpha1.GroupVersion.Version, + Name: testCustomResource.Name, + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 1, + }, + UnavailablePeriodSeconds: ptr.To(unAvailablePeriodSeconds), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + // Wait until all the expected resources have been selected. + // + // This is to address a flakiness situation where it might take a while for Fleet + // to recognize the custom resource (even if it is created before the RP). + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return fmt.Errorf("failed to get RP: %w", err) + } + + if diff := cmp.Diff(rp.Status.SelectedResources, wantSelectedResources, cmpopts.SortSlices(utils.LessFuncResourceIdentifier)); diff != "" { + return fmt.Errorf("selected resources mismatched (-got, +want): %s", diff) + } + // Use the fresh observed resource index to verify the RP status later. 
+ observedResourceIdx = rp.Status.ObservedResourceIndex + return nil + }, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to select all the expected resources") + + rpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(rpKey, wantSelectedResources, []string{memberCluster1EastProdName}, nil, observedResourceIdx, false) + Eventually(rpStatusUpdatedActual, 2*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on member cluster", func() { + workResourcesPlacedActual := waitForTestResourceToBePlaced(memberCluster1EastProd, &testCustomResource) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster1EastProd.ClusterName) + }) + + It("update the custom resource", func() { + Eventually(func() error { + var cr testv1alpha1.TestResource + err := hubClient.Get(ctx, types.NamespacedName{Name: testCustomResource.Name, Namespace: workNamespace.Name}, &cr) + if err != nil { + return err + } + cr.Spec.Foo = valBar1 // Previously was "foo1" + return hubClient.Update(ctx, &cr) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update custom resource") + }) + + It("should not update the resource on member cluster before the unavailable second", func() { + // subtracting 5 seconds because transition between IT takes ~1 second + unavailablePeriod := time.Duration(*rp.Spec.Strategy.RollingUpdate.UnavailablePeriodSeconds)*time.Second - (5 * time.Second) + Consistently(func() bool { + var cr testv1alpha1.TestResource + err := memberCluster1EastProd.KubeClient.Get(ctx, types.NamespacedName{Name: testCustomResource.Name, Namespace: workNamespace.Name}, &cr) + if err != nil { + klog.Errorf("Failed to get custom resource %s/%s: %v", workNamespace.Name, testCustomResource.Name, err) + return false + } + if cr.Spec.Foo == valFoo1 { // Previously was "foo1" + return true + } + return false + }, unavailablePeriod, consistentlyInterval).Should(BeTrue(), "Test resource was updated when it shouldn't be") + }) + + It("should update RP status as expected", func() { + // Refresh the observed resource index. + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return fmt.Errorf("failed to get RP: %w", err) + } + + if rp.Status.ObservedResourceIndex == observedResourceIdx { + // It is expected that the observed resource index has been bumped by 1 + // due to the resource change. + return fmt.Errorf("observed resource index is not updated") + } + // Use the fresh observed resource index to verify the RP status later. 
+ observedResourceIdx = rp.Status.ObservedResourceIndex + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to select all the expected resources") + + rpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(rpKey, wantSelectedResources, []string{memberCluster1EastProdName}, nil, observedResourceIdx, false) + Eventually(rpStatusUpdatedActual, 4*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("delete the RP and related resources", func() { + }) + }) + + Context("Test an RP place custom resource successfully, should wait to update resource on multiple member clusters", Ordered, func() { + workNamespace := appNamespace() + var wantSelectedResources []placementv1beta1.ResourceIdentifier + var rp *placementv1beta1.ResourcePlacement + unAvailablePeriodSeconds := 30 + var observedResourceIdx string + + BeforeAll(func() { + // Create the test resources. + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Group: testv1alpha1.GroupVersion.Group, + Kind: testCustomResourceKind, + Name: testCustomResource.Name, + Version: testv1alpha1.GroupVersion.Version, + Namespace: workNamespace.Name, + }, + } + }) + + It("create the RP that select the custom resource", func() { + rp = &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace.Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: testv1alpha1.GroupVersion.Group, + Kind: testCustomResourceKind, + Version: testv1alpha1.GroupVersion.Version, + Name: testCustomResource.Name, + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 1, + }, + UnavailablePeriodSeconds: ptr.To(unAvailablePeriodSeconds), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + }) + + It("should update RP status as expected", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return fmt.Errorf("failed to get RP: %w", err) + } + + if diff := cmp.Diff(rp.Status.SelectedResources, wantSelectedResources, cmpopts.SortSlices(utils.LessFuncResourceIdentifier)); diff != "" { + return fmt.Errorf("selected resources mismatched (-got, +want): %s", diff) + } + // Use the fresh observed resource index to verify the RP status later. 
+ observedResourceIdx = rp.Status.ObservedResourceIndex + return nil + }, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to select all the expected resources") + + rpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(rpKey, wantSelectedResources, allMemberClusterNames, nil, observedResourceIdx, false) + Eventually(rpStatusUpdatedActual, 2*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := waitForTestResourceToBePlaced(memberCluster, &testCustomResource) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("update the custom resource", func() { + Eventually(func() error { + var cr testv1alpha1.TestResource + err := hubClient.Get(ctx, types.NamespacedName{Name: testCustomResource.Name, Namespace: workNamespace.Name}, &cr) + if err != nil { + return err + } + cr.Spec.Foo = valBar1 // Previously was "foo1" + return hubClient.Update(ctx, &cr) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update custom resource") + }) + + It("should update one member cluster", func() { + // adding a buffer of 5 seconds + unavailablePeriod := time.Duration(*rp.Spec.Strategy.RollingUpdate.UnavailablePeriodSeconds)*time.Second + (5 * time.Second) + Eventually(func() bool { + // Check the number of clusters meeting the condition + countClustersMeetingCondition := func() int { + count := 0 + for _, cluster := range allMemberClusters { + if !checkCluster(cluster, testCustomResource.Name, workNamespace.Name) { + // resource field updated to "bar1" + count++ + } + } + return count + } + return countClustersMeetingCondition() == 1 + }, unavailablePeriod, eventuallyInterval).Should(BeTrue(), "Test resource was updated when it shouldn't be") + }) + + It("should not rollout update to the next member cluster before unavailable second", func() { + // subtracting a buffer of 5 seconds + unavailablePeriod := time.Duration(*rp.Spec.Strategy.RollingUpdate.UnavailablePeriodSeconds)*time.Second - (5 * time.Second) + Consistently(func() bool { + // Check the number of clusters meeting the condition + countClustersMeetingCondition := func() int { + count := 0 + for _, cluster := range allMemberClusters { + if !checkCluster(cluster, testCustomResource.Name, workNamespace.Name) { + // resource field updated to "bar1" + count++ + } + } + return count + } + return countClustersMeetingCondition() == 1 + }, unavailablePeriod, consistentlyInterval).Should(BeTrue(), "Test resource was updated when it shouldn't be") + }) + + It("should update RP status as expected", func() { + // Refresh the observed resource index. + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, rpKey, rp); err != nil { + return fmt.Errorf("failed to get RP: %w", err) + } + + if rp.Status.ObservedResourceIndex == observedResourceIdx { + // It is expected that the observed resource index has been bumped by 1 + // due to the resource change. + return fmt.Errorf("observed resource index is not updated") + } + // Use the fresh observed resource index to verify the RP status later. 
+ observedResourceIdx = rp.Status.ObservedResourceIndex + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to select all the expected resources") + + rpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(rpKey, wantSelectedResources, allMemberClusterNames, nil, observedResourceIdx, false) + Eventually(rpStatusUpdatedActual, 4*time.Duration(unAvailablePeriodSeconds)*time.Second, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + }) +}) + +func buildRPForSafeRollout(namespace string) *placementv1beta1.ResourcePlacement { + return &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()), + Namespace: namespace, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 1, + }, + }, + }, + }, + } +} diff --git a/test/e2e/setup_test.go b/test/e2e/setup_test.go index 8eda2dad9..ebf6c326c 100644 --- a/test/e2e/setup_test.go +++ b/test/e2e/setup_test.go @@ -238,7 +238,7 @@ var ( // We don't sort ResourcePlacementStatus by their name since we don't know which cluster will become unavailable first, // prompting the rollout to be blocked for remaining clusters. - safeRolloutCRPStatusCmpOptions = cmp.Options{ + safeRolloutPlacementStatusCmpOptions = cmp.Options{ cmpopts.SortSlices(lessFuncCondition), cmpopts.SortSlices(lessFuncPlacementStatusByConditions), cmpopts.SortSlices(utils.LessFuncResourceIdentifier), diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index 6a8ec44b5..44d3b2871 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -1468,7 +1468,10 @@ func ensureUpdateRunStrategyDeletion(strategyName string) { Eventually(removedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "ClusterStagedUpdateStrategy still exists") } -func ensureRPAndRelatedResourcesDeleted(rpKey types.NamespacedName, memberClusters []*framework.Cluster) { +// ensureRPAndRelatedResourcesDeleted deletes rp and verifies resources in the specified namespace placed by the rp are removed from the cluster. +// It checks if the placed configMap is removed by default, as this is tested in most of the test cases. +// For tests with additional resources placed, e.g. deployments, daemonSets, add those to placedResources. +func ensureRPAndRelatedResourcesDeleted(rpKey types.NamespacedName, memberClusters []*framework.Cluster, placedResources ...client.Object) { // Delete the ResourcePlacement. rp := &placementv1beta1.ResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ @@ -1482,7 +1485,7 @@ func ensureRPAndRelatedResourcesDeleted(rpKey types.NamespacedName, memberCluste for idx := range memberClusters { memberCluster := memberClusters[idx] - workResourcesRemovedActual := namespacedResourcesRemovedFromClusterActual(memberCluster) + workResourcesRemovedActual := namespacedResourcesRemovedFromClusterActual(memberCluster, placedResources...) 
Eventually(workResourcesRemovedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove work resources from member cluster %s", memberCluster.ClusterName) } From 9806af6e0cf78768dc54f67295be6e35dedf2269 Mon Sep 17 00:00:00 2001 From: Wantong Jiang Date: Wed, 27 Aug 2025 22:52:53 +0000 Subject: [PATCH 37/38] bump crd-installer builder golang version to 1.24.6 --- docker/crd-installer.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/crd-installer.Dockerfile b/docker/crd-installer.Dockerfile index 95c99b884..0f90216b3 100644 --- a/docker/crd-installer.Dockerfile +++ b/docker/crd-installer.Dockerfile @@ -1,5 +1,5 @@ # Build the crdinstaller binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.4 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.6 AS builder WORKDIR /workspace # Copy the Go Modules manifests From 15ead1ab8b246d4cc29fd9bb8af472aa88f28825 Mon Sep 17 00:00:00 2001 From: Wantong Jiang Date: Thu, 28 Aug 2025 01:56:22 +0000 Subject: [PATCH 38/38] remove unnecessary CRD from chart --- ...ent.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml | 1 - 1 file changed, 1 deletion(-) delete mode 120000 charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml diff --git a/charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml b/charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml deleted file mode 120000 index 967a3a706..000000000 --- a/charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacementstatuses.yaml \ No newline at end of file
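Usage note for the rollout specs in the first patch above: buildRPForSafeRollout centralizes the shared rollout strategy, so an individual spec typically only supplies its own resource selector before creating the RP, and cleanup goes through the variadic ensureRPAndRelatedResourcesDeleted with any additionally placed objects. The sketch below is illustrative only, not part of any patch, and assumes the e2e suite globals used in those specs (workNamespace, testDeployment, rpKey, allMemberClusters, hubClient, ctx).

	// Minimal sketch, assuming the e2e suite globals above; mirrors the deployment rollout spec.
	rp := buildRPForSafeRollout(workNamespace.Name)
	rp.Spec.ResourceSelectors = []placementv1beta1.ResourceSelectorTerm{
		{
			Group:   appv1.SchemeGroupVersion.Group,
			Kind:    utils.DeploymentKind,
			Version: appv1.SchemeGroupVersion.Version,
			Name:    testDeployment.Name,
		},
	}
	Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP")

	// Cleanup: pass every additionally placed object so its removal is verified on each member cluster.
	ensureRPAndRelatedResourcesDeleted(rpKey, allMemberClusters, &testDeployment)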