diff --git a/cmd/operator-controller/main.go b/cmd/operator-controller/main.go index c3241ce632..fba7c39af5 100644 --- a/cmd/operator-controller/main.go +++ b/cmd/operator-controller/main.go @@ -107,7 +107,10 @@ type config struct { globalPullSecret string } -const authFilePrefix = "operator-controller-global-pull-secrets" +const ( + authFilePrefix = "operator-controller-global-pull-secrets" + fieldOwnerPrefix = "olm.operatorframework.io" +) // podNamespace checks whether the controller is running in a Pod vs. // being run locally by inspecting the namespace file that gets mounted @@ -560,6 +563,7 @@ func setupBoxcutter( Scheme: mgr.GetScheme(), RevisionGenerator: rg, Preflights: preflights, + FieldOwner: fmt.Sprintf("%s/clusterextension-controller", fieldOwnerPrefix), } ceReconciler.RevisionStatesGetter = &controllers.BoxcutterRevisionStatesGetter{Reader: mgr.GetClient()} ceReconciler.StorageMigrator = &applier.BoxcutterStorageMigrator{ @@ -568,11 +572,6 @@ func setupBoxcutter( RevisionGenerator: rg, } - // Boxcutter - const ( - boxcutterSystemPrefixFieldOwner = "olm.operatorframework.io" - ) - discoveryClient, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig()) if err != nil { return fmt.Errorf("unable to create discovery client: %w", err) @@ -599,8 +598,8 @@ func setupBoxcutter( machinery.NewObjectEngine( mgr.GetScheme(), trackingCache, mgr.GetClient(), ownerhandling.NewNative(mgr.GetScheme()), - machinery.NewComparator(ownerhandling.NewNative(mgr.GetScheme()), discoveryClient, mgr.GetScheme(), boxcutterSystemPrefixFieldOwner), - boxcutterSystemPrefixFieldOwner, boxcutterSystemPrefixFieldOwner, + machinery.NewComparator(ownerhandling.NewNative(mgr.GetScheme()), discoveryClient, mgr.GetScheme(), fieldOwnerPrefix), + fieldOwnerPrefix, fieldOwnerPrefix, ), validation.NewClusterPhaseValidator(mgr.GetRESTMapper(), mgr.GetClient()), ), diff --git a/internal/operator-controller/applier/boxcutter.go b/internal/operator-controller/applier/boxcutter.go index 14159c1803..fa3f85e790 100644 --- a/internal/operator-controller/applier/boxcutter.go +++ b/internal/operator-controller/applier/boxcutter.go @@ -27,11 +27,9 @@ import ( ocv1 "github.com/operator-framework/operator-controller/api/v1" "github.com/operator-framework/operator-controller/internal/operator-controller/controllers" "github.com/operator-framework/operator-controller/internal/operator-controller/labels" - hashutil "github.com/operator-framework/operator-controller/internal/shared/util/hash" ) const ( - RevisionHashAnnotation = "olm.operatorframework.io/hash" ClusterExtensionRevisionPreviousLimit = 5 ) @@ -200,6 +198,7 @@ type Boxcutter struct { Scheme *runtime.Scheme RevisionGenerator ClusterExtensionRevisionGenerator Preflights []Preflight + FieldOwner string } func (bc *Boxcutter) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.ClusterExtension, objectLabels, revisionAnnotations map[string]string) (bool, string, error) { @@ -216,6 +215,17 @@ func (bc *Boxcutter) getObjects(rev *ocv1.ClusterExtensionRevision) []client.Obj return objs } +func (bc *Boxcutter) createOrUpdate(ctx context.Context, obj client.Object) error { + if obj.GetObjectKind().GroupVersionKind().Empty() { + gvk, err := apiutil.GVKForObject(obj, bc.Scheme) + if err != nil { + return err + } + obj.GetObjectKind().SetGroupVersionKind(gvk) + } + return bc.Client.Patch(ctx, obj, client.Apply, client.FieldOwner(bc.FieldOwner), client.ForceOwnership) +} + func (bc *Boxcutter) apply(ctx context.Context, contentFS fs.FS, ext *ocv1.ClusterExtension, 
erExtension, objectLabels, revisionAnnotations map[string]string) (bool, string, error) { // Generate desired revision desiredRevision, err := bc.RevisionGenerator.GenerateRevision(contentFS, ext, objectLabels, revisionAnnotations) @@ -223,27 +233,38 @@ func (bc *Boxcutter) apply(ctx context.Context, contentFS fs.FS, ext *ocv1.Clust return false, "", err } + if err := controllerutil.SetControllerReference(ext, desiredRevision, bc.Scheme); err != nil { + return false, "", fmt.Errorf("set ownerref: %w", err) + } + // List all existing revisions existingRevisions, err := bc.getExistingRevisions(ctx, ext.GetName()) if err != nil { return false, "", err } - desiredHash := hashutil.DeepHashObject(desiredRevision.Spec.Phases) - // Sort into current and previous revisions. - var ( - currentRevision *ocv1.ClusterExtensionRevision - ) + currentRevision := &ocv1.ClusterExtensionRevision{} state := StateNeedsInstall + // check if we can update the current revision. if len(existingRevisions) > 0 { - maybeCurrentRevision := existingRevisions[len(existingRevisions)-1] - annotations := maybeCurrentRevision.GetAnnotations() - if annotations != nil { - if revisionHash, ok := annotations[RevisionHashAnnotation]; ok && revisionHash == desiredHash { - currentRevision = &maybeCurrentRevision - } + // try first to update the current revision. + currentRevision = &existingRevisions[len(existingRevisions)-1] + desiredRevision.Spec.Previous = currentRevision.Spec.Previous + desiredRevision.Spec.Revision = currentRevision.Spec.Revision + desiredRevision.Name = currentRevision.Name + + err := bc.createOrUpdate(ctx, desiredRevision) + switch { + case apierrors.IsInvalid(err): + // We could not update the current revision because the patch tried to change an immutable field. + // Therefore, we need to create a new revision. + state = StateNeedsUpgrade + case err == nil: + // in-place patch was successful, no changes in phases + state = StateUnchanged + default: + return false, "", fmt.Errorf("patching %s Revision: %w", desiredRevision.Name, err) } - state = StateNeedsUpgrade } // Preflights @@ -270,30 +291,22 @@ func (bc *Boxcutter) apply(ctx context.Context, contentFS fs.FS, ext *ocv1.Clust } } - if currentRevision == nil { - // all Revisions are outdated => create a new one.
+ if state != StateUnchanged { + // need to create a new revision prevRevisions := existingRevisions revisionNumber := latestRevisionNumber(prevRevisions) + 1 - newRevision := desiredRevision - newRevision.Name = fmt.Sprintf("%s-%d", ext.Name, revisionNumber) - if newRevision.GetAnnotations() == nil { - newRevision.Annotations = map[string]string{} - } - newRevision.Annotations[RevisionHashAnnotation] = desiredHash - newRevision.Spec.Revision = revisionNumber + desiredRevision.Name = fmt.Sprintf("%s-%d", ext.Name, revisionNumber) + desiredRevision.Spec.Revision = revisionNumber - if err = bc.setPreviousRevisions(ctx, newRevision, prevRevisions); err != nil { + if err = bc.setPreviousRevisions(ctx, desiredRevision, prevRevisions); err != nil { return false, "", fmt.Errorf("garbage collecting old Revisions: %w", err) } - if err := controllerutil.SetControllerReference(ext, newRevision, bc.Scheme); err != nil { - return false, "", fmt.Errorf("set ownerref: %w", err) - } - if err := bc.Client.Create(ctx, newRevision); err != nil { + if err := bc.createOrUpdate(ctx, desiredRevision); err != nil { return false, "", fmt.Errorf("creating new Revision: %w", err) } - currentRevision = newRevision + currentRevision = desiredRevision } progressingCondition := meta.FindStatusCondition(currentRevision.Status.Conditions, ocv1.TypeProgressing) diff --git a/internal/operator-controller/applier/boxcutter_test.go b/internal/operator-controller/applier/boxcutter_test.go index 5679f4b8ac..9da1ddb4a0 100644 --- a/internal/operator-controller/applier/boxcutter_test.go +++ b/internal/operator-controller/applier/boxcutter_test.go @@ -20,9 +20,11 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation/field" k8scheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" ocv1 "github.com/operator-framework/operator-controller/api/v1" "github.com/operator-framework/operator-controller/internal/operator-controller/applier" @@ -303,14 +305,10 @@ func TestBoxcutter_Apply(t *testing.T) { UID: "test-uid", }, } - defaultDesiredHash := "gvvp8nzq5sbila80hkiv69am8hdr7o68qkk8n084gdn" defaultDesiredRevision := &ocv1.ClusterExtensionRevision{ ObjectMeta: metav1.ObjectMeta{ Name: "test-ext-1", UID: "rev-uid-1", - Annotations: map[string]string{ - applier.RevisionHashAnnotation: defaultDesiredHash, - }, Labels: map[string]string{ controllers.ClusterExtensionRevisionOwnerLabel: ext.Name, }, @@ -338,12 +336,29 @@ func TestBoxcutter_Apply(t *testing.T) { }, } + allowedRevisionValue := func(revNum int64) *interceptor.Funcs { return &interceptor.Funcs{ Patch: func(ctx context.Context, client client.WithWatch, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { cer, ok := obj.(*ocv1.ClusterExtensionRevision) if !ok { return fmt.Errorf("expected ClusterExtensionRevision, got %T", obj) } if cer.Spec.Revision != revNum { return apierrors.NewInvalid(cer.GroupVersionKind().GroupKind(), cer.GetName(), field.ErrorList{field.Invalid(field.NewPath("spec.phases"), "immutable", "spec.phases is immutable")}) } return client.Patch(ctx, obj, patch, opts...)
+ }, } } testCases := []struct { - name string - mockBuilder applier.ClusterExtensionRevisionGenerator - existingObjs []client.Object - expectedErr string - validate func(t *testing.T, c client.Client) + name string + mockBuilder applier.ClusterExtensionRevisionGenerator + existingObjs []client.Object + expectedErr string + validate func(t *testing.T, c client.Client) + clientInterceptor *interceptor.Funcs }{ { name: "first revision", @@ -388,7 +403,6 @@ func TestBoxcutter_Apply(t *testing.T) { rev := revList.Items[0] assert.Equal(t, "test-ext-1", rev.Name) assert.Equal(t, int64(1), rev.Spec.Revision) - assert.Equal(t, defaultDesiredHash, rev.Annotations[applier.RevisionHashAnnotation]) assert.Len(t, rev.OwnerReferences, 1) assert.Equal(t, ext.Name, rev.OwnerReferences[0].Name) assert.Equal(t, ext.UID, rev.OwnerReferences[0].UID) @@ -441,7 +455,7 @@ func TestBoxcutter_Apply(t *testing.T) { }, }, { - name: "new revision created when hash differs", + name: "new revision created when objects in new revision are different", mockBuilder: &mockBundleRevisionBuilder{ makeRevisionFunc: func(bundleFS fs.FS, ext *ocv1.ClusterExtension, objectLabels, revisionAnnotations map[string]string) (*ocv1.ClusterExtensionRevision, error) { return &ocv1.ClusterExtensionRevision{ @@ -474,6 +488,7 @@ func TestBoxcutter_Apply(t *testing.T) { }, nil }, }, + clientInterceptor: allowedRevisionValue(2), existingObjs: []client.Object{ defaultDesiredRevision, }, @@ -495,7 +510,6 @@ func TestBoxcutter_Apply(t *testing.T) { assert.Equal(t, "test-ext-2", newRev.Name) assert.Equal(t, int64(2), newRev.Spec.Revision) - assert.Equal(t, "1fqrim12vefkogp3pwxwhcs7c0pi1z1t2fw4roxu81sv", newRev.Annotations[applier.RevisionHashAnnotation]) require.Len(t, newRev.Spec.Previous, 1) assert.Equal(t, "test-ext-1", newRev.Spec.Previous[0].Name) assert.Equal(t, types.UID("rev-uid-1"), newRev.Spec.Previous[0].UID) @@ -518,7 +532,7 @@ func TestBoxcutter_Apply(t *testing.T) { }, }, { - name: "sixth revision", + name: "keep at most 5 past revisions", mockBuilder: &mockBundleRevisionBuilder{ makeRevisionFunc: func(bundleFS fs.FS, ext *ocv1.ClusterExtension, objectLabels, revisionAnnotations map[string]string) (*ocv1.ClusterExtensionRevision, error) { return &ocv1.ClusterExtensionRevision{ @@ -542,6 +556,7 @@ func TestBoxcutter_Apply(t *testing.T) { }, Spec: ocv1.ClusterExtensionRevisionSpec{ LifecycleState: ocv1.ClusterExtensionRevisionLifecycleStateArchived, + Revision: 1, }, }, &ocv1.ClusterExtensionRevision{ @@ -553,6 +568,7 @@ func TestBoxcutter_Apply(t *testing.T) { }, Spec: ocv1.ClusterExtensionRevisionSpec{ LifecycleState: ocv1.ClusterExtensionRevisionLifecycleStateArchived, + Revision: 2, }, }, &ocv1.ClusterExtensionRevision{ @@ -564,6 +580,7 @@ func TestBoxcutter_Apply(t *testing.T) { }, Spec: ocv1.ClusterExtensionRevisionSpec{ LifecycleState: ocv1.ClusterExtensionRevisionLifecycleStateArchived, + Revision: 3, }, }, &ocv1.ClusterExtensionRevision{ @@ -575,6 +592,7 @@ func TestBoxcutter_Apply(t *testing.T) { }, Spec: ocv1.ClusterExtensionRevisionSpec{ LifecycleState: ocv1.ClusterExtensionRevisionLifecycleStateArchived, + Revision: 4, }, }, &ocv1.ClusterExtensionRevision{ @@ -586,6 +604,7 @@ func TestBoxcutter_Apply(t *testing.T) { }, Spec: ocv1.ClusterExtensionRevisionSpec{ LifecycleState: ocv1.ClusterExtensionRevisionLifecycleStateArchived, + Revision: 5, }, }, &ocv1.ClusterExtensionRevision{ @@ -597,9 +616,11 @@ func TestBoxcutter_Apply(t *testing.T) { }, Spec: ocv1.ClusterExtensionRevisionSpec{ LifecycleState:
ocv1.ClusterExtensionRevisionLifecycleStateArchived, + Revision: 6, }, }, }, + clientInterceptor: allowedRevisionValue(7), validate: func(t *testing.T, c client.Client) { rev1 := &ocv1.ClusterExtensionRevision{} err := c.Get(t.Context(), client.ObjectKey{Name: "rev-1"}, rev1) @@ -607,13 +628,13 @@ func TestBoxcutter_Apply(t *testing.T) { assert.True(t, apierrors.IsNotFound(err)) latest := &ocv1.ClusterExtensionRevision{} - err = c.Get(t.Context(), client.ObjectKey{Name: "test-ext-1"}, latest) + err = c.Get(t.Context(), client.ObjectKey{Name: "test-ext-7"}, latest) require.NoError(t, err) assert.Len(t, latest.Spec.Previous, applier.ClusterExtensionRevisionPreviousLimit) }, }, { - name: "len([]revisions) > limit but contains active revisions with index beyond limit", + name: "keep active revisions even when they are beyond the limit", mockBuilder: &mockBundleRevisionBuilder{ makeRevisionFunc: func(bundleFS fs.FS, ext *ocv1.ClusterExtension, objectLabels, revisionAnnotations map[string]string) (*ocv1.ClusterExtensionRevision, error) { return &ocv1.ClusterExtensionRevision{ @@ -637,6 +658,7 @@ func TestBoxcutter_Apply(t *testing.T) { }, Spec: ocv1.ClusterExtensionRevisionSpec{ LifecycleState: ocv1.ClusterExtensionRevisionLifecycleStateArchived, + Revision: 1, }, }, &ocv1.ClusterExtensionRevision{ @@ -649,6 +671,7 @@ func TestBoxcutter_Apply(t *testing.T) { Spec: ocv1.ClusterExtensionRevisionSpec{ // index beyond the retention limit but active; should be preserved LifecycleState: ocv1.ClusterExtensionRevisionLifecycleStateActive, + Revision: 2, }, }, &ocv1.ClusterExtensionRevision{ @@ -660,6 +683,7 @@ func TestBoxcutter_Apply(t *testing.T) { }, Spec: ocv1.ClusterExtensionRevisionSpec{ LifecycleState: ocv1.ClusterExtensionRevisionLifecycleStateActive, + Revision: 3, }, }, &ocv1.ClusterExtensionRevision{ @@ -672,6 +696,7 @@ func TestBoxcutter_Apply(t *testing.T) { Spec: ocv1.ClusterExtensionRevisionSpec{ // archived but should be preserved since it is within the limit LifecycleState: ocv1.ClusterExtensionRevisionLifecycleStateArchived, + Revision: 4, }, }, &ocv1.ClusterExtensionRevision{ @@ -683,6 +708,7 @@ func TestBoxcutter_Apply(t *testing.T) { }, Spec: ocv1.ClusterExtensionRevisionSpec{ LifecycleState: ocv1.ClusterExtensionRevisionLifecycleStateActive, + Revision: 5, }, }, &ocv1.ClusterExtensionRevision{ @@ -694,6 +720,7 @@ func TestBoxcutter_Apply(t *testing.T) { }, Spec: ocv1.ClusterExtensionRevisionSpec{ LifecycleState: ocv1.ClusterExtensionRevisionLifecycleStateActive, + Revision: 6, }, }, &ocv1.ClusterExtensionRevision{ @@ -705,9 +732,11 @@ func TestBoxcutter_Apply(t *testing.T) { }, Spec: ocv1.ClusterExtensionRevisionSpec{ LifecycleState: ocv1.ClusterExtensionRevisionLifecycleStateActive, + Revision: 7, }, }, }, + clientInterceptor: allowedRevisionValue(8), validate: func(t *testing.T, c client.Client) { rev1 := &ocv1.ClusterExtensionRevision{} err := c.Get(t.Context(), client.ObjectKey{Name: "rev-1"}, rev1) @@ -723,7 +752,7 @@ func TestBoxcutter_Apply(t *testing.T) { require.NoError(t, err) latest := &ocv1.ClusterExtensionRevision{} - err = c.Get(t.Context(), client.ObjectKey{Name: "test-ext-1"}, latest) + err = c.Get(t.Context(), client.ObjectKey{Name: "test-ext-8"}, latest) require.NoError(t, err) assert.Len(t, latest.Spec.Previous, 6) assert.Contains(t, latest.Spec.Previous, ocv1.ClusterExtensionRevisionPrevious{Name: "rev-4"}) @@ -734,12 +763,17 @@ func TestBoxcutter_Apply(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { // Setup - fakeClient =
fake.NewClientBuilder().WithScheme(testScheme).WithObjects(tc.existingObjs...).Build() + cb := fake.NewClientBuilder().WithScheme(testScheme).WithObjects(tc.existingObjs...) + if tc.clientInterceptor != nil { + cb.WithInterceptorFuncs(*tc.clientInterceptor) + } + fakeClient := cb.Build() boxcutter := &applier.Boxcutter{ Client: fakeClient, Scheme: testScheme, RevisionGenerator: tc.mockBuilder, + FieldOwner: "test-owner", } // We need a dummy fs.FS diff --git a/test/e2e/cluster_extension_install_test.go b/test/e2e/cluster_extension_install_test.go index 26b0cb7a0e..ab0bf48b1c 100644 --- a/test/e2e/cluster_extension_install_test.go +++ b/test/e2e/cluster_extension_install_test.go @@ -13,335 +13,26 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" - rbacv1 "k8s.io/api/rbac/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/client" ocv1 "github.com/operator-framework/operator-controller/api/v1" utils "github.com/operator-framework/operator-controller/internal/shared/util/testutils" + . "github.com/operator-framework/operator-controller/test/helpers" ) const ( - artifactName = "operator-controller-e2e" + artifactName = "operator-controller-e2e" + pollDuration = time.Minute + pollInterval = time.Second + testCatalogRefEnvVar = "CATALOG_IMG" + testCatalogName = "test-catalog" ) -var pollDuration = time.Minute -var pollInterval = time.Second - -func createNamespace(ctx context.Context, name string) (*corev1.Namespace, error) { - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - } - err := c.Create(ctx, ns) - if err != nil { - return nil, err - } - return ns, nil -} - -func createServiceAccount(ctx context.Context, name types.NamespacedName, clusterExtensionName string) (*corev1.ServiceAccount, error) { - sa := &corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: name.Name, - Namespace: name.Namespace, - }, - } - err := c.Create(ctx, sa) - if err != nil { - return nil, err - } - - return sa, createClusterRoleAndBindingForSA(ctx, name.Name, sa, clusterExtensionName) -} - -func createClusterRoleAndBindingForSA(ctx context.Context, name string, sa *corev1.ServiceAccount, clusterExtensionName string) error { - cr := &rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Rules: []rbacv1.PolicyRule{ - { - APIGroups: []string{ - "olm.operatorframework.io", - }, - Resources: []string{ - "clusterextensions/finalizers", - }, - Verbs: []string{ - "update", - }, - ResourceNames: []string{clusterExtensionName}, - }, - { - APIGroups: []string{ - "", - }, - Resources: []string{ - "configmaps", - "secrets", // for helm - "services", - "serviceaccounts", - }, - Verbs: []string{ - "create", - "update", - "delete", - "patch", - "get", - "list", - "watch", - }, - }, - { - APIGroups: []string{ - "apiextensions.k8s.io", - }, - Resources: []string{ - "customresourcedefinitions", - }, - Verbs: []string{ - "create", - "update", - "delete", - "patch", - "get", - "list", - "watch", - }, - }, - { - APIGroups: []string{ - "apps", - }, - Resources: []string{ - "deployments", - }, - Verbs: []string{ - "create", - "update", - "delete", - "patch", - "get", - "list", - "watch", - }, - }, - { - APIGroups:
[]string{ - "rbac.authorization.k8s.io", - }, - Resources: []string{ - "clusterroles", - "roles", - "clusterrolebindings", - "rolebindings", - }, - Verbs: []string{ - "create", - "update", - "delete", - "patch", - "get", - "list", - "watch", - "bind", - "escalate", - }, - }, - { - APIGroups: []string{ - "networking.k8s.io", - }, - Resources: []string{ - "networkpolicies", - }, - Verbs: []string{ - "get", - "list", - "watch", - "create", - "update", - "patch", - "delete", - }, - }, - }, - } - err := c.Create(ctx, cr) - if err != nil { - return err - } - crb := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Name: sa.Name, - Namespace: sa.Namespace, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: name, - }, - } - err = c.Create(ctx, crb) - if err != nil { - return err - } - - return nil -} - -func testInit(t *testing.T) (*ocv1.ClusterExtension, *ocv1.ClusterCatalog, *corev1.ServiceAccount, *corev1.Namespace) { - ce, cc := testInitClusterExtensionClusterCatalog(t) - sa, ns := testInitServiceAccountNamespace(t, ce.Name) - return ce, cc, sa, ns -} - -func testInitClusterExtensionClusterCatalog(t *testing.T) (*ocv1.ClusterExtension, *ocv1.ClusterCatalog) { - ceName := fmt.Sprintf("clusterextension-%s", rand.String(8)) - - ce := &ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Name: ceName, - }, - } - - cc, err := createTestCatalog(context.Background(), testCatalogName, os.Getenv(testCatalogRefEnvVar)) - require.NoError(t, err) - - validateCatalogUnpack(t) - - return ce, cc -} - -func testInitServiceAccountNamespace(t *testing.T, clusterExtensionName string) (*corev1.ServiceAccount, *corev1.Namespace) { - var err error - - ns, err := createNamespace(context.Background(), clusterExtensionName) - require.NoError(t, err) - - name := types.NamespacedName{ - Name: clusterExtensionName, - Namespace: ns.GetName(), - } - - sa, err := createServiceAccount(context.Background(), name, clusterExtensionName) - require.NoError(t, err) - - return sa, ns -} - -func validateCatalogUnpack(t *testing.T) { - catalog := &ocv1.ClusterCatalog{} - t.Log("Ensuring ClusterCatalog has Status.Condition of Progressing with a status == True and reason == Succeeded") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - err := c.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) - require.NoError(ct, err) - cond := apimeta.FindStatusCondition(catalog.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("Checking that catalog has the expected metadata label") - require.NotNil(t, catalog.Labels) - require.Contains(t, catalog.Labels, "olm.operatorframework.io/metadata.name") - require.Equal(t, testCatalogName, catalog.Labels["olm.operatorframework.io/metadata.name"]) - - t.Log("Ensuring ClusterCatalog has Status.Condition of Type = Serving with status == True") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - err := c.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) - require.NoError(ct, err) - cond := apimeta.FindStatusCondition(catalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, 
pollInterval) -} - -func ensureNoExtensionResources(t *testing.T, clusterExtensionName string) { - ls := labels.Set{"olm.operatorframework.io/owner-name": clusterExtensionName} - - // CRDs may take an extra long time to be deleted, and may run into the following error: - // Condition=Terminating Status=True Reason=InstanceDeletionFailed Message="could not list instances: storage is (re)initializing" - t.Logf("By waiting for CustomResourceDefinitions of %q to be deleted", clusterExtensionName) - require.EventuallyWithT(t, func(ct *assert.CollectT) { - list := &apiextensionsv1.CustomResourceDefinitionList{} - err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) - require.NoError(ct, err) - require.Empty(ct, list.Items) - }, 5*pollDuration, pollInterval) - - t.Logf("By waiting for ClusterRoleBindings of %q to be deleted", clusterExtensionName) - require.EventuallyWithT(t, func(ct *assert.CollectT) { - list := &rbacv1.ClusterRoleBindingList{} - err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) - require.NoError(ct, err) - require.Empty(ct, list.Items) - }, 2*pollDuration, pollInterval) - - t.Logf("By waiting for ClusterRoles of %q to be deleted", clusterExtensionName) - require.EventuallyWithT(t, func(ct *assert.CollectT) { - list := &rbacv1.ClusterRoleList{} - err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) - require.NoError(ct, err) - require.Empty(ct, list.Items) - }, 2*pollDuration, pollInterval) -} - -func testCleanup(t *testing.T, cat *ocv1.ClusterCatalog, clusterExtension *ocv1.ClusterExtension, sa *corev1.ServiceAccount, ns *corev1.Namespace) { - if cat != nil { - t.Logf("By deleting ClusterCatalog %q", cat.Name) - require.NoError(t, c.Delete(context.Background(), cat)) - require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) - return errors.IsNotFound(err) - }, pollDuration, pollInterval) - } - - if clusterExtension != nil { - t.Logf("By deleting ClusterExtension %q", clusterExtension.Name) - require.NoError(t, c.Delete(context.Background(), clusterExtension)) - require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, &ocv1.ClusterExtension{}) - return errors.IsNotFound(err) - }, pollDuration, pollInterval) - ensureNoExtensionResources(t, clusterExtension.Name) - } - - if sa != nil { - t.Logf("By deleting ServiceAccount %q", sa.Name) - require.NoError(t, c.Delete(context.Background(), sa)) - require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: sa.Name, Namespace: sa.Namespace}, &corev1.ServiceAccount{}) - return errors.IsNotFound(err) - }, pollDuration, pollInterval) - } - - if ns != nil { - t.Logf("By deleting Namespace %q", ns.Name) - require.NoError(t, c.Delete(context.Background(), ns)) - require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: ns.Name}, &corev1.Namespace{}) - return errors.IsNotFound(err) - }, pollDuration, pollInterval) - } -} - func TestClusterExtensionInstallRegistry(t *testing.T) { type testCase struct { name string @@ -365,8 +56,8 @@ func TestClusterExtensionInstallRegistry(t *testing.T) { t.Log("When a cluster extension is installed from a catalog") t.Log("When the extension bundle format is registry+v1") - clusterExtension, extensionCatalog, sa, ns := testInit(t) - defer 
testCleanup(t, extensionCatalog, clusterExtension, sa, ns) + clusterExtension, extensionCatalog, sa, ns := TestInit(t) + defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) defer utils.CollectTestArtifacts(t, artifactName, c, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -435,8 +126,8 @@ func TestClusterExtensionInstallRegistryDynamic(t *testing.T) { t.Log("When a cluster extension is installed from a catalog") t.Log("When the extension bundle format is registry+v1") - clusterExtension, extensionCatalog, sa, ns := testInit(t) - defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) + clusterExtension, extensionCatalog, sa, ns := TestInit(t) + defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) defer utils.CollectTestArtifacts(t, artifactName, c, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -505,11 +196,11 @@ location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"`, func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) { t.Log("When a cluster extension is installed from a catalog") - clusterExtension, extensionCatalog, sa, ns := testInit(t) - extraCatalog, err := createTestCatalog(context.Background(), "extra-test-catalog", os.Getenv(testCatalogRefEnvVar)) + clusterExtension, extensionCatalog, sa, ns := TestInit(t) + extraCatalog, err := CreateTestCatalog(context.Background(), "extra-test-catalog", os.Getenv(testCatalogRefEnvVar)) require.NoError(t, err) - defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) + defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) defer utils.CollectTestArtifacts(t, artifactName, c, cfg) defer func(cat *ocv1.ClusterCatalog) { require.NoError(t, c.Delete(context.Background(), cat)) @@ -555,8 +246,8 @@ func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) { t.Log("When a cluster extension is installed from a catalog") t.Log("When resolving upgrade edges") - clusterExtension, extensionCatalog, sa, ns := testInit(t) - defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) + clusterExtension, extensionCatalog, sa, ns := TestInit(t) + defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) defer utils.CollectTestArtifacts(t, artifactName, c, cfg) t.Log("By creating an ClusterExtension at a specified version") @@ -616,8 +307,8 @@ func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) { t.Log("When a cluster extension is installed from a catalog") t.Log("When resolving upgrade edges") - clusterExtension, extensionCatalog, sa, ns := testInit(t) - defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) + clusterExtension, extensionCatalog, sa, ns := TestInit(t) + defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) defer utils.CollectTestArtifacts(t, artifactName, c, cfg) t.Log("By creating an ClusterExtension at a specified version") @@ -663,8 +354,8 @@ func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) { func TestClusterExtensionInstallSuccessorVersion(t *testing.T) { t.Log("When a cluster extension is installed from a catalog") t.Log("When resolving upgrade edges") - clusterExtension, extensionCatalog, sa, ns := testInit(t) - defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) + clusterExtension, extensionCatalog, sa, ns := TestInit(t) + defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) defer utils.CollectTestArtifacts(t, artifactName, c, cfg) t.Log("By creating an ClusterExtension at a specified version") @@ -709,8 +400,8 @@ 
func TestClusterExtensionInstallSuccessorVersion(t *testing.T) { func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { t.Log("When a cluster extension is installed from a catalog") t.Log("It resolves again when a catalog is patched with new ImageRef") - clusterExtension, extensionCatalog, sa, ns := testInit(t) - defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) + clusterExtension, extensionCatalog, sa, ns := TestInit(t) + defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) defer utils.CollectTestArtifacts(t, artifactName, c, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -784,7 +475,7 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { // create a test-catalog with latest image tag latestCatalogImage := fmt.Sprintf("%s/e2e/test-catalog:latest", os.Getenv("CLUSTER_REGISTRY_HOST")) - extensionCatalog, err := createTestCatalog(context.Background(), testCatalogName, latestCatalogImage) + extensionCatalog, err := CreateTestCatalog(context.Background(), testCatalogName, latestCatalogImage) require.NoError(t, err) clusterExtensionName := fmt.Sprintf("clusterextension-%s", rand.String(8)) clusterExtension := &ocv1.ClusterExtension{ @@ -792,11 +483,11 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { Name: clusterExtensionName, }, } - ns, err := createNamespace(context.Background(), clusterExtensionName) + ns, err := CreateNamespace(context.Background(), clusterExtensionName) require.NoError(t, err) - sa, err := createServiceAccount(context.Background(), types.NamespacedName{Name: clusterExtensionName, Namespace: ns.Name}, clusterExtensionName) + sa, err := CreateServiceAccount(context.Background(), types.NamespacedName{Name: clusterExtensionName, Namespace: ns.Name}, clusterExtensionName) require.NoError(t, err) - defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) + defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) defer utils.CollectTestArtifacts(t, artifactName, c, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -853,8 +544,8 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T) { t.Log("When a cluster extension is installed from a catalog") t.Log("It resolves again when managed content is changed") - clusterExtension, extensionCatalog, sa, ns := testInit(t) - defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) + clusterExtension, extensionCatalog, sa, ns := TestInit(t) + defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) defer utils.CollectTestArtifacts(t, artifactName, c, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -906,9 +597,9 @@ func TestClusterExtensionRecoversFromNoNamespaceWhenFailureFixed(t *testing.T) { t.Log("When the extension bundle format is registry+v1") t.Log("By not creating the Namespace and ServiceAccount") - clusterExtension, extensionCatalog := testInitClusterExtensionClusterCatalog(t) + clusterExtension, extensionCatalog := TestInitClusterExtensionClusterCatalog(t) - defer testCleanup(t, extensionCatalog, clusterExtension, nil, nil) + defer TestCleanup(t, extensionCatalog, clusterExtension, nil, nil) defer utils.CollectTestArtifacts(t, artifactName, c, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -949,8 +640,8 @@ func TestClusterExtensionRecoversFromNoNamespaceWhenFailureFixed(t *testing.T) { }, pollDuration, pollInterval) t.Log("By creating the Namespace and 
ServiceAccount") - sa, ns := testInitServiceAccountNamespace(t, clusterExtension.Name) - defer testCleanup(t, nil, nil, sa, ns) + sa, ns := TestInitServiceAccountNamespace(t, clusterExtension.Name) + defer TestCleanup(t, nil, nil, sa, ns) // NOTE: In order to ensure predictable results we need to ensure we have a single // known failure with a singular fix operation. Additionally, due to the exponential @@ -981,9 +672,9 @@ func TestClusterExtensionRecoversFromExistingDeploymentWhenFailureFixed(t *testi t.Log("When a cluster extension is installed from a catalog") t.Log("When the extension bundle format is registry+v1") - clusterExtension, extensionCatalog, sa, ns := testInit(t) + clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) + defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) defer utils.CollectTestArtifacts(t, artifactName, c, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 0bf84bec88..aa033a2f1e 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -7,10 +7,8 @@ import ( "testing" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/rest" - "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -26,8 +24,6 @@ var ( const ( testSummaryOutputEnvVar = "E2E_SUMMARY_OUTPUT" - testCatalogRefEnvVar = "CATALOG_IMG" - testCatalogName = "test-catalog" latestImageTag = "latest" ) @@ -54,32 +50,6 @@ func TestMain(m *testing.M) { os.Exit(res) } -// createTestCatalog will create a new catalog on the test cluster, provided -// the context, catalog name, and the image reference. It returns the created catalog -// or an error if any errors occurred while creating the catalog. -// Note that catalogd will automatically create the label: -// -// "olm.operatorframework.io/metadata.name": name -func createTestCatalog(ctx context.Context, name string, imageRef string) (*ocv1.ClusterCatalog, error) { - catalog := &ocv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: ocv1.ClusterCatalogSpec{ - Source: ocv1.CatalogSource{ - Type: ocv1.SourceTypeImage, - Image: &ocv1.ImageSource{ - Ref: imageRef, - PollIntervalMinutes: ptr.To(1), - }, - }, - }, - } - - err := c.Create(ctx, catalog) - return catalog, err -} - // patchTestCatalog will patch the existing clusterCatalog on the test cluster, provided // the context, catalog name, and the image reference. It returns an error // if any errors occurred while updating the catalog. diff --git a/test/experimental-e2e/experimental_e2e_test.go b/test/experimental-e2e/experimental_e2e_test.go index 234d73d8db..fca2511f76 100644 --- a/test/experimental-e2e/experimental_e2e_test.go +++ b/test/experimental-e2e/experimental_e2e_test.go @@ -28,6 +28,7 @@ import ( ocv1 "github.com/operator-framework/operator-controller/api/v1" "github.com/operator-framework/operator-controller/internal/operator-controller/scheme" utils "github.com/operator-framework/operator-controller/internal/shared/util/testutils" + . 
"github.com/operator-framework/operator-controller/test/helpers" ) const ( @@ -389,6 +390,60 @@ func TestClusterExtensionConfigSupport(t *testing.T) { }, pollDuration, pollInterval) } +func TestClusterExtensionVersionUpdate(t *testing.T) { + t.Log("When a cluster extension is installed from a catalog") + t.Log("When resolving upgrade edges") + + clusterExtension, extensionCatalog, sa, ns := TestInit(t) + defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) + defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + + t.Log("By creating an ClusterExtension at a specified version") + clusterExtension.Spec = ocv1.ClusterExtensionSpec{ + Source: ocv1.SourceConfig{ + SourceType: "Catalog", + Catalog: &ocv1.CatalogFilter{ + PackageName: "test", + Version: "1.0.0", + }, + }, + Namespace: ns.Name, + ServiceAccount: ocv1.ServiceAccountReference{ + Name: sa.Name, + }, + } + require.NoError(t, c.Create(context.Background(), clusterExtension)) + t.Log("By eventually reporting a successful resolution") + require.EventuallyWithT(t, func(ct *assert.CollectT) { + require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) + require.NotNil(ct, cond) + require.Equal(ct, metav1.ConditionTrue, cond.Status) + require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) + }, pollDuration, pollInterval) + + t.Log("It allows to upgrade the ClusterExtension to a non-successor version") + t.Log("By forcing update of ClusterExtension resource to a non-successor version") + // 1.2.0 does not replace/skip/skipRange 1.0.0. + clusterExtension.Spec.Source.Catalog.Version = "1.2.0" + clusterExtension.Spec.Source.Catalog.UpgradeConstraintPolicy = ocv1.UpgradeConstraintPolicySelfCertified + require.NoError(t, c.Update(context.Background(), clusterExtension)) + t.Log("By eventually reporting a satisfiable resolution") + require.EventuallyWithT(t, func(ct *assert.CollectT) { + require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) + require.NotNil(ct, cond) + require.Equal(ct, metav1.ConditionTrue, cond.Status) + require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) + }, pollDuration, pollInterval) + t.Log("We should have two ClusterExtensionRevision resources") + require.EventuallyWithT(t, func(ct *assert.CollectT) { + cerList := &ocv1.ClusterExtensionRevisionList{} + require.NoError(ct, c.List(context.Background(), cerList)) + require.Len(ct, cerList.Items, 2) + }, pollDuration, pollInterval) +} + func getWebhookOperatorResource(name string, namespace string, valid bool) *unstructured.Unstructured { return &unstructured.Unstructured{ Object: map[string]interface{}{ diff --git a/test/helpers/helpers.go b/test/helpers/helpers.go new file mode 100644 index 0000000000..49ebeaab6a --- /dev/null +++ b/test/helpers/helpers.go @@ -0,0 +1,383 @@ +package utils + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + 
"k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/rest" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + ocv1 "github.com/operator-framework/operator-controller/api/v1" + "github.com/operator-framework/operator-controller/internal/operator-controller/scheme" +) + +var ( + cfg *rest.Config + c client.Client +) + +const ( + pollDuration = time.Minute + pollInterval = time.Second + testCatalogName = "test-catalog" + testCatalogRefEnvVar = "CATALOG_IMG" +) + +func CreateNamespace(ctx context.Context, name string) (*corev1.Namespace, error) { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + err := c.Create(ctx, ns) + if err != nil { + return nil, err + } + return ns, nil +} + +func CreateServiceAccount(ctx context.Context, name types.NamespacedName, clusterExtensionName string) (*corev1.ServiceAccount, error) { + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: name.Name, + Namespace: name.Namespace, + }, + } + err := c.Create(ctx, sa) + if err != nil { + return nil, err + } + + return sa, CreateClusterRoleAndBindingForSA(ctx, name.Name, sa, clusterExtensionName) +} + +func CreateClusterRoleAndBindingForSA(ctx context.Context, name string, sa *corev1.ServiceAccount, clusterExtensionName string) error { + cr := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{ + "olm.operatorframework.io", + }, + Resources: []string{ + "clusterextensions/finalizers", + }, + Verbs: []string{ + "update", + }, + ResourceNames: []string{clusterExtensionName}, + }, + { + APIGroups: []string{ + "", + }, + Resources: []string{ + "configmaps", + "secrets", // for helm + "services", + "serviceaccounts", + }, + Verbs: []string{ + "create", + "update", + "delete", + "patch", + "get", + "list", + "watch", + }, + }, + { + APIGroups: []string{ + "apiextensions.k8s.io", + }, + Resources: []string{ + "customresourcedefinitions", + }, + Verbs: []string{ + "create", + "update", + "delete", + "patch", + "get", + "list", + "watch", + }, + }, + { + APIGroups: []string{ + "apps", + }, + Resources: []string{ + "deployments", + }, + Verbs: []string{ + "create", + "update", + "delete", + "patch", + "get", + "list", + "watch", + }, + }, + { + APIGroups: []string{ + "rbac.authorization.k8s.io", + }, + Resources: []string{ + "clusterroles", + "roles", + "clusterrolebindings", + "rolebindings", + }, + Verbs: []string{ + "create", + "update", + "delete", + "patch", + "get", + "list", + "watch", + "bind", + "escalate", + }, + }, + { + APIGroups: []string{ + "networking.k8s.io", + }, + Resources: []string{ + "networkpolicies", + }, + Verbs: []string{ + "get", + "list", + "watch", + "create", + "update", + "patch", + "delete", + }, + }, + }, + } + err := c.Create(ctx, cr) + if err != nil { + return err + } + crb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: sa.Name, + Namespace: sa.Namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: name, + }, + } + err = c.Create(ctx, crb) + if err != nil { + return err + } + + return nil +} + +func TestInit(t *testing.T) (*ocv1.ClusterExtension, *ocv1.ClusterCatalog, *corev1.ServiceAccount, *corev1.Namespace) { + ce, cc := 
TestInitClusterExtensionClusterCatalog(t) + sa, ns := TestInitServiceAccountNamespace(t, ce.Name) + return ce, cc, sa, ns +} + +func TestInitClusterExtensionClusterCatalog(t *testing.T) (*ocv1.ClusterExtension, *ocv1.ClusterCatalog) { + ceName := fmt.Sprintf("clusterextension-%s", rand.String(8)) + + ce := &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{ + Name: ceName, + }, + } + + cc, err := CreateTestCatalog(context.Background(), testCatalogName, os.Getenv(testCatalogRefEnvVar)) + require.NoError(t, err) + + ValidateCatalogUnpack(t) + + return ce, cc +} + +func TestInitServiceAccountNamespace(t *testing.T, clusterExtensionName string) (*corev1.ServiceAccount, *corev1.Namespace) { + var err error + + ns, err := CreateNamespace(context.Background(), clusterExtensionName) + require.NoError(t, err) + + name := types.NamespacedName{ + Name: clusterExtensionName, + Namespace: ns.GetName(), + } + + sa, err := CreateServiceAccount(context.Background(), name, clusterExtensionName) + require.NoError(t, err) + + return sa, ns +} + +func ValidateCatalogUnpack(t *testing.T) { + catalog := &ocv1.ClusterCatalog{} + t.Log("Ensuring ClusterCatalog has Status.Condition of Progressing with a status == True and reason == Succeeded") + require.EventuallyWithT(t, func(ct *assert.CollectT) { + err := c.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) + require.NoError(ct, err) + cond := apimeta.FindStatusCondition(catalog.Status.Conditions, ocv1.TypeProgressing) + require.NotNil(ct, cond) + require.Equal(ct, metav1.ConditionTrue, cond.Status) + require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) + }, pollDuration, pollInterval) + + t.Log("Checking that catalog has the expected metadata label") + require.NotNil(t, catalog.Labels) + require.Contains(t, catalog.Labels, "olm.operatorframework.io/metadata.name") + require.Equal(t, testCatalogName, catalog.Labels["olm.operatorframework.io/metadata.name"]) + + t.Log("Ensuring ClusterCatalog has Status.Condition of Type = Serving with status == True") + require.EventuallyWithT(t, func(ct *assert.CollectT) { + err := c.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) + require.NoError(ct, err) + cond := apimeta.FindStatusCondition(catalog.Status.Conditions, ocv1.TypeServing) + require.NotNil(ct, cond) + require.Equal(ct, metav1.ConditionTrue, cond.Status) + require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) + }, pollDuration, pollInterval) +} + +func EnsureNoExtensionResources(t *testing.T, clusterExtensionName string) { + ls := labels.Set{"olm.operatorframework.io/owner-name": clusterExtensionName} + + // CRDs may take an extra long time to be deleted, and may run into the following error: + // Condition=Terminating Status=True Reason=InstanceDeletionFailed Message="could not list instances: storage is (re)initializing" + t.Logf("By waiting for CustomResourceDefinitions of %q to be deleted", clusterExtensionName) + require.EventuallyWithT(t, func(ct *assert.CollectT) { + list := &apiextensionsv1.CustomResourceDefinitionList{} + err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) + require.NoError(ct, err) + require.Empty(ct, list.Items) + }, 5*pollDuration, pollInterval) + + t.Logf("By waiting for ClusterRoleBindings of %q to be deleted", clusterExtensionName) + require.EventuallyWithT(t, func(ct *assert.CollectT) { + list := &rbacv1.ClusterRoleBindingList{} + err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: 
ls.AsSelector()}) + require.NoError(ct, err) + require.Empty(ct, list.Items) + }, 2*pollDuration, pollInterval) + + t.Logf("By waiting for ClusterRoles of %q to be deleted", clusterExtensionName) + require.EventuallyWithT(t, func(ct *assert.CollectT) { + list := &rbacv1.ClusterRoleList{} + err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) + require.NoError(ct, err) + require.Empty(ct, list.Items) + }, 2*pollDuration, pollInterval) +} + +func TestCleanup(t *testing.T, cat *ocv1.ClusterCatalog, clusterExtension *ocv1.ClusterExtension, sa *corev1.ServiceAccount, ns *corev1.Namespace) { + if cat != nil { + t.Logf("By deleting ClusterCatalog %q", cat.Name) + require.NoError(t, c.Delete(context.Background(), cat)) + require.Eventually(t, func() bool { + err := c.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) + return errors.IsNotFound(err) + }, pollDuration, pollInterval) + } + + if clusterExtension != nil { + t.Logf("By deleting ClusterExtension %q", clusterExtension.Name) + require.NoError(t, c.Delete(context.Background(), clusterExtension)) + require.Eventually(t, func() bool { + err := c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, &ocv1.ClusterExtension{}) + return errors.IsNotFound(err) + }, pollDuration, pollInterval) + EnsureNoExtensionResources(t, clusterExtension.Name) + } + + if sa != nil { + t.Logf("By deleting ServiceAccount %q", sa.Name) + require.NoError(t, c.Delete(context.Background(), sa)) + require.Eventually(t, func() bool { + err := c.Get(context.Background(), types.NamespacedName{Name: sa.Name, Namespace: sa.Namespace}, &corev1.ServiceAccount{}) + return errors.IsNotFound(err) + }, pollDuration, pollInterval) + } + + if ns != nil { + t.Logf("By deleting Namespace %q", ns.Name) + require.NoError(t, c.Delete(context.Background(), ns)) + require.Eventually(t, func() bool { + err := c.Get(context.Background(), types.NamespacedName{Name: ns.Name}, &corev1.Namespace{}) + return errors.IsNotFound(err) + }, pollDuration, pollInterval) + } +} + +// CreateTestCatalog will create a new catalog on the test cluster, provided +// the context, catalog name, and the image reference. It returns the created catalog +// or an error if any errors occurred while creating the catalog. 
+// Note that catalogd will automatically create the label: +// +// "olm.operatorframework.io/metadata.name": name +func CreateTestCatalog(ctx context.Context, name string, imageRef string) (*ocv1.ClusterCatalog, error) { + catalog := &ocv1.ClusterCatalog{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: ocv1.ClusterCatalogSpec{ + Source: ocv1.CatalogSource{ + Type: ocv1.SourceTypeImage, + Image: &ocv1.ImageSource{ + Ref: imageRef, + PollIntervalMinutes: ptr.To(1), + }, + }, + }, + } + + err := c.Create(ctx, catalog) + return catalog, err +} + +func init() { + cfg = ctrl.GetConfigOrDie() + + var err error + utilruntime.Must(apiextensionsv1.AddToScheme(scheme.Scheme)) + c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + utilruntime.Must(err) +} diff --git a/testdata/images/bundles/test-operator/v1.2.0/manifests/bundle.configmap.yaml b/testdata/images/bundles/test-operator/v1.2.0/manifests/bundle.configmap.yaml new file mode 100644 index 0000000000..0d696a6d4f --- /dev/null +++ b/testdata/images/bundles/test-operator/v1.2.0/manifests/bundle.configmap.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-configmap + annotations: + shouldNotTemplate: > + The namespace is {{ $labels.namespace }}. The templated + $labels.namespace is NOT expected to be processed by OLM's + rendering engine for registry+v1 bundles. +data: + version: "v1.2.0" + name: "test-configmap" diff --git a/testdata/images/bundles/test-operator/v1.2.0/manifests/olm.operatorframework.com_olme2etest.yaml b/testdata/images/bundles/test-operator/v1.2.0/manifests/olm.operatorframework.com_olme2etest.yaml new file mode 100644 index 0000000000..fcfd4aeafe --- /dev/null +++ b/testdata/images/bundles/test-operator/v1.2.0/manifests/olm.operatorframework.com_olme2etest.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: olme2etests.olm.operatorframework.io +spec: + group: olm.operatorframework.io + names: + kind: OLME2ETest + listKind: OLME2ETestList + plural: olme2etests + singular: olme2etest + scope: Cluster + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + testField: + type: string diff --git a/testdata/images/bundles/test-operator/v1.2.0/manifests/testoperator.clusterserviceversion.yaml b/testdata/images/bundles/test-operator/v1.2.0/manifests/testoperator.clusterserviceversion.yaml new file mode 100644 index 0000000000..db7cdb6358 --- /dev/null +++ b/testdata/images/bundles/test-operator/v1.2.0/manifests/testoperator.clusterserviceversion.yaml @@ -0,0 +1,151 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: |- + [ + { + "apiVersion": "olme2etests.olm.operatorframework.io/v1", + "kind": "OLME2ETests", + "metadata": { + "labels": { + "app.kubernetes.io/managed-by": "kustomize", + "app.kubernetes.io/name": "test" + }, + "name": "test-sample" + }, + "spec": null + } + ] + capabilities: Basic Install + createdAt: "2024-10-24T19:21:40Z" + operators.operatorframework.io/builder: operator-sdk-v1.34.1 + operators.operatorframework.io/project_layout: go.kubebuilder.io/v4 + name: testoperator.v1.2.0 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - description: Configures subsections of Alertmanager configuration specific to each namespace + 
displayName: OLME2ETest + kind: OLME2ETest + name: olme2etests.olm.operatorframework.io + version: v1 + description: OLM E2E Testing Operator + displayName: test-operator + icon: + - base64data: "" + mediatype: "" + install: + spec: + deployments: + - label: + app.kubernetes.io/component: controller + app.kubernetes.io/name: test-operator + app.kubernetes.io/version: 1.2.0 + name: test-operator + spec: + replicas: 1 + selector: + matchLabels: + app: olme2etest + template: + metadata: + labels: + app: olme2etest + spec: + terminationGracePeriodSeconds: 0 + containers: + - name: busybox + image: busybox:1.37 + command: + - 'sleep' + - '1000' + securityContext: + runAsUser: 1000 + runAsNonRoot: true + serviceAccountName: simple-bundle-manager + clusterPermissions: + - rules: + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + serviceAccountName: simple-bundle-manager + permissions: + - rules: + - apiGroups: + - "" + resources: + - configmaps + - serviceaccounts + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - create + - update + - delete + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + serviceAccountName: simple-bundle-manager + strategy: deployment + installModes: + - supported: false + type: OwnNamespace + - supported: true + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - registry + links: + - name: simple-bundle + url: https://simple-bundle.domain + maintainers: + - email: main#simple-bundle.domain + name: Simple Bundle + maturity: beta + provider: + name: Simple Bundle + url: https://simple-bundle.domain + version: 1.2.0 diff --git a/testdata/images/bundles/test-operator/v1.2.0/manifests/testoperator.networkpolicy.yaml b/testdata/images/bundles/test-operator/v1.2.0/manifests/testoperator.networkpolicy.yaml new file mode 100644 index 0000000000..d87648e6f3 --- /dev/null +++ b/testdata/images/bundles/test-operator/v1.2.0/manifests/testoperator.networkpolicy.yaml @@ -0,0 +1,8 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: test-operator-network-policy +spec: + podSelector: {} + policyTypes: + - Ingress diff --git a/testdata/images/bundles/test-operator/v1.2.0/metadata/annotations.yaml b/testdata/images/bundles/test-operator/v1.2.0/metadata/annotations.yaml new file mode 100644 index 0000000000..404f0f4a34 --- /dev/null +++ b/testdata/images/bundles/test-operator/v1.2.0/metadata/annotations.yaml @@ -0,0 +1,10 @@ +annotations: + # Core bundle annotations. 
+ operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: test + operators.operatorframework.io.bundle.channels.v1: beta + operators.operatorframework.io.metrics.builder: operator-sdk-v1.28.0 + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: unknown diff --git a/testdata/images/catalogs/test-catalog/v1/configs/catalog.yaml b/testdata/images/catalogs/test-catalog/v1/configs/catalog.yaml index 69553dbcca..2fead8261a 100644 --- a/testdata/images/catalogs/test-catalog/v1/configs/catalog.yaml +++ b/testdata/images/catalogs/test-catalog/v1/configs/catalog.yaml @@ -42,7 +42,7 @@ properties: schema: olm.bundle name: test-operator.1.2.0 package: test -image: docker-registry.operator-controller-e2e.svc.cluster.local:5000/bundles/registry-v1/test-operator:v1.0.0 +image: docker-registry.operator-controller-e2e.svc.cluster.local:5000/bundles/registry-v1/test-operator:v1.2.0 properties: - type: olm.package value: