diff --git a/Makefile b/Makefile index 2010d453f..cc1e53b5a 100644 --- a/Makefile +++ b/Makefile @@ -186,7 +186,7 @@ test: manifests generate fmt lint test-unit test-e2e #HELP Run all tests. .PHONY: e2e e2e: #EXHELP Run the e2e tests. - go test -count=1 -v -run "$(if $(TEST_FILTER),$(TEST_FILTER),.)" ./test/e2e/... + go test -count=1 -v ./test/e2e/... E2E_REGISTRY_NAME := docker-registry E2E_REGISTRY_NAMESPACE := operator-controller-e2e @@ -202,10 +202,7 @@ test-ext-dev-e2e: $(OPERATOR_SDK) $(KUSTOMIZE) $(KIND) #HELP Run extension creat test/extension-developer-e2e/setup.sh $(OPERATOR_SDK) $(CONTAINER_RUNTIME) $(KUSTOMIZE) $(KIND) $(KIND_CLUSTER_NAME) $(E2E_REGISTRY_NAMESPACE) go test -count=1 -v ./test/extension-developer-e2e/... -# Define TEST_PKGS to be either user-specified or a default set of packages: -ifeq ($(origin TEST_PKGS), undefined) -TEST_PKGS := $(shell go list ./... | grep -v /test/) -endif +UNIT_TEST_DIRS := $(shell go list ./... | grep -v /test/) COVERAGE_UNIT_DIR := $(ROOT_DIR)/coverage/unit .PHONY: envtest-k8s-bins #HELP Uses setup-envtest to download and install the binaries required to run ENVTEST-test based locally at the project/bin directory. 
@@ -221,8 +218,7 @@ test-unit: $(SETUP_ENVTEST) envtest-k8s-bins #HELP Run the unit tests -tags '$(GO_BUILD_TAGS)' \ -cover -coverprofile ${ROOT_DIR}/coverage/unit.out \ -count=1 -race -short \ - -run "$(if $(TEST_FILTER),$(TEST_FILTER),.)" \ - $(TEST_PKGS) \ + $(UNIT_TEST_DIRS) \ -test.gocoverdir=$(COVERAGE_UNIT_DIR) .PHONY: image-registry diff --git a/test/e2e/cluster_extension_install_test.go b/test/e2e/cluster_extension_install_test.go index 7c57a078c..a01124bfb 100644 --- a/test/e2e/cluster_extension_install_test.go +++ b/test/e2e/cluster_extension_install_test.go @@ -38,7 +38,7 @@ func createNamespace(ctx context.Context, name string) (*corev1.Namespace, error Name: name, }, } - err := globalClient.Create(ctx, ns) + err := c.Create(ctx, ns) if err != nil { return nil, err } @@ -52,7 +52,7 @@ func createServiceAccount(ctx context.Context, name types.NamespacedName, cluste Namespace: name.Namespace, }, } - err := globalClient.Create(ctx, sa) + err := c.Create(ctx, sa) if err != nil { return nil, err } @@ -156,7 +156,7 @@ func createClusterRoleAndBindingForSA(ctx context.Context, name string, sa *core }, }, } - err := globalClient.Create(ctx, cr) + err := c.Create(ctx, cr) if err != nil { return err } @@ -177,7 +177,7 @@ func createClusterRoleAndBindingForSA(ctx context.Context, name string, sa *core Name: name, }, } - err = globalClient.Create(ctx, crb) + err = c.Create(ctx, crb) if err != nil { return err } @@ -219,7 +219,7 @@ func validateCatalogUnpack(t *testing.T) { catalog := &ocv1.ClusterCatalog{} t.Log("Ensuring ClusterCatalog has Status.Condition of Progressing with a status == True and reason == Succeeded") require.EventuallyWithT(t, func(ct *assert.CollectT) { - err := globalClient.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) + err := c.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) assert.NoError(ct, err) cond := apimeta.FindStatusCondition(catalog.Status.Conditions, 
ocv1.TypeProgressing) assert.NotNil(ct, cond) @@ -234,7 +234,7 @@ func validateCatalogUnpack(t *testing.T) { t.Log("Ensuring ClusterCatalog has Status.Condition of Type = Serving with status == True") require.EventuallyWithT(t, func(ct *assert.CollectT) { - err := globalClient.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) + err := c.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) assert.NoError(ct, err) cond := apimeta.FindStatusCondition(catalog.Status.Conditions, ocv1.TypeServing) assert.NotNil(ct, cond) @@ -251,7 +251,7 @@ func ensureNoExtensionResources(t *testing.T, clusterExtensionName string) { t.Logf("By waiting for CustomResourceDefinitions of %q to be deleted", clusterExtensionName) require.EventuallyWithT(t, func(ct *assert.CollectT) { list := &apiextensionsv1.CustomResourceDefinitionList{} - err := globalClient.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) + err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) assert.NoError(ct, err) assert.Empty(ct, list.Items) }, 5*pollDuration, pollInterval) @@ -259,7 +259,7 @@ func ensureNoExtensionResources(t *testing.T, clusterExtensionName string) { t.Logf("By waiting for ClusterRoleBindings of %q to be deleted", clusterExtensionName) require.EventuallyWithT(t, func(ct *assert.CollectT) { list := &rbacv1.ClusterRoleBindingList{} - err := globalClient.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) + err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) assert.NoError(ct, err) assert.Empty(ct, list.Items) }, 2*pollDuration, pollInterval) @@ -267,7 +267,7 @@ func ensureNoExtensionResources(t *testing.T, clusterExtensionName string) { t.Logf("By waiting for ClusterRoles of %q to be deleted", clusterExtensionName) require.EventuallyWithT(t, func(ct *assert.CollectT) { list 
:= &rbacv1.ClusterRoleList{} - err := globalClient.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) + err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) assert.NoError(ct, err) assert.Empty(ct, list.Items) }, 2*pollDuration, pollInterval) @@ -275,32 +275,32 @@ func ensureNoExtensionResources(t *testing.T, clusterExtensionName string) { func testCleanup(t *testing.T, cat *ocv1.ClusterCatalog, clusterExtension *ocv1.ClusterExtension, sa *corev1.ServiceAccount, ns *corev1.Namespace) { t.Logf("By deleting ClusterCatalog %q", cat.Name) - require.NoError(t, globalClient.Delete(context.Background(), cat)) + require.NoError(t, c.Delete(context.Background(), cat)) require.Eventually(t, func() bool { - err := globalClient.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) + err := c.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) return errors.IsNotFound(err) }, pollDuration, pollInterval) t.Logf("By deleting ClusterExtension %q", clusterExtension.Name) - require.NoError(t, globalClient.Delete(context.Background(), clusterExtension)) + require.NoError(t, c.Delete(context.Background(), clusterExtension)) require.Eventually(t, func() bool { - err := globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, &ocv1.ClusterExtension{}) + err := c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, &ocv1.ClusterExtension{}) return errors.IsNotFound(err) }, pollDuration, pollInterval) t.Logf("By deleting ServiceAccount %q", sa.Name) - require.NoError(t, globalClient.Delete(context.Background(), sa)) + require.NoError(t, c.Delete(context.Background(), sa)) require.Eventually(t, func() bool { - err := globalClient.Get(context.Background(), types.NamespacedName{Name: sa.Name, Namespace: sa.Namespace}, &corev1.ServiceAccount{}) + err := c.Get(context.Background(), 
types.NamespacedName{Name: sa.Name, Namespace: sa.Namespace}, &corev1.ServiceAccount{}) return errors.IsNotFound(err) }, pollDuration, pollInterval) ensureNoExtensionResources(t, clusterExtension.Name) t.Logf("By deleting Namespace %q", ns.Name) - require.NoError(t, globalClient.Delete(context.Background(), ns)) + require.NoError(t, c.Delete(context.Background(), ns)) require.Eventually(t, func() bool { - err := globalClient.Get(context.Background(), types.NamespacedName{Name: ns.Name}, &corev1.Namespace{}) + err := c.Get(context.Background(), types.NamespacedName{Name: ns.Name}, &corev1.Namespace{}) return errors.IsNotFound(err) }, pollDuration, pollInterval) } @@ -330,7 +330,7 @@ func TestClusterExtensionInstallRegistry(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) + defer utils.CollectTestArtifacts(t, artifactName, c, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -349,16 +349,16 @@ func TestClusterExtensionInstallRegistry(t *testing.T) { } t.Log("It resolves the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) + require.NoError(t, c.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) }, pollDuration, pollInterval) t.Log("By eventually reporting progressing as True") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, 
globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -368,7 +368,7 @@ func TestClusterExtensionInstallRegistry(t *testing.T) { t.Log("By eventually installing the package successfully") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -390,7 +390,7 @@ func TestClusterExtensionInstallRegistryDynamic(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) + defer utils.CollectTestArtifacts(t, artifactName, c, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -419,15 +419,15 @@ prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000" location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"`, }, } - require.NoError(t, globalClient.Update(context.Background(), &cm)) + require.NoError(t, c.Update(context.Background(), &cm)) t.Log("It resolves the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) + require.NoError(t, 
c.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) }, 2*time.Minute, pollInterval) // Give the check 2 minutes instead of the typical 1 for the pod's @@ -436,7 +436,7 @@ location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"`, // ConfigMap cache TTL of 1 minute = 2 minutes t.Log("By eventually reporting progressing as True") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -446,7 +446,7 @@ location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"`, t.Log("By eventually installing the package successfully") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -465,11 +465,11 @@ func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) { require.NoError(t, err) defer testCleanup(t, extensionCatalog, 
clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) + defer utils.CollectTestArtifacts(t, artifactName, c, cfg) defer func(cat *ocv1.ClusterCatalog) { - require.NoError(t, globalClient.Delete(context.Background(), cat)) + require.NoError(t, c.Delete(context.Background(), cat)) require.Eventually(t, func() bool { - err := globalClient.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) + err := c.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) return errors.IsNotFound(err) }, pollDuration, pollInterval) }(extraCatalog) @@ -488,16 +488,16 @@ func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) { } t.Log("It resolves to multiple bundle paths") t.Log("By creating the ClusterExtension resource") - require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) + require.NoError(t, c.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a failed resolution with multiple bundles") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) }, pollDuration, pollInterval) t.Log("By eventually reporting Progressing == True and Reason Retrying") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -513,7 +513,7 @@ func 
TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) + defer utils.CollectTestArtifacts(t, artifactName, c, cfg) t.Log("By creating an ClusterExtension at a specified version") clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -530,10 +530,10 @@ func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) { Name: sa.Name, }, } - require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) + require.NoError(t, c.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful installation") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) assert.Equal(ct, &ocv1.ClusterExtensionInstallStatus{Bundle: ocv1.BundleMetadata{ Name: "test-operator.1.0.0", @@ -553,15 +553,15 @@ func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) { t.Log("By updating the ClusterExtension resource to a non-successor version") // 1.2.0 does not replace/skip/skipRange 1.0.0. 
clusterExtension.Spec.Source.Catalog.Version = "1.2.0" - require.NoError(t, globalClient.Update(context.Background(), clusterExtension)) + require.NoError(t, c.Update(context.Background(), clusterExtension)) t.Log("By eventually reporting an unsatisfiable resolution") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) }, pollDuration, pollInterval) t.Log("By eventually reporting Progressing == True and Reason Retrying") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, ocv1.ReasonRetrying, cond.Reason) @@ -576,7 +576,7 @@ func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) + defer utils.CollectTestArtifacts(t, artifactName, c, cfg) t.Log("By creating an ClusterExtension at a specified version") clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -592,10 +592,10 @@ func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) { Name: sa.Name, }, } - require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) + require.NoError(t, c.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution") require.EventuallyWithT(t, func(ct 
*assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -608,10 +608,10 @@ func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) { // 1.2.0 does not replace/skip/skipRange 1.0.0. clusterExtension.Spec.Source.Catalog.Version = "1.2.0" clusterExtension.Spec.Source.Catalog.UpgradeConstraintPolicy = ocv1.UpgradeConstraintPolicySelfCertified - require.NoError(t, globalClient.Update(context.Background(), clusterExtension)) + require.NoError(t, c.Update(context.Background(), clusterExtension)) t.Log("By eventually reporting a satisfiable resolution") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -625,7 +625,7 @@ func TestClusterExtensionInstallSuccessorVersion(t *testing.T) { t.Log("When resolving upgrade edges") clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) + defer utils.CollectTestArtifacts(t, artifactName, c, cfg) t.Log("By creating an ClusterExtension at a specified version") clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -641,10 +641,10 @@ func 
TestClusterExtensionInstallSuccessorVersion(t *testing.T) { Name: sa.Name, }, } - require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) + require.NoError(t, c.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -656,10 +656,10 @@ func TestClusterExtensionInstallSuccessorVersion(t *testing.T) { t.Log("By updating the ClusterExtension resource by skipping versions") // 1.0.1 replaces 1.0.0 in the test catalog clusterExtension.Spec.Source.Catalog.Version = "1.0.1" - require.NoError(t, globalClient.Update(context.Background(), clusterExtension)) + require.NoError(t, c.Update(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -673,7 +673,7 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { t.Log("It resolves again when a catalog is patched with new ImageRef") clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, 
clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) + defer utils.CollectTestArtifacts(t, artifactName, c, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -698,11 +698,11 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { } t.Log("It resolves the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) + require.NoError(t, c.Create(context.Background(), clusterExtension)) t.Log("By reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -716,7 +716,7 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { err := patchTestCatalog(context.Background(), testCatalogName, updatedCatalogImage) require.NoError(t, err) require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -726,7 +726,7 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { t.Log("By eventually reporting a successful resolution and 
bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -760,7 +760,7 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { sa, err := createServiceAccount(context.Background(), types.NamespacedName{Name: clusterExtensionName, Namespace: ns.Name}, clusterExtensionName) require.NoError(t, err) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) + defer utils.CollectTestArtifacts(t, artifactName, c, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -779,11 +779,11 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { } t.Log("It resolves the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) + require.NoError(t, c.Create(context.Background(), clusterExtension)) t.Log("By reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -797,7 +797,7 @@ func 
TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { err = crane.Tag(v2Image, latestImageTag, crane.Insecure) require.NoError(t, err) require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -807,7 +807,7 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -821,7 +821,7 @@ func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T t.Log("It resolves again when managed content is changed") clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) + defer utils.CollectTestArtifacts(t, artifactName, c, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -840,11 +840,11 @@ func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T } t.Log("It installs the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - 
require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) + require.NoError(t, c.Create(context.Background(), clusterExtension)) t.Log("By reporting a successful installation") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -860,11 +860,11 @@ func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T Namespace: clusterExtension.Spec.Namespace, }, } - require.NoError(t, globalClient.Delete(context.Background(), testConfigMap)) + require.NoError(t, c.Delete(context.Background(), testConfigMap)) t.Log("By eventually re-creating the managed resource") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: testConfigMap.Name, Namespace: testConfigMap.Namespace}, testConfigMap)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: testConfigMap.Name, Namespace: testConfigMap.Namespace}, testConfigMap)) }, pollDuration, pollInterval) } @@ -881,10 +881,10 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes Namespace: ns.Name, }, } - err := globalClient.Create(context.Background(), sa) + err := c.Create(context.Background(), sa) require.NoError(t, err) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) + defer utils.CollectTestArtifacts(t, artifactName, c, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -903,16 +903,16 @@ func 
TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes } t.Log("It resolves the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) + require.NoError(t, c.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) }, pollDuration, pollInterval) t.Log("By eventually reporting Progressing == True with Reason Retrying") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -922,7 +922,7 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes t.Log("By eventually failing to install the package successfully due to insufficient ServiceAccount permissions") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionFalse, 
cond.Status) @@ -940,7 +940,7 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes // after creating and binding the needed permissions to the ServiceAccount. t.Log("By eventually installing the package successfully") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -952,7 +952,7 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes t.Log("By eventually reporting Progressing == True with Reason Success") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 7441d1f0b..354ef75f4 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -18,8 +18,8 @@ import ( ) var ( - globalConfig *rest.Config - globalClient client.Client + cfg *rest.Config + c client.Client ) const ( @@ -29,11 +29,11 @@ const ( ) func TestMain(m *testing.M) { - globalConfig = ctrl.GetConfigOrDie() + cfg = ctrl.GetConfigOrDie() var err error utilruntime.Must(apiextensionsv1.AddToScheme(scheme.Scheme)) - globalClient, err = client.New(globalConfig, client.Options{Scheme: scheme.Scheme}) + c, err = 
client.New(cfg, client.Options{Scheme: scheme.Scheme}) utilruntime.Must(err) os.Exit(m.Run()) @@ -61,7 +61,7 @@ func createTestCatalog(ctx context.Context, name string, imageRef string) (*ocv1 }, } - err := globalClient.Create(ctx, catalog) + err := c.Create(ctx, catalog) return catalog, err } @@ -71,7 +71,7 @@ func createTestCatalog(ctx context.Context, name string, imageRef string) (*ocv1 func patchTestCatalog(ctx context.Context, name string, newImageRef string) error { // Fetch the existing ClusterCatalog catalog := &ocv1.ClusterCatalog{} - err := globalClient.Get(ctx, client.ObjectKey{Name: name}, catalog) + err := c.Get(ctx, client.ObjectKey{Name: name}, catalog) if err != nil { return err } @@ -80,7 +80,7 @@ func patchTestCatalog(ctx context.Context, name string, newImageRef string) erro catalog.Spec.Source.Image.Ref = newImageRef // Patch the ClusterCatalog - err = globalClient.Update(ctx, catalog) + err = c.Update(ctx, catalog) if err != nil { return err } diff --git a/test/e2e/metrics_test.go b/test/e2e/metrics_test.go index 3d15035b8..a1f6c4a2c 100644 --- a/test/e2e/metrics_test.go +++ b/test/e2e/metrics_test.go @@ -16,33 +16,23 @@ package e2e import ( "bytes" "context" - "errors" + "fmt" + "io" + "os/exec" "strings" "testing" "time" "github.com/stretchr/testify/require" - authenticationv1 "k8s.io/api/authentication/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/remotecommand" - "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/client/config" + + "github.com/operator-framework/operator-controller/test/utils" ) // TestOperatorControllerMetricsExportedEndpoint verifies that the metrics endpoint for the operator controller func TestOperatorControllerMetricsExportedEndpoint(t 
*testing.T) { - kubeClient, restConfig := findK8sClient(t) - mtc := NewMetricsTestConfig( - t, - kubeClient, - restConfig, + client := utils.FindK8sClient(t) + config := NewMetricsTestConfig( + t, client, "control-plane=operator-controller-controller-manager", "operator-controller-metrics-reader", "operator-controller-metrics-binding", @@ -51,16 +41,14 @@ func TestOperatorControllerMetricsExportedEndpoint(t *testing.T) { "https://operator-controller-service.NAMESPACE.svc.cluster.local:8443/metrics", ) - mtc.run() + config.run() } // TestCatalogdMetricsExportedEndpoint verifies that the metrics endpoint for catalogd func TestCatalogdMetricsExportedEndpoint(t *testing.T) { - kubeClient, restConfig := findK8sClient(t) - mtc := NewMetricsTestConfig( - t, - kubeClient, - restConfig, + client := utils.FindK8sClient(t) + config := NewMetricsTestConfig( + t, client, "control-plane=catalogd-controller-manager", "catalogd-metrics-reader", "catalogd-metrics-binding", @@ -69,25 +57,13 @@ func TestCatalogdMetricsExportedEndpoint(t *testing.T) { "https://catalogd-service.NAMESPACE.svc.cluster.local:7443/metrics", ) - mtc.run() -} - -func findK8sClient(t *testing.T) (kubernetes.Interface, *rest.Config) { - cfg, err := config.GetConfig() - require.NoError(t, err, "Failed to get Kubernetes config") - - clientset, err := kubernetes.NewForConfig(cfg) - require.NoError(t, err, "Failed to create client from config") - - t.Log("Successfully created Kubernetes client via controller-runtime config") - return clientset, cfg + config.run() } // MetricsTestConfig holds the necessary configurations for testing metrics endpoints. type MetricsTestConfig struct { t *testing.T - kubeClient kubernetes.Interface - restConfig *rest.Config + client string namespace string clusterRole string clusterBinding string @@ -97,27 +73,13 @@ type MetricsTestConfig struct { } // NewMetricsTestConfig initializes a new MetricsTestConfig. 
-func NewMetricsTestConfig( - t *testing.T, - kubeClient kubernetes.Interface, - restConfig *rest.Config, - selector string, - clusterRole string, - clusterBinding string, - serviceAccount string, - curlPodName string, - metricsURL string, -) *MetricsTestConfig { - // Discover which namespace the relevant Pod is running in - namespace := getComponentNamespace(t, kubeClient, selector) - - // Replace the placeholder in the metrics URL +func NewMetricsTestConfig(t *testing.T, client, selector, clusterRole, clusterBinding, serviceAccount, curlPodName, metricsURL string) *MetricsTestConfig { + namespace := getComponentNamespace(t, client, selector) metricsURL = strings.ReplaceAll(metricsURL, "NAMESPACE", namespace) return &MetricsTestConfig{ t: t, - kubeClient: kubeClient, - restConfig: restConfig, + client: client, namespace: namespace, clusterRole: clusterRole, clusterBinding: clusterBinding, @@ -127,252 +89,134 @@ func NewMetricsTestConfig( } } -// run executes the entire test flow +// run will execute all steps of those tests func (c *MetricsTestConfig) run() { - ctx := context.Background() - defer c.cleanup(ctx) - c.createMetricsClusterRoleBinding(ctx) - token := c.getServiceAccountToken(ctx) - c.createCurlMetricsPod(ctx) - c.waitForPodReady(ctx) - // Exec `curl` in the Pod to validate the metrics - c.validateMetricsEndpoint(ctx, token) + c.createMetricsClusterRoleBinding() + token := c.getServiceAccountToken() + c.createCurlMetricsPod() + c.validate(token) + defer c.cleanup() } -// createMetricsClusterRoleBinding to bind the cluster role so metrics are accessible -func (c *MetricsTestConfig) createMetricsClusterRoleBinding(ctx context.Context) { - c.t.Logf("Creating ClusterRoleBinding %q in namespace %q", c.clusterBinding, c.namespace) - - crb := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.clusterBinding, - }, - Subjects: []rbacv1.Subject{ - { - Kind: rbacv1.ServiceAccountKind, - Name: c.serviceAccount, - Namespace: c.namespace, - }, - 
}, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: c.clusterRole, - }, - } - - _, err := c.kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{}) - require.NoError(c.t, err, "Error creating ClusterRoleBinding") +// createMetricsClusterRoleBinding creates the ClusterRoleBinding that grants access to the metrics endpoint +func (c *MetricsTestConfig) createMetricsClusterRoleBinding() { + c.t.Logf("Creating ClusterRoleBinding %s in namespace %s", c.clusterBinding, c.namespace) + cmd := exec.Command(c.client, "create", "clusterrolebinding", c.clusterBinding, + "--clusterrole="+c.clusterRole, + "--serviceaccount="+c.namespace+":"+c.serviceAccount) + output, err := cmd.CombinedOutput() + require.NoError(c.t, err, "Error creating ClusterRoleBinding: %s", string(output)) } -// getServiceAccountToken creates a TokenRequest for the service account -func (c *MetricsTestConfig) getServiceAccountToken(ctx context.Context) string { - c.t.Logf("Generating ServiceAccount token in namespace %q", c.namespace) - - tokenRequest := &authenticationv1.TokenRequest{ - Spec: authenticationv1.TokenRequestSpec{ - Audiences: []string{"https://kubernetes.default.svc.cluster.local"}, - ExpirationSeconds: nil, - }, - } - - tr, err := c.kubeClient.CoreV1(). - ServiceAccounts(c.namespace). 
- CreateToken(ctx, c.serviceAccount, tokenRequest, metav1.CreateOptions{}) - require.NoError(c.t, err, "Error requesting token for SA %q", c.serviceAccount) - - token := tr.Status.Token - require.NotEmpty(c.t, token, "ServiceAccount token was empty") - return token +// getServiceAccountToken returns the token required to access the metrics endpoint +func (c *MetricsTestConfig) getServiceAccountToken() string { + c.t.Logf("Generating ServiceAccount token at namespace %s", c.namespace) + cmd := exec.Command(c.client, "create", "token", c.serviceAccount, "-n", c.namespace) + tokenOutput, tokenCombinedOutput, err := stdoutAndCombined(cmd) + require.NoError(c.t, err, "Error creating token: %s", string(tokenCombinedOutput)) + return string(bytes.TrimSpace(tokenOutput)) } -// createCurlMetricsPod spawns a pod running `curlimages/curl` to check metrics -func (c *MetricsTestConfig) createCurlMetricsPod(ctx context.Context) { +// createCurlMetricsPod creates the Pod with the curl image used to check that the metrics endpoint works +func (c *MetricsTestConfig) createCurlMetricsPod() { c.t.Logf("Creating curl pod (%s/%s) to validate the metrics endpoint", c.namespace, c.curlPodName) - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.curlPodName, - Namespace: c.namespace, - }, - Spec: corev1.PodSpec{ - ServiceAccountName: c.serviceAccount, - TerminationGracePeriodSeconds: ptr.To(int64(0)), - Containers: []corev1.Container{ - { - Name: "curl", - Image: "curlimages/curl", - Command: []string{"sh", "-c", "sleep 3600"}, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: ptr.To(false), - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{"ALL"}, - }, - RunAsNonRoot: ptr.To(true), - RunAsUser: ptr.To(int64(1000)), - SeccompProfile: &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - }, - }, - }, - }, - RestartPolicy: corev1.RestartPolicyNever, - }, - } - - _, err := c.kubeClient.CoreV1().Pods(c.namespace).Create(ctx, pod, 
metav1.CreateOptions{}) - require.NoError(c.t, err, "Error creating curl pod") -} - -// waitForPodReady polls until the Pod is in Ready condition -func (c *MetricsTestConfig) waitForPodReady(ctx context.Context) { - c.t.Log("Waiting for the curl pod to be ready") - err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) { - pod, err := c.kubeClient.CoreV1().Pods(c.namespace).Get(ctx, c.curlPodName, metav1.GetOptions{}) - if err != nil { - return false, err - } - for _, cond := range pod.Status.Conditions { - if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue { - return true, nil + cmd := exec.Command(c.client, "run", c.curlPodName, + "--image=curlimages/curl", "-n", c.namespace, + "--restart=Never", + "--overrides", `{ + "spec": { + "terminationGracePeriodSeconds": 0, + "containers": [{ + "name": "curl", + "image": "curlimages/curl", + "command": ["sh", "-c", "sleep 3600"], + "securityContext": { + "allowPrivilegeEscalation": false, + "capabilities": {"drop": ["ALL"]}, + "runAsNonRoot": true, + "runAsUser": 1000, + "seccompProfile": {"type": "RuntimeDefault"} + } + }], + "serviceAccountName": "`+c.serviceAccount+`" } - } - return false, nil - }) - if errors.Is(err, context.DeadlineExceeded) { - c.t.Fatal("Timed out waiting for the curl pod to become Ready") - } - require.NoError(c.t, err, "Error waiting for curl pod to become Ready") + }`) + output, err := cmd.CombinedOutput() + require.NoError(c.t, err, "Error creating curl pod: %s", string(output)) } -// validateMetricsEndpoint performs `kubectl exec ... 
curl ` logic -func (c *MetricsTestConfig) validateMetricsEndpoint(ctx context.Context, token string) { - c.t.Log("Validating the metrics endpoint via pod exec") - - // The command to run inside the container - cmd := []string{ - "curl", "-v", "-k", - "-H", "Authorization: Bearer " + token, - c.metricsURL, - } - - // Construct the request to exec into the pod - req := c.kubeClient.CoreV1().RESTClient(). - Post(). - Resource("pods"). - Namespace(c.namespace). - Name(c.curlPodName). - SubResource("exec"). - VersionedParams(&corev1.PodExecOptions{ - Container: "curl", - Command: cmd, - Stdin: false, - Stdout: true, - Stderr: true, - TTY: false, - }, scheme.ParameterCodec) - - // Create an SPDY executor - executor, err := remotecommand.NewSPDYExecutor(c.restConfig, "POST", req.URL()) - require.NoError(c.t, err, "Error creating SPDY executor to exec in pod") - - var stdout, stderr bytes.Buffer - streamOpts := remotecommand.StreamOptions{ - Stdin: nil, - Stdout: &stdout, - Stderr: &stderr, - Tty: false, - } - - err = executor.StreamWithContext(ctx, streamOpts) - require.NoError(c.t, err, "Error streaming exec request: %v", stderr.String()) - - // Combine stdout + stderr - combined := stdout.String() + stderr.String() - require.Contains(c.t, combined, "200 OK", "Metrics endpoint did not return 200 OK") +// validate verifies that the metrics endpoint is accessible +func (c *MetricsTestConfig) validate(token string) { + c.t.Log("Waiting for the curl pod to be ready") + waitCmd := exec.Command(c.client, "wait", "--for=condition=Ready", "pod", c.curlPodName, "-n", c.namespace, "--timeout=60s") + waitOutput, waitErr := waitCmd.CombinedOutput() + require.NoError(c.t, waitErr, "Error waiting for curl pod to be ready: %s", string(waitOutput)) + + c.t.Log("Validating the metrics endpoint") + curlCmd := exec.Command(c.client, "exec", c.curlPodName, "-n", c.namespace, "--", + "curl", "-v", "-k", "-H", "Authorization: Bearer "+token, c.metricsURL) + output, err := 
curlCmd.CombinedOutput() + require.NoError(c.t, err, "Error calling metrics endpoint: %s", string(output)) + require.Contains(c.t, string(output), "200 OK", "Metrics endpoint did not return 200 OK") } -// cleanup deletes the test resources -func (c *MetricsTestConfig) cleanup(ctx context.Context) { +// cleanup removes the created resources. Uses a context with timeout to prevent hangs. +func (c *MetricsTestConfig) cleanup() { c.t.Log("Cleaning up resources") - policy := metav1.DeletePropagationForeground + _ = exec.Command(c.client, "delete", "clusterrolebinding", c.clusterBinding, "--ignore-not-found=true", "--force").Run() + _ = exec.Command(c.client, "delete", "pod", c.curlPodName, "-n", c.namespace, "--ignore-not-found=true", "--force").Run() - // Delete the ClusterRoleBinding - _ = c.kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, c.clusterBinding, metav1.DeleteOptions{ - PropagationPolicy: &policy, - }) - waitForClusterRoleBindingDeletion(ctx, c.t, c.kubeClient, c.clusterBinding) + // Create a context with a 60-second timeout. + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() - // "Force" delete the Pod by setting grace period to 0 - gracePeriod := int64(0) - _ = c.kubeClient.CoreV1().Pods(c.namespace).Delete(ctx, c.curlPodName, metav1.DeleteOptions{ - GracePeriodSeconds: &gracePeriod, - PropagationPolicy: &policy, - }) - waitForPodDeletion(ctx, c.t, c.kubeClient, c.namespace, c.curlPodName) -} + // Wait for the ClusterRoleBinding to be deleted. 
+ if err := waitForDeletion(ctx, c.client, "clusterrolebinding", c.clusterBinding); err != nil { + c.t.Logf("Error waiting for clusterrolebinding deletion: %v", err) + } else { + c.t.Log("ClusterRoleBinding deleted") + } -// waitForClusterRoleBindingDeletion polls until the named ClusterRoleBinding no longer exists -func waitForClusterRoleBindingDeletion(ctx context.Context, t *testing.T, kubeClient kubernetes.Interface, name string) { - err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) { - _, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, name, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - return true, nil - } - return false, err - } - return false, nil - }) - if err != nil { - if errors.Is(err, context.DeadlineExceeded) { - t.Fatalf("Timed out waiting for ClusterRoleBinding %q to be deleted", name) - } - t.Logf("Error waiting for ClusterRoleBinding %q deletion: %v", name, err) + // Wait for the Pod to be deleted. + if err := waitForDeletion(ctx, c.client, "pod", c.curlPodName, "-n", c.namespace); err != nil { + c.t.Logf("Error waiting for pod deletion: %v", err) } else { - t.Logf("ClusterRoleBinding %q deleted", name) + c.t.Log("Pod deleted") } } -// waitForPodDeletion polls until the named Pod no longer exists -func waitForPodDeletion(ctx context.Context, t *testing.T, kubeClient kubernetes.Interface, namespace, name string) { - err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 90*time.Second, false, func(ctx context.Context) (bool, error) { - pod, getErr := kubeClient.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) - if getErr != nil { - if apierrors.IsNotFound(getErr) { - return true, nil - } - return false, getErr - } - // Some extra log info if the Pod is still around - t.Logf("Pod %q still present, phase=%q, deleting... 
(Timestamp=%v)", - name, pod.Status.Phase, pod.DeletionTimestamp) - return false, nil - }) +// waitForDeletion uses "kubectl wait" to block until the specified resource is deleted +// or until the 60-second timeout is reached. +func waitForDeletion(ctx context.Context, client, resourceType, resourceName string, extraArgs ...string) error { + args := []string{"wait", "--for=delete", resourceType, resourceName} + args = append(args, extraArgs...) + args = append(args, "--timeout=60s") + cmd := exec.CommandContext(ctx, client, args...) + output, err := cmd.CombinedOutput() if err != nil { - if errors.Is(err, context.DeadlineExceeded) { - t.Fatalf("Timed out waiting for Pod %q to be deleted", name) - } - t.Logf("Error waiting for Pod %q deletion: %v", name, err) - } else { - t.Logf("Pod %q deleted", name) + return fmt.Errorf("error waiting for deletion of %s %s: %v, output: %s", resourceType, resourceName, err, string(output)) } + return nil } -// getComponentNamespace identifies which Namespace is running a Pod that matches `selector` -func getComponentNamespace(t *testing.T, kubeClient kubernetes.Interface, selector string) string { - t.Logf("Listing pods for selector %q to discover namespace", selector) - ctx := context.Background() - - pods, err := kubeClient.CoreV1().Pods("").List(ctx, metav1.ListOptions{ - LabelSelector: selector, - }) - require.NoError(t, err, "Error listing pods for selector %q", selector) - require.NotEmpty(t, pods.Items, "No pods found for selector %q", selector) +// getComponentNamespace returns the namespace where operator-controller or catalogd is running +func getComponentNamespace(t *testing.T, client, selector string) string { + cmd := exec.Command(client, "get", "pods", "--all-namespaces", "--selector="+selector, "--output=jsonpath={.items[0].metadata.namespace}") + output, err := cmd.CombinedOutput() + require.NoError(t, err, "Error determining namespace: %s", string(output)) - namespace := pods.Items[0].Namespace + namespace := 
string(bytes.TrimSpace(output)) if namespace == "" { - t.Fatalf("No namespace found for selector %q", selector) + t.Fatal("No namespace found for selector " + selector) } return namespace } + +func stdoutAndCombined(cmd *exec.Cmd) ([]byte, []byte, error) { + var outOnly, outAndErr bytes.Buffer + allWriter := io.MultiWriter(&outOnly, &outAndErr) + cmd.Stdout = allWriter + cmd.Stderr = &outAndErr + err := cmd.Run() + return outOnly.Bytes(), outAndErr.Bytes(), err +} diff --git a/test/utils/utils.go b/test/utils/utils.go new file mode 100644 index 000000000..1acc55fe6 --- /dev/null +++ b/test/utils/utils.go @@ -0,0 +1,69 @@ +package utils + +import ( + "context" + "fmt" + "io" + "net/url" + "os/exec" + "strings" + "testing" + + "k8s.io/client-go/kubernetes" + + ocv1 "github.com/operator-framework/operator-controller/api/v1" +) + +// FindK8sClient returns the first available Kubernetes CLI client from the system, +// It checks for the existence of each client by running `version --client`. +// If no suitable client is found, the function terminates the test with a failure. +func FindK8sClient(t *testing.T) string { + t.Logf("Finding kubectl client") + clients := []string{"kubectl", "oc"} + for _, c := range clients { + // Would prefer to use `command -v`, but even that may not be installed! 
+ if err := exec.Command(c, "version", "--client").Run(); err == nil { + t.Logf("Using %q as k8s client", c) + return c + } + } + t.Fatal("k8s client not found") + return "" +} + +func ReadTestCatalogServerContents(ctx context.Context, catalog *ocv1.ClusterCatalog, kubeClient kubernetes.Interface) ([]byte, error) { + if catalog == nil { + return nil, fmt.Errorf("cannot read nil catalog") + } + if catalog.Status.URLs == nil { + return nil, fmt.Errorf("catalog %q has no catalog urls", catalog.Name) + } + url, err := url.Parse(catalog.Status.URLs.Base) + if err != nil { + return nil, fmt.Errorf("error parsing clustercatalog url %q: %v", catalog.Status.URLs.Base, err) + } + // url is expected to be in the format of + // http://{service_name}.{namespace}.svc/catalogs/{catalog_name}/ + // so to get the namespace and name of the service we grab only + // the hostname and split it on the '.' character + ns := strings.Split(url.Hostname(), ".")[1] + name := strings.Split(url.Hostname(), ".")[0] + port := url.Port() + // the ProxyGet() call below needs an explicit port value, so if + // value from url.Port() is empty, we assume port 443. + if port == "" { + if url.Scheme == "https" { + port = "443" + } else { + port = "80" + } + } + resp := kubeClient.CoreV1().Services(ns).ProxyGet(url.Scheme, name, port, url.JoinPath("api", "v1", "all").Path, map[string]string{}) + rc, err := resp.Stream(ctx) + if err != nil { + return nil, err + } + defer rc.Close() + + return io.ReadAll(rc) +}