diff --git a/.github/workflows/license-check.yaml b/.github/workflows/license-check.yaml
index 69eacdc..614157a 100644
--- a/.github/workflows/license-check.yaml
+++ b/.github/workflows/license-check.yaml
@@ -24,7 +24,7 @@ jobs:
 
       - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
         with:
-          go-version: '1.24'
+          go-version: '1.25'
 
       - name: Install go-licenses
         run: |
diff --git a/go.mod b/go.mod
index 9111e0e..011d190 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module github.com/openmcp-project/metrics-operator
 
-go 1.24.2
+go 1.25.1
 
 require (
 	github.com/go-logr/logr v1.4.3
diff --git a/internal/orchestrator/managedhandler.go b/internal/orchestrator/managedhandler.go
index 1aaaf08..0e1e0d6 100644
--- a/internal/orchestrator/managedhandler.go
+++ b/internal/orchestrator/managedhandler.go
@@ -3,6 +3,7 @@ package orchestrator
 import (
 	"context"
 	"fmt"
+	"slices"
 	"strconv"
 	"strings"
 
@@ -57,19 +58,21 @@ func (h *ManagedHandler) sendStatusBasedMetricValue(ctx context.Context) (string
 	for _, cr := range resources {
 		// Create a new data point for each resource
 		dataPoint := clientoptl.NewDataPoint()
-		dataPoint.AddDimension("kind", cr.MangedResource.Kind)
-		dataPoint.AddDimension("apiversion", cr.MangedResource.APIVersion)
+
+		// Add GVK dimensions from resource
+		gv, err := schema.ParseGroupVersion(cr.MangedResource.APIVersion)
+		if err != nil {
+			return "", err
+		}
+		dataPoint.AddDimension(KIND, cr.MangedResource.Kind)
+		dataPoint.AddDimension(GROUP, gv.Group)
+		dataPoint.AddDimension(VERSION, gv.Version)
 
 		// Add cluster dimension if available
 		if h.clusterName != nil {
 			dataPoint.AddDimension(CLUSTER, *h.clusterName)
 		}
 
-		// Add GVK dimensions
-		dataPoint.AddDimension(KIND, h.metric.Spec.Kind)
-		dataPoint.AddDimension(GROUP, h.metric.Spec.Group)
-		dataPoint.AddDimension(VERSION, h.metric.Spec.Version)
-
 		// Add status conditions as dimensions
 		for typ, state := range cr.Status {
 			dataPoint.AddDimension(strings.ToLower(typ), strconv.FormatBool(state))
@@ -150,31 +153,39 @@ func (h *ManagedHandler) getManagedResources(ctx context.Context) ([]Managed, er
 		return nil, err
 	}
 
-	var resourceCRDs []apiextensionsv1.CustomResourceDefinition
+	resourceCRDs := make([]apiextensionsv1.CustomResourceDefinition, 0, len(crds.Items))
 	for _, crd := range crds.Items {
-		if h.hasCategory("crossplane", crd) && h.hasCategory("managed", crd) { // filter previously acquired crds
-			resourceCRDs = append(resourceCRDs, crd)
+		// drop non-crossplane crds
+		if !h.hasCategory("crossplane", crd) || !h.hasCategory("managed", crd) {
+			continue
 		}
+		// drop crds that don't match the spec gvk
+		if !h.matchesGroupVersionKind(crd) {
+			continue
+		}
+		resourceCRDs = append(resourceCRDs, crd)
 	}
 
 	var resources []unstructured.Unstructured
 	for _, crd := range resourceCRDs {
-
-		// Use the stored versions of the CRD
-		storedVersions := make(map[string]bool)
-		for _, v := range crd.Status.StoredVersions {
-			storedVersions[v] = true
-		}
-
+		versionsToRetrieve := make([]string, 0, len(crd.Spec.Versions))
 		for _, crdv := range crd.Spec.Versions {
-			if !crdv.Served || !storedVersions[crdv.Name] {
+			// only use served versions for retrieval
+			if !crdv.Served {
 				continue
 			}
-
+			// only use the metric target version if provided
+			if h.metric.Spec.Version != "" && crdv.Name != h.metric.Spec.Version {
+				continue
+			}
+			versionsToRetrieve = append(versionsToRetrieve, crdv.Name)
+		}
+		// finally retrieve all matching resources
+		for _, version := range versionsToRetrieve {
 			gvr := schema.GroupVersionResource{
 				Resource: crd.Spec.Names.Plural,
 				Group:    crd.Spec.Group,
-				Version:  crdv.Name,
+				Version:  version,
 			}
 
 			list, err := h.dCli.Resource(gvr).List(ctx, metav1.ListOptions{}) // gets resources from all the available crds
@@ -236,3 +247,20 @@ type ClusterResourceStatus struct {
 	MangedResource Managed
 	Status         map[string]bool
 }
+
+func (h *ManagedHandler) matchesGroupVersionKind(crd apiextensionsv1.CustomResourceDefinition) bool {
+	crdVersions := make([]string, 0, len(crd.Spec.Versions))
+	for _, version := range crd.Spec.Versions {
+		crdVersions = append(crdVersions, version.Name)
+	}
+	if h.metric.Spec.Version != "" && !slices.Contains(crdVersions, h.metric.Spec.Version) {
+		return false
+	}
+	if h.metric.Spec.Group != "" && crd.Spec.Group != h.metric.Spec.Group {
+		return false
+	}
+	if h.metric.Spec.Kind != "" && crd.Spec.Names.Kind != h.metric.Spec.Kind {
+		return false
+	}
+	return true
+}
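A note on the hunks above: the `group` and `version` dimensions are now parsed from each resource's own `apiVersion` instead of being copied from the metric spec, so a listing that mixes versions reports each resource accurately. A minimal sketch of the `schema.ParseGroupVersion` behavior this relies on (the apiVersion literals are illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// "group/version" splits into its two components ...
	gv, err := schema.ParseGroupVersion("helm.m.crossplane.io/v1beta1")
	if err != nil {
		panic(err)
	}
	fmt.Println(gv.Group, gv.Version) // helm.m.crossplane.io v1beta1

	// ... while a bare version (core/legacy APIs) yields an empty group,
	// so the GROUP dimension can legitimately be "".
	gv, _ = schema.ParseGroupVersion("v1")
	fmt.Printf("%q %q\n", gv.Group, gv.Version) // "" "v1"
}
```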
target", + filter: schema.GroupVersionKind{ + Group: k8sObjectGVK.Group, + Version: k8sObjectGVK.Version, + }, + clusterCRDs: []string{ + managedAndServedCRD(k8sObjectGVK), + managedAndServedCRD(k8sObjectCollectionGVK), + managedAndServedCRD(nopResourceGVK), + managedAndServedCRD(helmReleaseGVK), + }, + clusterResources: slices.Concat( + resourceFixture[k8sObjects], + resourceFixture[k8sObjectCollections], + resourceFixture[nopResources], + resourceFixture[helmReleases], + ), + wantResources: slices.Concat( + resourceFixture[k8sObjects], + resourceFixture[k8sObjectCollections], + ), + }, + { + name: "version target", + filter: schema.GroupVersionKind{ + Version: k8sObjectGVK.Version, + }, + clusterCRDs: []string{ + managedAndServedCRD(k8sObjectGVK), + managedAndServedCRD(k8sObjectCollectionGVK), + managedAndServedCRD(nopResourceGVK), + managedAndServedCRD(helmReleaseGVK), + }, + clusterResources: slices.Concat( + resourceFixture[k8sObjects], + resourceFixture[k8sObjectCollections], + resourceFixture[nopResources], + resourceFixture[helmReleases], + ), + wantResources: slices.Concat( + resourceFixture[k8sObjects], + resourceFixture[k8sObjectCollections], + resourceFixture[nopResources], + ), + }, + { + name: "unqualified target", + filter: schema.GroupVersionKind{}, + clusterCRDs: []string{ + managedAndServedCRD(k8sObjectGVK), + managedAndServedCRD(k8sObjectCollectionGVK), + managedAndServedCRD(nopResourceGVK), + managedAndServedCRD(helmReleaseGVK), + }, + clusterResources: slices.Concat( + resourceFixture[k8sObjects], + resourceFixture[k8sObjectCollections], + resourceFixture[nopResources], + resourceFixture[helmReleases], + ), + wantResources: slices.Concat( + resourceFixture[k8sObjects], + resourceFixture[k8sObjectCollections], + resourceFixture[nopResources], + resourceFixture[helmReleases], + ), + }, + { + name: "unmanaged custom resources get filtered out", + filter: schema.GroupVersionKind{}, + clusterCRDs: []string{ + unmanagedCRD(k8sObjectGVK), + managedAndServedCRD(k8sObjectCollectionGVK), + unmanagedCRD(nopResourceGVK), + managedAndServedCRD(helmReleaseGVK), + }, + clusterResources: slices.Concat( + resourceFixture[k8sObjects], + resourceFixture[k8sObjectCollections], + resourceFixture[nopResources], + resourceFixture[helmReleases], + ), + wantResources: slices.Concat( + resourceFixture[k8sObjectCollections], + resourceFixture[helmReleases], + ), + }, + { + name: "unserved custom resources are not retrievable", + filter: schema.GroupVersionKind{}, + clusterCRDs: []string{ + unservedCRD(k8sObjectGVK), + managedAndServedCRD(k8sObjectCollectionGVK), + managedAndServedCRD(nopResourceGVK), + unservedCRD(helmReleaseGVK), + }, + clusterResources: slices.Concat( + resourceFixture[k8sObjects], + resourceFixture[k8sObjectCollections], + resourceFixture[nopResources], + resourceFixture[helmReleases], + ), + wantResources: slices.Concat( + resourceFixture[k8sObjectCollections], + resourceFixture[nopResources], + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // setup handler + handler := ManagedHandler{ + client: setupFakeClient(t, tt.clusterCRDs), + dCli: setupFakeDynamicClient(t, tt.clusterResources), + metric: v1alpha1.ManagedMetric{ + Spec: v1alpha1.ManagedMetricSpec{ + Kind: tt.filter.Kind, + Group: tt.filter.Group, + Version: tt.filter.Version, + }, + }, + } + + // execute getManagedResources + result, err := handler.getManagedResources(context.Background()) + if err != nil { + t.Fatalf("getManagedResource failed: %v", err) + } + + // verify result + 
+
+func setupFakeClient(t *testing.T, yamlCRDs []string) client.WithWatch {
+	t.Helper()
+
+	// general runtime setup
+	scheme := runtime.NewScheme()
+	_ = apiextensionsv1.AddToScheme(scheme)
+
+	// setup fake crd result
+	result := make([]client.Object, 0, len(yamlCRDs))
+	for _, yamlItem := range yamlCRDs {
+		var crd apiextensionsv1.CustomResourceDefinition
+		if err := yaml.Unmarshal([]byte(yamlItem), &crd); err != nil {
+			t.Fatalf("failed to unmarshal test CRD: %v", err)
+		}
+		result = append(result, &crd)
+	}
+
+	// setup fake client
+	return fake.NewClientBuilder().
+		WithScheme(scheme).
+		WithObjects(result...).
+		Build()
+}
+
+func setupFakeDynamicClient(t *testing.T, yamlResources []string) *dynamicfake.FakeDynamicClient {
+	t.Helper()
+
+	// general runtime setup
+	scheme := runtime.NewScheme()
+	_ = apiextensionsv1.AddToScheme(scheme)
+
+	// setup fake managed resources result
+	fakeObjects := make([]runtime.Object, 0, len(yamlResources))
+	for _, yamlItem := range yamlResources {
+		obj := toUnstructured(t, yamlItem)
+		fakeObjects = append(fakeObjects, &obj)
+	}
+
+	// setup fake dynamic client
+	return dynamicfake.NewSimpleDynamicClient(scheme, fakeObjects...)
+}
+
+func managedNameGVK(t *testing.T, managed Managed) string {
+	t.Helper()
+	gv, err := schema.ParseGroupVersion(managed.APIVersion)
+	if err != nil {
+		t.Errorf("failed to parse managed group version: %v", err)
+	}
+	gvk := schema.GroupVersionKind{
+		Group:   gv.Group,
+		Version: gv.Version,
+		Kind:    managed.Kind,
+	}
+	return fmt.Sprintf("%v:%v", gvk, managed.Metadata.Name)
+}
+
+func yamlNameGVK(t *testing.T, yaml string) string {
+	t.Helper()
+	obj := toUnstructured(t, yaml)
+	return fmt.Sprintf("%v:%v", obj.GetObjectKind().GroupVersionKind(), obj.GetName())
+}
+
+func fakeResource(gvk schema.GroupVersionKind) string {
+	return fmt.Sprintf(`apiVersion: %v
+kind: %v
+metadata:
+  name: %v
+spec:
+  deletionPolicy: Delete
+status:
+  conditions:
+  - lastTransitionTime: "2025-09-12T15:57:41Z"
+    observedGeneration: 1
+    reason: ReconcileSuccess
+    status: "True"
+    type: Synced
+  - lastTransitionTime: "2025-09-09T14:33:38Z"
+    reason: Available
+    status: "True"
+    type: Ready
+`,
+		gvk.GroupVersion(),
+		gvk.Kind,
+		rand.String(16))
+}
+
+func managedAndServedCRD(gvk schema.GroupVersionKind) string {
+	return fakeCRDTemplate(gvk, true, true)
+}
+
+func unservedCRD(gvk schema.GroupVersionKind) string {
+	return fakeCRDTemplate(gvk, true, false)
+}
+
+func unmanagedCRD(gvk schema.GroupVersionKind) string {
+	return fakeCRDTemplate(gvk, false, true)
+}
+
+func fakeCRDTemplate(gvk schema.GroupVersionKind, managed bool, served bool) string {
+	categories := "[]"
+	if managed {
+		categories = `
+    - crossplane
+    - managed`
+	}
+	return fmt.Sprintf(`apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: %vs.%v
+spec:
+  group: %v
+  names:
+    categories: %v
+    kind: %v
+    listKind: %vList
+    plural: %vs
+    singular: %v
+  scope: Cluster
+  versions:
+  - name: %v
+    served: %v
+`,
+		strings.ToLower(gvk.Kind),
+		gvk.Group,
+		gvk.Group,
+		categories,
+		gvk.Kind,
+		gvk.Kind,
+		strings.ToLower(gvk.Kind),
+		strings.ToLower(gvk.Kind),
+		gvk.Version,
+		served)
+}
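The table-driven cases above pin down the filter semantics: every empty field of the metric spec acts as a wildcard in `matchesGroupVersionKind`, so a bare version target matches all groups and kinds that serve that version. A standalone mirror of that logic, with hypothetical names rather than the operator's API:

```go
package main

import (
	"fmt"
	"slices"
)

// gvkFilter mirrors matchesGroupVersionKind: an empty field matches anything.
type gvkFilter struct{ group, version, kind string }

func (f gvkFilter) matches(group, kind string, servedVersions []string) bool {
	if f.version != "" && !slices.Contains(servedVersions, f.version) {
		return false
	}
	if f.group != "" && group != f.group {
		return false
	}
	if f.kind != "" && kind != f.kind {
		return false
	}
	return true
}

func main() {
	// The "version target" case from the table above.
	filter := gvkFilter{version: "v1alpha1"}
	fmt.Println(filter.matches("nop.crossplane.io", "NopResource", []string{"v1alpha1"})) // true
	fmt.Println(filter.matches("helm.m.crossplane.io", "Release", []string{"v1beta1"}))   // false
}
```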
diff --git a/internal/orchestrator/projectionhelper_test.go b/internal/orchestrator/projectionhelper_test.go
index 4a9c9d6..04c501d 100644
--- a/internal/orchestrator/projectionhelper_test.go
+++ b/internal/orchestrator/projectionhelper_test.go
@@ -8,12 +8,12 @@ import (
 )
 
 const subaccountCR = `
-apiVersion: account.btp.sap.crossplane.io/v1alpha1
-kind: Subaccount
+apiVersion: nop.crossplane.io/v1alpha1
+kind: NopResource
 metadata:
   annotations:
-    crossplane.io/external-name: test-subaccount
-  name: test-subaccount
+    crossplane.io/external-name: ext-example
+  name: example
 spec:
   deletionPolicy: Delete
 status:
@@ -42,7 +42,7 @@ func TestNestedPrimitiveValue(t *testing.T) {
 			name:         "top level value retrieval",
 			resourceYaml: subaccountCR,
 			path:         "kind",
-			wantValue:    "Subaccount",
+			wantValue:    "NopResource",
 			wantFound:    true,
 			wantError:    false,
 		},
@@ -58,7 +58,7 @@ func TestNestedPrimitiveValue(t *testing.T) {
 			name:         "nested value retrieval with escaped name selector",
 			resourceYaml: subaccountCR,
 			path:         "metadata.annotations.crossplane\\.io/external-name",
-			wantValue:    "test-subaccount",
+			wantValue:    "ext-example",
 			wantFound:    true,
 			wantError:    false,
 		},
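One detail of the fixture change worth spelling out: the path `metadata.annotations.crossplane\.io/external-name` uses a backslash-escaped dot so the annotation key keeps its literal `.` while the remaining dots still act as segment separators. The sketch below is a hypothetical illustration of that segmentation, not the operator's actual path parser:

```go
package main

import (
	"fmt"
	"strings"
)

// splitEscapedPath splits a dotted path into segments while treating "\."
// as a literal dot inside a segment (hypothetical helper for illustration).
func splitEscapedPath(path string) []string {
	var segs []string
	var cur strings.Builder
	escaped := false
	for _, r := range path {
		switch {
		case escaped:
			cur.WriteRune(r)
			escaped = false
		case r == '\\':
			escaped = true
		case r == '.':
			segs = append(segs, cur.String())
			cur.Reset()
		default:
			cur.WriteRune(r)
		}
	}
	return append(segs, cur.String())
}

func main() {
	fmt.Println(splitEscapedPath(`metadata.annotations.crossplane\.io/external-name`))
	// [metadata annotations crossplane.io/external-name]
}
```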