diff --git a/README.md b/README.md
index 9824de5..2333280 100644
--- a/README.md
+++ b/README.md
@@ -124,25 +124,33 @@ spec:
     name: my-cluster
     namespace: default
-  permissions:
-  # Role
-  - namespace: default
-    rules:
-    - apiGroups:
-      - ""
-      resources:
-      - "secrets"
-      verbs:
-      - "*"
-  # ClusterRole
-  - rules:
-    - apiGroups:
-      - ""
-      resources:
-      - "configmaps"
-      verbs:
-      - "*"
-
+  token:
+    permissions:
+    # Role + RoleBinding
+    - namespace: default
+      rules:
+      - apiGroups:
+        - ""
+        resources:
+        - "secrets"
+        verbs:
+        - "*"
+    # ClusterRole + ClusterRoleBinding
+    - rules:
+      - apiGroups:
+        - ""
+        resources:
+        - "configmaps"
+        verbs:
+        - "*"
+    roleRefs:
+    # bind to existing Role
+    - kind: Role
+      name: my-role
+      namespace: my-namespace
+    # bind to existing ClusterRole
+    - kind: ClusterRole
+      name: cluster-admin
 ```
 
 This will result in a `ServiceAccount` on the referenced `Cluster` with the specified permissions applied.
 
diff --git a/api/clusters/v1alpha1/cluster_types.go b/api/clusters/v1alpha1/cluster_types.go
index 6341755..f956fad 100644
--- a/api/clusters/v1alpha1/cluster_types.go
+++ b/api/clusters/v1alpha1/cluster_types.go
@@ -130,8 +130,8 @@ func (c *Cluster) GetTenancyCount() int {
 func (c *Cluster) GetRequestUIDs() sets.Set[string] {
 	res := sets.New[string]()
 	for _, fin := range c.Finalizers {
-		if strings.HasPrefix(fin, RequestFinalizerOnClusterPrefix) {
-			res.Insert(strings.TrimPrefix(fin, RequestFinalizerOnClusterPrefix))
+		if uid, ok := strings.CutPrefix(fin, RequestFinalizerOnClusterPrefix); ok {
+			res.Insert(uid)
 		}
 	}
 	return res
diff --git a/api/clusters/v1alpha1/constants.go b/api/clusters/v1alpha1/constants.go
index 852cb8a..11789c8 100644
--- a/api/clusters/v1alpha1/constants.go
+++ b/api/clusters/v1alpha1/constants.go
@@ -84,13 +84,16 @@ const (
 	// SecretKeyCreationTimestamp is the name of the key in the AccessRequest secret that contains the creation timestamp.
 	// This value is optional and must not be set for non-expiring authentication methods.
 	SecretKeyCreationTimestamp = "creationTimestamp"
-	// SecretKeyCAData is the name of the key in the AccessRequest secret that contains the CA data.
-	// This value is optional and must not be set.
-	SecretKeyCAData = "caData"
-	// SecretKeyHost is the name of the key in the AccessRequest secret that contains the host.
-	// This value is optional and must not be set.
-	SecretKeyHost = "host"
 	// SecretKeyClientID is the name of the key in the AccessRequest secret that contains the client ID.
 	// This value is optional and must not be set for non-OIDC-based authentication methods.
 	SecretKeyClientID = "clientID"
+	// SecretKeyHost is the name of the key in the AccessRequest secret that contains the host of the cluster.
+	// This value is optional.
+	SecretKeyHost = "host"
+	// SecretKeyCA is the name of the key in the AccessRequest secret that contains the CA certificate of the cluster.
+	// This value is optional.
+	SecretKeyCA = "ca.crt"
+	// SecretKeyToken is the name of the key in the AccessRequest secret that contains the token.
+	// This value is optional.
+	SecretKeyToken = "token"
 )
diff --git a/api/clusters/v1alpha1/constants/reasons.go b/api/clusters/v1alpha1/constants/reasons.go
index a585f5c..e47f238 100644
--- a/api/clusters/v1alpha1/constants/reasons.go
+++ b/api/clusters/v1alpha1/constants/reasons.go
@@ -11,4 +11,18 @@ const (
 	ReasonConfigurationProblem = "ConfigurationProblem"
 	// ReasonInternalError indicates that something went wrong internally.
 	ReasonInternalError = "InternalError"
+	// ReasonWaitingForNamespaceDeletion indicates that something is waiting for a namespace to be deleted.
+	ReasonWaitingForNamespaceDeletion = "WaitingForNamespaceDeletion"
+	// ReasonWaitingForClusterRequest indicates that something is waiting for a ClusterRequest to become ready.
+	ReasonWaitingForClusterRequest = "WaitingForClusterRequest"
+	// ReasonWaitingForClusterRequestDeletion indicates that something is waiting for a ClusterRequest to be deleted.
+	ReasonWaitingForClusterRequestDeletion = "WaitingForClusterRequestDeletion"
+	// ReasonWaitingForAccessRequest indicates that something is waiting for an AccessRequest to become ready.
+	ReasonWaitingForAccessRequest = "WaitingForAccessRequest"
+	// ReasonWaitingForAccessRequestDeletion indicates that something is waiting for an AccessRequest to be deleted.
+	ReasonWaitingForAccessRequestDeletion = "WaitingForAccessRequestDeletion"
+	// ReasonWaitingForServices indicates that something is waiting for one or more service providers to do something.
+	ReasonWaitingForServices = "WaitingForServices"
+	// ReasonWaitingForServiceDeletion indicates that something is waiting for a service to be deleted.
+	ReasonWaitingForServiceDeletion = "WaitingForServiceDeletion"
 )
diff --git a/api/core/v2alpha1/constants.go b/api/core/v2alpha1/constants.go
index c4e254f..1fceff5 100644
--- a/api/core/v2alpha1/constants.go
+++ b/api/core/v2alpha1/constants.go
@@ -3,4 +3,29 @@ package v2alpha1
 const (
 	// DefaultOIDCProviderName is the identifier for the default OIDC provider.
 	DefaultOIDCProviderName = "default"
+	// DefaultMCPClusterPurpose is the default purpose for ManagedControlPlane clusters.
+	DefaultMCPClusterPurpose = "mcp"
+)
+
+const (
+	MCPNameLabel      = GroupName + "/mcp-name"
+	MCPNamespaceLabel = GroupName + "/mcp-namespace"
+	OIDCProviderLabel = GroupName + "/oidc-provider"
+
+	MCPFinalizer = GroupName + "/mcp"
+
+	// ServiceDependencyFinalizerPrefix is the prefix for the dependency finalizers that are added to MCP resources by associated services.
+	ServiceDependencyFinalizerPrefix = "services.openmcp.cloud/"
+	// ClusterRequestFinalizerPrefix is the prefix for the finalizers that are added to MCP resources for cluster requests.
+ ClusterRequestFinalizerPrefix = "request.clusters.openmcp.cloud/" +) + +const ( + ConditionMeta = "Meta" + + ConditionClusterRequestReady = "ClusterRequestReady" + ConditionPrefixOIDCAccessReady = "OIDCAccessReady:" + ConditionAllAccessReady = "AllAccessReady" + ConditionAllServicesDeleted = "AllServicesDeleted" + ConditionAllClusterRequestsDeleted = "AllClusterRequestsDeleted" ) diff --git a/api/core/v2alpha1/groupversion_info.go b/api/core/v2alpha1/groupversion_info.go index 81565da..ea41fa0 100644 --- a/api/core/v2alpha1/groupversion_info.go +++ b/api/core/v2alpha1/groupversion_info.go @@ -7,9 +7,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/scheme" ) +const GroupName = "core.openmcp.cloud" + var ( // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "core.openmcp.cloud", Version: "v2alpha1"} + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2alpha1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} diff --git a/api/core/v2alpha1/managedcontrolplane_types.go b/api/core/v2alpha1/managedcontrolplane_types.go index a4ddec7..eb43372 100644 --- a/api/core/v2alpha1/managedcontrolplane_types.go +++ b/api/core/v2alpha1/managedcontrolplane_types.go @@ -18,7 +18,8 @@ type ManagedControlPlaneV2Status struct { // Each referenced secret is expected to contain a 'kubeconfig' key with the kubeconfig that was generated for the respective OIDC provider for the ManagedControlPlaneV2. // The default OIDC provider, if configured, uses the name "default" in this mapping. // The "default" key is also used if the ClusterProvider does not support OIDC-based access and created a serviceaccount with a token instead. - Access map[string]commonapi.LocalObjectReference `json:"access"` + // +optional + Access map[string]commonapi.LocalObjectReference `json:"access,omitempty"` } type IAMConfig struct { diff --git a/api/crds/manifests/core.openmcp.cloud_managedcontrolplanev2s.yaml b/api/crds/manifests/core.openmcp.cloud_managedcontrolplanev2s.yaml new file mode 100644 index 0000000..858cd14 --- /dev/null +++ b/api/crds/manifests/core.openmcp.cloud_managedcontrolplanev2s.yaml @@ -0,0 +1,374 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + labels: + openmcp.cloud/cluster: onboarding + name: managedcontrolplanev2s.core.openmcp.cloud +spec: + group: core.openmcp.cloud + names: + kind: ManagedControlPlaneV2 + listKind: ManagedControlPlaneV2List + plural: managedcontrolplanev2s + shortNames: + - mcpv2 + singular: managedcontrolplanev2 + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + name: v2alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + iam: + description: IAM contains the access management configuration for + the ManagedControlPlaneV2. + properties: + oidcProviders: + description: |- + OIDCProviders is a list of OIDC providers that should be configured for the ManagedControlPlaneV2. + They are independent of the standard OIDC provider and in addition to it, unless it has been disabled by not specifying any role bindings. + items: + properties: + clientID: + description: ClientID is the client ID to use for the OIDC + provider. + type: string + extraScopes: + description: ExtraScopes is a list of extra scopes that + should be requested from the OIDC provider. + items: + type: string + type: array + groupsClaim: + default: groups + description: |- + GroupsClaim is the claim in the OIDC token that contains the groups. + If empty, the default claim "groups" will be used. + type: string + groupsPrefix: + description: |- + GroupsPrefix is a prefix that will be added to all group names when referenced in RBAC rules. + This is required to avoid conflicts with Kubernetes built-in groups. + If the prefix does not end with a colon (:), it will be added automatically. + minLength: 1 + type: string + issuer: + description: Issuer is the issuer URL of the OIDC provider. + type: string + name: + description: |- + Name is the name of the OIDC provider. + May be used in k8s resources, therefore has to be a valid k8s name. + maxLength: 253 + minLength: 1 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' + type: string + roleBindings: + description: |- + RoleBindings is a list of subjects with (cluster) role bindings that should be created for them. + Note that the username prefix is added automatically to the subjects' names, it must not be explicitly specified here. + items: + properties: + roleRefs: + description: |- + RoleRefs is a list of (cluster) role references that the subjects should be bound to. + Note that existence of the roles is not checked and missing (cluster) roles will result in ineffective (cluster) role bindings. + items: + description: RoleRef defines a reference to a (cluster) + role that should be bound to the subjects. + properties: + kind: + description: |- + Kind is the kind of the role to bind to the subjects. + It must be 'Role' or 'ClusterRole'. + enum: + - Role + - ClusterRole + type: string + name: + description: Name is the name of the role or + cluster role to bind to the subjects. + minLength: 1 + type: string + namespace: + description: |- + Namespace is the namespace of the role to bind to the subjects. + It must be set if the kind is 'Role' and may not be set if the kind is 'ClusterRole'. + type: string + required: + - kind + - name + type: object + type: array + subjects: + description: |- + Subjects is a list of subjects that should be bound to the specified roles. + The subjects' names will be prefixed with the username prefix of the OIDC provider. + items: + description: |- + Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, + or a value for non-objects such as user and group names. + properties: + apiGroup: + description: |- + APIGroup holds the API group of the referenced subject. + Defaults to "" for ServiceAccount subjects. + Defaults to "rbac.authorization.k8s.io" for User and Group subjects. 
+ type: string + kind: + description: |- + Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + If the Authorizer does not recognized the kind value, the Authorizer should report an error. + type: string + name: + description: Name of the object being referenced. + type: string + namespace: + description: |- + Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + the Authorizer should report an error. + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + type: array + required: + - roleRefs + - subjects + type: object + type: array + usernameClaim: + default: sub + description: |- + UsernameClaim is the claim in the OIDC token that contains the username. + If empty, the default claim "sub" will be used. + type: string + usernamePrefix: + description: |- + UsernamePrefix is a prefix that will be added to all usernames when referenced in RBAC rules. + This is required to avoid conflicts with Kubernetes built-in users. + If the prefix does not end with a colon (:), it will be added automatically. + minLength: 1 + type: string + required: + - clientID + - groupsPrefix + - issuer + - name + - roleBindings + - usernamePrefix + type: object + x-kubernetes-validations: + - message: OIDC provider name must not be 'default' as this + is reserved for the standard OIDC provider + rule: self.name != 'default' + type: array + roleBindings: + description: |- + RoleBindings is a list of subjects with (cluster) role bindings that should be created for them. + These bindings refer to the standard OIDC provider. If empty, the standard OIDC provider is disabled. + Note that the username prefix is added automatically to the subjects' names, it must not be explicitly specified here. + items: + properties: + roleRefs: + description: |- + RoleRefs is a list of (cluster) role references that the subjects should be bound to. + Note that existence of the roles is not checked and missing (cluster) roles will result in ineffective (cluster) role bindings. + items: + description: RoleRef defines a reference to a (cluster) + role that should be bound to the subjects. + properties: + kind: + description: |- + Kind is the kind of the role to bind to the subjects. + It must be 'Role' or 'ClusterRole'. + enum: + - Role + - ClusterRole + type: string + name: + description: Name is the name of the role or cluster + role to bind to the subjects. + minLength: 1 + type: string + namespace: + description: |- + Namespace is the namespace of the role to bind to the subjects. + It must be set if the kind is 'Role' and may not be set if the kind is 'ClusterRole'. + type: string + required: + - kind + - name + type: object + type: array + subjects: + description: |- + Subjects is a list of subjects that should be bound to the specified roles. + The subjects' names will be prefixed with the username prefix of the OIDC provider. + items: + description: |- + Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, + or a value for non-objects such as user and group names. + properties: + apiGroup: + description: |- + APIGroup holds the API group of the referenced subject. + Defaults to "" for ServiceAccount subjects. + Defaults to "rbac.authorization.k8s.io" for User and Group subjects. + type: string + kind: + description: |- + Kind of object being referenced. 
Values defined by this API group are "User", "Group", and "ServiceAccount". + If the Authorizer does not recognized the kind value, the Authorizer should report an error. + type: string + name: + description: Name of the object being referenced. + type: string + namespace: + description: |- + Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + the Authorizer should report an error. + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + type: array + required: + - roleRefs + - subjects + type: object + type: array + type: object + required: + - iam + type: object + status: + properties: + access: + additionalProperties: + description: LocalObjectReference is a reference to an object in + the same namespace as the resource referencing it. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + description: |- + Access is a mapping from OIDC provider names to secret references. + Each referenced secret is expected to contain a 'kubeconfig' key with the kubeconfig that was generated for the respective OIDC provider for the ManagedControlPlaneV2. + The default OIDC provider, if configured, uses the name "default" in this mapping. + The "default" key is also used if the ClusterProvider does not support OIDC-based access and created a serviceaccount with a token instead. + type: object + conditions: + description: Conditions contains the conditions. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + observedGeneration: + description: ObservedGeneration is the generation of this resource + that was last reconciled by the controller. + format: int64 + type: integer + phase: + description: Phase is the current phase of the resource. + type: string + required: + - observedGeneration + - phase + type: object + type: object + selectableFields: + - jsonPath: .status.phase + served: true + storage: true + subresources: + status: {} diff --git a/api/crds/manifests/openmcp.cloud_clusterproviders.yaml b/api/crds/manifests/openmcp.cloud_clusterproviders.yaml index 53f0926..daf82ef 100644 --- a/api/crds/manifests/openmcp.cloud_clusterproviders.yaml +++ b/api/crds/manifests/openmcp.cloud_clusterproviders.yaml @@ -2033,14 +2033,20 @@ spec: ImagePullSecrets are secrets in the same namespace. They can be used to fetch provider images from private registries. items: + description: LocalObjectReference is a reference to an object in + the same namespace as the resource referencing it. properties: name: - description: Name is the name of the referenced resource. - minLength: 1 + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string - required: - - name type: object + x-kubernetes-map-type: atomic type: array initCommand: description: |- diff --git a/api/crds/manifests/openmcp.cloud_platformservices.yaml b/api/crds/manifests/openmcp.cloud_platformservices.yaml index c624597..754645a 100644 --- a/api/crds/manifests/openmcp.cloud_platformservices.yaml +++ b/api/crds/manifests/openmcp.cloud_platformservices.yaml @@ -2033,14 +2033,20 @@ spec: ImagePullSecrets are secrets in the same namespace. They can be used to fetch provider images from private registries. items: + description: LocalObjectReference is a reference to an object in + the same namespace as the resource referencing it. properties: name: - description: Name is the name of the referenced resource. - minLength: 1 + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string - required: - - name type: object + x-kubernetes-map-type: atomic type: array initCommand: description: |- diff --git a/api/crds/manifests/openmcp.cloud_serviceproviders.yaml b/api/crds/manifests/openmcp.cloud_serviceproviders.yaml index 1183473..77d19db 100644 --- a/api/crds/manifests/openmcp.cloud_serviceproviders.yaml +++ b/api/crds/manifests/openmcp.cloud_serviceproviders.yaml @@ -2033,14 +2033,20 @@ spec: ImagePullSecrets are secrets in the same namespace. They can be used to fetch provider images from private registries. items: + description: LocalObjectReference is a reference to an object in + the same namespace as the resource referencing it. properties: name: - description: Name is the name of the referenced resource. 
- minLength: 1 + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string - required: - - name type: object + x-kubernetes-map-type: atomic type: array initCommand: description: |- diff --git a/api/install/install.go b/api/install/install.go index fce1acf..6e975a5 100644 --- a/api/install/install.go +++ b/api/install/install.go @@ -20,10 +20,16 @@ func InstallCRDAPIs(scheme *runtime.Scheme) *runtime.Scheme { return scheme } -func InstallOperatorAPIs(scheme *runtime.Scheme) *runtime.Scheme { +func InstallOperatorAPIsPlatform(scheme *runtime.Scheme) *runtime.Scheme { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(providerv1alpha1.AddToScheme(scheme)) utilruntime.Must(clustersv1alpha1.AddToScheme(scheme)) + + return scheme +} + +func InstallOperatorAPIsOnboarding(scheme *runtime.Scheme) *runtime.Scheme { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(corev2alpha1.AddToScheme(scheme)) return scheme diff --git a/api/provider/v1alpha1/deployment_types.go b/api/provider/v1alpha1/deployment_types.go index 9e9258a..f15086d 100644 --- a/api/provider/v1alpha1/deployment_types.go +++ b/api/provider/v1alpha1/deployment_types.go @@ -19,6 +19,8 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/openmcp-project/openmcp-operator/api/common" ) // DeploymentSpec defines the desired state of a provider. @@ -29,7 +31,7 @@ type DeploymentSpec struct { // ImagePullSecrets are secrets in the same namespace. // They can be used to fetch provider images from private registries. - ImagePullSecrets []ObjectReference `json:"imagePullSecrets,omitempty"` + ImagePullSecrets []common.LocalObjectReference `json:"imagePullSecrets,omitempty"` // InitCommand is the command that is executed to run the init job of the provider. // Defaults to ["init"], if not specified. @@ -86,12 +88,6 @@ type DeploymentStatus struct { Phase string `json:"phase,omitempty"` } -type ObjectReference struct { - // Name is the name of the referenced resource. - // +kubebuilder:validation:MinLength=1 - Name string `json:"name"` -} - // EnvVar represents an environment variable present in a Container. type EnvVar struct { // Name is the name of the environment variable. diff --git a/api/provider/v1alpha1/zz_generated.deepcopy.go b/api/provider/v1alpha1/zz_generated.deepcopy.go index 177cc09..b3295d5 100644 --- a/api/provider/v1alpha1/zz_generated.deepcopy.go +++ b/api/provider/v1alpha1/zz_generated.deepcopy.go @@ -8,6 +8,8 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" + + "github.com/openmcp-project/openmcp-operator/api/common" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -106,7 +108,7 @@ func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { *out = *in if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]ObjectReference, len(*in)) + *out = make([]common.LocalObjectReference, len(*in)) copy(*out, *in) } if in.InitCommand != nil { @@ -187,21 +189,6 @@ func (in *EnvVar) DeepCopy() *EnvVar { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. -func (in *ObjectReference) DeepCopy() *ObjectReference { - if in == nil { - return nil - } - out := new(ObjectReference) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PlatformService) DeepCopyInto(out *PlatformService) { *out = *in diff --git a/cmd/openmcp-operator/app/app.go b/cmd/openmcp-operator/app/app.go index 3058b07..c46e0ff 100644 --- a/cmd/openmcp-operator/app/app.go +++ b/cmd/openmcp-operator/app/app.go @@ -2,18 +2,12 @@ package app import ( "context" - "fmt" "os" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/yaml" - "github.com/spf13/cobra" - "github.com/openmcp-project/controller-utils/pkg/clusters" - "github.com/openmcp-project/controller-utils/pkg/logging" - - "github.com/openmcp-project/openmcp-operator/internal/config" + "github.com/openmcp-project/openmcp-operator/cmd/openmcp-operator/app/mcp" + "github.com/openmcp-project/openmcp-operator/cmd/openmcp-operator/app/options" ) func NewOpenMCPOperatorCommand(ctx context.Context) *cobra.Command { @@ -24,110 +18,11 @@ func NewOpenMCPOperatorCommand(ctx context.Context) *cobra.Command { cmd.SetOut(os.Stdout) cmd.SetErr(os.Stderr) - so := &SharedOptions{ - RawSharedOptions: &RawSharedOptions{}, - PlatformCluster: clusters.New("platform"), - } - so.AddPersistentFlags(cmd) - cmd.AddCommand(NewInitCommand(so)) - cmd.AddCommand(NewRunCommand(so)) + po := options.NewPersistentOptions() + po.AddPersistentFlags(cmd) + cmd.AddCommand(NewInitCommand(po)) + cmd.AddCommand(NewRunCommand(po)) + cmd.AddCommand(mcp.NewMCPControllerSubcommand(ctx, po)) return cmd } - -type RawSharedOptions struct { - Environment string `json:"environment"` - DryRun bool `json:"dry-run"` - ConfigPaths []string `json:"configPaths"` - PlatformClusterKubeconfigPath string `json:"kubeconfig"` // dummy for printing, actual path is in Clusters -} - -type SharedOptions struct { - *RawSharedOptions - PlatformCluster *clusters.Cluster - - // fields filled in Complete() - Log logging.Logger - Config *config.Config -} - -func (o *SharedOptions) AddPersistentFlags(cmd *cobra.Command) { - // logging - logging.InitFlags(cmd.PersistentFlags()) - // clusters - o.PlatformCluster.RegisterSingleConfigPathFlag(cmd.PersistentFlags()) - // environment - cmd.PersistentFlags().StringVar(&o.Environment, "environment", "", "Environment name. Required. This is used to distinguish between different environments that are watching the same Onboarding cluster. Must be globally unique.") - // config - cmd.PersistentFlags().StringSliceVar(&o.ConfigPaths, "config", nil, "Paths to the config files (separate with comma or specify flag multiple times). Each path can be a file or directory. 
In the latter case, all files within with '.yaml', '.yml', and '.json' extensions are evaluated. The config is merged together from the different sources, with later configs overriding earlier ones.") - // misc - cmd.PersistentFlags().BoolVar(&o.DryRun, "dry-run", false, "If set, the command aborts after evaluation of the given flags.") -} - -func (o *SharedOptions) Complete() error { - if o.Environment == "" { - return fmt.Errorf("environment must not be empty") - } - config.SetEnvironment(o.Environment) - - // build logger - log, err := logging.GetLogger() - if err != nil { - return err - } - o.Log = log - ctrl.SetLogger(o.Log.Logr()) - - // construct cluster clients - if err := o.PlatformCluster.InitializeRESTConfig(); err != nil { - return err - } - - // load config - if len(o.ConfigPaths) > 0 { - cfg, err := config.LoadFromFiles(o.ConfigPaths...) - if err != nil { - return fmt.Errorf("error loading config from files: %w", err) - } - if err := cfg.Default(); err != nil { - _ = cfg.Dump(os.Stderr) - return fmt.Errorf("error defaulting config: %w", err) - } - if err := cfg.Validate(); err != nil { - _ = cfg.Dump(os.Stderr) - return fmt.Errorf("config is invalid: %w", err) - } - if err := cfg.Complete(); err != nil { - _ = cfg.Dump(os.Stderr) - return fmt.Errorf("error completing config: %w", err) - } - o.Config = cfg - } - - return nil -} - -func (o *SharedOptions) PrintRaw(cmd *cobra.Command) { - // fill dummy paths - o.PlatformClusterKubeconfigPath = o.PlatformCluster.ConfigPath() - - data, err := yaml.Marshal(o.RawSharedOptions) - if err != nil { - cmd.Println(fmt.Errorf("error marshalling raw shared options: %w", err).Error()) - return - } - cmd.Print(string(data)) -} - -func (o *SharedOptions) PrintCompleted(cmd *cobra.Command) { - raw := map[string]any{ - "platformCluster": o.PlatformCluster.APIServerEndpoint(), - "config": o.Config, - } - data, err := yaml.Marshal(raw) - if err != nil { - cmd.Println(fmt.Errorf("error marshalling completed shared options: %w", err).Error()) - return - } - cmd.Print(string(data)) -} diff --git a/cmd/openmcp-operator/app/init.go b/cmd/openmcp-operator/app/init.go index f474707..82b26dc 100644 --- a/cmd/openmcp-operator/app/init.go +++ b/cmd/openmcp-operator/app/init.go @@ -3,21 +3,35 @@ package app import ( "context" "fmt" + "os" + "strings" - crdutil "github.com/openmcp-project/controller-utils/pkg/crds" "github.com/spf13/cobra" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/yaml" + "github.com/openmcp-project/controller-utils/pkg/collections" + crdutil "github.com/openmcp-project/controller-utils/pkg/crds" + "github.com/openmcp-project/controller-utils/pkg/logging" + clustersv1alpha1 "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1" + commonapi "github.com/openmcp-project/openmcp-operator/api/common" apiconst "github.com/openmcp-project/openmcp-operator/api/constants" "github.com/openmcp-project/openmcp-operator/api/crds" "github.com/openmcp-project/openmcp-operator/api/install" + providerv1alpha1 "github.com/openmcp-project/openmcp-operator/api/provider/v1alpha1" + "github.com/openmcp-project/openmcp-operator/cmd/openmcp-operator/app/options" + "github.com/openmcp-project/openmcp-operator/internal/controllers/managedcontrolplane" ) -func NewInitCommand(so *SharedOptions) *cobra.Command { +const OpenMCPOperatorName = "openmcp-operator" + +func NewInitCommand(po *options.PersistentOptions) 
*cobra.Command { opts := &InitOptions{ - SharedOptions: so, + PersistentOptions: po, } cmd := &cobra.Command{ Use: "init", @@ -43,29 +57,28 @@ func NewInitCommand(so *SharedOptions) *cobra.Command { } type InitOptions struct { - *SharedOptions + *options.PersistentOptions RawInitOptions } type RawInitOptions struct { - SkipPlatformCRDs bool `json:"skip-platform-crds"` - SkipOnboardingCRDs bool `json:"skip-onboarding-crds"` + SkipMCPPlatformService bool `json:"skip-mcp-platform-service"` } func (o *InitOptions) AddFlags(cmd *cobra.Command) { - cmd.Flags().BoolVar(&o.SkipPlatformCRDs, "skip-platform-crds", false, "Won't install CRDs for the platform cluster, if true.") - cmd.Flags().BoolVar(&o.SkipOnboardingCRDs, "skip-onboarding-crds", false, "Won't install CRDs for the onboarding cluster, if true.") + cmd.Flags().BoolVar(&o.SkipMCPPlatformService, "skip-mcp-platform-service", false, "If true, the PlatformService for the ManagedControlPlane controller will not be created/updated.") } func (o *InitOptions) Complete(ctx context.Context) error { - if err := o.SharedOptions.Complete(); err != nil { + if err := o.PersistentOptions.Complete(); err != nil { return err } return nil } +//nolint:gocyclo func (o *InitOptions) Run(ctx context.Context) error { - if err := o.PlatformCluster.InitializeClient(install.InstallCRDAPIs(runtime.NewScheme())); err != nil { + if err := o.PlatformCluster.InitializeClient(install.InstallOperatorAPIsPlatform(install.InstallCRDAPIs(runtime.NewScheme()))); err != nil { return err } @@ -75,11 +88,151 @@ func (o *InitOptions) Run(ctx context.Context) error { // apply CRDs crdManager := crdutil.NewCRDManager(apiconst.ClusterLabel, crds.CRDs) crdManager.AddCRDLabelToClusterMapping(clustersv1alpha1.PURPOSE_PLATFORM, o.PlatformCluster) + crdManager.SkipCRDsWithClusterLabel(clustersv1alpha1.PURPOSE_ONBOARDING) if err := crdManager.CreateOrUpdateCRDs(ctx, &log); err != nil { return fmt.Errorf("error creating/updating CRDs: %w", err) } + // create PlatformService for MCP controller (unless disabled) + if o.SkipMCPPlatformService { + log.Info("Skipping creation/update of PlatformService for ManagedControlPlane controller") + } else { + log.Info("Creating/updating PlatformService for ManagedControlPlane controller") + podName := os.Getenv(apiconst.EnvVariablePodName) + if podName == "" { + return fmt.Errorf("environment variable %s is not set", apiconst.EnvVariablePodName) + } + podNamespace := os.Getenv(apiconst.EnvVariablePodNamespace) + if podNamespace == "" { + return fmt.Errorf("environment variable %s is not set", apiconst.EnvVariablePodNamespace) + } + + log.Info("Fetching own pod to determine image", "name", podName, "namespace", podNamespace) + pod := &corev1.Pod{} + pod.Name = podName + pod.Namespace = podNamespace + if err := o.PlatformCluster.Client().Get(ctx, client.ObjectKeyFromObject(pod), pod); err != nil { + return fmt.Errorf("error fetching own pod %s/%s: %w", podNamespace, podName, err) + } + var container *corev1.Container + if len(pod.Spec.Containers) == 1 { + container = &pod.Spec.Containers[0] + } else { + for _, c := range pod.Spec.Containers { + if c.Name == OpenMCPOperatorName { + container = &c + break + } + } + } + if container == nil { + return fmt.Errorf("unable to determine main container from pod %s/%s", podNamespace, podName) + } + verbosity := "INFO" + if log.Enabled(logging.DEBUG) { + verbosity = "DEBUG" + } + pullSecrets := pod.Spec.ImagePullSecrets + + // identify volumes that need to be mounted in order to have the config available + 
configPaths := []string{} + volumeMounts := []corev1.VolumeMount{} + volumes := []corev1.Volume{} + next := false + for _, arg := range container.Args { + if next { + configPaths = append(configPaths, arg) + next = false + } else if arg == "--config" { + next = true + } else if suffix, ok := strings.CutPrefix(arg, "--config="); ok { + configPaths = append(configPaths, suffix) + } + } + if len(configPaths) > 0 { + for _, vm := range container.VolumeMounts { + for _, cp := range configPaths { + if strings.HasPrefix(cp, vm.MountPath) { + volumeMounts = append(volumeMounts, vm) + break + } + } + } + for _, v := range pod.Spec.Volumes { + for _, vm := range volumeMounts { + if v.Name == vm.Name { + volumes = append(volumes, v) + break + } + } + } + } + + // create/update PlatformService + mcpPSName := o.ProviderName + if mcpPSName == "" { + mcpPSName = strings.ToLower(managedcontrolplane.ControllerName) + } + + expectedLabels := map[string]string{ + apiconst.ManagedByLabel: OpenMCPOperatorName, + "platformservice." + apiconst.OpenMCPGroupName + "/purpose": managedcontrolplane.ControllerName, + } + psl := &providerv1alpha1.PlatformServiceList{} + if err := o.PlatformCluster.Client().List(ctx, psl, client.MatchingLabels(expectedLabels)); err != nil { + return fmt.Errorf("error listing PlatformServices: %w", err) + } + var ps *providerv1alpha1.PlatformService + psToDelete := []providerv1alpha1.PlatformService{} + for _, item := range psl.Items { + if item.DeletionTimestamp.IsZero() { // ignore platform services in deletion + if item.Name == mcpPSName { + ps = &item + } else { + psToDelete = append(psToDelete, item) + } + } + } + if ps == nil { + log.Info("Creating PlatformService for ManagedControlPlane controller", "name", mcpPSName) + ps = &providerv1alpha1.PlatformService{} + ps.Name = mcpPSName + } else { + log.Info("Updating PlatformService for ManagedControlPlane controller", "name", mcpPSName) + } + if _, err := controllerutil.CreateOrUpdate(ctx, o.PlatformCluster.Client(), ps, func() error { + ps.Labels = expectedLabels + ps.Spec.Image = container.Image + ps.Spec.ImagePullSecrets = collections.ProjectSliceToSlice(pullSecrets, func(ref corev1.LocalObjectReference) commonapi.LocalObjectReference { + return commonapi.LocalObjectReference{ + Name: ref.Name, + } + }) + ps.Spec.ExtraVolumes = volumes + ps.Spec.ExtraVolumeMounts = volumeMounts + ps.Spec.InitCommand = []string{"mcp", "init"} + ps.Spec.RunCommand = []string{"mcp", "run"} + for _, cp := range configPaths { + ps.Spec.InitCommand = append(ps.Spec.InitCommand, "--config="+cp) + ps.Spec.RunCommand = append(ps.Spec.RunCommand, "--config="+cp) + } + ps.Spec.Verbosity = verbosity + return nil + }); err != nil { + return fmt.Errorf("error creating/updating PlatformService %s: %w", ps.Name, err) + } + if len(psToDelete) > 0 { + log.Info("Deleting obsolete PlatformServices for ManagedControlPlane controller", "count", len(psToDelete)) + for _, psDel := range psToDelete { + if err := o.PlatformCluster.Client().Delete(ctx, &psDel); err != nil { + return fmt.Errorf("error deleting obsolete PlatformService %s: %w", psDel.Name, err) + } + log.Info("Deleted obsolete PlatformService", "name", psDel.Name) + } + } + } + log.Info("Finished init command") return nil } @@ -95,7 +248,7 @@ func (o *InitOptions) PrintRaw(cmd *cobra.Command) { func (o *InitOptions) PrintRawOptions(cmd *cobra.Command) { cmd.Println("########## RAW OPTIONS START ##########") - o.SharedOptions.PrintRaw(cmd) + o.PersistentOptions.PrintRaw(cmd) o.PrintRaw(cmd) 
cmd.Println("########## RAW OPTIONS END ##########") } @@ -104,7 +257,7 @@ func (o *InitOptions) PrintCompleted(cmd *cobra.Command) {} func (o *InitOptions) PrintCompletedOptions(cmd *cobra.Command) { cmd.Println("########## COMPLETED OPTIONS START ##########") - o.SharedOptions.PrintCompleted(cmd) + o.PersistentOptions.PrintCompleted(cmd) o.PrintCompleted(cmd) cmd.Println("########## COMPLETED OPTIONS END ##########") } diff --git a/cmd/openmcp-operator/app/mcp/app.go b/cmd/openmcp-operator/app/mcp/app.go new file mode 100644 index 0000000..549f9af --- /dev/null +++ b/cmd/openmcp-operator/app/mcp/app.go @@ -0,0 +1,23 @@ +package mcp + +import ( + "context" + "os" + + "github.com/spf13/cobra" + + "github.com/openmcp-project/openmcp-operator/cmd/openmcp-operator/app/options" +) + +func NewMCPControllerSubcommand(ctx context.Context, po *options.PersistentOptions) *cobra.Command { + cmd := &cobra.Command{ + Use: "mcp", + Short: "Commands running the MCP controller", + } + cmd.SetOut(os.Stdout) + cmd.SetErr(os.Stderr) + cmd.AddCommand(NewInitCommand(po)) + cmd.AddCommand(NewRunCommand(po)) + + return cmd +} diff --git a/cmd/openmcp-operator/app/mcp/init.go b/cmd/openmcp-operator/app/mcp/init.go new file mode 100644 index 0000000..7783ca9 --- /dev/null +++ b/cmd/openmcp-operator/app/mcp/init.go @@ -0,0 +1,134 @@ +package mcp + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/spf13/cobra" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + + crdutil "github.com/openmcp-project/controller-utils/pkg/crds" + + clustersv1alpha1 "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1" + apiconst "github.com/openmcp-project/openmcp-operator/api/constants" + "github.com/openmcp-project/openmcp-operator/api/crds" + "github.com/openmcp-project/openmcp-operator/api/install" + "github.com/openmcp-project/openmcp-operator/cmd/openmcp-operator/app/options" + "github.com/openmcp-project/openmcp-operator/internal/controllers/managedcontrolplane" + "github.com/openmcp-project/openmcp-operator/lib/clusteraccess" +) + +func NewInitCommand(po *options.PersistentOptions) *cobra.Command { + opts := &InitOptions{ + PersistentOptions: po, + } + cmd := &cobra.Command{ + Use: "init", + Short: "Initialize the MCP controller", + Run: func(cmd *cobra.Command, args []string) { + opts.PrintRawOptions(cmd) + if err := opts.Complete(cmd.Context()); err != nil { + panic(fmt.Errorf("error completing options: %w", err)) + } + opts.PrintCompletedOptions(cmd) + if opts.DryRun { + cmd.Println("=== END OF DRY RUN ===") + return + } + if err := opts.Run(cmd.Context()); err != nil { + panic(err) + } + }, + } + opts.AddFlags(cmd) + + return cmd +} + +type InitOptions struct { + *options.PersistentOptions +} + +func (o *InitOptions) AddFlags(cmd *cobra.Command) {} + +func (o *InitOptions) Complete(ctx context.Context) error { + if err := o.PersistentOptions.Complete(); err != nil { + return err + } + if o.ProviderName == "" { + return fmt.Errorf("provider-name must not be empty") + } + return nil +} + +func (o *InitOptions) Run(ctx context.Context) error { + if err := o.PlatformCluster.InitializeClient(install.InstallOperatorAPIsPlatform(runtime.NewScheme())); err != nil { + return err + } + + log := o.Log.WithName("main") + + log.Info("Getting access to the onboarding cluster") + onboardingScheme := runtime.NewScheme() + install.InstallCRDAPIs(onboardingScheme) + + providerSystemNamespace := os.Getenv(apiconst.EnvVariablePodNamespace) + if providerSystemNamespace == "" { + return 
fmt.Errorf("environment variable %s is not set", apiconst.EnvVariablePodNamespace) + } + + clusterAccessManager := clusteraccess.NewClusterAccessManager(o.PlatformCluster.Client(), managedcontrolplane.ControllerName, providerSystemNamespace) + clusterAccessManager.WithLogger(&log). + WithInterval(10 * time.Second). + WithTimeout(30 * time.Minute) + + onboardingCluster, err := clusterAccessManager.CreateAndWaitForCluster(ctx, clustersv1alpha1.PURPOSE_ONBOARDING+"-init", clustersv1alpha1.PURPOSE_ONBOARDING, + onboardingScheme, []clustersv1alpha1.PermissionsRequest{ + { + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"apiextensions.k8s.io"}, + Resources: []string{"customresourcedefinitions"}, + Verbs: []string{"*"}, + }, + }, + }, + }) + + if err != nil { + return fmt.Errorf("error creating/updating onboarding cluster: %w", err) + } + + crdManager := crdutil.NewCRDManager(apiconst.ClusterLabel, crds.CRDs) + + // deploy only onboarding CRDs here, because the openMCP operator already deployed the platform CRDs + crdManager.AddCRDLabelToClusterMapping(clustersv1alpha1.PURPOSE_ONBOARDING, onboardingCluster) + crdManager.SkipCRDsWithClusterLabel(clustersv1alpha1.PURPOSE_PLATFORM) + + if err := crdManager.CreateOrUpdateCRDs(ctx, &o.Log); err != nil { + return fmt.Errorf("error creating/updating CRDs: %w", err) + } + log.Info("Finished init command") + return nil +} + +func (o *InitOptions) PrintRaw(cmd *cobra.Command) {} + +func (o *InitOptions) PrintRawOptions(cmd *cobra.Command) { + cmd.Println("########## RAW OPTIONS START ##########") + o.PersistentOptions.PrintRaw(cmd) + o.PrintRaw(cmd) + cmd.Println("########## RAW OPTIONS END ##########") +} + +func (o *InitOptions) PrintCompleted(cmd *cobra.Command) {} + +func (o *InitOptions) PrintCompletedOptions(cmd *cobra.Command) { + cmd.Println("########## COMPLETED OPTIONS START ##########") + o.PersistentOptions.PrintCompleted(cmd) + o.PrintCompleted(cmd) + cmd.Println("########## COMPLETED OPTIONS END ##########") +} diff --git a/cmd/openmcp-operator/app/mcp/run.go b/cmd/openmcp-operator/app/mcp/run.go new file mode 100644 index 0000000..80448f3 --- /dev/null +++ b/cmd/openmcp-operator/app/mcp/run.go @@ -0,0 +1,332 @@ +package mcp + +import ( + "context" + "crypto/tls" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/spf13/cobra" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/certwatcher" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/yaml" + + "github.com/openmcp-project/controller-utils/pkg/logging" + + clustersv1alpha1 "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1" + apiconst "github.com/openmcp-project/openmcp-operator/api/constants" + "github.com/openmcp-project/openmcp-operator/api/install" + "github.com/openmcp-project/openmcp-operator/cmd/openmcp-operator/app/options" + "github.com/openmcp-project/openmcp-operator/internal/controllers/managedcontrolplane" + "github.com/openmcp-project/openmcp-operator/lib/clusteraccess" +) + +var setupLog logging.Logger + +func NewRunCommand(po *options.PersistentOptions) *cobra.Command { + opts := &RunOptions{ + PersistentOptions: po, + } + cmd := &cobra.Command{ + Use: "run", + Short: "Run the openMCP Operator", + Run: func(cmd *cobra.Command, args 
[]string) { + opts.PrintRawOptions(cmd) + if err := opts.Complete(cmd.Context()); err != nil { + panic(fmt.Errorf("error completing options: %w", err)) + } + opts.PrintCompletedOptions(cmd) + if opts.DryRun { + cmd.Println("=== END OF DRY RUN ===") + return + } + if err := opts.Run(cmd.Context()); err != nil { + panic(err) + } + }, + } + opts.AddFlags(cmd) + + return cmd +} + +func (o *RunOptions) AddFlags(cmd *cobra.Command) { + // kubebuilder default flags + cmd.Flags().StringVar(&o.MetricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.") + cmd.Flags().StringVar(&o.ProbeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + cmd.Flags().StringVar(&o.PprofAddr, "pprof-bind-address", "", "The address the pprof endpoint binds to. Expected format is ':'. Leave empty to disable pprof endpoint.") + cmd.Flags().BoolVar(&o.EnableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") + cmd.Flags().BoolVar(&o.SecureMetrics, "metrics-secure", true, "If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.") + cmd.Flags().StringVar(&o.WebhookCertPath, "webhook-cert-path", "", "The directory that contains the webhook certificate.") + cmd.Flags().StringVar(&o.WebhookCertName, "webhook-cert-name", "tls.crt", "The name of the webhook certificate file.") + cmd.Flags().StringVar(&o.WebhookCertKey, "webhook-cert-key", "tls.key", "The name of the webhook key file.") + cmd.Flags().StringVar(&o.MetricsCertPath, "metrics-cert-path", "", "The directory that contains the metrics server certificate.") + cmd.Flags().StringVar(&o.MetricsCertName, "metrics-cert-name", "tls.crt", "The name of the metrics server certificate file.") + cmd.Flags().StringVar(&o.MetricsCertKey, "metrics-cert-key", "tls.key", "The name of the metrics server key file.") + cmd.Flags().BoolVar(&o.EnableHTTP2, "enable-http2", false, "If set, HTTP/2 will be enabled for the metrics and webhook servers") +} + +type RawRunOptions struct { + // kubebuilder default flags + MetricsAddr string `json:"metrics-bind-address"` + MetricsCertPath string `json:"metrics-cert-path"` + MetricsCertName string `json:"metrics-cert-name"` + MetricsCertKey string `json:"metrics-cert-key"` + WebhookCertPath string `json:"webhook-cert-path"` + WebhookCertName string `json:"webhook-cert-name"` + WebhookCertKey string `json:"webhook-cert-key"` + EnableLeaderElection bool `json:"leader-elect"` + ProbeAddr string `json:"health-probe-bind-address"` + PprofAddr string `json:"pprof-bind-address"` + SecureMetrics bool `json:"metrics-secure"` + EnableHTTP2 bool `json:"enable-http2"` +} + +type RunOptions struct { + *options.PersistentOptions + RawRunOptions + + // fields filled in Complete() + ProviderGVKList []schema.GroupVersionKind + TLSOpts []func(*tls.Config) + WebhookTLSOpts []func(*tls.Config) + MetricsServerOptions metricsserver.Options + MetricsCertWatcher *certwatcher.CertWatcher + WebhookCertWatcher *certwatcher.CertWatcher +} + +func (o *RunOptions) PrintRaw(cmd *cobra.Command) { + data, err := yaml.Marshal(o.RawRunOptions) + if err != nil { + cmd.Println(fmt.Errorf("error marshalling raw options: %w", err).Error()) + return + } + cmd.Print(string(data)) +} + +func (o *RunOptions) PrintRawOptions(cmd *cobra.Command) { + cmd.Println("########## RAW OPTIONS START 
##########") + o.PersistentOptions.PrintRaw(cmd) + o.PrintRaw(cmd) + cmd.Println("########## RAW OPTIONS END ##########") +} + +func (o *RunOptions) Complete(ctx context.Context) error { + if err := o.PersistentOptions.Complete(); err != nil { + return err + } + if o.ProviderName == "" { + return fmt.Errorf("provider-name must not be empty") + } + setupLog = o.Log.WithName("setup") + ctrl.SetLogger(o.Log.Logr()) + + // kubebuilder default stuff + + // if the enable-http2 flag is false (the default), http/2 should be disabled + // due to its vulnerabilities. More specifically, disabling http/2 will + // prevent from being vulnerable to the HTTP/2 Stream Cancellation and + // Rapid Reset CVEs. For more information see: + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 + disableHTTP2 := func(c *tls.Config) { + setupLog.Info("Disabling http/2") + c.NextProtos = []string{"http/1.1"} + } + + if !o.EnableHTTP2 { + o.TLSOpts = append(o.TLSOpts, disableHTTP2) + } + + // Initial webhook TLS options + o.WebhookTLSOpts = o.TLSOpts + + if len(o.WebhookCertPath) > 0 { + setupLog.Info("Initializing webhook certificate watcher using provided certificates", "webhook-cert-path", o.WebhookCertPath, "webhook-cert-name", o.WebhookCertName, "webhook-cert-key", o.WebhookCertKey) + + var err error + o.WebhookCertWatcher, err = certwatcher.New( + filepath.Join(o.WebhookCertPath, o.WebhookCertName), + filepath.Join(o.WebhookCertPath, o.WebhookCertKey), + ) + if err != nil { + return fmt.Errorf("failed to initialize webhook certificate watcher: %w", err) + } + + o.WebhookTLSOpts = append(o.WebhookTLSOpts, func(config *tls.Config) { + config.GetCertificate = o.WebhookCertWatcher.GetCertificate + }) + } + + // More info: + // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.20.2/pkg/metrics/server + // - https://book.kubebuilder.io/reference/metrics.html + o.MetricsServerOptions = metricsserver.Options{ + BindAddress: o.MetricsAddr, + SecureServing: o.SecureMetrics, + TLSOpts: o.TLSOpts, + } + + if o.SecureMetrics { + // FilterProvider is used to protect the metrics endpoint with authn/authz. + // These configurations ensure that only authorized users and service accounts + // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info: + // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.20.2/pkg/metrics/filters#WithAuthenticationAndAuthorization + o.MetricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization + } + + // If the certificate is not specified, controller-runtime will automatically + // generate self-signed certificates for the metrics server. While convenient for development and testing, + // this setup is not recommended for production. + // + // TODO(user): If you enable certManager, uncomment the following lines: + // - [METRICS-WITH-CERTS] at config/default/kustomization.yaml to generate and use certificates + // managed by cert-manager for the metrics server. + // - [PROMETHEUS-WITH-CERTS] at config/prometheus/kustomization.yaml for TLS certification. 
+ if len(o.MetricsCertPath) > 0 { + setupLog.Info("Initializing metrics certificate watcher using provided certificates", "metrics-cert-path", o.MetricsCertPath, "metrics-cert-name", o.MetricsCertName, "metrics-cert-key", o.MetricsCertKey) + + var err error + o.MetricsCertWatcher, err = certwatcher.New( + filepath.Join(o.MetricsCertPath, o.MetricsCertName), + filepath.Join(o.MetricsCertPath, o.MetricsCertKey), + ) + if err != nil { + return fmt.Errorf("failed to initialize metrics certificate watcher: %w", err) + } + + o.MetricsServerOptions.TLSOpts = append(o.MetricsServerOptions.TLSOpts, func(config *tls.Config) { + config.GetCertificate = o.MetricsCertWatcher.GetCertificate + }) + } + + return nil +} + +func (o *RunOptions) PrintCompleted(cmd *cobra.Command) {} + +func (o *RunOptions) PrintCompletedOptions(cmd *cobra.Command) { + cmd.Println("########## COMPLETED OPTIONS START ##########") + o.PersistentOptions.PrintCompleted(cmd) + o.PrintCompleted(cmd) + cmd.Println("########## COMPLETED OPTIONS END ##########") +} + +func (o *RunOptions) Run(ctx context.Context) error { + if err := o.PlatformCluster.InitializeClient(install.InstallOperatorAPIsPlatform(runtime.NewScheme())); err != nil { + return err + } + + setupLog = o.Log.WithName("setup") + setupLog.Info("Environment", "value", o.Environment) + setupLog.Info("Provider name", "value", o.ProviderName) + + // get access to the onboarding cluster + setupLog.Info("Getting access to the onboarding cluster") + onboardingScheme := runtime.NewScheme() + install.InstallOperatorAPIsOnboarding(onboardingScheme) + + providerSystemNamespace := os.Getenv(apiconst.EnvVariablePodNamespace) + if providerSystemNamespace == "" { + return fmt.Errorf("environment variable %s is not set", apiconst.EnvVariablePodNamespace) + } + + clusterAccessManager := clusteraccess.NewClusterAccessManager(o.PlatformCluster.Client(), managedcontrolplane.ControllerName, providerSystemNamespace) + clusterAccessManager.WithLogger(&setupLog). + WithInterval(10 * time.Second). + WithTimeout(30 * time.Minute) + + onboardingCluster, err := clusterAccessManager.CreateAndWaitForCluster(ctx, clustersv1alpha1.PURPOSE_ONBOARDING, clustersv1alpha1.PURPOSE_ONBOARDING, + onboardingScheme, []clustersv1alpha1.PermissionsRequest{ + { + Rules: []rbacv1.PolicyRule{ + // It is hard to limit the permissions here, because the mcp controller needs to be able to fetch and delete all ServiceProvider-related resources. + // These could be discovered through the ServiceProvider resources, but then we would need a mechanism to restart the MCP controller every time a new ServiceProvider is created. + // For now, let's just go with full permissions to keep it simple. + { + APIGroups: []string{"*"}, + Resources: []string{"*"}, + Verbs: []string{"*"}, + }, + }, + }, + }) + + if err != nil { + return fmt.Errorf("error creating/updating onboarding cluster: %w", err) + } + + webhookServer := webhook.NewServer(webhook.Options{ + TLSOpts: o.WebhookTLSOpts, + }) + + mgr, err := ctrl.NewManager(onboardingCluster.RESTConfig(), ctrl.Options{ + Scheme: onboardingCluster.Scheme(), + Metrics: o.MetricsServerOptions, + WebhookServer: webhookServer, + HealthProbeBindAddress: o.ProbeAddr, + PprofBindAddress: o.PprofAddr, + LeaderElection: o.EnableLeaderElection, + LeaderElectionID: "github.com/openmcp-project/openmcp-operator--mcp-controller", + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. 
This requires the binary to immediately end when the + // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader don't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so would be fine to enable this option. However, + // if you are doing or is intended to do any operation such as perform cleanups + // after the manager stops then its usage might be unsafe. + LeaderElectionReleaseOnCancel: true, + }) + if err != nil { + return fmt.Errorf("unable to create manager: %w", err) + } + + // setup MCP controller + mcpRec, err := managedcontrolplane.NewManagedControlPlaneReconciler(o.PlatformCluster, onboardingCluster, mgr.GetEventRecorderFor(managedcontrolplane.ControllerName), o.Config.ManagedControlPlane) + if err != nil { + return fmt.Errorf("unable to create managedcontrolplane controller: %w", err) + } + if err := mcpRec.SetupWithManager(mgr); err != nil { + return fmt.Errorf("unable to setup managedcontrolplane controller: %w", err) + } + + if o.MetricsCertWatcher != nil { + setupLog.Info("Adding metrics certificate watcher to manager") + if err := mgr.Add(o.MetricsCertWatcher); err != nil { + return fmt.Errorf("unable to add metrics certificate watcher to manager: %w", err) + } + } + + if o.WebhookCertWatcher != nil { + setupLog.Info("Adding webhook certificate watcher to manager") + if err := mgr.Add(o.WebhookCertWatcher); err != nil { + return fmt.Errorf("unable to add webhook certificate watcher to manager: %w", err) + } + } + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + return fmt.Errorf("unable to set up health check: %w", err) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + return fmt.Errorf("unable to set up ready check: %w", err) + } + + setupLog.Info("Starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + return fmt.Errorf("problem running manager: %w", err) + } + + return nil +} diff --git a/cmd/openmcp-operator/app/options/options.go b/cmd/openmcp-operator/app/options/options.go new file mode 100644 index 0000000..c1637c6 --- /dev/null +++ b/cmd/openmcp-operator/app/options/options.go @@ -0,0 +1,122 @@ +package options + +import ( + "fmt" + "os" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/yaml" + + "github.com/spf13/cobra" + + "github.com/openmcp-project/controller-utils/pkg/clusters" + "github.com/openmcp-project/controller-utils/pkg/logging" + + "github.com/openmcp-project/openmcp-operator/internal/config" +) + +func NewPersistentOptions() *PersistentOptions { + return &PersistentOptions{ + RawPersistentOptions: &RawPersistentOptions{}, + PlatformCluster: clusters.New("platform"), + } +} + +type RawPersistentOptions struct { + Environment string `json:"environment"` + ProviderName string `json:"provider-name"` + DryRun bool `json:"dry-run"` + ConfigPaths []string `json:"configPaths"` + PlatformClusterKubeconfigPath string `json:"kubeconfig"` // dummy for printing, actual path is in Clusters +} + +type PersistentOptions struct { + *RawPersistentOptions + PlatformCluster *clusters.Cluster + + // fields filled in Complete() + Log logging.Logger + Config *config.Config +} + +func (o *PersistentOptions) AddPersistentFlags(cmd *cobra.Command) { + // logging + logging.InitFlags(cmd.PersistentFlags()) + // clusters + o.PlatformCluster.RegisterSingleConfigPathFlag(cmd.PersistentFlags()) + // environment + 
cmd.PersistentFlags().StringVar(&o.Environment, "environment", "", "Environment name. Required. This is used to distinguish between different environments that are watching the same Onboarding cluster. Must be globally unique.") + cmd.PersistentFlags().StringVar(&o.ProviderName, "provider-name", "", "Provider name. Optional for the top-level run and init commands, where it can be used to override the default name for the generated MCP PlatformService. Required for the MCP controller subcommand, where it must match the provider name of the PlatformService in the Platform cluster.") + // config + cmd.PersistentFlags().StringSliceVar(&o.ConfigPaths, "config", nil, "Paths to the config files (separate with comma or specify flag multiple times). Each path can be a file or directory. In the latter case, all files within with '.yaml', '.yml', and '.json' extensions are evaluated. The config is merged together from the different sources, with later configs overriding earlier ones.") + // misc + cmd.PersistentFlags().BoolVar(&o.DryRun, "dry-run", false, "If set, the command aborts after evaluation of the given flags.") +} + +func (o *PersistentOptions) Complete() error { + if o.Environment == "" { + return fmt.Errorf("environment must not be empty") + } + config.SetEnvironment(o.Environment) + + // build logger + log, err := logging.GetLogger() + if err != nil { + return err + } + o.Log = log + ctrl.SetLogger(o.Log.Logr()) + + // construct cluster clients + if err := o.PlatformCluster.InitializeRESTConfig(); err != nil { + return err + } + + // load config + if len(o.ConfigPaths) > 0 { + cfg, err := config.LoadFromFiles(o.ConfigPaths...) + if err != nil { + return fmt.Errorf("error loading config from files: %w", err) + } + if err := cfg.Default(); err != nil { + _ = cfg.Dump(os.Stderr) + return fmt.Errorf("error defaulting config: %w", err) + } + if err := cfg.Validate(); err != nil { + _ = cfg.Dump(os.Stderr) + return fmt.Errorf("config is invalid: %w", err) + } + if err := cfg.Complete(); err != nil { + _ = cfg.Dump(os.Stderr) + return fmt.Errorf("error completing config: %w", err) + } + o.Config = cfg + } + + return nil +} + +func (o *PersistentOptions) PrintRaw(cmd *cobra.Command) { + // fill dummy paths + o.PlatformClusterKubeconfigPath = o.PlatformCluster.ConfigPath() + + data, err := yaml.Marshal(o.RawPersistentOptions) + if err != nil { + cmd.Println(fmt.Errorf("error marshalling raw shared options: %w", err).Error()) + return + } + cmd.Print(string(data)) +} + +func (o *PersistentOptions) PrintCompleted(cmd *cobra.Command) { + raw := map[string]any{ + "platformCluster": o.PlatformCluster.APIServerEndpoint(), + "config": o.Config, + } + data, err := yaml.Marshal(raw) + if err != nil { + cmd.Println(fmt.Errorf("error marshalling completed shared options: %w", err).Error()) + return + } + cmd.Print(string(data)) +} diff --git a/cmd/openmcp-operator/app/run.go b/cmd/openmcp-operator/app/run.go index 94740e8..1f46210 100644 --- a/cmd/openmcp-operator/app/run.go +++ b/cmd/openmcp-operator/app/run.go @@ -25,6 +25,7 @@ import ( "github.com/openmcp-project/openmcp-operator/api/install" "github.com/openmcp-project/openmcp-operator/api/provider/v1alpha1" + "github.com/openmcp-project/openmcp-operator/cmd/openmcp-operator/app/options" "github.com/openmcp-project/openmcp-operator/internal/config" "github.com/openmcp-project/openmcp-operator/internal/controllers/accessrequest" "github.com/openmcp-project/openmcp-operator/internal/controllers/provider" @@ -38,9 +39,9 @@ var allControllers = []string{ 
strings.ToLower(accessrequest.ControllerName), } -func NewRunCommand(so *SharedOptions) *cobra.Command { +func NewRunCommand(po *options.PersistentOptions) *cobra.Command { opts := &RunOptions{ - SharedOptions: so, + PersistentOptions: po, } cmd := &cobra.Command{ Use: "run", @@ -103,7 +104,7 @@ type RawRunOptions struct { } type RunOptions struct { - *SharedOptions + *options.PersistentOptions RawRunOptions // fields filled in Complete() @@ -126,13 +127,13 @@ func (o *RunOptions) PrintRaw(cmd *cobra.Command) { func (o *RunOptions) PrintRawOptions(cmd *cobra.Command) { cmd.Println("########## RAW OPTIONS START ##########") - o.SharedOptions.PrintRaw(cmd) + o.PersistentOptions.PrintRaw(cmd) o.PrintRaw(cmd) cmd.Println("########## RAW OPTIONS END ##########") } func (o *RunOptions) Complete(ctx context.Context) error { - if err := o.SharedOptions.Complete(); err != nil { + if err := o.PersistentOptions.Complete(); err != nil { return err } setupLog = o.Log.WithName("setup") @@ -243,13 +244,13 @@ func (o *RunOptions) PrintCompleted(cmd *cobra.Command) { func (o *RunOptions) PrintCompletedOptions(cmd *cobra.Command) { cmd.Println("########## COMPLETED OPTIONS START ##########") - o.SharedOptions.PrintCompleted(cmd) + o.PersistentOptions.PrintCompleted(cmd) o.PrintCompleted(cmd) cmd.Println("########## COMPLETED OPTIONS END ##########") } func (o *RunOptions) Run(ctx context.Context) error { - if err := o.PlatformCluster.InitializeClient(install.InstallOperatorAPIs(runtime.NewScheme())); err != nil { + if err := o.PlatformCluster.InitializeClient(install.InstallOperatorAPIsPlatform(runtime.NewScheme())); err != nil { return err } @@ -261,7 +262,7 @@ func (o *RunOptions) Run(ctx context.Context) error { }) mgr, err := ctrl.NewManager(o.PlatformCluster.RESTConfig(), ctrl.Options{ - Scheme: install.InstallOperatorAPIs(runtime.NewScheme()), + Scheme: install.InstallOperatorAPIsPlatform(runtime.NewScheme()), Metrics: o.MetricsServerOptions, WebhookServer: webhookServer, HealthProbeBindAddress: o.ProbeAddr, diff --git a/go.mod b/go.mod index 60ab79e..aee05ae 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,10 @@ go 1.25.0 //godebug default=go1.23 -replace github.com/openmcp-project/openmcp-operator/api => ./api +replace ( + github.com/openmcp-project/openmcp-operator/api => ./api + github.com/openmcp-project/openmcp-operator/lib => ./lib +) require ( dario.cat/mergo v1.0.2 @@ -12,6 +15,7 @@ require ( github.com/onsi/gomega v1.38.2 github.com/openmcp-project/controller-utils v0.19.0 github.com/openmcp-project/openmcp-operator/api v0.12.0 + github.com/openmcp-project/openmcp-operator/lib v0.12.0 github.com/spf13/cobra v1.9.1 k8s.io/api v0.34.0 k8s.io/apimachinery v0.34.0 diff --git a/internal/config/config.go b/internal/config/config.go index cbc114d..8b96eb0 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -29,6 +29,9 @@ type Config struct { // AccessRequest is the configuration for the access request controller. AccessRequest *AccessRequestConfig `json:"accessRequest,omitempty"` + + // ManagedControlPlane is the configuration for the MCP controller. + ManagedControlPlane *ManagedControlPlaneConfig `json:"managedControlPlane,omitempty"` } // Dump is used for logging and debugging purposes. 
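Taken together with the `--config` flag registered above, the new `managedControlPlane` section goes through the same loading pipeline as the existing ones: the given files are merged, then the result is defaulted, validated, and completed, with the config dumped to stderr when one of those steps fails. A minimal standalone sketch of that flow, assuming it is driven outside the command wiring (the helper name `loadOperatorConfig`, the `config.yaml` path, and the `main` wrapper are illustrative and not part of this change):

```go
package main

import (
	"fmt"
	"os"

	"github.com/openmcp-project/openmcp-operator/internal/config"
)

// loadOperatorConfig merges the given config files and runs the
// defaulting, validation, and completion steps in order.
func loadOperatorConfig(paths ...string) (*config.Config, error) {
	cfg, err := config.LoadFromFiles(paths...)
	if err != nil {
		return nil, fmt.Errorf("error loading config from files: %w", err)
	}
	if err := cfg.Default(); err != nil {
		_ = cfg.Dump(os.Stderr)
		return nil, fmt.Errorf("error defaulting config: %w", err)
	}
	if err := cfg.Validate(); err != nil {
		_ = cfg.Dump(os.Stderr)
		return nil, fmt.Errorf("config is invalid: %w", err)
	}
	if err := cfg.Complete(); err != nil {
		_ = cfg.Dump(os.Stderr)
		return nil, fmt.Errorf("error completing config: %w", err)
	}
	return cfg, nil
}

func main() {
	cfg, err := loadOperatorConfig("config.yaml")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// cfg.ManagedControlPlane now carries the MCP controller settings (nil if the
	// section is absent); this is the value handed to NewManagedControlPlaneReconciler.
	_ = cfg
}
```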
diff --git a/internal/config/config_accessrequest.go b/internal/config/config_accessrequest.go index 6f4337f..26c1979 100644 --- a/internal/config/config_accessrequest.go +++ b/internal/config/config_accessrequest.go @@ -6,6 +6,9 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" ) +var _ Validatable = &AccessRequestConfig{} +var _ Completable = &AccessRequestConfig{} + type AccessRequestConfig struct { // If set, only AccessRequests that match the selector will be reconciled. Selector *Selector `json:"selector,omitempty"` diff --git a/internal/config/config_managedcontrolplane.go b/internal/config/config_managedcontrolplane.go new file mode 100644 index 0000000..9819ee6 --- /dev/null +++ b/internal/config/config_managedcontrolplane.go @@ -0,0 +1,63 @@ +package config + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/validation/field" + + commonapi "github.com/openmcp-project/openmcp-operator/api/common" + corev2alpha1 "github.com/openmcp-project/openmcp-operator/api/core/v2alpha1" +) + +var _ Defaultable = &ManagedControlPlaneConfig{} +var _ Validatable = &ManagedControlPlaneConfig{} + +type ManagedControlPlaneConfig struct { + // MCPClusterPurpose is the purpose that is used for ClusterRequests created for ManagedControlPlane resources. + MCPClusterPurpose string `json:"mcpClusterPurpose"` + + // DefaultOIDCProvider is the standard OIDC provider that is enabled for all ManagedControlPlane resources, unless explicitly disabled. + // If nil, no standard OIDC provider will be used. + DefaultOIDCProvider *commonapi.OIDCProviderConfig `json:"defaultOIDCProvider,omitempty"` + + // ReconcileMCPEveryXDays specifies after how many days an MCP should be reconciled. + // This is useful if the AccessRequests created by the MCP use an expiring authentication method and the MCP needs to refresh the access regularly. + // A value of 0 disables the periodic reconciliation. 
+ // +optional + ReconcileMCPEveryXDays int `json:"reconcileMCPEveryXDays,omitempty"` +} + +func (c *ManagedControlPlaneConfig) Default(_ *field.Path) error { + if c.DefaultOIDCProvider != nil { + c.DefaultOIDCProvider.Default() + if c.DefaultOIDCProvider.Name == "" { + c.DefaultOIDCProvider.Name = corev2alpha1.DefaultOIDCProviderName + } + } + if c.MCPClusterPurpose == "" { + c.MCPClusterPurpose = corev2alpha1.DefaultMCPClusterPurpose + } + return nil +} + +func (c *ManagedControlPlaneConfig) Validate(fldPath *field.Path) error { + errs := field.ErrorList{} + + if c.MCPClusterPurpose == "" { + errs = append(errs, field.Required(fldPath.Child("mcpClusterPurpose"), "MCP cluster purpose must be set")) + } + if c.ReconcileMCPEveryXDays < 0 { + errs = append(errs, field.Invalid(fldPath.Child("reconcileMCPEveryXDays"), c.ReconcileMCPEveryXDays, "reconcile interval must be 0 or greater")) + } + if c.DefaultOIDCProvider != nil { + oidcFldPath := fldPath.Child("defaultOIDCProvider") + if len(c.DefaultOIDCProvider.RoleBindings) > 0 { + errs = append(errs, field.Forbidden(oidcFldPath.Child("roleBindings"), "role bindings are specified in the MCP spec and may not be set in the config")) + } + if c.DefaultOIDCProvider.Name != "" && c.DefaultOIDCProvider.Name != corev2alpha1.DefaultOIDCProviderName { + errs = append(errs, field.Invalid(oidcFldPath.Child("name"), c.DefaultOIDCProvider.Name, fmt.Sprintf("standard OIDC provider name must be '%s' or left empty (in which case it will be defaulted)", corev2alpha1.DefaultOIDCProviderName))) + } + } + + return errs.ToAggregate() +} diff --git a/internal/config/config_scheduler.go b/internal/config/config_scheduler.go index 6bba750..a699ceb 100644 --- a/internal/config/config_scheduler.go +++ b/internal/config/config_scheduler.go @@ -11,6 +11,10 @@ import ( clustersv1alpha1 "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1" ) +var _ Defaultable = &SchedulerConfig{} +var _ Validatable = &SchedulerConfig{} +var _ Completable = &SchedulerConfig{} + type SchedulerConfig struct { // Scope determines whether the scheduler considers all clusters or only the ones in the same namespace as the ClusterRequest. // Defaults to "Namespaced". 
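The `var _ Defaultable`/`Validatable`/`Completable` assertions added for `AccessRequestConfig`, `ManagedControlPlaneConfig`, and `SchedulerConfig` make a missing or mistyped section method a compile error rather than a step that is silently skipped when the top-level `Config` is defaulted, validated, and completed. The interface definitions themselves are not part of this diff; the sketch below only mirrors the method shapes used above, and the `Completable` signature in particular is an assumption:

```go
// Package config sketch: the per-section contracts implied by the assertions.
package config

import "k8s.io/apimachinery/pkg/util/validation/field"

// Defaultable sections fill in defaults for unset fields; fldPath identifies
// the section within the overall config for error reporting.
type Defaultable interface {
	Default(fldPath *field.Path) error
}

// Validatable sections report invalid fields, typically as an aggregated
// field.ErrorList converted to an error.
type Validatable interface {
	Validate(fldPath *field.Path) error
}

// Completable sections derive computed values after defaulting and validation
// (assumed shape; the real signature is not shown in this change).
type Completable interface {
	Complete(fldPath *field.Path) error
}
```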
diff --git a/internal/controllers/accessrequest/controller_test.go b/internal/controllers/accessrequest/controller_test.go index 5c5ff11..3c6dab0 100644 --- a/internal/controllers/accessrequest/controller_test.go +++ b/internal/controllers/accessrequest/controller_test.go @@ -18,7 +18,7 @@ import ( "github.com/openmcp-project/openmcp-operator/internal/controllers/accessrequest" ) -var scheme = install.InstallOperatorAPIs(runtime.NewScheme()) +var scheme = install.InstallOperatorAPIsPlatform(runtime.NewScheme()) func arReconciler(c client.Client) reconcile.Reconciler { return accessrequest.NewAccessRequestReconciler(clusters.NewTestClusterFromClient("platform", c), nil) diff --git a/internal/controllers/managedcontrolplane/access.go b/internal/controllers/managedcontrolplane/access.go new file mode 100644 index 0000000..5db8f9c --- /dev/null +++ b/internal/controllers/managedcontrolplane/access.go @@ -0,0 +1,304 @@ +package managedcontrolplane + +import ( + "context" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/openmcp-project/controller-utils/pkg/collections" + ctrlutils "github.com/openmcp-project/controller-utils/pkg/controller" + errutils "github.com/openmcp-project/controller-utils/pkg/errors" + "github.com/openmcp-project/controller-utils/pkg/logging" + + clustersv1alpha1 "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1" + cconst "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1/constants" + commonapi "github.com/openmcp-project/openmcp-operator/api/common" + apiconst "github.com/openmcp-project/openmcp-operator/api/constants" + corev2alpha1 "github.com/openmcp-project/openmcp-operator/api/core/v2alpha1" +) + +// manageAccessRequests aligns the existing AccessRequests for the MCP with the currently configured OIDC providers. +// It uses the given createCon function to create conditions for AccessRequests and returns a set of conditions that should be removed from the MCP status. +// The bool return value specifies whether everything related to MCP access is in the desired state or not. If 'false', it is recommended to requeue the MCP. 
+func (r *ManagedControlPlaneReconciler) manageAccessRequests(ctx context.Context, mcp *corev2alpha1.ManagedControlPlaneV2, platformNamespace string, cr *clustersv1alpha1.ClusterRequest, createCon func(conType string, status metav1.ConditionStatus, reason, message string)) (bool, sets.Set[string], errutils.ReasonableError) { + updatedAccessRequests, rerr := r.createOrUpdateDesiredAccessRequests(ctx, mcp, platformNamespace, cr, createCon) + if rerr != nil { + return false, nil, rerr + } + + accessRequestsInDeletion, rerr := r.deleteUndesiredAccessRequests(ctx, mcp, platformNamespace, updatedAccessRequests, createCon) + if rerr != nil { + return false, nil, rerr + } + + allAccessReady, rerr := r.syncAccessSecrets(ctx, mcp, updatedAccessRequests, createCon) + if rerr != nil { + return false, nil, rerr + } + + accessSecretsInDeletion, rerr := r.deleteUndesiredAccessSecrets(ctx, mcp, updatedAccessRequests, createCon) + if rerr != nil { + return false, nil, rerr + } + + // remove conditions for AccessRequests that are neither required nor in deletion (= have been deleted already) + // first, build a set of OIDC provider names that have a condition in the MCP status + removeConditions := collections.AggregateSlice(mcp.Status.Conditions, func(con metav1.Condition, cur sets.Set[string]) sets.Set[string] { + if providerName, found := strings.CutPrefix(con.Type, corev2alpha1.ConditionPrefixOIDCAccessReady); found { + cur.Insert(providerName) + } + return cur + }, sets.New[string]()) + // then, remove all conditions from it which belong to updated AccessRequests + removeConditions = removeConditions.Difference(sets.KeySet(updatedAccessRequests)) + // and all conditions that are in deletion + removeConditions = removeConditions.Difference(accessRequestsInDeletion) + // now, add the condition prefix again + removeConditions = collections.ProjectMapToMap(removeConditions, func(providerName string, _ sets.Empty) (string, sets.Empty) { + return corev2alpha1.ConditionPrefixOIDCAccessReady + providerName, sets.Empty{} + }) + + everythingReady := accessRequestsInDeletion.Len() == 0 && accessSecretsInDeletion.Len() == 0 && allAccessReady + if everythingReady { + createCon(corev2alpha1.ConditionAllAccessReady, metav1.ConditionTrue, "", "All accesses are ready") + } else { + reason := cconst.ReasonWaitingForAccessRequest + if allAccessReady { + reason = cconst.ReasonWaitingForAccessRequestDeletion + } + createCon(corev2alpha1.ConditionAllAccessReady, metav1.ConditionFalse, reason, "Not all accesses are ready") + } + + return everythingReady, removeConditions, nil +} + +// createOrUpdateDesiredAccessRequests creates/updates all AccessRequests that are desired according to the ManagedControlPlane's configured OIDC providers. +// It returns a mapping from OIDC provider names to the corresponding AccessRequests. +// If the ManagedControlPlane has a non-zero DeletionTimestamp, no AccessRequests will be created or updated and the returned map will be empty. 
+func (r *ManagedControlPlaneReconciler) createOrUpdateDesiredAccessRequests(ctx context.Context, mcp *corev2alpha1.ManagedControlPlaneV2, platformNamespace string, cr *clustersv1alpha1.ClusterRequest, createCon func(conType string, status metav1.ConditionStatus, reason, message string)) (map[string]*clustersv1alpha1.AccessRequest, errutils.ReasonableError) { + log := logging.FromContextOrPanic(ctx) + + updatedAccessRequests := map[string]*clustersv1alpha1.AccessRequest{} + var oidcProviders []*commonapi.OIDCProviderConfig + + // create or update AccessRequests for the ManagedControlPlane + if mcp.DeletionTimestamp.IsZero() { + oidcProviders = make([]*commonapi.OIDCProviderConfig, 0, len(mcp.Spec.IAM.OIDCProviders)+1) + if r.Config.DefaultOIDCProvider != nil && len(mcp.Spec.IAM.RoleBindings) > 0 { + // add default OIDC provider, unless it has been disabled + defaultOidc := r.Config.DefaultOIDCProvider.DeepCopy() + defaultOidc.Name = corev2alpha1.DefaultOIDCProviderName + defaultOidc.RoleBindings = mcp.Spec.IAM.RoleBindings + oidcProviders = append(oidcProviders, defaultOidc) + } + oidcProviders = append(oidcProviders, mcp.Spec.IAM.OIDCProviders...) + } + + for _, oidc := range oidcProviders { + log.Debug("Creating/updating AccessRequest for OIDC provider", "oidcProviderName", oidc.Name) + arName := ctrlutils.K8sNameUUIDUnsafe(mcp.Name, oidc.Name) + ar := &clustersv1alpha1.AccessRequest{} + ar.Name = arName + ar.Namespace = platformNamespace + if _, err := controllerutil.CreateOrUpdate(ctx, r.PlatformCluster.Client(), ar, func() error { + ar.Spec.RequestRef = &commonapi.ObjectReference{ + Name: cr.Name, + Namespace: cr.Namespace, + } + ar.Spec.OIDC = &clustersv1alpha1.OIDCConfig{ + OIDCProviderConfig: *oidc, + } + + // set labels + if ar.Labels == nil { + ar.Labels = map[string]string{} + } + ar.Labels[corev2alpha1.MCPNameLabel] = mcp.Name + ar.Labels[corev2alpha1.MCPNamespaceLabel] = mcp.Namespace + ar.Labels[apiconst.ManagedByLabel] = ControllerName + ar.Labels[corev2alpha1.OIDCProviderLabel] = oidc.Name + + return nil + }); err != nil { + rerr := errutils.WithReason(fmt.Errorf("error creating/updating AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err), cconst.ReasonPlatformClusterInteractionProblem) + createCon(corev2alpha1.ConditionPrefixOIDCAccessReady+oidc.Name, metav1.ConditionFalse, rerr.Reason(), rerr.Error()) + createCon(corev2alpha1.ConditionAllAccessReady, metav1.ConditionFalse, cconst.ReasonWaitingForAccessRequest, "Error creating/updating AccessRequest for OIDC provider "+oidc.Name) + return nil, rerr + } + updatedAccessRequests[oidc.Name] = ar + } + + return updatedAccessRequests, nil +} + +// deleteUndesiredAccessRequests deletes all AccessRequests that belong to the given ManagedControlPlane, but are not in the updatedAccessRequests map. +// These are AccessRequests that have been created for a previous version of the ManagedControlPlane and are not needed anymore. +// It returns a set of OIDC provider names for which the AccessRequests are still in deletion. If the set is empty, all undesired AccessRequests have been deleted. 
+func (r *ManagedControlPlaneReconciler) deleteUndesiredAccessRequests(ctx context.Context, mcp *corev2alpha1.ManagedControlPlaneV2, platformNamespace string, updatedAccessRequests map[string]*clustersv1alpha1.AccessRequest, createCon func(conType string, status metav1.ConditionStatus, reason, message string)) (sets.Set[string], errutils.ReasonableError) { + log := logging.FromContextOrPanic(ctx) + + accessRequestsInDeletion := sets.New[string]() + + // delete all AccessRequests that have previously been created for this ManagedControlPlane but are not needed anymore + oidcARs := &clustersv1alpha1.AccessRequestList{} + if err := r.PlatformCluster.Client().List(ctx, oidcARs, client.InNamespace(platformNamespace), client.HasLabels{corev2alpha1.OIDCProviderLabel}, client.MatchingLabels{ + corev2alpha1.MCPNameLabel: mcp.Name, + corev2alpha1.MCPNamespaceLabel: mcp.Namespace, + apiconst.ManagedByLabel: ControllerName, + }); err != nil { + rerr := errutils.WithReason(fmt.Errorf("error listing AccessRequests for ManagedControlPlane '%s/%s': %w", mcp.Namespace, mcp.Name, err), cconst.ReasonPlatformClusterInteractionProblem) + createCon(corev2alpha1.ConditionAllAccessReady, metav1.ConditionFalse, rerr.Reason(), rerr.Error()) + return accessRequestsInDeletion, rerr + } + errs := errutils.NewReasonableErrorList() + for _, ar := range oidcARs.Items { + if _, ok := updatedAccessRequests[ar.Spec.OIDC.Name]; ok { + continue + } + providerName := "" + if ar.Spec.OIDC != nil { + providerName = ar.Spec.OIDC.Name + } + accessRequestsInDeletion.Insert(providerName) + if !ar.DeletionTimestamp.IsZero() { + log.Debug("Waiting for deletion of AccessRequest that is no longer required", "accessRequestName", ar.Name, "accessRequestNamespace", ar.Namespace, "oidcProviderName", providerName) + createCon(corev2alpha1.ConditionPrefixOIDCAccessReady+providerName, metav1.ConditionFalse, cconst.ReasonWaitingForAccessRequestDeletion, "AccessRequest is being deleted") + continue + } + log.Debug("Deleting AccessRequest that is no longer needed", "accessRequestName", ar.Name, "accessRequestNamespace", ar.Namespace, "oidcProviderName", providerName) + if err := r.PlatformCluster.Client().Delete(ctx, &ar); client.IgnoreNotFound(err) != nil { + rerr := errutils.WithReason(fmt.Errorf("error deleting AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err), cconst.ReasonPlatformClusterInteractionProblem) + errs.Append(rerr) + createCon(corev2alpha1.ConditionPrefixOIDCAccessReady+providerName, metav1.ConditionFalse, rerr.Reason(), rerr.Error()) + } + createCon(corev2alpha1.ConditionPrefixOIDCAccessReady+providerName, metav1.ConditionFalse, cconst.ReasonWaitingForAccessRequestDeletion, "AccessRequest is being deleted") + } + if rerr := errs.Aggregate(); rerr != nil { + createCon(corev2alpha1.ConditionAllAccessReady, metav1.ConditionFalse, cconst.ReasonWaitingForAccessRequestDeletion, "Error deleting AccessRequests that are no longer needed") + return accessRequestsInDeletion, rerr + } + + return accessRequestsInDeletion, nil +} + +// deleteUndesiredAccessSecrets deletes all access secrets belonging to the ManagedControlPlane that are not copied from an up-to-date AccessRequest. +// It also deletes all mappings for which no secret exists from the ManagedControlPlane status. +// It returns a set of OIDC provider names for which the AccessRequest secrets are still in deletion. 
+func (r *ManagedControlPlaneReconciler) deleteUndesiredAccessSecrets(ctx context.Context, mcp *corev2alpha1.ManagedControlPlaneV2, updatedAccessRequests map[string]*clustersv1alpha1.AccessRequest, createCon func(conType string, status metav1.ConditionStatus, reason, message string)) (sets.Set[string], errutils.ReasonableError) { + log := logging.FromContextOrPanic(ctx) + + accessSecretsInDeletion := sets.New[string]() + + // delete all AccessRequest secrets that have been copied to the Onboarding cluster and belong to AccessRequests that are no longer needed + mcpSecrets := &corev1.SecretList{} + if err := r.OnboardingCluster.Client().List(ctx, mcpSecrets, client.InNamespace(mcp.Namespace), client.HasLabels{corev2alpha1.OIDCProviderLabel}, client.MatchingLabels{ + corev2alpha1.MCPNameLabel: mcp.Name, + corev2alpha1.MCPNamespaceLabel: mcp.Namespace, + apiconst.ManagedByLabel: ControllerName, + }); err != nil { + rerr := errutils.WithReason(fmt.Errorf("error listing secrets for ManagedControlPlane '%s/%s': %w", mcp.Namespace, mcp.Name, err), cconst.ReasonOnboardingClusterInteractionProblem) + createCon(corev2alpha1.ConditionAllAccessReady, metav1.ConditionFalse, rerr.Reason(), rerr.Error()) + return accessSecretsInDeletion, rerr + } + + errs := errutils.NewReasonableErrorList() + for _, mcpSecret := range mcpSecrets.Items { + providerName := mcpSecret.Labels[corev2alpha1.OIDCProviderLabel] + if providerName == "" { + log.Error(nil, "Secret for ManagedControlPlane has an empty OIDCProvider label, this should not happen", "secretName", mcpSecret.Name, "secretNamespace", mcpSecret.Namespace) + continue + } + if _, ok := updatedAccessRequests[providerName]; ok { + continue + } + accessSecretsInDeletion.Insert(providerName) + if !mcpSecret.DeletionTimestamp.IsZero() { + log.Debug("Waiting for deletion of access secret that is no longer required", "secretName", mcpSecret.Name, "secretNamespace", mcpSecret.Namespace, "oidcProviderName", providerName) + createCon(corev2alpha1.ConditionPrefixOIDCAccessReady+providerName, metav1.ConditionFalse, cconst.ReasonWaitingForAccessRequestDeletion, "AccessRequest secret is being deleted") + continue + } + log.Debug("Deleting access secret that is no longer required", "secretName", mcpSecret.Name, "secretNamespace", mcpSecret.Namespace, "oidcProviderName", providerName) + if err := r.OnboardingCluster.Client().Delete(ctx, &mcpSecret); client.IgnoreNotFound(err) != nil { + rerr := errutils.WithReason(fmt.Errorf("error deleting access secret '%s/%s': %w", mcpSecret.Namespace, mcpSecret.Name, err), cconst.ReasonOnboardingClusterInteractionProblem) + errs.Append(rerr) + createCon(corev2alpha1.ConditionPrefixOIDCAccessReady+providerName, metav1.ConditionFalse, rerr.Reason(), rerr.Error()) + } + createCon(corev2alpha1.ConditionPrefixOIDCAccessReady+providerName, metav1.ConditionFalse, cconst.ReasonWaitingForAccessRequestDeletion, "access secret is being deleted") + } + if rerr := errs.Aggregate(); rerr != nil { + createCon(corev2alpha1.ConditionAllAccessReady, metav1.ConditionFalse, cconst.ReasonWaitingForAccessRequestDeletion, "Error deleting access secrets that are no longer needed") + return accessSecretsInDeletion, rerr + } + + // delete all references to access secrets that are being deleted from the ManagedControlPlane status + for providerName := range accessSecretsInDeletion { + delete(mcp.Status.Access, providerName) + } + + return accessSecretsInDeletion, nil +} + +// syncAccessSecrets checks if all AccessRequests belonging to the ManagedControlPlane are 
ready and copies their secrets to the Onboarding cluster and references them in the ManagedControlPlane status. +// It returns a boolean indicating whether all AccessRequests are ready and their secrets have been copied successfully (true) or not (false). +func (r *ManagedControlPlaneReconciler) syncAccessSecrets(ctx context.Context, mcp *corev2alpha1.ManagedControlPlaneV2, updatedAccessRequests map[string]*clustersv1alpha1.AccessRequest, createCon func(conType string, status metav1.ConditionStatus, reason, message string)) (bool, errutils.ReasonableError) { + log := logging.FromContextOrPanic(ctx) + + allAccessReady := true + if mcp.Status.Access == nil { + mcp.Status.Access = map[string]commonapi.LocalObjectReference{} + } + for providerName, ar := range updatedAccessRequests { + if !ar.Status.IsGranted() || ar.Status.SecretRef == nil { + log.Debug("AccessRequest is not ready yet", "accessRequestName", ar.Name, "accessRequestNamespace", ar.Namespace, "oidcProviderName", providerName) + createCon(corev2alpha1.ConditionPrefixOIDCAccessReady+providerName, metav1.ConditionFalse, cconst.ReasonWaitingForAccessRequest, "AccessRequest is not ready yet") + allAccessReady = false + } else { + // copy access request secret and reference it in the ManagedControlPlane status + arSecret := &corev1.Secret{} + arSecret.Name = ar.Status.SecretRef.Name + arSecret.Namespace = ar.Namespace + if err := r.PlatformCluster.Client().Get(ctx, client.ObjectKeyFromObject(arSecret), arSecret); err != nil { + rerr := errutils.WithReason(fmt.Errorf("error getting AccessRequest secret '%s/%s': %w", arSecret.Namespace, arSecret.Name, err), cconst.ReasonPlatformClusterInteractionProblem) + createCon(corev2alpha1.ConditionPrefixOIDCAccessReady+providerName, metav1.ConditionFalse, rerr.Reason(), rerr.Error()) + createCon(corev2alpha1.ConditionAllAccessReady, metav1.ConditionFalse, cconst.ReasonWaitingForAccessRequest, "Error getting AccessRequest secret for OIDC provider "+providerName) + return false, rerr + } + mcpSecret := &corev1.Secret{} + mcpSecret.Name = ctrlutils.K8sNameUUIDUnsafe(mcp.Name, providerName) + mcpSecret.Namespace = mcp.Namespace + if _, err := controllerutil.CreateOrUpdate(ctx, r.OnboardingCluster.Client(), mcpSecret, func() error { + mcpSecret.Data = arSecret.Data + if mcpSecret.Labels == nil { + mcpSecret.Labels = map[string]string{} + } + mcpSecret.Labels[corev2alpha1.MCPNameLabel] = mcp.Name + mcpSecret.Labels[corev2alpha1.MCPNamespaceLabel] = mcp.Namespace + mcpSecret.Labels[corev2alpha1.OIDCProviderLabel] = providerName + mcpSecret.Labels[apiconst.ManagedByLabel] = ControllerName + + if err := controllerutil.SetControllerReference(mcp, mcpSecret, r.OnboardingCluster.Scheme()); err != nil { + return err + } + return nil + }); err != nil { + rerr := errutils.WithReason(fmt.Errorf("error creating/updating AccessRequest secret '%s/%s': %w", mcpSecret.Namespace, mcpSecret.Name, err), cconst.ReasonOnboardingClusterInteractionProblem) + createCon(corev2alpha1.ConditionPrefixOIDCAccessReady+providerName, metav1.ConditionFalse, rerr.Reason(), rerr.Error()) + createCon(corev2alpha1.ConditionAllAccessReady, metav1.ConditionFalse, cconst.ReasonWaitingForAccessRequest, "Error creating/updating AccessRequest secret for OIDC provider "+providerName) + return false, rerr + } + log.Debug("Access secret for ManagedControlPlane created/updated", "secretName", mcpSecret.Name, "oidcProviderName", providerName) + mcp.Status.Access[providerName] = commonapi.LocalObjectReference{ + Name: mcpSecret.Name, + } + 
createCon(corev2alpha1.ConditionPrefixOIDCAccessReady+providerName, metav1.ConditionTrue, "", "") + } + } + + return allAccessReady, nil +} diff --git a/internal/controllers/managedcontrolplane/clusters.go b/internal/controllers/managedcontrolplane/clusters.go new file mode 100644 index 0000000..590f48d --- /dev/null +++ b/internal/controllers/managedcontrolplane/clusters.go @@ -0,0 +1,91 @@ +package managedcontrolplane + +import ( + "context" + "fmt" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" + + errutils "github.com/openmcp-project/controller-utils/pkg/errors" + "github.com/openmcp-project/controller-utils/pkg/logging" + + clustersv1alpha1 "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1" + cconst "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1/constants" + corev2alpha1 "github.com/openmcp-project/openmcp-operator/api/core/v2alpha1" +) + +func (r *ManagedControlPlaneReconciler) deleteRelatedClusterRequests(ctx context.Context, mcp *corev2alpha1.ManagedControlPlaneV2, platformNamespace string) (sets.Set[string], errutils.ReasonableError) { + log := logging.FromContextOrPanic(ctx) + + // delete depending cluster requests, if any + crNames := sets.New[string]() + + if mcp == nil { + log.Debug("MCP is nil, no need to check for cluster requests") + return crNames, nil + } + + // identify cluster request finalizers + for _, fin := range mcp.Finalizers { + if crName, ok := strings.CutPrefix(fin, corev2alpha1.ClusterRequestFinalizerPrefix); ok { + crNames.Insert(crName) + } + } + + if crNames.Len() == 0 { + log.Debug("No cluster request finalizers found on MCP") + return crNames, nil + } + + // fetch cluster requests, if any exist + resources := map[string]*clustersv1alpha1.ClusterRequest{} + errs := errutils.NewReasonableErrorList() + for crName := range crNames { + cr := &clustersv1alpha1.ClusterRequest{} + cr.SetName(crName) + cr.SetNamespace(platformNamespace) + if err := r.PlatformCluster.Client().Get(ctx, client.ObjectKeyFromObject(cr), cr); err != nil { + if !apierrors.IsNotFound(err) { + errs.Append(errutils.WithReason(fmt.Errorf("error getting ClusterRequest '%s/%s': %w", platformNamespace, crName, err), cconst.ReasonPlatformClusterInteractionProblem)) + } + continue + } + resources[crName] = cr + } + if rerr := errs.Aggregate(); rerr != nil { + return sets.KeySet(resources), rerr + } + + // delete cluster requests + errs = errutils.NewReasonableErrorList() + for crName, cr := range resources { + if crName == mcp.Name && len(resources) > 1 { + // skip the MCP's main ClusterRequest for now + // we want to make sure that all other ClusterRequests are deleted first + // in case the corresponding clusters are hosting resources that depend on the MCP cluster + log.Debug("Skipping deletion of MCP's primary ClusterRequest, because there are other ClusterRequests to delete first", "crName", crName, "namespace", cr.GetNamespace()) + continue + } + if !cr.GetDeletionTimestamp().IsZero() { + log.Debug("ClusterRequest resource already marked for deletion", "crName", crName, "namespace", cr.GetNamespace()) + continue + } + log.Info("Deleting ClusterRequest", "crName", crName, "namespace", cr.GetNamespace()) + if err := r.PlatformCluster.Client().Delete(ctx, cr); err != nil { + if !apierrors.IsNotFound(err) { + errs.Append(errutils.WithReason(fmt.Errorf("error deleting ClusterRequest '%s/%s': %w", platformNamespace, crName, err), 
cconst.ReasonPlatformClusterInteractionProblem)) + } else { + log.Debug("ClusterRequest not found during deletion", "crName", crName, "namespace", cr.GetNamespace()) + delete(resources, crName) // remove from resources if not found + } + } + } + if rerr := errs.Aggregate(); rerr != nil { + return sets.KeySet(resources), rerr + } + + return sets.KeySet(resources), nil +} diff --git a/internal/controllers/managedcontrolplane/controller.go b/internal/controllers/managedcontrolplane/controller.go new file mode 100644 index 0000000..4cc3c1d --- /dev/null +++ b/internal/controllers/managedcontrolplane/controller.go @@ -0,0 +1,416 @@ +package managedcontrolplane + +import ( + "context" + "fmt" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/openmcp-project/controller-utils/pkg/clusteraccess" + "github.com/openmcp-project/controller-utils/pkg/clusters" + "github.com/openmcp-project/controller-utils/pkg/collections" + "github.com/openmcp-project/controller-utils/pkg/collections/filters" + "github.com/openmcp-project/controller-utils/pkg/conditions" + ctrlutils "github.com/openmcp-project/controller-utils/pkg/controller" + "github.com/openmcp-project/controller-utils/pkg/controller/smartrequeue" + errutils "github.com/openmcp-project/controller-utils/pkg/errors" + "github.com/openmcp-project/controller-utils/pkg/logging" + "github.com/openmcp-project/controller-utils/pkg/pairs" + + clustersv1alpha1 "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1" + cconst "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1/constants" + commonapi "github.com/openmcp-project/openmcp-operator/api/common" + apiconst "github.com/openmcp-project/openmcp-operator/api/constants" + corev2alpha1 "github.com/openmcp-project/openmcp-operator/api/core/v2alpha1" + "github.com/openmcp-project/openmcp-operator/internal/config" + libutils "github.com/openmcp-project/openmcp-operator/lib/utils" +) + +const ControllerName = "ManagedControlPlane" + +func NewManagedControlPlaneReconciler(platformCluster *clusters.Cluster, onboardingCluster *clusters.Cluster, eventRecorder record.EventRecorder, cfg *config.ManagedControlPlaneConfig) (*ManagedControlPlaneReconciler, error) { + if cfg == nil { + cfg = &config.ManagedControlPlaneConfig{} + if err := cfg.Default(nil); err != nil { + return nil, err + } + } + return &ManagedControlPlaneReconciler{ + PlatformCluster: platformCluster, + OnboardingCluster: onboardingCluster, + Config: cfg, + eventRecorder: eventRecorder, + sr: smartrequeue.NewStore(5*time.Second, 24*time.Hour, 1.5), + }, nil +} + +type ManagedControlPlaneReconciler struct { + PlatformCluster *clusters.Cluster + OnboardingCluster *clusters.Cluster + Config *config.ManagedControlPlaneConfig + eventRecorder record.EventRecorder + sr *smartrequeue.Store +} + +var _ reconcile.Reconciler = &ManagedControlPlaneReconciler{} + +type ReconcileResult = ctrlutils.ReconcileResult[*corev2alpha1.ManagedControlPlaneV2] + +func (r *ManagedControlPlaneReconciler) Reconcile(ctx context.Context, req 
reconcile.Request) (reconcile.Result, error) { + log := logging.FromContextOrPanic(ctx).WithName(ControllerName) + ctx = logging.NewContext(ctx, log) + log.Info("Starting reconcile") + rr := r.reconcile(ctx, req) + + // status update + return ctrlutils.NewOpenMCPStatusUpdaterBuilder[*corev2alpha1.ManagedControlPlaneV2](). + WithNestedStruct("Status"). + WithPhaseUpdateFunc(func(obj *corev2alpha1.ManagedControlPlaneV2, rr ctrlutils.ReconcileResult[*corev2alpha1.ManagedControlPlaneV2]) (string, error) { + if rr.Object != nil { + if !rr.Object.DeletionTimestamp.IsZero() { + return commonapi.StatusPhaseTerminating, nil + } + if conditions.AllConditionsHaveStatus(metav1.ConditionTrue, rr.Object.Status.Conditions...) { + return commonapi.StatusPhaseReady, nil + } + } + return commonapi.StatusPhaseProgressing, nil + }). + WithConditionUpdater(false). + WithConditionEvents(r.eventRecorder, conditions.EventPerChange). + WithSmartRequeue(r.sr). + Build(). + UpdateStatus(ctx, r.OnboardingCluster.Client(), rr) +} + +func (r *ManagedControlPlaneReconciler) reconcile(ctx context.Context, req reconcile.Request) ReconcileResult { + log := logging.FromContextOrPanic(ctx) + // get ManagedControlPlane resource + mcp := &corev2alpha1.ManagedControlPlaneV2{} + if err := r.OnboardingCluster.Client().Get(ctx, req.NamespacedName, mcp); err != nil { + if apierrors.IsNotFound(err) { + log.Info("Resource not found") + return ReconcileResult{} + } + return ReconcileResult{ReconcileError: errutils.WithReason(fmt.Errorf("unable to get resource '%s' from cluster: %w", req.String(), err), cconst.ReasonOnboardingClusterInteractionProblem)} + } + + // handle operation annotation + if mcp.GetAnnotations() != nil { + op, ok := mcp.GetAnnotations()[apiconst.OperationAnnotation] + if ok { + switch op { + case apiconst.OperationAnnotationValueIgnore: + log.Info("Ignoring resource due to ignore operation annotation") + return ReconcileResult{} + case apiconst.OperationAnnotationValueReconcile: + log.Debug("Removing reconcile operation annotation from resource") + if err := ctrlutils.EnsureAnnotation(ctx, r.OnboardingCluster.Client(), mcp, apiconst.OperationAnnotation, "", true, ctrlutils.DELETE); err != nil { + return ReconcileResult{ReconcileError: errutils.WithReason(fmt.Errorf("error removing operation annotation: %w", err), cconst.ReasonOnboardingClusterInteractionProblem)} + } + } + } + } + + var rr ReconcileResult + if mcp.DeletionTimestamp.IsZero() { + rr = r.handleCreateOrUpdate(ctx, mcp) + } else { + rr = r.handleDelete(ctx, mcp) + } + + return rr +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ManagedControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&corev2alpha1.ManagedControlPlaneV2{}). + WithEventFilter(predicate.And( + predicate.Or( + predicate.GenerationChangedPredicate{}, + ctrlutils.DeletionTimestampChangedPredicate{}, + ctrlutils.GotAnnotationPredicate(apiconst.OperationAnnotation, apiconst.OperationAnnotationValueReconcile), + ctrlutils.LostAnnotationPredicate(apiconst.OperationAnnotation, apiconst.OperationAnnotationValueIgnore), + ), + predicate.Not( + ctrlutils.HasAnnotationPredicate(apiconst.OperationAnnotation, apiconst.OperationAnnotationValueIgnore), + ), + )). 
+ Complete(r) +} + +func (r *ManagedControlPlaneReconciler) handleCreateOrUpdate(ctx context.Context, mcp *corev2alpha1.ManagedControlPlaneV2) ReconcileResult { + log := logging.FromContextOrPanic(ctx) + log.Info("Handling creation or update of ManagedControlPlane resource") + + rr := ReconcileResult{ + Result: ctrl.Result{ + RequeueAfter: time.Duration(r.Config.ReconcileMCPEveryXDays) * 24 * time.Hour, + }, + Object: mcp, + OldObject: mcp.DeepCopy(), + Conditions: []metav1.Condition{}, + } + createCon := ctrlutils.GenerateCreateConditionFunc(&rr) + + // ensure MCP and ClusterRequest finalizers on the MCP + changed := controllerutil.AddFinalizer(mcp, corev2alpha1.MCPFinalizer) + changed = controllerutil.AddFinalizer(mcp, corev2alpha1.ClusterRequestFinalizerPrefix+mcp.Name) || changed + if changed { + log.Debug("Adding finalizers to MCP") + if err := r.OnboardingCluster.Client().Patch(ctx, mcp, client.MergeFrom(rr.OldObject)); err != nil { + rr.ReconcileError = errutils.WithReason(fmt.Errorf("error adding finalizers to MCP: %w", err), cconst.ReasonOnboardingClusterInteractionProblem) + createCon(corev2alpha1.ConditionMeta, metav1.ConditionFalse, rr.ReconcileError.Reason(), rr.ReconcileError.Error()) + return rr + } + } + + // ensure that the MCP namespace on the platform cluster exists + mcpLabels := map[string]string{ + corev2alpha1.MCPNameLabel: mcp.Name, + corev2alpha1.MCPNamespaceLabel: mcp.Namespace, + apiconst.ManagedByLabel: ControllerName, + } + platformNamespace, err := libutils.StableMCPNamespace(mcp.Name, mcp.Namespace) + if err != nil { + rr.ReconcileError = errutils.WithReason(err, cconst.ReasonInternalError) + createCon(corev2alpha1.ConditionMeta, metav1.ConditionFalse, rr.ReconcileError.Reason(), rr.ReconcileError.Error()) + return rr + } + _, err = clusteraccess.EnsureNamespace(ctx, r.PlatformCluster.Client(), platformNamespace, pairs.MapToPairs(mcpLabels)...) 
+ if err != nil { + rr.ReconcileError = errutils.WithReason(fmt.Errorf("error ensuring namespace '%s' on platform cluster: %w", platformNamespace, err), cconst.ReasonPlatformClusterInteractionProblem) + createCon(corev2alpha1.ConditionMeta, metav1.ConditionFalse, rr.ReconcileError.Reason(), rr.ReconcileError.Error()) + return rr + } + createCon(corev2alpha1.ConditionMeta, metav1.ConditionTrue, "", "") + + // ensure that the ClusterRequest exists + // since ClusterRequests are basically immutable, updating them is not required + cr := &clustersv1alpha1.ClusterRequest{} + cr.Name = mcp.Name + cr.Namespace = platformNamespace + if err := r.PlatformCluster.Client().Get(ctx, client.ObjectKeyFromObject(cr), cr); err != nil { + if !apierrors.IsNotFound(err) { + rr.ReconcileError = errutils.WithReason(fmt.Errorf("unable to get ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err), cconst.ReasonPlatformClusterInteractionProblem) + createCon(corev2alpha1.ConditionClusterRequestReady, metav1.ConditionFalse, rr.ReconcileError.Reason(), rr.ReconcileError.Error()) + return rr + } + + log.Info("ClusterRequest not found, creating it", "clusterRequestName", cr.Name, "clusterRequestNamespace", cr.Namespace, "purpose", r.Config.MCPClusterPurpose) + cr.Labels = mcpLabels + cr.Spec = clustersv1alpha1.ClusterRequestSpec{ + Purpose: r.Config.MCPClusterPurpose, + WaitForClusterDeletion: ptr.To(true), + } + if err := r.PlatformCluster.Client().Create(ctx, cr); err != nil { + rr.ReconcileError = errutils.WithReason(fmt.Errorf("error creating ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err), cconst.ReasonPlatformClusterInteractionProblem) + createCon(corev2alpha1.ConditionClusterRequestReady, metav1.ConditionFalse, rr.ReconcileError.Reason(), rr.ReconcileError.Error()) + return rr + } + } else { + log.Debug("ClusterRequest found", "clusterRequestName", cr.Name, "clusterRequestNamespace", cr.Namespace, "purposeInConfig", r.Config.MCPClusterPurpose, "purposeInClusterRequest", cr.Spec.Purpose) + } + + // check if the ClusterRequest is ready + if !cr.Status.IsGranted() { + log.Info("Waiting for ClusterRequest to become ready", "clusterRequestName", cr.Name, "clusterRequestNamespace", cr.Namespace, "phase", cr.Status.Phase) + createCon(corev2alpha1.ConditionClusterRequestReady, metav1.ConditionFalse, cconst.ReasonWaitingForClusterRequest, "ClusterRequest is not ready yet") + rr.SmartRequeue = ctrlutils.SR_BACKOFF + return rr + } + log.Debug("ClusterRequest is ready", "clusterRequestName", cr.Name, "clusterRequestNamespace", cr.Namespace) + createCon(corev2alpha1.ConditionClusterRequestReady, metav1.ConditionTrue, "", "ClusterRequest is ready") + + // manage AccessRequests + allAccessReady, removeConditions, rerr := r.manageAccessRequests(ctx, mcp, platformNamespace, cr, createCon) + rr.ConditionsToRemove = removeConditions.UnsortedList() + if rerr != nil { + rr.ReconcileError = rerr + return rr + } + + if allAccessReady { + rr.SmartRequeue = ctrlutils.SR_NO_REQUEUE + } else { + rr.SmartRequeue = ctrlutils.SR_BACKOFF + } + + return rr +} + +func (r *ManagedControlPlaneReconciler) handleDelete(ctx context.Context, mcp *corev2alpha1.ManagedControlPlaneV2) ReconcileResult { + log := logging.FromContextOrPanic(ctx) + log.Info("Handling deletion of ManagedControlPlane resource") + + rr := ReconcileResult{ + Result: ctrl.Result{ + RequeueAfter: time.Duration(r.Config.ReconcileMCPEveryXDays) * 24 * time.Hour, + }, + Object: mcp, + OldObject: mcp.DeepCopy(), + Conditions: []metav1.Condition{}, + } + createCon := 
ctrlutils.GenerateCreateConditionFunc(&rr) + + // delete services + remainingResources, rerr := r.deleteDependingServices(ctx, mcp) + if rerr != nil { + rr.ReconcileError = rerr + createCon(corev2alpha1.ConditionAllServicesDeleted, metav1.ConditionFalse, rr.ReconcileError.Reason(), rr.ReconcileError.Error()) + return rr + } + if len(remainingResources) > 0 { + serviceResourceCount := collections.AggregateMap(remainingResources, func(service string, resources []*unstructured.Unstructured, agg pairs.Pair[*[]string, int]) pairs.Pair[*[]string, int] { + *agg.Key = append(*agg.Key, service) + agg.Value += len(resources) + return agg + }, pairs.New(ptr.To([]string{}), 0)) + log.Info("Waiting for service resources to be deleted", "services", strings.Join(*serviceResourceCount.Key, ", "), "remainingResourcesCount", serviceResourceCount.Value) + msg := strings.Builder{} + msg.WriteString("Waiting for the following service resources to be deleted: ") + for providerName, resources := range remainingResources { + for _, res := range resources { + msg.WriteString(fmt.Sprintf("[%s]%s.%s, ", providerName, res.GetKind(), res.GetAPIVersion())) + } + } + createCon(corev2alpha1.ConditionAllServicesDeleted, metav1.ConditionFalse, cconst.ReasonWaitingForServiceDeletion, strings.TrimSuffix(msg.String(), ", ")) + rr.SmartRequeue = ctrlutils.SR_BACKOFF + return rr + } + createCon(corev2alpha1.ConditionAllServicesDeleted, metav1.ConditionTrue, "", "All service resources have been deleted") + log.Debug("All service resources deleted") + + // delete AccessRequests and related secrets + platformNamespace, err := libutils.StableMCPNamespace(mcp.Name, mcp.Namespace) + if err != nil { + rr.ReconcileError = errutils.WithReason(err, cconst.ReasonInternalError) + createCon(corev2alpha1.ConditionMeta, metav1.ConditionFalse, rr.ReconcileError.Reason(), rr.ReconcileError.Error()) + return rr + } + accessReady, removeConditions, rerr := r.manageAccessRequests(ctx, mcp, platformNamespace, nil, createCon) + rr.ConditionsToRemove = removeConditions.UnsortedList() + if rerr != nil { + rr.ReconcileError = rerr + createCon(corev2alpha1.ConditionAllAccessReady, metav1.ConditionFalse, rr.ReconcileError.Reason(), rr.ReconcileError.Error()) + return rr + } + if !accessReady { + log.Info("Waiting for AccessRequests to be deleted") + createCon(corev2alpha1.ConditionAllAccessReady, metav1.ConditionFalse, cconst.ReasonWaitingForAccessRequestDeletion, "Waiting for AccessRequests to be deleted") + rr.SmartRequeue = ctrlutils.SR_BACKOFF + return rr + } + createCon(corev2alpha1.ConditionAllAccessReady, metav1.ConditionTrue, "", "All AccessRequests have been deleted") + log.Debug("All AccessRequests deleted") + + // delete cluster requests related to this MCP + remainingCRs, rerr := r.deleteRelatedClusterRequests(ctx, mcp, platformNamespace) + if rerr != nil { + rr.ReconcileError = rerr + createCon(corev2alpha1.ConditionAllClusterRequestsDeleted, metav1.ConditionFalse, rr.ReconcileError.Reason(), rr.ReconcileError.Error()) + return rr + } + finalizersToRemove := sets.New(filters.FilterSlice(mcp.Finalizers, func(args ...any) bool { + fin, ok := args[0].(string) + if !ok { + return false + } + crName, ok := strings.CutPrefix(fin, corev2alpha1.ClusterRequestFinalizerPrefix) + return ok && !remainingCRs.Has(crName) + })...) 
+ if len(finalizersToRemove) > 0 { + log.Debug("Removing ClusterRequest finalizers for deleted ClusterRequests from MCP", "finalizers", strings.Join(sets.List(finalizersToRemove), ", ")) + old := mcp.DeepCopy() + newFinalizers := []string{} + for _, fin := range mcp.Finalizers { + if !finalizersToRemove.Has(fin) { + newFinalizers = append(newFinalizers, fin) + } + } + mcp.Finalizers = newFinalizers + if err := r.OnboardingCluster.Client().Patch(ctx, mcp, client.MergeFrom(old)); err != nil { + rr.ReconcileError = errutils.WithReason(fmt.Errorf("error removing ClusterRequest finalizers from MCP: %w", err), cconst.ReasonOnboardingClusterInteractionProblem) + createCon(corev2alpha1.ConditionAllClusterRequestsDeleted, metav1.ConditionFalse, rr.ReconcileError.Reason(), rr.ReconcileError.Error()) + return rr + } + rr.OldObject = mcp.DeepCopy() + } + if remainingCRs.Len() > 0 { + tmp := strings.Join(sets.List(remainingCRs), ", ") + log.Info("Waiting for ClusterRequests to be deleted", "remainingClusterRequests", tmp) + createCon(corev2alpha1.ConditionAllClusterRequestsDeleted, metav1.ConditionFalse, cconst.ReasonWaitingForClusterRequestDeletion, fmt.Sprintf("Waiting for the following ClusterRequests to be deleted: %s", tmp)) + rr.SmartRequeue = ctrlutils.SR_BACKOFF + return rr + } + createCon(corev2alpha1.ConditionAllClusterRequestsDeleted, metav1.ConditionTrue, "", "All ClusterRequests have been deleted") + log.Debug("All ClusterRequests deleted") + + // delete MCP namespace on the platform cluster + ns := &corev1.Namespace{} + ns.Name = platformNamespace + if err := r.PlatformCluster.Client().Get(ctx, client.ObjectKeyFromObject(ns), ns); err != nil { + if !apierrors.IsNotFound(err) { + rr.ReconcileError = errutils.WithReason(fmt.Errorf("error getting namespace '%s' on platform cluster: %w", platformNamespace, err), cconst.ReasonPlatformClusterInteractionProblem) + createCon(corev2alpha1.ConditionMeta, metav1.ConditionFalse, rr.ReconcileError.Reason(), rr.ReconcileError.Error()) + return rr + } + log.Debug("Namespace already deleted", "namespace", platformNamespace) + ns = nil + } else { + if ns.Labels[corev2alpha1.MCPNameLabel] != mcp.Name || ns.Labels[corev2alpha1.MCPNamespaceLabel] != mcp.Namespace || ns.Labels[apiconst.ManagedByLabel] != ControllerName { + log.Debug("Labels on MCP namespace on platform cluster do not match expected labels, skipping deletion", "platformNamespace", ns.Name) + } else { + if !ns.DeletionTimestamp.IsZero() { + log.Debug("MCP namespace already marked for deletion", "platformNamespace", ns.Name) + } else { + log.Debug("Deleting MCP namespace on platform cluster", "platformNamespace", ns.Name) + if err := r.PlatformCluster.Client().Delete(ctx, ns); err != nil { + rr.ReconcileError = errutils.WithReason(fmt.Errorf("error deleting namespace '%s' on platform cluster: %w", platformNamespace, err), cconst.ReasonPlatformClusterInteractionProblem) + createCon(corev2alpha1.ConditionMeta, metav1.ConditionFalse, rr.ReconcileError.Reason(), rr.ReconcileError.Error()) + return rr + } + } + } + } + if ns != nil { + log.Info("Waiting for MCP namespace to be deleted", "platformNamespace", ns.Name) + createCon(corev2alpha1.ConditionMeta, metav1.ConditionFalse, cconst.ReasonWaitingForNamespaceDeletion, fmt.Sprintf("Waiting for namespace '%s' to be deleted", platformNamespace)) + rr.SmartRequeue = ctrlutils.SR_BACKOFF + return rr + } + + // remove MCP finalizer + if controllerutil.RemoveFinalizer(mcp, corev2alpha1.MCPFinalizer) { + log.Debug("Removing MCP finalizer") + if err := 
r.OnboardingCluster.Client().Patch(ctx, mcp, client.MergeFrom(rr.OldObject)); err != nil { + rr.ReconcileError = errutils.WithReason(fmt.Errorf("error removing MCP finalizer: %w", err), cconst.ReasonOnboardingClusterInteractionProblem) + createCon(corev2alpha1.ConditionMeta, metav1.ConditionFalse, rr.ReconcileError.Reason(), rr.ReconcileError.Error()) + return rr + } + } + createCon(corev2alpha1.ConditionMeta, metav1.ConditionTrue, "", "MCP finalizer removed") + rr.Result.RequeueAfter = 0 + if len(mcp.Finalizers) == 0 { + // if we just removed the last finalizer on the MCP + // (which should usually be the case, unless something external added one) + // the MCP is now gone and updating the status will fail + rr.Object = nil + } + + return rr +} diff --git a/internal/controllers/managedcontrolplane/controller_test.go b/internal/controllers/managedcontrolplane/controller_test.go new file mode 100644 index 0000000..f6ffdb8 --- /dev/null +++ b/internal/controllers/managedcontrolplane/controller_test.go @@ -0,0 +1,602 @@ +package managedcontrolplane_test + +import ( + "os" + "path/filepath" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/gstruct" + . "github.com/openmcp-project/controller-utils/pkg/testing/matchers" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/openmcp-project/controller-utils/pkg/clusters" + ctrlutils "github.com/openmcp-project/controller-utils/pkg/controller" + testutils "github.com/openmcp-project/controller-utils/pkg/testing" + + clustersv1alpha1 "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1" + cconst "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1/constants" + commonapi "github.com/openmcp-project/openmcp-operator/api/common" + apiconst "github.com/openmcp-project/openmcp-operator/api/constants" + corev2alpha1 "github.com/openmcp-project/openmcp-operator/api/core/v2alpha1" + "github.com/openmcp-project/openmcp-operator/api/install" + "github.com/openmcp-project/openmcp-operator/internal/config" + "github.com/openmcp-project/openmcp-operator/internal/controllers/managedcontrolplane" + libutils "github.com/openmcp-project/openmcp-operator/lib/utils" +) + +const ( + platform = "platform" + onboarding = "onboarding" + mcpRec = "mcp" +) + +// defaultTestSetup initializes a new environment for testing the mcp controller. +// Expected folder structure is a 'config.yaml' file next to a 'platform' and 'onboarding' directory, containing the manifests for the respective clusters. 
+func defaultTestSetup(testDirPathSegments ...string) (*managedcontrolplane.ManagedControlPlaneReconciler, *testutils.ComplexEnvironment) {
+	cfg, err := config.LoadFromFiles(filepath.Join(append(testDirPathSegments, "config.yaml")...))
+	Expect(err).ToNot(HaveOccurred())
+	Expect(cfg.Default()).To(Succeed())
+	Expect(cfg.Validate()).To(Succeed())
+	Expect(cfg.Complete()).To(Succeed())
+	platformDirExists := true
+	_, err = os.Stat(filepath.Join(append(testDirPathSegments, platform)...))
+	Expect(err).To(Or(Not(HaveOccurred()), MatchError(os.IsNotExist, "IsNotExist")))
+	if err != nil {
+		platformDirExists = false
+	}
+	onboardingDirExists := true
+	_, err = os.Stat(filepath.Join(append(testDirPathSegments, onboarding)...))
+	Expect(err).To(Or(Not(HaveOccurred()), MatchError(os.IsNotExist, "IsNotExist")))
+	if err != nil {
+		onboardingDirExists = false
+	}
+	envB := testutils.NewComplexEnvironmentBuilder().
+		WithFakeClient(platform, install.InstallOperatorAPIsPlatform(runtime.NewScheme())).
+		WithDynamicObjectsWithStatus(platform, &clustersv1alpha1.ClusterRequest{}, &clustersv1alpha1.AccessRequest{}).
+		WithFakeClient(onboarding, install.InstallOperatorAPIsOnboarding(runtime.NewScheme())).
+		WithReconcilerConstructor(mcpRec, func(clients ...client.Client) reconcile.Reconciler {
+			mcpr, err := managedcontrolplane.NewManagedControlPlaneReconciler(clusters.NewTestClusterFromClient(platform, clients[0]), clusters.NewTestClusterFromClient(onboarding, clients[1]), nil, cfg.ManagedControlPlane)
+			Expect(err).ToNot(HaveOccurred())
+			return mcpr
+		}, platform, onboarding)
+	if platformDirExists {
+		envB.WithInitObjectPath(platform, append(testDirPathSegments, platform)...)
+	}
+	if onboardingDirExists {
+		envB.WithInitObjectPath(onboarding, append(testDirPathSegments, onboarding)...)
+ } + env := envB.Build() + mcpReconciler, ok := env.Reconciler(mcpRec).(*managedcontrolplane.ManagedControlPlaneReconciler) + Expect(ok).To(BeTrue(), "Reconciler is not of type ManagedControlPlaneReconciler") + return mcpReconciler, env +} + +var _ = Describe("ManagedControlPlane Controller", func() { + + It("should correctly handle the creation, update, and deletion flow for MCP resources", func() { + rec, env := defaultTestSetup("testdata", "test-01") + + mcp := &corev2alpha1.ManagedControlPlaneV2{} + mcp.SetName("mcp-01") + mcp.SetNamespace("test") + Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(mcp), mcp)).To(Succeed()) + + By("=== CREATE ===") + + // reconcile the MCP + // expected outcome: + // - mcp has an mcp finalizer + // - mcp has a cluster request finalizer + // - a namespace was created for the MCP on the platform cluster + // - a cluster request was created on the platform cluster + // - the mcp has conditions that reflect that it is waiting for the cluster request + // - the mcp should be requeued with a short requeueAfter duration + By("first MCP reconciliation") + platformNamespace, err := libutils.StableMCPNamespace(mcp.Name, mcp.Namespace) + Expect(err).ToNot(HaveOccurred()) + res := env.ShouldReconcile(mcpRec, testutils.RequestFromObject(mcp)) + Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(mcp), mcp)).To(Succeed()) + Expect(res.RequeueAfter).To(BeNumerically(">", 0)) + Expect(res.RequeueAfter).To(BeNumerically("<", 1*time.Minute)) + Expect(mcp.Finalizers).To(ContainElements( + corev2alpha1.MCPFinalizer, + corev2alpha1.ClusterRequestFinalizerPrefix+mcp.Name, + )) + ns := &corev1.Namespace{} + ns.SetName(platformNamespace) + Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(ns), ns)).To(Succeed()) + Expect(ns.Labels).To(HaveKeyWithValue(corev2alpha1.MCPNameLabel, mcp.Name)) + Expect(ns.Labels).To(HaveKeyWithValue(corev2alpha1.MCPNamespaceLabel, mcp.Namespace)) + Expect(ns.Labels).To(HaveKeyWithValue(apiconst.ManagedByLabel, managedcontrolplane.ControllerName)) + cr := &clustersv1alpha1.ClusterRequest{} + cr.SetName(mcp.Name) + cr.SetNamespace(platformNamespace) + Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed()) + Expect(cr.Spec.Purpose).To(Equal(rec.Config.MCPClusterPurpose)) + Expect(cr.Spec.WaitForClusterDeletion).To(PointTo(BeTrue())) + Expect(mcp.Status.Conditions).To(ContainElements( + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionClusterRequestReady). + WithStatus(metav1.ConditionFalse). 
+ WithReason(cconst.ReasonWaitingForClusterRequest)), + )) + + // fake ClusterRequest ready status + By("fake: ClusterRequest readiness") + cr.Status.Phase = clustersv1alpha1.REQUEST_GRANTED + Expect(env.Client(platform).Status().Update(env.Ctx, cr)).To(Succeed()) + + // reconcile the MCP again + // expected outcome: + // - multiple access requests have been created on the platform cluster, one for each configured OIDC provider + // - the mcp has conditions that reflect that it is waiting for the access requests (one for each OIDC provider and one overall one) + // - the mcp should be requeued with a short requeueAfter duration + By("second MCP reconciliation") + res = env.ShouldReconcile(mcpRec, testutils.RequestFromObject(mcp)) + Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(mcp), mcp)).To(Succeed()) + Expect(res.RequeueAfter).To(BeNumerically(">", 0)) + Expect(res.RequeueAfter).To(BeNumerically("<", 1*time.Minute)) + Expect(mcp.Status.Conditions).To(ContainElements( + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionClusterRequestReady). + WithStatus(metav1.ConditionTrue)), + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionAllAccessReady). + WithStatus(metav1.ConditionFalse). + WithReason(cconst.ReasonWaitingForAccessRequest)), + )) + oidcProviders := []commonapi.OIDCProviderConfig{*rec.Config.DefaultOIDCProvider.DeepCopy()} + oidcProviders[0].RoleBindings = mcp.Spec.IAM.RoleBindings + for _, addProv := range mcp.Spec.IAM.OIDCProviders { + oidcProviders = append(oidcProviders, *addProv.DeepCopy()) + } + Expect(oidcProviders).To(HaveLen(3)) + for _, oidc := range oidcProviders { + By("verifying that the AccessRequest is not ready for oidc provider: " + oidc.Name) + Expect(mcp.Status.Conditions).To(ContainElements( + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionPrefixOIDCAccessReady + oidc.Name). + WithStatus(metav1.ConditionFalse). 
+ WithReason(cconst.ReasonWaitingForAccessRequest)), + )) + ar := &clustersv1alpha1.AccessRequest{} + ar.SetName(ctrlutils.K8sNameUUIDUnsafe(mcp.Name, oidc.Name)) + ar.SetNamespace(platformNamespace) + Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + Expect(ar.Spec.RequestRef.Name).To(Equal(cr.Name)) + Expect(ar.Spec.RequestRef.Namespace).To(Equal(cr.Namespace)) + Expect(ar.Spec.OIDC).ToNot(BeNil()) + Expect(ar.Spec.OIDC.OIDCProviderConfig).To(Equal(oidc)) + } + + // fake AccessRequest ready status + By("fake: AccessRequest readiness") + for _, oidc := range oidcProviders { + By("fake: AccessRequest readiness for oidc provider: " + oidc.Name) + ar := &clustersv1alpha1.AccessRequest{} + ar.SetName(ctrlutils.K8sNameUUIDUnsafe(mcp.Name, oidc.Name)) + ar.SetNamespace(platformNamespace) + Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + ar.Status.Phase = clustersv1alpha1.REQUEST_GRANTED + ar.Status.SecretRef = &commonapi.ObjectReference{ + Name: ar.Name, + Namespace: ar.Namespace, + } + sec := &corev1.Secret{} + sec.SetName(ar.Status.SecretRef.Name) + sec.SetNamespace(ar.Namespace) + sec.Data = map[string][]byte{ + clustersv1alpha1.SecretKeyKubeconfig: []byte(oidc.Name), + } + Expect(env.Client(platform).Status().Update(env.Ctx, ar)).To(Succeed()) + Expect(env.Client(platform).Create(env.Ctx, sec)).To(Succeed()) + } + + // reconcile the MCP again + // expected outcome: + // - the mcp has conditions that reflect that all access requests are ready + // - the mcp has copied the kubeconfig secrets from the access requests into the onboarding cluster and references them in its status + // - the mcp should be requeued with a requeueAfter duration that matches the reconcile interval from the controller config + By("third MCP reconciliation") + res = env.ShouldReconcile(mcpRec, testutils.RequestFromObject(mcp)) + Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(mcp), mcp)).To(Succeed()) + Expect(res.RequeueAfter).To(BeNumerically("~", int64(rec.Config.ReconcileMCPEveryXDays)*24*int64(time.Hour), int64(time.Second))) + Expect(mcp.Status.Conditions).To(ContainElements( + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionClusterRequestReady). + WithStatus(metav1.ConditionTrue)), + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionAllAccessReady). + WithStatus(metav1.ConditionTrue)), + )) + for _, oidc := range oidcProviders { + Expect(mcp.Status.Conditions).To(ContainElements( + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionPrefixOIDCAccessReady + oidc.Name). 
+				WithStatus(metav1.ConditionTrue)),
+			))
+		}
+		Expect(mcp.Status.Access).To(HaveLen(len(oidcProviders)))
+		for providerName, secretRef := range mcp.Status.Access {
+			By("verifying MCP access secret for oidc provider: " + providerName)
+			sec := &corev1.Secret{}
+			sec.SetName(secretRef.Name)
+			sec.SetNamespace(mcp.Namespace)
+			Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(sec), sec)).To(Succeed())
+			Expect(sec.Data).To(HaveKeyWithValue(clustersv1alpha1.SecretKeyKubeconfig, []byte(providerName)))
+		}
+
+		By("=== UPDATE ===")
+
+		// change the rolebindings in the MCP spec and remove one OIDC provider
+		By("updating MCP spec")
+		mcp.Spec.IAM.RoleBindings = mcp.Spec.IAM.RoleBindings[:len(mcp.Spec.IAM.RoleBindings)-1]
+		removedOIDCProviderName := mcp.Spec.IAM.OIDCProviders[len(mcp.Spec.IAM.OIDCProviders)-1].Name
+		toBeRemovedSecretName := mcp.Status.Access[removedOIDCProviderName].Name
+		mcp.Spec.IAM.OIDCProviders = mcp.Spec.IAM.OIDCProviders[:len(mcp.Spec.IAM.OIDCProviders)-1]
+		Expect(env.Client(onboarding).Update(env.Ctx, mcp)).To(Succeed())
+
+		By("fake: adding finalizers to AccessRequests")
+		for _, oidc := range oidcProviders {
+			By("fake: adding finalizer to AccessRequest for oidc provider: " + oidc.Name)
+			ar := &clustersv1alpha1.AccessRequest{}
+			ar.SetName(ctrlutils.K8sNameUUIDUnsafe(mcp.Name, oidc.Name))
+			ar.SetNamespace(platformNamespace)
+			Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed())
+			controllerutil.AddFinalizer(ar, "dummy")
+			Expect(env.Client(platform).Update(env.Ctx, ar)).To(Succeed())
+		}
+
+		// reconcile the MCP
+		// expected outcome:
+		// - the rolebindings in the AccessRequest for the standard OIDC provider have been updated
+		// - the access secret for the removed OIDC provider has been deleted
+		// - the AccessRequest for the removed OIDC provider has a deletion timestamp
+		// - the condition for the removed OIDC provider is false, indicating that it is waiting for the AccessRequest
+		// - the mcp should be requeued with a short requeueAfter duration
+		By("first MCP reconciliation after update")
+		res = env.ShouldReconcile(mcpRec, testutils.RequestFromObject(mcp))
+		Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(mcp), mcp)).To(Succeed())
+		Expect(res.RequeueAfter).To(BeNumerically(">", 0))
+		Expect(res.RequeueAfter).To(BeNumerically("<", 1*time.Minute))
+		Expect(mcp.Status.Conditions).To(ContainElements(
+			MatchCondition(TestCondition().
+				WithType(corev2alpha1.ConditionClusterRequestReady).
+				WithStatus(metav1.ConditionTrue)),
+			MatchCondition(TestCondition().
+				WithType(corev2alpha1.ConditionAllAccessReady).
+				WithStatus(metav1.ConditionFalse).
+				WithReason(cconst.ReasonWaitingForAccessRequestDeletion)),
+		))
+		removedOIDCIdx := -1
+		for i, oidc := range oidcProviders {
+			By("verifying condition for oidc provider: " + oidc.Name)
+			if oidc.Name == removedOIDCProviderName {
+				removedOIDCIdx = i
+				Expect(mcp.Status.Conditions).To(ContainElements(
+					MatchCondition(TestCondition().
+						WithType(corev2alpha1.ConditionPrefixOIDCAccessReady + oidc.Name).
+						WithStatus(metav1.ConditionFalse).
+						WithReason(cconst.ReasonWaitingForAccessRequestDeletion),
+					)))
+				Expect(mcp.Status.Access).ToNot(HaveKey(oidc.Name))
+			} else {
+				Expect(mcp.Status.Conditions).To(ContainElements(
+					MatchCondition(TestCondition().
+						WithType(corev2alpha1.ConditionPrefixOIDCAccessReady + oidc.Name).
+ WithStatus(metav1.ConditionTrue), + ))) + Expect(mcp.Status.Access).To(HaveKey(oidc.Name)) + } + } + Expect(removedOIDCIdx).To(BeNumerically(">", -1)) + oidcProviders = append(oidcProviders[:removedOIDCIdx], oidcProviders[removedOIDCIdx+1:]...) + Expect(mcp.Status.Access).ToNot(HaveKey(removedOIDCProviderName)) + sec := &corev1.Secret{} + sec.SetName(toBeRemovedSecretName) + sec.SetNamespace(mcp.Namespace) + Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(sec), sec)).To(MatchError(apierrors.IsNotFound, "IsNotFound")) + ar := &clustersv1alpha1.AccessRequest{} + ar.SetName(ctrlutils.K8sNameUUIDUnsafe(mcp.Name, removedOIDCProviderName)) + ar.SetNamespace(platformNamespace) + Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + Expect(ar.GetDeletionTimestamp().IsZero()).To(BeFalse()) + + // remove dummy finalizer from AccessRequest belonging to the removed OIDC provider + By("fake: removing dummy finalizer from AccessRequest for removed OIDC provider: " + removedOIDCProviderName) + controllerutil.RemoveFinalizer(ar, "dummy") + Expect(env.Client(platform).Update(env.Ctx, ar)).To(Succeed()) + + // reconcile the MCP again + // expected outcome: + // - the AccessRequest for the removed OIDC provider has been deleted + // - the condition for the removed OIDC provider is gone + // - the mcp should be requeued with a requeueAfter duration that matches the reconcile interval from the controller config + By("second MCP reconciliation after update") + res = env.ShouldReconcile(mcpRec, testutils.RequestFromObject(mcp)) + Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(mcp), mcp)).To(Succeed()) + Expect(res.RequeueAfter).To(BeNumerically("~", int64(rec.Config.ReconcileMCPEveryXDays)*24*int64(time.Hour), int64(time.Second))) + Expect(mcp.Status.Conditions).To(ContainElements( + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionClusterRequestReady). + WithStatus(metav1.ConditionTrue)), + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionAllAccessReady). + WithStatus(metav1.ConditionTrue)), + )) + for _, oidc := range oidcProviders { + By("verifying condition for oidc provider: " + oidc.Name) + Expect(mcp.Status.Conditions).To(ContainElements( + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionPrefixOIDCAccessReady + oidc.Name). + WithStatus(metav1.ConditionTrue)), + )) + } + Expect(mcp.Status.Access).ToNot(HaveKey(removedOIDCProviderName)) + Expect(mcp.Status.Conditions).ToNot(ContainElements( + MatchCondition(TestCondition(). 
+ WithType(corev2alpha1.ConditionPrefixOIDCAccessReady + removedOIDCProviderName), + ), + )) + + By("=== DELETE ===") + + // fake some more ClusterRequests + By("fake: some more ClusterRequests") + cr2 := &clustersv1alpha1.ClusterRequest{} + cr2.SetName("cr2") + cr2.SetNamespace(platformNamespace) + cr2.Finalizers = []string{"dummy"} + Expect(env.Client(platform).Create(env.Ctx, cr2)).To(Succeed()) + cr3 := &clustersv1alpha1.ClusterRequest{} + cr3.SetName("cr3") + cr3.SetNamespace(platformNamespace) + cr3.Finalizers = []string{"dummy"} + Expect(env.Client(platform).Create(env.Ctx, cr3)).To(Succeed()) + mcp.Finalizers = append(mcp.Finalizers, corev2alpha1.ClusterRequestFinalizerPrefix+cr2.Name, corev2alpha1.ClusterRequestFinalizerPrefix+cr3.Name) + Expect(env.Client(onboarding).Update(env.Ctx, mcp)).To(Succeed()) + + // put a finalizer on the MCP cr + cr.Finalizers = append(cr.Finalizers, "dummy") + Expect(env.Client(platform).Update(env.Ctx, cr)).To(Succeed()) + + // delete the MCP + By("deleting the MCP") + Expect(env.Client(onboarding).Delete(env.Ctx, mcp)).To(Succeed()) + + // reconcile the MCP + // expected outcome: + // - all service resources that depend on the MCP have a deletion timestamp + // - the MCP conditions reflect that it is waiting for services to be deleted + // - neither ClusterRequests nor AccessRequests have deletion timestamps + // - the MCP should be requeued with a short requeueAfter duration + By("first MCP reconciliation after delete") + res = env.ShouldReconcile(mcpRec, testutils.RequestFromObject(mcp)) + Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(mcp), mcp)).To(Succeed()) + Expect(res.RequeueAfter).To(BeNumerically(">", 0)) + Expect(res.RequeueAfter).To(BeNumerically("<", 1*time.Minute)) + serviceResources := []client.Object{ + &corev1.ConfigMap{}, + &corev1.ServiceAccount{}, + &corev1.Secret{}, + } + for _, obj := range serviceResources { + obj.SetName(mcp.Name) + obj.SetNamespace(mcp.Namespace) + Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(obj), obj)).To(Succeed()) + Expect(obj.GetDeletionTimestamp().IsZero()).To(BeFalse()) + } + Expect(mcp.Status.Conditions).To(ContainElements( + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionAllServicesDeleted). + WithStatus(metav1.ConditionFalse). 
+ WithReason(cconst.ReasonWaitingForServiceDeletion)), + )) + for _, oidc := range oidcProviders { + By("verifying AccessRequest does not have a deletion timestamp for oidc provider: " + oidc.Name) + ar := &clustersv1alpha1.AccessRequest{} + ar.SetName(ctrlutils.K8sNameUUIDUnsafe(mcp.Name, oidc.Name)) + ar.SetNamespace(platformNamespace) + Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + Expect(ar.DeletionTimestamp.IsZero()).To(BeTrue()) + } + for _, obj := range []client.Object{cr, cr2, cr3} { + By("verifying ClusterRequest does not have a deletion timestamp: " + obj.GetName()) + Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(obj), obj)).To(Succeed()) + Expect(obj.GetDeletionTimestamp().IsZero()).To(BeTrue()) + } + + // remove service finalizers + By("fake: removing service finalizers") + for _, obj := range serviceResources { + By("fake: removing finalizer from service resource: " + obj.GetObjectKind().GroupVersionKind().Kind) + Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(obj), obj)).To(Succeed()) + controllerutil.RemoveFinalizer(obj, "dummy") + Expect(env.Client(onboarding).Update(env.Ctx, obj)).To(Succeed()) + } + newFins := []string{} + for _, fin := range mcp.Finalizers { + if !strings.HasPrefix(fin, corev2alpha1.ServiceDependencyFinalizerPrefix) { + newFins = append(newFins, fin) + } + } + mcp.Finalizers = newFins + Expect(env.Client(onboarding).Update(env.Ctx, mcp)).To(Succeed()) + + // reconcile the MCP again + // expected outcome: + // - all AccessRequests have deletion timestamps + // - all access secrets have been deleted + // - the MCP conditions reflect that it is waiting for AccessRequests to be deleted + // - no ClusterRequests should have deletion timestamps + // - the MCP should be requeued with a short requeueAfter duration + By("second MCP reconciliation after delete") + res = env.ShouldReconcile(mcpRec, testutils.RequestFromObject(mcp)) + Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(mcp), mcp)).To(Succeed()) + Expect(res.RequeueAfter).To(BeNumerically(">", 0)) + Expect(res.RequeueAfter).To(BeNumerically("<", 1*time.Minute)) + Expect(mcp.Status.Conditions).To(ContainElements( + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionClusterRequestReady). + WithStatus(metav1.ConditionTrue)), + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionAllAccessReady). + WithStatus(metav1.ConditionFalse). + WithReason(cconst.ReasonWaitingForAccessRequestDeletion)), + )) + for _, oidc := range oidcProviders { + By("verifying AccessRequest and access secret deletion status for oidc provider: " + oidc.Name) + ar := &clustersv1alpha1.AccessRequest{} + ar.SetName(ctrlutils.K8sNameUUIDUnsafe(mcp.Name, oidc.Name)) + ar.SetNamespace(platformNamespace) + Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + Expect(ar.DeletionTimestamp.IsZero()).To(BeFalse()) + Expect(mcp.Status.Conditions).To(ContainElements( + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionPrefixOIDCAccessReady + oidc.Name). + WithStatus(metav1.ConditionFalse). 
+ WithReason(cconst.ReasonWaitingForAccessRequestDeletion), + ), + )) + } + Expect(mcp.Status.Access).To(BeEmpty()) + secs := &corev1.SecretList{} + Expect(env.Client(onboarding).List(env.Ctx, secs, client.InNamespace(mcp.Namespace))).To(Succeed()) + Expect(secs.Items).To(BeEmpty()) + for _, obj := range []client.Object{cr, cr2, cr3} { + By("verifying that ClusterRequest has not been deleted: " + obj.GetName()) + Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(obj), obj)).To(Succeed()) + Expect(obj.GetDeletionTimestamp().IsZero()).To(BeTrue()) + } + + // remove AccessRequest finalizers + By("fake: removing AccessRequest finalizers") + for _, oidc := range oidcProviders { + By("fake: removing finalizer from AccessRequest for oidc provider: " + oidc.Name) + ar := &clustersv1alpha1.AccessRequest{} + ar.SetName(ctrlutils.K8sNameUUIDUnsafe(mcp.Name, oidc.Name)) + ar.SetNamespace(platformNamespace) + Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + controllerutil.RemoveFinalizer(ar, "dummy") + Expect(env.Client(platform).Update(env.Ctx, ar)).To(Succeed()) + } + + // reconcile the MCP again + // expected outcome: + // - cr2 and cr3 have deletion timestamps, cr has not + // - the AccessRequests are deleted + // - the MCP has a condition stating that it is waiting for the ClusterRequests to be deleted + // - the MCP should be requeued with a short requeueAfter duration + By("third MCP reconciliation after delete") + res = env.ShouldReconcile(mcpRec, testutils.RequestFromObject(mcp)) + Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(mcp), mcp)).To(Succeed()) + Expect(res.RequeueAfter).To(BeNumerically(">", 0)) + Expect(res.RequeueAfter).To(BeNumerically("<", 1*time.Minute)) + By("verifying ClusterRequest deletion status") + for _, obj := range []client.Object{cr, cr2, cr3} { + Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(obj), obj)).To(Succeed()) + } + Expect(cr.GetDeletionTimestamp().IsZero()).To(BeTrue(), "ClusterRequest should not be marked for deletion") + Expect(cr2.GetDeletionTimestamp().IsZero()).To(BeFalse()) + Expect(cr3.GetDeletionTimestamp().IsZero()).To(BeFalse()) + By("verifying AccessRequest deletion") + for _, oidc := range oidcProviders { + By("verifying AccessRequest deletion for oidc provider: " + oidc.Name) + ar := &clustersv1alpha1.AccessRequest{} + ar.SetName(ctrlutils.K8sNameUUIDUnsafe(mcp.Name, oidc.Name)) + ar.SetNamespace(platformNamespace) + Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(MatchError(apierrors.IsNotFound, "IsNotFound")) + } + Expect(mcp.Status.Conditions).To(ContainElements( + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionAllClusterRequestsDeleted). + WithStatus(metav1.ConditionFalse). 
+ WithReason(cconst.ReasonWaitingForClusterRequestDeletion)), + )) + + // remove finalizers from cr2 and cr3 + By("fake: removing finalizers from additional ClusterRequests") + controllerutil.RemoveFinalizer(cr2, "dummy") + Expect(env.Client(platform).Update(env.Ctx, cr2)).To(Succeed()) + controllerutil.RemoveFinalizer(cr3, "dummy") + Expect(env.Client(platform).Update(env.Ctx, cr3)).To(Succeed()) + + // reconcile the MCP again + // expected outcome: + // - cr2 and cr3 have been deleted + // - cr has a deletion timestamp + // - the MCP has a condition stating that it is waiting for the ClusterRequest to be deleted + // - the MCP should be requeued with a short requeueAfter duration + By("fourth MCP reconciliation after delete") + res = env.ShouldReconcile(mcpRec, testutils.RequestFromObject(mcp)) + Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(mcp), mcp)).To(Succeed()) + Expect(res.RequeueAfter).To(BeNumerically(">", 0)) + Expect(res.RequeueAfter).To(BeNumerically("<", 1*time.Minute)) + By("verifying ClusterRequest deletion status") + Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed()) + Expect(cr.GetDeletionTimestamp().IsZero()).To(BeFalse(), "ClusterRequest should be marked for deletion") + Expect(mcp.Status.Conditions).To(ContainElements( + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionAllClusterRequestsDeleted). + WithStatus(metav1.ConditionFalse). + WithReason(cconst.ReasonWaitingForClusterRequestDeletion)), + )) + + // remove finalizer from cr + By("fake: removing finalizer from primary ClusterRequest") + controllerutil.RemoveFinalizer(cr, "dummy") + Expect(env.Client(platform).Update(env.Ctx, cr)).To(Succeed()) + + // add finalizer to MCP namespace + By("fake: adding finalizer to MCP namespace") + Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(ns), ns)).To(Succeed()) + controllerutil.AddFinalizer(ns, "dummy") + Expect(env.Client(platform).Update(env.Ctx, ns)).To(Succeed()) + + // reconcile the MCP again + // expected outcome: + // - the MCP namespace has a deletion timestamp + // - the MCP has a condition stating that it is waiting for the MCP namespace to be deleted + // - the MCP should be requeued with a short requeueAfter duration + By("fifth MCP reconciliation after delete") + res = env.ShouldReconcile(mcpRec, testutils.RequestFromObject(mcp)) + Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(mcp), mcp)).To(Succeed()) + Expect(res.RequeueAfter).To(BeNumerically(">", 0)) + Expect(res.RequeueAfter).To(BeNumerically("<", 1*time.Minute)) + Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(ns), ns)).To(Succeed()) + Expect(ns.GetDeletionTimestamp().IsZero()).To(BeFalse(), "MCP namespace should be marked for deletion") + Expect(mcp.Status.Conditions).To(ContainElements( + MatchCondition(TestCondition(). + WithType(corev2alpha1.ConditionMeta). + WithStatus(metav1.ConditionFalse). 
+				WithReason(cconst.ReasonWaitingForNamespaceDeletion)),
+		))
+
+		// remove finalizer from MCP namespace
+		By("fake: removing finalizer from MCP namespace")
+		controllerutil.RemoveFinalizer(ns, "dummy")
+		Expect(env.Client(platform).Update(env.Ctx, ns)).To(Succeed())
+
+		// reconcile the MCP again
+		// expected outcome:
+		// - cr has been deleted
+		// - mcp has been deleted
+		// - the MCP should not be requeued
+		By("sixth MCP reconciliation after delete")
+		res = env.ShouldReconcile(mcpRec, testutils.RequestFromObject(mcp))
+		Expect(env.Client(onboarding).Get(env.Ctx, client.ObjectKeyFromObject(mcp), mcp)).To(MatchError(apierrors.IsNotFound, "IsNotFound"))
+		Expect(res.IsZero()).To(BeTrue())
+		Expect(env.Client(platform).Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(MatchError(apierrors.IsNotFound, "IsNotFound"))
+	})
+
+})
diff --git a/internal/controllers/managedcontrolplane/services.go b/internal/controllers/managedcontrolplane/services.go
new file mode 100644
index 0000000..4661630
--- /dev/null
+++ b/internal/controllers/managedcontrolplane/services.go
@@ -0,0 +1,118 @@
+package managedcontrolplane
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	errutils "github.com/openmcp-project/controller-utils/pkg/errors"
+	"github.com/openmcp-project/controller-utils/pkg/logging"
+
+	cconst "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1/constants"
+	corev2alpha1 "github.com/openmcp-project/openmcp-operator/api/core/v2alpha1"
+	providerv1alpha1 "github.com/openmcp-project/openmcp-operator/api/provider/v1alpha1"
+)
+
+// deleteDependingServices deletes service resources that belong to service providers which have a 'services.openmcp.cloud/' finalizer on the ManagedControlPlane.
+// It returns a map from service provider name to the service resources that still exist for that provider; these should be in deletion by the time this function returns.
+// Deletion of the MCP should wait until the returned map is empty.
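+//
+// Sketch of how a caller in the MCP deletion flow might consume the result (illustrative only;
+// the local variable names below are assumptions, not taken from the actual controller code):
+//
+//	remaining, rerr := r.deleteDependingServices(ctx, mcp)
+//	if rerr != nil {
+//		// surface the error via a condition and retry
+//	}
+//	if len(remaining) > 0 {
+//		// requeue with backoff and wait until all service resources are gone
+//	}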
+func (r *ManagedControlPlaneReconciler) deleteDependingServices(ctx context.Context, mcp *corev2alpha1.ManagedControlPlaneV2) (map[string][]*unstructured.Unstructured, errutils.ReasonableError) {
+	log := logging.FromContextOrPanic(ctx)
+
+	// delete depending service resources, if any
+	serviceProviderNames := sets.New[string]()
+
+	if mcp == nil {
+		log.Debug("MCP is nil, no need to check for services")
+		return nil, nil
+	}
+
+	// identify service finalizers
+	for _, fin := range mcp.Finalizers {
+		if service, ok := strings.CutPrefix(fin, corev2alpha1.ServiceDependencyFinalizerPrefix); ok {
+			serviceProviderNames.Insert(service)
+		}
+	}
+
+	if serviceProviderNames.Len() == 0 {
+		log.Debug("No service finalizers found on MCP")
+		return nil, nil
+	}
+
+	// fetch service resources, if any exist
+	resources := map[string][]*unstructured.Unstructured{}
+	errs := errutils.NewReasonableErrorList()
+	for providerName := range serviceProviderNames {
+		sp := &providerv1alpha1.ServiceProvider{}
+		sp.SetName(providerName)
+		if err := r.PlatformCluster.Client().Get(ctx, client.ObjectKeyFromObject(sp), sp); err != nil {
+			errs.Append(errutils.WithReason(fmt.Errorf("failed to get ServiceProvider %s: %w", providerName, err), cconst.ReasonPlatformClusterInteractionProblem))
+			continue
+		}
+
+		if len(sp.Status.Resources) == 0 {
+			errs.Append(errutils.WithReason(fmt.Errorf("a dependency finalizer for ServiceProvider '%s' exists on MCP, but the provider does not expose any service resources", providerName), cconst.ReasonInternalError))
+		}
+		serviceResources := []*unstructured.Unstructured{}
+		for _, resourceType := range sp.Status.Resources {
+			res := &unstructured.Unstructured{}
+			res.SetAPIVersion(resourceType.Group + "/" + resourceType.Version)
+			res.SetKind(resourceType.Kind)
+			res.SetName(mcp.Name)
+			res.SetNamespace(mcp.Namespace)
+			if err := r.OnboardingCluster.Client().Get(ctx, client.ObjectKeyFromObject(res), res); err != nil {
+				if !apierrors.IsNotFound(err) {
+					errs.Append(errutils.WithReason(fmt.Errorf("error getting service resource [%s.%s] '%s/%s' for ServiceProvider '%s': %w", res.GetKind(), res.GetAPIVersion(), res.GetNamespace(), res.GetName(), providerName, err), cconst.ReasonOnboardingClusterInteractionProblem))
+				}
+				continue
+			}
+			serviceResources = append(serviceResources, res)
+		}
+
+		resources[providerName] = serviceResources
+	}
+	if rerr := errs.Aggregate(); rerr != nil {
+		return nil, rerr
+	}
+
+	// delete service resources
+	errs = errutils.NewReasonableErrorList()
+	remainingResources := map[string][]*unstructured.Unstructured{}
+	for providerName, serviceResources := range resources {
+		if len(serviceResources) == 0 {
+			log.Debug("No service resources found for ServiceProvider", "providerName", providerName)
+			continue
+		}
+		remainingServiceResources := []*unstructured.Unstructured{}
+		for _, res := range serviceResources {
+			if !res.GetDeletionTimestamp().IsZero() {
+				log.Debug("Service resource already marked for deletion", "resourceKind", res.GetKind(), "resourceAPIVersion", res.GetAPIVersion(), "provider", providerName)
+				remainingServiceResources = append(remainingServiceResources, res)
+				continue
+			}
+			log.Info("Deleting service resource", "resourceKind", res.GetKind(), "resourceAPIVersion", res.GetAPIVersion(), "provider", providerName)
+			if err := r.OnboardingCluster.Client().Delete(ctx, res); err != nil {
+				if !apierrors.IsNotFound(err) {
+					errs.Append(errutils.WithReason(fmt.Errorf("error deleting service resource [%s.%s] '%s/%s' for ServiceProvider '%s': %w",
res.GetKind(), res.GetAPIVersion(), res.GetNamespace(), res.GetName(), providerName, err), cconst.ReasonOnboardingClusterInteractionProblem)) + } else { + log.Debug("Service resource not found during deletion", "resourceKind", res.GetKind(), "resourceAPIVersion", res.GetAPIVersion(), "provider", providerName) + } + continue + } + remainingServiceResources = append(remainingServiceResources, res) + } + if len(remainingServiceResources) > 0 { + remainingResources[providerName] = remainingServiceResources + } + } + if rerr := errs.Aggregate(); rerr != nil { + return remainingResources, rerr + } + + return remainingResources, nil +} diff --git a/internal/controllers/managedcontrolplane/suite_test.go b/internal/controllers/managedcontrolplane/suite_test.go new file mode 100644 index 0000000..22c1342 --- /dev/null +++ b/internal/controllers/managedcontrolplane/suite_test.go @@ -0,0 +1,14 @@ +package managedcontrolplane_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestComponentUtils(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "ManagedControlPlane Controller Test Suite") +} diff --git a/internal/controllers/managedcontrolplane/testdata/test-01/config.yaml b/internal/controllers/managedcontrolplane/testdata/test-01/config.yaml new file mode 100644 index 0000000..d693772 --- /dev/null +++ b/internal/controllers/managedcontrolplane/testdata/test-01/config.yaml @@ -0,0 +1,7 @@ +managedControlPlane: + mcpClusterPurpose: mcp + reconcileMCPEveryXDays: 7 + defaultOIDCProvider: + issuer: https://example.com/oidc + groupsPrefix: 'mygroups:' + usernamePrefix: 'myuser:' diff --git a/internal/controllers/managedcontrolplane/testdata/test-01/onboarding/mcp-01.yaml b/internal/controllers/managedcontrolplane/testdata/test-01/onboarding/mcp-01.yaml new file mode 100644 index 0000000..688ba15 --- /dev/null +++ b/internal/controllers/managedcontrolplane/testdata/test-01/onboarding/mcp-01.yaml @@ -0,0 +1,49 @@ +apiVersion: core.openmcp.cloud/v2alpha1 +kind: ManagedControlPlaneV2 +metadata: + name: mcp-01 + namespace: test + finalizers: + - services.openmcp.cloud/sp-01 + - services.openmcp.cloud/sp-02 +spec: + iam: + roleBindings: + - subjects: + - kind: User + name: user1 + - kind: User + name: user2 + roleRefs: + - kind: ClusterRole + name: cluster-admin + - subjects: + - kind: Group + name: group1 + roleRefs: + - kind: ClusterRole + name: cluster-viewer + + oidcProviders: + - name: add1 + issuer: https://example.com/add1 + groupsPrefix: 'add1groups:' + usernamePrefix: 'add1user:' + roleBindings: + - subjects: + - kind: User + name: user3 + roleRefs: + - kind: ClusterRole + name: cluster-admin + - name: add2 + issuer: https://example.com/add2 + groupsPrefix: 'add2groups:' + usernamePrefix: 'add2user:' + roleBindings: + - subjects: + - kind: Group + name: group2 + roleRefs: + - kind: ClusterRole + name: cluster-viewer diff --git a/internal/controllers/managedcontrolplane/testdata/test-01/onboarding/sr-01-01.yaml b/internal/controllers/managedcontrolplane/testdata/test-01/onboarding/sr-01-01.yaml new file mode 100644 index 0000000..27c429a --- /dev/null +++ b/internal/controllers/managedcontrolplane/testdata/test-01/onboarding/sr-01-01.yaml @@ -0,0 +1,9 @@ +# this is a fake service resource +# we use standard k8s resources for testing purposes, so we don't have to add a custom resource definition or schema +apiVersion: v1 +kind: ConfigMap +metadata: + name: mcp-01 + namespace: test + finalizers: + - dummy diff --git 
a/internal/controllers/managedcontrolplane/testdata/test-01/onboarding/sr-01-02.yaml b/internal/controllers/managedcontrolplane/testdata/test-01/onboarding/sr-01-02.yaml new file mode 100644 index 0000000..94c1fac --- /dev/null +++ b/internal/controllers/managedcontrolplane/testdata/test-01/onboarding/sr-01-02.yaml @@ -0,0 +1,9 @@ +# this is a fake service resource +# we use standard k8s resources for testing purposes, so we don't have to add a custom resource definition or schema +apiVersion: v1 +kind: ServiceAccount +metadata: + name: mcp-01 + namespace: test + finalizers: + - dummy diff --git a/internal/controllers/managedcontrolplane/testdata/test-01/onboarding/sr-02-01.yaml b/internal/controllers/managedcontrolplane/testdata/test-01/onboarding/sr-02-01.yaml new file mode 100644 index 0000000..2c08ab8 --- /dev/null +++ b/internal/controllers/managedcontrolplane/testdata/test-01/onboarding/sr-02-01.yaml @@ -0,0 +1,9 @@ +# this is a fake service resource +# we use standard k8s resources for testing purposes, so we don't have to add a custom resource definition or schema +apiVersion: v1 +kind: Secret +metadata: + name: mcp-01 + namespace: test + finalizers: + - dummy diff --git a/internal/controllers/managedcontrolplane/testdata/test-01/platform/sp-01.yaml b/internal/controllers/managedcontrolplane/testdata/test-01/platform/sp-01.yaml new file mode 100644 index 0000000..78e6408 --- /dev/null +++ b/internal/controllers/managedcontrolplane/testdata/test-01/platform/sp-01.yaml @@ -0,0 +1,13 @@ +apiVersion: openmcp.cloud/v1alpha1 +kind: ServiceProvider +metadata: + name: sp-01 +spec: {} +status: + resources: + - group: "" + version: v1 + kind: ConfigMap + - group: "" + version: v1 + kind: ServiceAccount diff --git a/internal/controllers/managedcontrolplane/testdata/test-01/platform/sp-02.yaml b/internal/controllers/managedcontrolplane/testdata/test-01/platform/sp-02.yaml new file mode 100644 index 0000000..40aaa29 --- /dev/null +++ b/internal/controllers/managedcontrolplane/testdata/test-01/platform/sp-02.yaml @@ -0,0 +1,10 @@ +apiVersion: openmcp.cloud/v1alpha1 +kind: ServiceProvider +metadata: + name: sp-02 +spec: {} +status: + resources: + - group: "" + version: v1 + kind: Secret diff --git a/internal/controllers/provider/controller_test.go b/internal/controllers/provider/controller_test.go index 25e7443..09e5f85 100644 --- a/internal/controllers/provider/controller_test.go +++ b/internal/controllers/provider/controller_test.go @@ -11,6 +11,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" + commonapi "github.com/openmcp-project/openmcp-operator/api/common" apiinstall "github.com/openmcp-project/openmcp-operator/api/install" "github.com/openmcp-project/openmcp-operator/api/provider/v1alpha1" "github.com/openmcp-project/openmcp-operator/internal/controllers/provider/install" @@ -21,7 +22,7 @@ var _ = Describe("Deployment Controller", func() { Context("Reconcile", func() { var ( - scheme = apiinstall.InstallOperatorAPIs(runtime.NewScheme()) + scheme = apiinstall.InstallOperatorAPIsPlatform(runtime.NewScheme()) environment = "test-environment" systemNamespace = "openmcp-system" ) @@ -147,7 +148,7 @@ var _ = Describe("Deployment Controller", func() { It("should convert a deploymentSpec into an unstructured and back", func() { spec := &v1alpha1.DeploymentSpec{ Image: "test-image:v0.1.0", - ImagePullSecrets: []v1alpha1.ObjectReference{ + ImagePullSecrets: []commonapi.LocalObjectReference{ {Name: "test-secret-1"}, {Name: 
"test-secret-2"}, }, diff --git a/internal/controllers/provider/install/deployment.go b/internal/controllers/provider/install/deployment.go index 96fd311..f251bdd 100644 --- a/internal/controllers/provider/install/deployment.go +++ b/internal/controllers/provider/install/deployment.go @@ -92,7 +92,7 @@ func (m *deploymentMutator) Mutate(d *appsv1.Deployment) error { } // Set the provider as owner of the deployment, so that the provider controller gets an event if the deployment changes. - if err := controllerutil.SetControllerReference(m.values.provider, d, install.InstallOperatorAPIs(runtime.NewScheme())); err != nil { + if err := controllerutil.SetControllerReference(m.values.provider, d, install.InstallOperatorAPIsPlatform(runtime.NewScheme())); err != nil { return fmt.Errorf("failed to set deployment controller as owner of deployment: %w", err) } diff --git a/internal/controllers/provider/install/job.go b/internal/controllers/provider/install/job.go index bc76f35..97943f9 100644 --- a/internal/controllers/provider/install/job.go +++ b/internal/controllers/provider/install/job.go @@ -84,17 +84,19 @@ func (m *jobMutator) Mutate(j *v1.Job) error { ImagePullPolicy: corev1.PullIfNotPresent, Args: initCmd, Env: env, + VolumeMounts: m.values.deploymentSpec.ExtraVolumeMounts, }, }, ServiceAccountName: m.values.NamespacedResourceName(initPrefix), ImagePullSecrets: m.values.ImagePullSecrets(), RestartPolicy: corev1.RestartPolicyNever, + Volumes: m.values.deploymentSpec.ExtraVolumes, }, }, } // Set the provider as owner of the job, so that the provider controller gets an event if the job changes. - if err := controllerutil.SetControllerReference(m.values.provider, j, install.InstallOperatorAPIs(runtime.NewScheme())); err != nil { + if err := controllerutil.SetControllerReference(m.values.provider, j, install.InstallOperatorAPIsPlatform(runtime.NewScheme())); err != nil { return fmt.Errorf("failed to set deployment controller as owner of init job: %w", err) } diff --git a/internal/controllers/scheduler/controller_test.go b/internal/controllers/scheduler/controller_test.go index cb66866..fc6c9dc 100644 --- a/internal/controllers/scheduler/controller_test.go +++ b/internal/controllers/scheduler/controller_test.go @@ -24,7 +24,7 @@ import ( "github.com/openmcp-project/openmcp-operator/internal/controllers/scheduler" ) -var scheme = install.InstallOperatorAPIs(runtime.NewScheme()) +var scheme = install.InstallOperatorAPIsPlatform(runtime.NewScheme()) // defaultTestSetup initializes a new environment for testing the scheduler controller. // Expected folder structure is a 'config.yaml' file next to a folder named 'cluster' containing the manifests.