diff --git a/Makefile b/Makefile index e298476c..130dc91b 100644 --- a/Makefile +++ b/Makefile @@ -127,6 +127,12 @@ build-installer: manifests generate kustomize ## Generate a consolidated YAML wi cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} $(KUSTOMIZE) build config/default > dist/install.yaml +KIND_CLUSTER_NAME ?= kind + +.PHONY: kind-load +kind-load: ## Loads the docker image into a local kind cluster. + kind load docker-image ${IMG} --name "$(KIND_CLUSTER_NAME)" + ##@ Deployment ifndef ignore-not-found @@ -134,20 +140,20 @@ ifndef ignore-not-found endif .PHONY: install -install: kubectl kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. +install: kubectl kustomize ## Install CRDs into the K8s cluster specified by $KUBECONFIG. $(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f - .PHONY: uninstall -uninstall: kubectl kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. +uninstall: kubectl kustomize ## Uninstall CRDs from the K8s cluster specified by $KUBECONFIG. Call with ignore-not-found=true to ignore resource not found errors during deletion. $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - .PHONY: deploy -deploy: kubectl kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. +deploy: kubectl kustomize ## Deploy controller to the K8s cluster specified by $KUBECONFIG. cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f - .PHONY: undeploy -undeploy: kubectl kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. +undeploy: kubectl kustomize ## Undeploy controller from the K8s cluster specified by $KUBECONFIG. Call with ignore-not-found=true to ignore resource not found errors during deletion. 
$(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - ##@ Dependencies diff --git a/cmd/main.go b/cmd/main.go index a3220668..aa70ce69 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -39,6 +39,7 @@ import ( "github.com/kcp-dev/kcp-operator/internal/controller/cacheserver" "github.com/kcp-dev/kcp-operator/internal/controller/frontproxy" "github.com/kcp-dev/kcp-operator/internal/controller/kubeconfig" + kubeconfigrbac "github.com/kcp-dev/kcp-operator/internal/controller/kubeconfig-rbac" "github.com/kcp-dev/kcp-operator/internal/controller/rootshard" "github.com/kcp-dev/kcp-operator/internal/controller/shard" "github.com/kcp-dev/kcp-operator/internal/reconciling" @@ -188,6 +189,13 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "Kubeconfig") os.Exit(1) } + if err = (&kubeconfigrbac.KubeconfigRBACReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "KubeconfigRBAC") + os.Exit(1) + } // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/config/crd/bases/operator.kcp.io_frontproxies.yaml b/config/crd/bases/operator.kcp.io_frontproxies.yaml index fe08f207..e0f7bf10 100644 --- a/config/crd/bases/operator.kcp.io_frontproxies.yaml +++ b/config/crd/bases/operator.kcp.io_frontproxies.yaml @@ -210,7 +210,7 @@ spec: DNS names determined automatically by the kcp-operator. If DNSNames is used together with IssuerRef, DNSNames will be uses as-is and not merged. If IssuerRef is not set, DNSNames will be merged with the defaults. This is to avoid - trying to guess what DNSNames configued issuer might support. + trying to guess what DNSNames configured issuer might support. items: type: string type: array diff --git a/config/crd/bases/operator.kcp.io_kubeconfigs.yaml b/config/crd/bases/operator.kcp.io_kubeconfigs.yaml index f7c0dff9..66d5c3b6 100644 --- a/config/crd/bases/operator.kcp.io_kubeconfigs.yaml +++ b/config/crd/bases/operator.kcp.io_kubeconfigs.yaml @@ -49,6 +49,27 @@ spec: spec: description: KubeconfigSpec defines the desired state of Kubeconfig. properties: + authorization: + description: Authorization allows to provision permissions for this + kubeconfig. + properties: + clusterRoleBindings: + properties: + cluster: + description: Cluster can be either a cluster name or a workspace + path. + type: string + clusterRoles: + items: + type: string + type: array + required: + - cluster + - clusterRoles + type: object + required: + - clusterRoleBindings + type: object certificateTemplate: description: |- CertificateTemplate allows to customize the properties on the generated @@ -77,7 +98,7 @@ spec: DNS names determined automatically by the kcp-operator. If DNSNames is used together with IssuerRef, DNSNames will be uses as-is and not merged. If IssuerRef is not set, DNSNames will be merged with the defaults. This is to avoid - trying to guess what DNSNames configued issuer might support. + trying to guess what DNSNames configured issuer might support. 
items: type: string type: array @@ -350,6 +371,13 @@ spec: status: description: KubeconfigStatus defines the observed state of Kubeconfig properties: + authorization: + properties: + provisionedCluster: + type: string + required: + - provisionedCluster + type: object conditions: items: description: Condition contains details for one aspect of the current diff --git a/config/crd/bases/operator.kcp.io_rootshards.yaml b/config/crd/bases/operator.kcp.io_rootshards.yaml index c4867b1d..e71fa7f3 100644 --- a/config/crd/bases/operator.kcp.io_rootshards.yaml +++ b/config/crd/bases/operator.kcp.io_rootshards.yaml @@ -296,7 +296,7 @@ spec: DNS names determined automatically by the kcp-operator. If DNSNames is used together with IssuerRef, DNSNames will be uses as-is and not merged. If IssuerRef is not set, DNSNames will be merged with the defaults. This is to avoid - trying to guess what DNSNames configued issuer might support. + trying to guess what DNSNames configured issuer might support. items: type: string type: array @@ -1766,7 +1766,7 @@ spec: DNS names determined automatically by the kcp-operator. If DNSNames is used together with IssuerRef, DNSNames will be uses as-is and not merged. If IssuerRef is not set, DNSNames will be merged with the defaults. This is to avoid - trying to guess what DNSNames configued issuer might support. + trying to guess what DNSNames configured issuer might support. items: type: string type: array diff --git a/config/crd/bases/operator.kcp.io_shards.yaml b/config/crd/bases/operator.kcp.io_shards.yaml index 824c62d8..e650267c 100644 --- a/config/crd/bases/operator.kcp.io_shards.yaml +++ b/config/crd/bases/operator.kcp.io_shards.yaml @@ -280,7 +280,7 @@ spec: DNS names determined automatically by the kcp-operator. If DNSNames is used together with IssuerRef, DNSNames will be uses as-is and not merged. If IssuerRef is not set, DNSNames will be merged with the defaults. This is to avoid - trying to guess what DNSNames configued issuer might support. + trying to guess what DNSNames configured issuer might support. items: type: string type: array diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index c54dbc1d..88133df2 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -5,3 +5,4 @@ resources: images: - name: controller newName: ghcr.io/kcp-dev/kcp-operator + newTag: e2e diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index f301f103..3551ef01 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -50,19 +50,15 @@ spec: # - linux securityContext: runAsNonRoot: true - # TODO(user): For common cases that do not require escalating privileges - # it is recommended to ensure that all your Pods/Containers are restrictive. - # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted - # Please uncomment the following code if your project does NOT have to work on old Kubernetes - # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). 
- # seccompProfile: - # type: RuntimeDefault + seccompProfile: + type: RuntimeDefault containers: - command: - /manager args: - --leader-elect - --health-probe-bind-address=:8081 + - --zap-time-encoding=iso8601 image: controller:latest name: manager securityContext: diff --git a/docs/content/architecture/.pages b/docs/content/architecture/.pages index 51b5579d..9220bd9f 100644 --- a/docs/content/architecture/.pages +++ b/docs/content/architecture/.pages @@ -2,4 +2,5 @@ nav: - index.md - basics.md - front-proxy.md + - kubeconfig.md - Certificate Management: pki.md diff --git a/docs/content/architecture/index.md b/docs/content/architecture/index.md index e6bb2ba4..c6c9fe1d 100644 --- a/docs/content/architecture/index.md +++ b/docs/content/architecture/index.md @@ -7,7 +7,7 @@ This section describes how the kcp-operator is designed and meant to be used. - [Basics](basics.md) – A general overview over the resources provided by the kcp-operator. - [front-proxy](front-proxy.md) – Explains how the kcp front-proxy can be used to ingest traffic. - [Certificate Management](pki.md) – This page describes the various CAs and certificates used in a kcp installation. +- [Kubeconfig](kubeconfig.md) – Shows how `Kubeconfig` objects can be used to provide credentials to kcp. diff --git a/docs/content/architecture/kubeconfig.md b/docs/content/architecture/kubeconfig.md new file mode 100644 index 00000000..74f541c7 --- /dev/null +++ b/docs/content/architecture/kubeconfig.md @@ -0,0 +1,84 @@ +--- +description: > + Shows how `Kubeconfig` objects can be used to provide credentials to kcp. +--- + +# Kubeconfigs + +Besides provisioning kcp itself, the kcp-operator can also provide [kubeconfigs](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) to access kcp. Each kubeconfig will internally be backed by a dedicated client certificate. + +## Basics + +A minimal `Kubeconfig` object typically looks like this: + +```yaml +apiVersion: operator.kcp.io/v1alpha1 +kind: Kubeconfig +metadata: + name: susan + namespace: my-kcp +spec: + # Required: the username inside Kubernetes; + # this will be the client certificate's common name. + username: susan + + # Required: groups to attach to the user; + # this will be the organizations in the client cert. + groups: + - system:kcp:admin + + # Required: in what Secret the generated kubeconfig should be stored. + secretRef: + name: susan-kubeconfig + + # Required: a Kubeconfig must target either a FrontProxy, Shard or RootShard. + target: + frontProxyRef: + name: my-front-proxy + + # Required: how long the certificate should be valid for; + # the operator will automatically renew the certificate, after which the + # Secret will be updated and has to be re-downloaded. + validity: 8766h +``` + +`Kubeconfig` objects must exist in the same namespace as the kcp installation they are meant for. + +Once the `Kubeconfig` has been created, you can observe its status to wait for it to be ready. After that, retrieve the Secret mentioned in the `secretRef` to find the finished kubeconfig, ready to use. + +!!! warning + Deleting a `Kubeconfig` will also delete the underlying Secret from the hosting cluster; however, this will not invalidate the existing certificate that is embedded in the kubeconfig. This means anyone with a copy of the kubeconfig can keep using it until the certificate expires. + + To disarm an old kubeconfig, make sure to revoke any permissions granted through RBAC for the user and/or their groups. + +!!!
note + The `Kubeconfig`'s name is embedded into the certificate in the form of a group (organization) named `kubeconfig:<name>`. This is to allow a unique mapping from RBAC rules to `Kubeconfig` objects for authorization (see further down). Take note that this means the `Kubeconfig`'s name is leaked to whoever gets the kubeconfig. + +## Authorization + +Without any configuration beyond what is shown in the basics section above, the created identity (username + groups) will not get any permissions in kcp. So while the kubeconfig is valid and allows proper authentication, pretty much no actions will be permitted yet. + +The administrator has to either rely on externally-managed RBAC rules to provide permissions, or use the kcp-operator to provision such RBAC in a workspace. + +To make the kcp-operator manage RBAC, use `spec.authorization` inside a `Kubeconfig`: + +```yaml +apiVersion: operator.kcp.io/v1alpha1 +kind: Kubeconfig +metadata: + name: susan + namespace: my-kcp +spec: + #...snip... + + authorization: + clusterRoleBindings: + # This can be a workspace path (root:something) or a cluster name (ID). + cluster: root:initech:teamgibbons + clusterRoles: + - cluster-admin +``` + +This configuration would bind the group `kubeconfig:susan` to the ClusterRole `cluster-admin` inside the given workspace. Note that this is specifically not bound to the user (common name), so that two `Kubeconfig` objects that both have the same `spec.username` do not have colliding RBAC. + +When deleting a `Kubeconfig` with authorization settings, the kcp-operator will first unprovision (delete) the `ClusterRoleBindings` before the `Kubeconfig` can be deleted. diff --git a/docs/content/contributing/local-setup.md b/docs/content/contributing/local-setup.md index 457b8c58..9a6fab95 100644 --- a/docs/content/contributing/local-setup.md +++ b/docs/content/contributing/local-setup.md @@ -58,19 +58,20 @@ run the operator as a binary.
Build the image: ```sh -make docker-build IMG=ghcr.io/kcp-dev/kcp-operator:1 +export IMG=ghcr.io/kcp-dev/kcp-operator:local +make docker-build ``` Load the image into the kind cluster: ```sh -kind load docker-image ghcr.io/kcp-dev/kcp-operator:1 +kind load docker-image "$IMG" ``` Deploy the operator manifests into the cluster: ```sh -make deploy IMG=ghcr.io/kcp-dev/kcp-operator:1 +make deploy ``` ### Option 2: Run Operator Directly @@ -87,12 +88,12 @@ Then start the operator via `go run`: go run ./cmd/main.go ``` -## Create kcp Instance +## Create kcp Instance Now you can create a root shard: ```sh -kubectl apply -f config/samples/operator.kcp.io_v1alpha1_rootshard.yaml +kubectl apply -f config/samples/operator.kcp.io_v1alpha1_rootshard.yaml ``` Create the additional shard: diff --git a/go.mod b/go.mod index 46fc179b..3b37b74a 100644 --- a/go.mod +++ b/go.mod @@ -36,6 +36,7 @@ require ( github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/egymgmbh/go-prefix-writer v0.0.0-20180609083313-7326ea162eca // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fatih/color v1.17.0 // indirect diff --git a/go.sum b/go.sum index 2731ba1a..e2e7b7d1 100644 --- a/go.sum +++ b/go.sum @@ -21,6 +21,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/egymgmbh/go-prefix-writer v0.0.0-20180609083313-7326ea162eca h1:7oodhZp9MZW0DBkrZXyUsJWKQFy35SVxjZ8K4vHXnk8= +github.com/egymgmbh/go-prefix-writer v0.0.0-20180609083313-7326ea162eca/go.mod h1:UhMFM+dnOcm1f0Pve8uqRaxAhEYki+/CuA2BTDp2T04= github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= diff --git a/hack/run-e2e-tests.sh b/hack/run-e2e-tests.sh index 5d6bf532..dffbcb73 100755 --- a/hack/run-e2e-tests.sh +++ b/hack/run-e2e-tests.sh @@ -16,7 +16,7 @@ set -euo pipefail -KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-e2e}" +export KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-e2e}" DATA_DIR=".e2e-$KIND_CLUSTER_NAME" OPERATOR_PID=0 PROTOKOL_PID=0 @@ -35,12 +35,6 @@ kind create cluster --name "$KIND_CLUSTER_NAME" chmod 600 "$KUBECONFIG" teardown_kind() { - if [[ $OPERATOR_PID -gt 0 ]]; then - echo "Stopping kcp-operator..." - kill -TERM $OPERATOR_PID - wait $OPERATOR_PID - fi - if [[ $PROTOKOL_PID -gt 0 ]]; then echo "Stopping protokol..." kill -TERM $PROTOKOL_PID @@ -59,7 +53,7 @@ fi echo "Kubeconfig is in $KUBECONFIG." # apply kernel limits job first and wait for completion -echo "Applying kernel limits job…" +echo "Applying kernel limits job..." kubectl apply --filename hack/ci/kernel.yaml kubectl wait --for=condition=Complete job/kernel-limits --timeout=300s echo "Kernel limits job completed." @@ -85,20 +79,13 @@ _tools/helm upgrade \ kubectl apply --filename hack/ci/testdata/clusterissuer.yaml -# start the operator locally -echo "Starting kcp-operator..." 
-_build/manager \ - -kubeconfig "$KUBECONFIG" \ - -zap-log-level debug \ - -zap-encoder console \ - -zap-time-encoding iso8601 \ - -health-probe-bind-address="" \ - >"$DATA_DIR/kcp-operator.log" 2>&1 & -OPERATOR_PID=$! -echo "Running as process $OPERATOR_PID." +# build operator image and deploy it into kind +echo "Building and deploying kcp-operator..." +export IMG="ghcr.io/kcp-dev/kcp-operator:e2e" +make --no-print-directory docker-build kind-load deploy if command -v protokol &> /dev/null; then - protokol --namespace 'e2e-*' --output "$DATA_DIR/kind-logs" 2>/dev/null & + protokol --namespace 'e2e-*' --namespace kcp-operator-system --output "$DATA_DIR/kind-logs" 2>/dev/null & PROTOKOL_PID=$! else echo "Install https://codeberg.org/xrstf/protokol to automatically" diff --git a/internal/client/clients.go b/internal/client/clients.go new file mode 100644 index 00000000..305966f9 --- /dev/null +++ b/internal/client/clients.go @@ -0,0 +1,140 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + + "github.com/kcp-dev/logicalcluster/v3" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/kcp-dev/kcp-operator/internal/resources" + operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1" +) + +func NewRootShardClient(ctx context.Context, c ctrlruntimeclient.Client, rootShard *operatorv1alpha1.RootShard, cluster logicalcluster.Name, scheme *runtime.Scheme) (ctrlruntimeclient.Client, error) { + baseUrl := fmt.Sprintf("https://%s.%s.svc.cluster.local:6443", resources.GetRootShardServiceName(rootShard), rootShard.Namespace) + + if !cluster.Empty() { + baseUrl = fmt.Sprintf("%s/clusters/%s", baseUrl, cluster.String()) + } + + return newClient(ctx, c, baseUrl, scheme, rootShard, nil, nil) +} + +func NewRootShardProxyClient(ctx context.Context, c ctrlruntimeclient.Client, rootShard *operatorv1alpha1.RootShard, cluster logicalcluster.Name, scheme *runtime.Scheme) (ctrlruntimeclient.Client, error) { + baseUrl := fmt.Sprintf("https://%s.%s.svc.cluster.local:6443", resources.GetRootShardProxyServiceName(rootShard), rootShard.Namespace) + + if !cluster.Empty() { + baseUrl = fmt.Sprintf("%s/clusters/%s", baseUrl, cluster.String()) + } + + return newClient(ctx, c, baseUrl, scheme, rootShard, nil, nil) +} + +func NewShardClient(ctx context.Context, c ctrlruntimeclient.Client, shard *operatorv1alpha1.Shard, cluster logicalcluster.Name, scheme *runtime.Scheme) (ctrlruntimeclient.Client, error) { + baseUrl := fmt.Sprintf("https://%s.%s.svc.cluster.local:6443", resources.GetShardServiceName(shard), shard.Namespace) + + if !cluster.Empty() { + baseUrl = fmt.Sprintf("%s/clusters/%s", baseUrl, cluster.String()) + } + + return newClient(ctx, c, baseUrl, scheme, nil, shard, nil) +} + +func newClient( + ctx context.Context, + c ctrlruntimeclient.Client, + url string, + scheme 
*runtime.Scheme, + // only one of these three should be provided, the others nil + rootShard *operatorv1alpha1.RootShard, + shard *operatorv1alpha1.Shard, + frontProxy *operatorv1alpha1.FrontProxy, +) (ctrlruntimeclient.Client, error) { + tlsConfig, err := getTLSConfig(ctx, c, rootShard, shard, frontProxy) + if err != nil { + return nil, fmt.Errorf("failed to determine TLS settings: %w", err) + } + + cfg := &rest.Config{ + Host: url, + TLSClientConfig: tlsConfig, + } + + return ctrlruntimeclient.New(cfg, ctrlruntimeclient.Options{Scheme: scheme}) +} + +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get + +func getTLSConfig(ctx context.Context, c ctrlruntimeclient.Client, rootShard *operatorv1alpha1.RootShard, shard *operatorv1alpha1.Shard, frontProxy *operatorv1alpha1.FrontProxy) (rest.TLSClientConfig, error) { + rootShard, err := getRootShard(ctx, c, rootShard, shard, frontProxy) + if err != nil { + return rest.TLSClientConfig{}, fmt.Errorf("failed to determine effective RootShard: %w", err) + } + + // get the secret for the kcp-operator client cert + key := types.NamespacedName{ + Namespace: rootShard.Namespace, + Name: resources.GetRootShardCertificateName(rootShard, operatorv1alpha1.OperatorCertificate), + } + + certSecret := &corev1.Secret{} + if err := c.Get(ctx, key, certSecret); err != nil { + return rest.TLSClientConfig{}, fmt.Errorf("failed to get root shard proxy Secret: %w", err) + } + + return rest.TLSClientConfig{ + CAData: certSecret.Data["ca.crt"], + CertData: certSecret.Data["tls.crt"], + KeyData: certSecret.Data["tls.key"], + }, nil +} + +// +kubebuilder:rbac:groups=operator.kcp.io,resources=rootshards,verbs=get + +func getRootShard(ctx context.Context, c ctrlruntimeclient.Client, rootShard *operatorv1alpha1.RootShard, shard *operatorv1alpha1.Shard, frontProxy *operatorv1alpha1.FrontProxy) (*operatorv1alpha1.RootShard, error) { + if rootShard != nil { + return rootShard, nil + } + + var ref *corev1.LocalObjectReference + + switch { + case shard != nil: + ref = shard.Spec.RootShard.Reference + + case frontProxy != nil: + ref = frontProxy.Spec.RootShard.Reference + + default: + panic("Must be called with either RootShard, Shard or FrontProxy.") + } + + rootShard = &operatorv1alpha1.RootShard{} + if err := c.Get(ctx, types.NamespacedName{Namespace: rootShard.Namespace, Name: ref.Name}, rootShard); err != nil { + return nil, fmt.Errorf("failed to get RootShard: %w", err) + } + + return rootShard, nil +} diff --git a/internal/client/frontproxy.go b/internal/client/frontproxy.go new file mode 100644 index 00000000..23534aa5 --- /dev/null +++ b/internal/client/frontproxy.go @@ -0,0 +1,71 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "context" + "errors" + "fmt" + + "github.com/kcp-dev/logicalcluster/v3" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + + operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1" +) + +// +kubebuilder:rbac:groups=operator.kcp.io,resources=rootshards;shards;frontproxies,verbs=get + +func NewInternalKubeconfigClient(ctx context.Context, c ctrlruntimeclient.Client, kubeconfig *operatorv1alpha1.Kubeconfig, cluster logicalcluster.Name, scheme *runtime.Scheme) (ctrlruntimeclient.Client, error) { + target := kubeconfig.Spec.Target + + switch { + case target.RootShardRef != nil: + rootShard := &operatorv1alpha1.RootShard{} + if err := c.Get(ctx, types.NamespacedName{Name: target.RootShardRef.Name, Namespace: kubeconfig.Namespace}, rootShard); err != nil { + return nil, fmt.Errorf("failed to get RootShard: %w", err) + } + + return NewRootShardClient(ctx, c, rootShard, cluster, scheme) + + case target.ShardRef != nil: + shard := &operatorv1alpha1.Shard{} + if err := c.Get(ctx, types.NamespacedName{Name: target.ShardRef.Name, Namespace: kubeconfig.Namespace}, shard); err != nil { + return nil, fmt.Errorf("failed to get Shard: %w", err) + } + + return NewShardClient(ctx, c, shard, cluster, scheme) + + case target.FrontProxyRef != nil: + frontProxy := &operatorv1alpha1.FrontProxy{} + if err := c.Get(ctx, types.NamespacedName{Name: target.FrontProxyRef.Name, Namespace: kubeconfig.Namespace}, frontProxy); err != nil { + return nil, fmt.Errorf("failed to get FrontProxy: %w", err) + } + + rootShard := &operatorv1alpha1.RootShard{} + if err := c.Get(ctx, types.NamespacedName{Name: frontProxy.Spec.RootShard.Reference.Name, Namespace: kubeconfig.Namespace}, rootShard); err != nil { + return nil, fmt.Errorf("failed to get RootShard: %w", err) + } + + return NewRootShardProxyClient(ctx, c, rootShard, cluster, scheme) + + default: + return nil, errors.New("no valid target configured in Kubeconfig: neither rootShard, shard nor frontProxy ref set") + } +} diff --git a/internal/controller/cacheserver/controller.go b/internal/controller/cacheserver/controller.go index 8c95b636..23936eb9 100644 --- a/internal/controller/cacheserver/controller.go +++ b/internal/controller/cacheserver/controller.go @@ -57,6 +57,7 @@ func (r *CacheServerReconciler) Reconcile(ctx context.Context, req ctrl.Request) // SetupWithManager sets up the controller with the Manager. func (r *CacheServerReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). + Named("cacheserver"). For(&operatorv1alpha1.CacheServer{}). Complete(r) } diff --git a/internal/controller/frontproxy/controller.go b/internal/controller/frontproxy/controller.go index 84f663cd..d833e6df 100644 --- a/internal/controller/frontproxy/controller.go +++ b/internal/controller/frontproxy/controller.go @@ -71,6 +71,7 @@ func (r *FrontProxyReconciler) SetupWithManager(mgr ctrl.Manager) error { }) return ctrl.NewControllerManagedBy(mgr). + Named("frontproxy"). For(&operatorv1alpha1.FrontProxy{}). Owns(&appsv1.Deployment{}). Owns(&corev1.ConfigMap{}). diff --git a/internal/controller/kubeconfig-rbac/controller.go b/internal/controller/kubeconfig-rbac/controller.go new file mode 100644 index 00000000..f50fe545 --- /dev/null +++ b/internal/controller/kubeconfig-rbac/controller.go @@ -0,0 +1,286 @@ +/* +Copyright 2025 The KCP Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfigrbac + +import ( + "context" + "fmt" + "slices" + + "github.com/kcp-dev/logicalcluster/v3" + "k8c.io/reconciler/pkg/reconciling" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + ctrl "sigs.k8s.io/controller-runtime" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/kcp-dev/kcp-operator/internal/client" + "github.com/kcp-dev/kcp-operator/internal/resources/kubeconfig" + operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1" +) + +const cleanupFinalizer = "operator.kcp.io/cleanup-rbac" + +// KubeconfigRBACReconciler reconciles a Kubeconfig object +type KubeconfigRBACReconciler struct { + ctrlruntimeclient.Client + Scheme *runtime.Scheme +} + +// SetupWithManager sets up the controller with the Manager. +func (r *KubeconfigRBACReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&operatorv1alpha1.Kubeconfig{}). + Named("kubeconfig-rbac"). + Complete(r) +} + +// +kubebuilder:rbac:groups=operator.kcp.io,resources=kubeconfigs,verbs=get;update;patch +// +kubebuilder:rbac:groups=operator.kcp.io,resources=kubeconfigs/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=operator.kcp.io,resources=kubeconfigs/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +func (r *KubeconfigRBACReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + logger.V(4).Info("Reconciling") + + config := &operatorv1alpha1.Kubeconfig{} + if err := r.Get(ctx, req.NamespacedName, config); err != nil { + return ctrl.Result{}, ctrlruntimeclient.IgnoreNotFound(err) + } + + err := r.reconcile(ctx, config) + + return ctrl.Result{}, err +} + +func (r *KubeconfigRBACReconciler) reconcile(ctx context.Context, config *operatorv1alpha1.Kubeconfig) error { + if config.DeletionTimestamp != nil { + return r.handleDeletion(ctx, config) + } + + var ( + oldCluster, newCluster string + ) + + if auth := config.Status.Authorization; auth != nil { + oldCluster = auth.ProvisionedCluster + } + if auth := config.Spec.Authorization; auth != nil { + newCluster = auth.ClusterRoleBindings.Cluster + } + + // All `return nil` here are because the Kubeconfig has been modified and will be requeued anyway. + + // If there was something provisioned, but the spec changed, we have to unprovision first. 
+ if oldCluster != "" && newCluster != oldCluster { + if err := r.unprovisionCluster(ctx, config); err != nil { + return err + } + + return nil + } + + // If nothing is configured (anymore), allwe have to do is get rid of the finalizer + if newCluster == "" { + if err := r.removeFinalizer(ctx, config); err != nil { + return fmt.Errorf("failed to remove cleanup finalizer: %w", err) + } + + return nil + } + + // Otherwise we ensure the finalizer exists, because we will soon ensure the bindings. + if updated, err := r.ensureFinalizer(ctx, config); err != nil { + return fmt.Errorf("failed to ensure cleanup finalizer: %w", err) + } else if updated { + return nil + } + + // Before we actually create anything, remember the cluster so if something happens, + // we can properly cleanup any leftovers. + if updated, err := r.patchProvisionedCluster(ctx, config, newCluster); err != nil { + return fmt.Errorf("failed to update status: %w", err) + } else if updated { + return nil + } + + // Make sure whatever is in the workspace matches what is configured in the Kubeconfig + if err := r.reconcileBindings(ctx, config); err != nil { + return fmt.Errorf("failed to ensure ClusterRoleBindings: %w", err) + } + + return nil +} + +func (r *KubeconfigRBACReconciler) reconcileBindings(ctx context.Context, kc *operatorv1alpha1.Kubeconfig) error { + targetClient, err := client.NewInternalKubeconfigClient(ctx, r.Client, kc, logicalcluster.Name(kc.Spec.Authorization.ClusterRoleBindings.Cluster), nil) + if err != nil { + return fmt.Errorf("failed to create client to kubeconfig target: %w", err) + } + + // find all existing bindings + ownerLabels := kubeconfig.OwnerLabels(kc) + crbList := &rbacv1.ClusterRoleBindingList{} + if err := targetClient.List(ctx, crbList, ctrlruntimeclient.MatchingLabels(ownerLabels)); err != nil { + return fmt.Errorf("failed to list existing ClusterRoleBindings: %w", err) + } + + // delete those not configured in the kubeconfig anymore + var desiredBindings sets.Set[string] + if a := kc.Spec.Authorization; a != nil { + desiredBindings = sets.New(a.ClusterRoleBindings.ClusterRoles...) + } + + logger := log.FromContext(ctx) + + for _, crb := range crbList.Items { + roleName := crb.RoleRef.Name + + if !desiredBindings.Has(roleName) { + logger.V(2).WithValues("name", crb.Name, "clusterrole", roleName).Info("Deleting overhanging ClusterRoleBinding") + + if err := targetClient.Delete(ctx, &crb); err != nil { + return fmt.Errorf("failed to delete overhanging ClusterRoleBinding %s: %w", crb.Name, err) + } + } + } + + // create reconcilers for each intended binding + subject := rbacv1.Subject{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Group", + Name: kubeconfig.KubeconfigGroup(kc), + } + + reconcilers := make([]reconciling.NamedClusterRoleBindingReconcilerFactory, 0, desiredBindings.Len()) + for _, roleName := range sets.List(desiredBindings) { + reconcilers = append(reconcilers, kubeconfig.ClusterRoleBindingReconciler(kc, roleName, subject)) + } + + if err := reconciling.ReconcileClusterRoleBindings(ctx, reconcilers, "", targetClient); err != nil { + return fmt.Errorf("failed to ensure ClusterRoleBindings: %w", err) + } + + return nil +} + +func (r *KubeconfigRBACReconciler) handleDeletion(ctx context.Context, kc *operatorv1alpha1.Kubeconfig) error { + // Did we already perform our cleanup or did this kubeconfig never have any bindings? 
+ if !slices.Contains(kc.Finalizers, cleanupFinalizer) { + return nil + } + + if err := r.unprovisionCluster(ctx, kc); err != nil { + return err + } + + // when all are gone, remove the finalizer + if err := r.removeFinalizer(ctx, kc); err != nil { + return fmt.Errorf("failed to remove cleanup finalizer: %w", err) + } + + return nil +} + +func (r *KubeconfigRBACReconciler) unprovisionCluster(ctx context.Context, kc *operatorv1alpha1.Kubeconfig) error { + cluster := kc.Status.Authorization.ProvisionedCluster + if cluster == "" { + return nil + } + + targetClient, err := client.NewInternalKubeconfigClient(ctx, r.Client, kc, logicalcluster.Name(cluster), nil) + if err != nil { + return fmt.Errorf("failed to create client to kubeconfig target: %w", err) + } + + // find all existing bindings + ownerLabels := kubeconfig.OwnerLabels(kc) + crbList := &rbacv1.ClusterRoleBindingList{} + if err := targetClient.List(ctx, crbList, ctrlruntimeclient.MatchingLabels(ownerLabels)); err != nil { + return fmt.Errorf("failed to list existing ClusterRoleBindings: %w", err) + } + + // delete all of them + logger := log.FromContext(ctx) + + for _, crb := range crbList.Items { + logger.V(2).WithValues("name", crb.Name).Info("Deleting ClusterRoleBinding") + + if err := targetClient.Delete(ctx, &crb); err != nil { + return fmt.Errorf("failed to delete ClusterRoleBinding %s: %w", crb.Name, err) + } + } + + // clean status + if _, err := r.patchProvisionedCluster(ctx, kc, ""); err != nil { + return fmt.Errorf("failed to finish unprovisioning: %w", err) + } + + return nil +} + +func (r *KubeconfigRBACReconciler) patchProvisionedCluster(ctx context.Context, kc *operatorv1alpha1.Kubeconfig, newValue string) (updated bool, err error) { + if auth := kc.Status.Authorization; auth != nil && auth.ProvisionedCluster == newValue { + return false, nil + } + + oldKubeconfig := kc.DeepCopy() + + if kc.Status.Authorization == nil { + kc.Status.Authorization = &operatorv1alpha1.KubeconfigAuthorizationStatus{} + } + kc.Status.Authorization.ProvisionedCluster = newValue + + return true, r.Status().Patch(ctx, kc, ctrlruntimeclient.MergeFrom(oldKubeconfig)) +} + +func (r *KubeconfigRBACReconciler) ensureFinalizer(ctx context.Context, config *operatorv1alpha1.Kubeconfig) (updated bool, err error) { + finalizers := sets.New(config.GetFinalizers()...) + if finalizers.Has(cleanupFinalizer) { + return false, nil + } + + original := config.DeepCopy() + + finalizers.Insert(cleanupFinalizer) + config.SetFinalizers(sets.List(finalizers)) + + if err := r.Patch(ctx, config, ctrlruntimeclient.MergeFrom(original)); err != nil { + return false, err + } + + return true, nil +} + +func (r *KubeconfigRBACReconciler) removeFinalizer(ctx context.Context, config *operatorv1alpha1.Kubeconfig) error { + finalizers := sets.New(config.GetFinalizers()...) + if !finalizers.Has(cleanupFinalizer) { + return nil + } + + original := config.DeepCopy() + + finalizers.Delete(cleanupFinalizer) + config.SetFinalizers(sets.List(finalizers)) + + return r.Patch(ctx, config, ctrlruntimeclient.MergeFrom(original)) +} diff --git a/internal/controller/kubeconfig-rbac/controller_test.go b/internal/controller/kubeconfig-rbac/controller_test.go new file mode 100644 index 00000000..07127ae2 --- /dev/null +++ b/internal/controller/kubeconfig-rbac/controller_test.go @@ -0,0 +1,107 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfigrbac + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + ctrlruntimefakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/kcp-dev/kcp-operator/internal/controller/util" + operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1" +) + +func TestReconciling(t *testing.T) { + const namespace = "kubeconfig-tests" + + testcases := []struct { + name string + rootShard *operatorv1alpha1.RootShard + kubeConfig *operatorv1alpha1.Kubeconfig + }{ + { + name: "vanilla", + rootShard: &operatorv1alpha1.RootShard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rooty", + Namespace: namespace, + }, + Spec: operatorv1alpha1.RootShardSpec{ + External: operatorv1alpha1.ExternalConfig{ + Hostname: "example.kcp.io", + Port: 6443, + }, + CommonShardSpec: operatorv1alpha1.CommonShardSpec{ + Etcd: operatorv1alpha1.EtcdConfig{ + Endpoints: []string{"https://localhost:2379"}, + }, + }, + }, + }, + kubeConfig: &operatorv1alpha1.Kubeconfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "confy", + Namespace: namespace, + }, + Spec: operatorv1alpha1.KubeconfigSpec{ + Validity: metav1.Duration{Duration: 24 * time.Hour}, + SecretRef: corev1.LocalObjectReference{ + Name: "confy-secret", + }, + Target: operatorv1alpha1.KubeconfigTarget{ + RootShardRef: &corev1.LocalObjectReference{ + Name: "rooty", + }, + }, + }, + }, + }, + } + + scheme := util.GetTestScheme() + + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + client := ctrlruntimefakeclient. + NewClientBuilder(). + WithScheme(scheme). + WithStatusSubresource(testcase.rootShard). + WithObjects(testcase.rootShard, testcase.kubeConfig). + Build() + + ctx := context.Background() + + controllerReconciler := &KubeconfigRBACReconciler{ + Client: client, + Scheme: client.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: ctrlruntimeclient.ObjectKeyFromObject(testcase.kubeConfig), + }) + require.NoError(t, err) + }) + } +} diff --git a/internal/controller/kubeconfig/controller.go b/internal/controller/kubeconfig/controller.go index fc6a4059..b7d1c936 100644 --- a/internal/controller/kubeconfig/controller.go +++ b/internal/controller/kubeconfig/controller.go @@ -55,6 +55,7 @@ type KubeconfigReconciler struct { // SetupWithManager sets up the controller with the Manager. func (r *KubeconfigReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). + Named("kubeconfig"). For(&operatorv1alpha1.Kubeconfig{}). Watches(&operatorv1alpha1.RootShard{}, handler.EnqueueRequestsFromMapFunc(r.mapRootShardToKubeconfigs)). Watches(&operatorv1alpha1.Shard{}, handler.EnqueueRequestsFromMapFunc(r.mapShardToKubeconfigs)). 
diff --git a/internal/controller/rootshard/controller.go b/internal/controller/rootshard/controller.go index fc021b2c..51575a21 100644 --- a/internal/controller/rootshard/controller.go +++ b/internal/controller/rootshard/controller.go @@ -71,6 +71,7 @@ func (r *RootShardReconciler) SetupWithManager(mgr ctrl.Manager) error { }) return ctrl.NewControllerManagedBy(mgr). + Named("rootshard"). For(&operatorv1alpha1.RootShard{}). Owns(&appsv1.Deployment{}). Owns(&corev1.ConfigMap{}). diff --git a/internal/controller/shard/controller.go b/internal/controller/shard/controller.go index 1fd52766..64232b43 100644 --- a/internal/controller/shard/controller.go +++ b/internal/controller/shard/controller.go @@ -73,6 +73,7 @@ func (r *ShardReconciler) SetupWithManager(mgr ctrl.Manager) error { }) return ctrl.NewControllerManagedBy(mgr). + Named("shard"). For(&operatorv1alpha1.Shard{}). Owns(&appsv1.Deployment{}). Owns(&corev1.Secret{}). diff --git a/internal/kubernetes/metadata.go b/internal/kubernetes/metadata.go index 50154ae5..e109bf11 100644 --- a/internal/kubernetes/metadata.go +++ b/internal/kubernetes/metadata.go @@ -24,24 +24,20 @@ import ( func EnsureLabels(o metav1.Object, toEnsure map[string]string) { labels := maps.Clone(o.GetLabels()) - if labels == nil { labels = make(map[string]string) } - for key, value := range toEnsure { - labels[key] = value - } + + maps.Copy(labels, toEnsure) o.SetLabels(labels) } func EnsureAnnotations(o metav1.Object, toEnsure map[string]string) { annotations := maps.Clone(o.GetAnnotations()) - if annotations == nil { annotations = make(map[string]string) } - for key, value := range toEnsure { - annotations[key] = value - } + + maps.Copy(annotations, toEnsure) o.SetAnnotations(annotations) } diff --git a/internal/resources/frontproxy/reconciler.go b/internal/resources/frontproxy/reconciler.go index 3fc4da45..3fae8c92 100644 --- a/internal/resources/frontproxy/reconciler.go +++ b/internal/resources/frontproxy/reconciler.go @@ -55,6 +55,10 @@ func NewRootShardProxy(rootShard *operatorv1alpha1.RootShard) *reconciler { } } +// +kubebuilder:rbac:groups=core,resources=configmaps;secrets;services,verbs=get;update;patch +// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;update;patch +// +kubebuilder:rbac:groups=cert-manager.io,resources=certificates,verbs=get;update;patch + func (r *reconciler) Reconcile(ctx context.Context, client ctrlruntimeclient.Client, namespace string) error { var errs []error diff --git a/internal/resources/kubeconfig/certificate.go b/internal/resources/kubeconfig/certificate.go index 14944a40..3e131039 100644 --- a/internal/resources/kubeconfig/certificate.go +++ b/internal/resources/kubeconfig/certificate.go @@ -20,6 +20,8 @@ import ( certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" certmanagermetav1 "github.com/cert-manager/cert-manager/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "github.com/kcp-dev/kcp-operator/internal/reconciling" "github.com/kcp-dev/kcp-operator/internal/resources" "github.com/kcp-dev/kcp-operator/internal/resources/utils" @@ -27,6 +29,9 @@ import ( ) func ClientCertificateReconciler(kubeConfig *operatorv1alpha1.Kubeconfig, issuerName string) reconciling.NamedCertificateReconcilerFactory { + orgs := sets.New(kubeConfig.Spec.Groups...) 
+ orgs.Insert(KubeconfigGroup(kubeConfig)) + + return func() (string, reconciling.CertificateReconciler) { return kubeConfig.GetCertificateName(), func(cert *certmanagerv1.Certificate) (*certmanagerv1.Certificate, error) { cert.SetLabels(kubeConfig.Labels) @@ -50,7 +55,7 @@ func ClientCertificateReconciler(kubeConfig *operatorv1alpha1.Kubeconfig, issuer CommonName: kubeConfig.Spec.Username, Subject: &certmanagerv1.X509Subject{ - Organizations: kubeConfig.Spec.Groups, + Organizations: sets.List(orgs), }, IssuerRef: certmanagermetav1.ObjectReference{ diff --git a/internal/resources/kubeconfig/rbac.go b/internal/resources/kubeconfig/rbac.go new file mode 100644 index 00000000..ff39bdad --- /dev/null +++ b/internal/resources/kubeconfig/rbac.go @@ -0,0 +1,58 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "fmt" + + "k8c.io/reconciler/pkg/reconciling" + + rbacv1 "k8s.io/api/rbac/v1" + + "github.com/kcp-dev/kcp-operator/internal/kubernetes" + operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1" +) + +func OwnerLabels(owner *operatorv1alpha1.Kubeconfig) map[string]string { + return map[string]string{ + "operator.kcp.io/kubeconfig": string(owner.UID), + } +} + +func KubeconfigGroup(kc *operatorv1alpha1.Kubeconfig) string { + return fmt.Sprintf("kubeconfig:%s", kc.Name) +} + +func ClusterRoleBindingReconciler(owner *operatorv1alpha1.Kubeconfig, clusterRole string, subject rbacv1.Subject) reconciling.NamedClusterRoleBindingReconcilerFactory { + name := fmt.Sprintf("%s:%s", owner.UID, clusterRole) + + return func() (string, reconciling.ClusterRoleBindingReconciler) { + return name, func(crb *rbacv1.ClusterRoleBinding) (*rbacv1.ClusterRoleBinding, error) { + kubernetes.EnsureLabels(crb, OwnerLabels(owner)) + + crb.RoleRef = rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: clusterRole, + } + + crb.Subjects = []rbacv1.Subject{subject} + + return crb, nil + } + } +} diff --git a/internal/resources/utils/certificates.go b/internal/resources/utils/certificates.go index 700f5d69..f7c1766d 100644 --- a/internal/resources/utils/certificates.go +++ b/internal/resources/utils/certificates.go @@ -71,10 +71,10 @@ func applyCertificateSpecTemplate(cert *certmanagerv1.Certificate, tpl *operator return cert } - // If DNSNames is provided in the template and issuer is overrided, + // If DNSNames is provided in the template and issuer is overridden, // it will replace any existing DNSNames. // We don't merge as we don't know if issuer supports our default names. - // Its users responsibility to add them back if needed. + // It's the user's responsibility to add them back if needed.
if len(tpl.DNSNames) > 0 && tpl.IssuerRef != nil { cert.Spec.DNSNames = tpl.DNSNames } else if len(tpl.DNSNames) > 0 { diff --git a/internal/resources/utils/logging_test.go b/internal/resources/utils/logging_test.go index 527a5c08..bb8c6a08 100644 --- a/internal/resources/utils/logging_test.go +++ b/internal/resources/utils/logging_test.go @@ -29,7 +29,7 @@ func TestGetLogLevelArgs(t *testing.T) { expected []string }{ { - name: "no config at alll", + name: "no config at all", logging: nil, expected: []string{}, }, diff --git a/sdk/apis/operator/v1alpha1/common.go b/sdk/apis/operator/v1alpha1/common.go index 1b7d9732..42e24f21 100644 --- a/sdk/apis/operator/v1alpha1/common.go +++ b/sdk/apis/operator/v1alpha1/common.go @@ -146,7 +146,7 @@ type CertificateSpecTemplate struct { // DNS names determined automatically by the kcp-operator. // If DNSNames is used together with IssuerRef, DNSNames will be uses as-is and not merged. // If IssuerRef is not set, DNSNames will be merged with the defaults. This is to avoid - // trying to guess what DNSNames configued issuer might support. + // trying to guess what DNSNames configured issuer might support. // // +optional DNSNames []string `json:"dnsNames,omitempty"` diff --git a/sdk/apis/operator/v1alpha1/kubeconfig_types.go b/sdk/apis/operator/v1alpha1/kubeconfig_types.go index 8937b49c..1797657d 100644 --- a/sdk/apis/operator/v1alpha1/kubeconfig_types.go +++ b/sdk/apis/operator/v1alpha1/kubeconfig_types.go @@ -42,6 +42,9 @@ type KubeconfigSpec struct { // CertificateTemplate allows to customize the properties on the generated // certificate for this kubeconfig. CertificateTemplate *CertificateTemplate `json:"certificateTemplate,omitempty"` + + // Authorization allows to provision permissions for this kubeconfig. + Authorization *KubeconfigAuthorization `json:"authorization,omitempty"` } type KubeconfigTarget struct { @@ -50,6 +53,16 @@ type KubeconfigTarget struct { FrontProxyRef *corev1.LocalObjectReference `json:"frontProxyRef,omitempty"` } +type KubeconfigAuthorization struct { + ClusterRoleBindings KubeconfigClusterRoleBindings `json:"clusterRoleBindings"` +} + +type KubeconfigClusterRoleBindings struct { + // Cluster can be either a cluster name or a workspace path. + Cluster string `json:"cluster"` + ClusterRoles []string `json:"clusterRoles"` +} + type KubeconfigPhase string const ( @@ -66,11 +79,17 @@ type KubeconfigStatus struct { // TargetName represents the name of the target resource (RootShard, Shard, or FrontProxy). TargetName string `json:"targetName,omitempty"` + Authorization *KubeconfigAuthorizationStatus `json:"authorization,omitempty"` + // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty"` } +type KubeconfigAuthorizationStatus struct { + ProvisionedCluster string `json:"provisionedCluster"` +} + // +genclient // +kubebuilder:object:root=true // +kubebuilder:subresource:status diff --git a/sdk/apis/operator/v1alpha1/zz_generated.deepcopy.go b/sdk/apis/operator/v1alpha1/zz_generated.deepcopy.go index faa20522..2e407c5e 100644 --- a/sdk/apis/operator/v1alpha1/zz_generated.deepcopy.go +++ b/sdk/apis/operator/v1alpha1/zz_generated.deepcopy.go @@ -889,6 +889,57 @@ func (in *Kubeconfig) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeconfigAuthorization) DeepCopyInto(out *KubeconfigAuthorization) { + *out = *in + in.ClusterRoleBindings.DeepCopyInto(&out.ClusterRoleBindings) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigAuthorization. +func (in *KubeconfigAuthorization) DeepCopy() *KubeconfigAuthorization { + if in == nil { + return nil + } + out := new(KubeconfigAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeconfigAuthorizationStatus) DeepCopyInto(out *KubeconfigAuthorizationStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigAuthorizationStatus. +func (in *KubeconfigAuthorizationStatus) DeepCopy() *KubeconfigAuthorizationStatus { + if in == nil { + return nil + } + out := new(KubeconfigAuthorizationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeconfigClusterRoleBindings) DeepCopyInto(out *KubeconfigClusterRoleBindings) { + *out = *in + if in.ClusterRoles != nil { + in, out := &in.ClusterRoles, &out.ClusterRoles + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigClusterRoleBindings. +func (in *KubeconfigClusterRoleBindings) DeepCopy() *KubeconfigClusterRoleBindings { + if in == nil { + return nil + } + out := new(KubeconfigClusterRoleBindings) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeconfigList) DeepCopyInto(out *KubeconfigList) { *out = *in @@ -937,6 +988,11 @@ func (in *KubeconfigSpec) DeepCopyInto(out *KubeconfigSpec) { *out = new(CertificateTemplate) (*in).DeepCopyInto(*out) } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(KubeconfigAuthorization) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigSpec. @@ -952,6 +1008,11 @@ func (in *KubeconfigSpec) DeepCopy() *KubeconfigSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeconfigStatus) DeepCopyInto(out *KubeconfigStatus) { *out = *in + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(KubeconfigAuthorizationStatus) + **out = **in + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]metav1.Condition, len(*in)) diff --git a/sdk/applyconfiguration/operator/v1alpha1/kubeconfigauthorization.go b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigauthorization.go new file mode 100644 index 00000000..e8a0f7f9 --- /dev/null +++ b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigauthorization.go @@ -0,0 +1,39 @@ +/* +Copyright 2024 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// KubeconfigAuthorizationApplyConfiguration represents a declarative configuration of the KubeconfigAuthorization type for use +// with apply. +type KubeconfigAuthorizationApplyConfiguration struct { + ClusterRoleBindings *KubeconfigClusterRoleBindingsApplyConfiguration `json:"clusterRoleBindings,omitempty"` +} + +// KubeconfigAuthorizationApplyConfiguration constructs a declarative configuration of the KubeconfigAuthorization type for use with +// apply. +func KubeconfigAuthorization() *KubeconfigAuthorizationApplyConfiguration { + return &KubeconfigAuthorizationApplyConfiguration{} +} + +// WithClusterRoleBindings sets the ClusterRoleBindings field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterRoleBindings field is set to the value of the last call. +func (b *KubeconfigAuthorizationApplyConfiguration) WithClusterRoleBindings(value *KubeconfigClusterRoleBindingsApplyConfiguration) *KubeconfigAuthorizationApplyConfiguration { + b.ClusterRoleBindings = value + return b +} diff --git a/sdk/applyconfiguration/operator/v1alpha1/kubeconfigauthorizationstatus.go b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigauthorizationstatus.go new file mode 100644 index 00000000..d439d48d --- /dev/null +++ b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigauthorizationstatus.go @@ -0,0 +1,39 @@ +/* +Copyright 2024 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// KubeconfigAuthorizationStatusApplyConfiguration represents a declarative configuration of the KubeconfigAuthorizationStatus type for use +// with apply. +type KubeconfigAuthorizationStatusApplyConfiguration struct { + ProvisionedCluster *string `json:"provisionedCluster,omitempty"` +} + +// KubeconfigAuthorizationStatusApplyConfiguration constructs a declarative configuration of the KubeconfigAuthorizationStatus type for use with +// apply. +func KubeconfigAuthorizationStatus() *KubeconfigAuthorizationStatusApplyConfiguration { + return &KubeconfigAuthorizationStatusApplyConfiguration{} +} + +// WithProvisionedCluster sets the ProvisionedCluster field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProvisionedCluster field is set to the value of the last call. 
+func (b *KubeconfigAuthorizationStatusApplyConfiguration) WithProvisionedCluster(value string) *KubeconfigAuthorizationStatusApplyConfiguration { + b.ProvisionedCluster = &value + return b +} diff --git a/sdk/applyconfiguration/operator/v1alpha1/kubeconfigclusterrolebindings.go b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigclusterrolebindings.go new file mode 100644 index 00000000..c4e7e956 --- /dev/null +++ b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigclusterrolebindings.go @@ -0,0 +1,50 @@ +/* +Copyright 2024 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// KubeconfigClusterRoleBindingsApplyConfiguration represents a declarative configuration of the KubeconfigClusterRoleBindings type for use +// with apply. +type KubeconfigClusterRoleBindingsApplyConfiguration struct { + Cluster *string `json:"cluster,omitempty"` + ClusterRoles []string `json:"clusterRoles,omitempty"` +} + +// KubeconfigClusterRoleBindingsApplyConfiguration constructs a declarative configuration of the KubeconfigClusterRoleBindings type for use with +// apply. +func KubeconfigClusterRoleBindings() *KubeconfigClusterRoleBindingsApplyConfiguration { + return &KubeconfigClusterRoleBindingsApplyConfiguration{} +} + +// WithCluster sets the Cluster field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Cluster field is set to the value of the last call. +func (b *KubeconfigClusterRoleBindingsApplyConfiguration) WithCluster(value string) *KubeconfigClusterRoleBindingsApplyConfiguration { + b.Cluster = &value + return b +} + +// WithClusterRoles adds the given value to the ClusterRoles field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ClusterRoles field. +func (b *KubeconfigClusterRoleBindingsApplyConfiguration) WithClusterRoles(values ...string) *KubeconfigClusterRoleBindingsApplyConfiguration { + for i := range values { + b.ClusterRoles = append(b.ClusterRoles, values[i]) + } + return b +} diff --git a/sdk/applyconfiguration/operator/v1alpha1/kubeconfigspec.go b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigspec.go index 4830a5cb..95039339 100644 --- a/sdk/applyconfiguration/operator/v1alpha1/kubeconfigspec.go +++ b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigspec.go @@ -26,12 +26,13 @@ import ( // KubeconfigSpecApplyConfiguration represents a declarative configuration of the KubeconfigSpec type for use // with apply. 
type KubeconfigSpecApplyConfiguration struct { - Target *KubeconfigTargetApplyConfiguration `json:"target,omitempty"` - Username *string `json:"username,omitempty"` - Groups []string `json:"groups,omitempty"` - Validity *v1.Duration `json:"validity,omitempty"` - SecretRef *corev1.LocalObjectReference `json:"secretRef,omitempty"` - CertificateTemplate *CertificateTemplateApplyConfiguration `json:"certificateTemplate,omitempty"` + Target *KubeconfigTargetApplyConfiguration `json:"target,omitempty"` + Username *string `json:"username,omitempty"` + Groups []string `json:"groups,omitempty"` + Validity *v1.Duration `json:"validity,omitempty"` + SecretRef *corev1.LocalObjectReference `json:"secretRef,omitempty"` + CertificateTemplate *CertificateTemplateApplyConfiguration `json:"certificateTemplate,omitempty"` + Authorization *KubeconfigAuthorizationApplyConfiguration `json:"authorization,omitempty"` } // KubeconfigSpecApplyConfiguration constructs a declarative configuration of the KubeconfigSpec type for use with @@ -89,3 +90,11 @@ func (b *KubeconfigSpecApplyConfiguration) WithCertificateTemplate(value *Certif b.CertificateTemplate = value return b } + +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. +func (b *KubeconfigSpecApplyConfiguration) WithAuthorization(value *KubeconfigAuthorizationApplyConfiguration) *KubeconfigSpecApplyConfiguration { + b.Authorization = value + return b +} diff --git a/sdk/applyconfiguration/operator/v1alpha1/kubeconfigstatus.go b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigstatus.go index 483c3f2e..5aa874e4 100644 --- a/sdk/applyconfiguration/operator/v1alpha1/kubeconfigstatus.go +++ b/sdk/applyconfiguration/operator/v1alpha1/kubeconfigstatus.go @@ -27,9 +27,10 @@ import ( // KubeconfigStatusApplyConfiguration represents a declarative configuration of the KubeconfigStatus type for use // with apply. type KubeconfigStatusApplyConfiguration struct { - Phase *operatorv1alpha1.KubeconfigPhase `json:"phase,omitempty"` - TargetName *string `json:"targetName,omitempty"` - Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` + Phase *operatorv1alpha1.KubeconfigPhase `json:"phase,omitempty"` + TargetName *string `json:"targetName,omitempty"` + Authorization *KubeconfigAuthorizationStatusApplyConfiguration `json:"authorization,omitempty"` + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` } // KubeconfigStatusApplyConfiguration constructs a declarative configuration of the KubeconfigStatus type for use with @@ -54,6 +55,14 @@ func (b *KubeconfigStatusApplyConfiguration) WithTargetName(value string) *Kubec return b } +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. 
+func (b *KubeconfigStatusApplyConfiguration) WithAuthorization(value *KubeconfigAuthorizationStatusApplyConfiguration) *KubeconfigStatusApplyConfiguration { + b.Authorization = value + return b +} + // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Conditions field. diff --git a/sdk/applyconfiguration/utils.go b/sdk/applyconfiguration/utils.go index 8a984f51..d2c79e2b 100644 --- a/sdk/applyconfiguration/utils.go +++ b/sdk/applyconfiguration/utils.go @@ -87,6 +87,12 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &operatorv1alpha1.ImageSpecApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("Kubeconfig"): return &operatorv1alpha1.KubeconfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("KubeconfigAuthorization"): + return &operatorv1alpha1.KubeconfigAuthorizationApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("KubeconfigAuthorizationStatus"): + return &operatorv1alpha1.KubeconfigAuthorizationStatusApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("KubeconfigClusterRoleBindings"): + return &operatorv1alpha1.KubeconfigClusterRoleBindingsApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("KubeconfigSpec"): return &operatorv1alpha1.KubeconfigSpecApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("KubeconfigStatus"): diff --git a/test/e2e/frontproxies/frontproxies_test.go b/test/e2e/frontproxies/frontproxies_test.go index 0cb356f6..56e6cc23 100644 --- a/test/e2e/frontproxies/frontproxies_test.go +++ b/test/e2e/frontproxies/frontproxies_test.go @@ -20,11 +20,11 @@ package frontproxies import ( "context" - "fmt" "testing" "time" "github.com/go-logr/logr" + "github.com/kcp-dev/logicalcluster/v3" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -36,8 +36,6 @@ import ( ) func TestCreateFrontProxy(t *testing.T) { - fmt.Println() - ctrlruntime.SetLogger(logr.Discard()) client := utils.GetKubeClient(t) @@ -81,7 +79,7 @@ func TestCreateFrontProxy(t *testing.T) { // verify that we can use frontproxy kubeconfig to access rootshard workspaces t.Log("Connecting to FrontProxy...") - kcpClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace.Name, fpConfig.Name) + kcpClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace.Name, fpConfig.Name, logicalcluster.None) // proof of life: list something every logicalcluster in kcp has t.Log("Should be able to list Secrets.") secrets := &corev1.SecretList{} diff --git a/test/e2e/kubeconfig-rbac/frontproxies_test.go b/test/e2e/kubeconfig-rbac/frontproxies_test.go new file mode 100644 index 00000000..16f9ece0 --- /dev/null +++ b/test/e2e/kubeconfig-rbac/frontproxies_test.go @@ -0,0 +1,167 @@ +//go:build e2e + +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubeconfigrbac + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/go-logr/logr" + kcptenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" + "github.com/kcp-dev/logicalcluster/v3" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + ctrlruntime "sigs.k8s.io/controller-runtime" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + + operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1" + "github.com/kcp-dev/kcp-operator/test/utils" +) + +func TestProvisionFrontProxyRBAC(t *testing.T) { + ctrlruntime.SetLogger(logr.Discard()) + + client := utils.GetKubeClient(t) + ctx := context.Background() + + rootCluster := logicalcluster.NewPath("root") + namespace := utils.CreateSelfDestructingNamespace(t, ctx, client, "provision-frontproxy-rbac") + externalHostname := fmt.Sprintf("front-proxy-front-proxy.%s.svc.cluster.local", namespace.Name) + + // deploy rootshard + rootShard := utils.DeployRootShard(ctx, t, client, namespace.Name, externalHostname) + + // deploy front-proxy + frontProxy := utils.DeployFrontProxy(ctx, t, client, namespace.Name, rootShard.Name, externalHostname) + + // create a dummy workspace where we later want to provision RBAC in + t.Log("Creating dummy workspace…") + workspace := &kcptenancyv1alpha1.Workspace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: kcptenancyv1alpha1.WorkspaceSpec{ + Type: kcptenancyv1alpha1.WorkspaceTypeReference{ + Name: "universal", + }, + }, + } + + dummyCluster := rootCluster.Join(workspace.Name) + proxyClient := utils.ConnectWithRootShardProxy(t, ctx, client, &rootShard, rootCluster) + if err := proxyClient.Create(ctx, workspace); err != nil { + t.Fatalf("Failed to create workspace: %v", err) + } + + // wait for workspace to be ready + t.Log("Waiting for workspace to be ready…") + dummyClient := utils.ConnectWithRootShardProxy(t, ctx, client, &rootShard, dummyCluster) + + err := wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (done bool, err error) { + return dummyClient.List(ctx, &corev1.SecretList{}) == nil, nil + }) + if err != nil { + t.Fatalf("Failed to wait for workspace to become available: %v", err) + } + + // create my-config kubeconfig + configSecretName := "kubeconfig-my-config-e2e" + + // as of now, this Kubeconfig will not grant any permissions yet + fpConfig := operatorv1alpha1.Kubeconfig{} + fpConfig.Name = "my-config" + fpConfig.Namespace = namespace.Name + fpConfig.Spec = operatorv1alpha1.KubeconfigSpec{ + Target: operatorv1alpha1.KubeconfigTarget{ + FrontProxyRef: &corev1.LocalObjectReference{ + Name: frontProxy.Name, + }, + }, + Username: "e2e", + Validity: metav1.Duration{Duration: 2 * time.Hour}, + SecretRef: corev1.LocalObjectReference{ + Name: configSecretName, + }, + } + + t.Log("Creating kubeconfig with no permissions attached…") + if err := client.Create(ctx, &fpConfig); err != nil { + t.Fatal(err) + } + utils.WaitForObject(t, ctx, client, &corev1.Secret{}, types.NamespacedName{Namespace: fpConfig.Namespace, Name: fpConfig.Spec.SecretRef.Name}) + + t.Log("Connecting to FrontProxy…") + kcpClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace.Name, fpConfig.Name, dummyCluster) + + // This should not work yet. 
+ t.Logf("Should not be able to list Secrets in %v.", dummyCluster) + if err := kcpClient.List(ctx, &corev1.SecretList{}); err == nil { + t.Fatal("Should not have been able to list Secrets, but was. Where have my permissions come from?") + } + + // Now we extend the Kubeconfig with additional permissions. + if err := client.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(&fpConfig), &fpConfig); err != nil { + t.Fatal(err) + } + + fpConfig.Spec.Authorization = &operatorv1alpha1.KubeconfigAuthorization{ + ClusterRoleBindings: operatorv1alpha1.KubeconfigClusterRoleBindings{ + Cluster: dummyCluster.String(), + ClusterRoles: []string{"cluster-admin"}, + }, + } + + t.Log("Updating kubeconfig with permissions attached…") + if err := client.Update(ctx, &fpConfig); err != nil { + t.Fatal(err) + } + + t.Logf("Should now be able to list Secrets in %v.", dummyCluster) + err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (done bool, err error) { + return kcpClient.List(ctx, &corev1.SecretList{}) == nil, nil + }) + if err != nil { + t.Fatalf("Failed to list Secrets in dummy workspace: %v", err) + } + + // And now we remove the permissions again. + t.Log("Updating kubeconfig to remove the attached permissions…") + if err := client.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(&fpConfig), &fpConfig); err != nil { + t.Fatal(err) + } + + fpConfig.Spec.Authorization = nil + + if err := client.Update(ctx, &fpConfig); err != nil { + t.Fatal(err) + } + + t.Logf("Should no longer be able to list Secrets in %v.", dummyCluster) + err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (done bool, err error) { + return kcpClient.List(ctx, &corev1.SecretList{}) != nil, nil + }) + if err != nil { + t.Fatalf("Failed to wait for permissions to be gone: %v", err) + } +} diff --git a/test/e2e/rootshards/proxy_test.go b/test/e2e/rootshards/proxy_test.go index e12ed877..0a77d741 100644 --- a/test/e2e/rootshards/proxy_test.go +++ b/test/e2e/rootshards/proxy_test.go @@ -45,10 +45,9 @@ func TestRootShardProxy(t *testing.T) { client := utils.GetKubeClient(t) ctx := context.Background() - namespaceSuffix := "rootshard-proxy" - namespace := utils.CreateSelfDestructingNamespace(t, ctx, client, namespaceSuffix) - externalHostname := fmt.Sprintf("front-proxy-front-proxy.e2e-%s.svc.cluster.local", namespaceSuffix) + namespace := utils.CreateSelfDestructingNamespace(t, ctx, client, "rootshard-proxy") + externalHostname := fmt.Sprintf("front-proxy-front-proxy.%s.svc.cluster.local", namespace.Name) // deploy a root shard incl. 
etcd rootShard := utils.DeployRootShard(ctx, t, client, namespace.Name, externalHostname) @@ -87,7 +86,7 @@ func TestRootShardProxy(t *testing.T) { utils.WaitForObject(t, ctx, client, &corev1.Secret{}, types.NamespacedName{Namespace: rsConfig.Namespace, Name: rsConfig.Spec.SecretRef.Name}) t.Log("Connecting to RootShard...") - rootShardClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace.Name, rsConfig.Name) + rootShardClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace.Name, rsConfig.Name, logicalcluster.None) // wait until the 2nd shard has registered itself successfully at the root shard shardKey := types.NamespacedName{Name: shardName} @@ -131,9 +130,6 @@ func TestRootShardProxy(t *testing.T) { // build a client through the proxy to the new workspace proxyClient := utils.ConnectWithRootShardProxy(t, ctx, client, &rootShard, logicalcluster.NewPath("root").Join(workspace.Name)) - if err != nil { - t.Fatalf("Failed to create root shard proxy client: %v", err) - } // proof of life: list something every logicalcluster in kcp has t.Log("Should be able to list Secrets in the new workspace.") diff --git a/test/e2e/rootshards/rootshards_test.go b/test/e2e/rootshards/rootshards_test.go index 799d1369..6e378daa 100644 --- a/test/e2e/rootshards/rootshards_test.go +++ b/test/e2e/rootshards/rootshards_test.go @@ -24,6 +24,7 @@ import ( "time" "github.com/go-logr/logr" + "github.com/kcp-dev/logicalcluster/v3" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -70,7 +71,7 @@ func TestCreateRootShard(t *testing.T) { utils.WaitForObject(t, ctx, client, &corev1.Secret{}, types.NamespacedName{Namespace: rsConfig.Namespace, Name: rsConfig.Spec.SecretRef.Name}) t.Log("Connecting to RootShard...") - kcpClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace.Name, rsConfig.Name) + kcpClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace.Name, rsConfig.Name, logicalcluster.None) // proof of life: list something every logicalcluster in kcp has t.Log("Should be able to list Secrets.") diff --git a/test/e2e/shards/shards_test.go b/test/e2e/shards/shards_test.go index 0d2b7a80..f7d4aeea 100644 --- a/test/e2e/shards/shards_test.go +++ b/test/e2e/shards/shards_test.go @@ -26,6 +26,7 @@ import ( "github.com/go-logr/logr" kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" + "github.com/kcp-dev/logicalcluster/v3" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -80,7 +81,7 @@ func TestCreateShard(t *testing.T) { utils.WaitForObject(t, ctx, client, &corev1.Secret{}, types.NamespacedName{Namespace: rsConfig.Namespace, Name: rsConfig.Spec.SecretRef.Name}) t.Log("Connecting to RootShard...") - rootShardClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace.Name, rsConfig.Name) + rootShardClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace.Name, rsConfig.Name, logicalcluster.None) // wait until the 2nd shard has registered itself successfully at the root shard shardKey := types.NamespacedName{Name: shardName} @@ -115,7 +116,7 @@ func TestCreateShard(t *testing.T) { utils.WaitForObject(t, ctx, client, &corev1.Secret{}, types.NamespacedName{Namespace: shardConfig.Namespace, Name: shardConfig.Spec.SecretRef.Name}) t.Log("Connecting to Shard...") - kcpClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace.Name, shardConfig.Name) + kcpClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace.Name, shardConfig.Name, logicalcluster.None) // proof of life: list something every 
logicalcluster in kcp has
 	t.Log("Should be able to list Secrets.")
diff --git a/test/utils/utils.go b/test/utils/utils.go
index 0ec6c229..ac12da6a 100644
--- a/test/utils/utils.go
+++ b/test/utils/utils.go
@@ -22,6 +22,7 @@ import (
 	"net"
 	"net/url"
 	"os/exec"
+	"regexp"
 	"strconv"
 	"strings"
 	"testing"
@@ -29,6 +30,7 @@ import (
 
 	kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1"
 	kcptenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1"
+	"github.com/kcp-dev/kcp/sdk/testing/server"
 	"github.com/kcp-dev/logicalcluster/v3"
 
 	corev1 "k8s.io/api/core/v1"
@@ -154,13 +156,18 @@ func SelfDestuctingPortForward(
 	})
 }
 
-var currentPort = 56029
+func getPort(t *testing.T) int {
+	port, err := server.GetFreePort(t)
+	if err != nil {
+		t.Fatalf("Failed to get free port: %v", err)
+	}
 
-func getPort() int {
-	port := currentPort
-	currentPort++
+	portNum, err := strconv.Atoi(port)
+	if err != nil {
+		t.Fatalf("Failed to parse port %q as number: %v", port, err)
+	}
 
-	return port
+	return portNum
 }
 
 func ConnectWithKubeconfig(
@@ -169,6 +176,7 @@ func ConnectWithKubeconfig(
 	client ctrlruntimeclient.Client,
 	namespace string,
 	kubeconfigName string,
+	cluster logicalcluster.Path,
 ) ctrlruntimeclient.Client {
 	t.Helper()
 
@@ -212,13 +220,18 @@ func ConnectWithKubeconfig(
 	}
 
 	// start a port forwarding
-	localPort := getPort()
+	localPort := getPort(t)
 	SelfDestuctingPortForward(t, ctx, namespace, "svc/"+serviceName, int(portNum), localPort)
 
 	// patch the target server
 	parsed.Host = net.JoinHostPort("localhost", fmt.Sprintf("%d", localPort))
 	clientConfig.Host = parsed.String()
 
+	// switch to another workspace if desired
+	if !cluster.Empty() {
+		clientConfig.Host = changeClusterInURL(clientConfig.Host, cluster)
+	}
+
 	// create a client through the tunnel
 	kcpClient, err := ctrlruntimeclient.New(clientConfig, ctrlruntimeclient.Options{Scheme: NewScheme(t)})
 	if err != nil {
@@ -249,7 +262,7 @@ func ConnectWithRootShardProxy(
 	}
 
 	// start a port forwarding
-	localPort := getPort()
+	localPort := getPort(t)
 	servicePort := 6443
 	serviceName := resources.GetRootShardProxyServiceName(rootShard)
 
@@ -259,7 +272,7 @@ func ConnectWithRootShardProxy(
 	proxyUrl := fmt.Sprintf("https://%s", net.JoinHostPort("localhost", fmt.Sprintf("%d", localPort)))
 
 	if !cluster.Empty() {
-		proxyUrl = fmt.Sprintf("%s/clusters/%s", proxyUrl, cluster.String())
+		proxyUrl = changeClusterInURL(proxyUrl, cluster)
 	}
 
 	cfg := &rest.Config{
@@ -279,3 +292,19 @@ func ConnectWithRootShardProxy(
 
 	return kcpClient
 }
+
+var clusterRegexp = regexp.MustCompile(`/clusters/([^/]+)`)
+
+func changeClusterInURL(u string, newCluster logicalcluster.Path) string {
+	newPath := fmt.Sprintf("/clusters/%s", newCluster)
+
+	matches := clusterRegexp.FindAllString(u, 1)
+	if len(matches) == 0 {
+		return u + newPath
+	}
+
+	// make sure that if a URL is "/clusters/root/apis/example.com/v1/namespaces/bla/clusters/mycluster",
+	// we only replace the first match, especially important if the URL was "/clusters/X/apis/example.com/v1/clusters/X"
+	// (i.e. accessing the cluster resource X in the kcp cluster also called X)
+	return strings.Replace(u, matches[0], newPath, 1)
+}