diff --git a/cmd/api-syncagent/main.go b/cmd/api-syncagent/main.go index d103680..8fbca5c 100644 --- a/cmd/api-syncagent/main.go +++ b/cmd/api-syncagent/main.go @@ -100,45 +100,45 @@ func run(ctx context.Context, log *zap.SugaredLogger, opts *Options) error { } // load the kcp kubeconfig - platformRestConfig, err := loadKubeconfig(opts.PlatformKubeconfig) + kcpRestConfig, err := loadKubeconfig(opts.KcpKubeconfig) if err != nil { - return fmt.Errorf("failed to load platform kubeconfig: %w", err) + return fmt.Errorf("failed to load kcp kubeconfig: %w", err) } // sanity check - if !strings.Contains(platformRestConfig.Host, "/clusters/") { - return fmt.Errorf("platform kubeconfig does not point to a specific workspace") + if !strings.Contains(kcpRestConfig.Host, "/clusters/") { + return fmt.Errorf("kcp kubeconfig does not point to a specific workspace") } - // We check if the APIExport exists and extract information we need to set up our platformCluster. - apiExport, lcPath, lcName, err := resolveAPIExport(ctx, platformRestConfig, opts.APIExportRef) + // We check if the APIExport exists and extract information we need to set up our kcpCluster. + apiExport, lcPath, lcName, err := resolveAPIExport(ctx, kcpRestConfig, opts.APIExportRef) if err != nil { return fmt.Errorf("failed to resolve APIExport: %w", err) } log.Infow("Resolved APIExport", "apigroup", opts.APIExportRef, "workspace", lcPath, "logicalcluster", lcName) - // init the "permanent" platform cluster connection - platformCluster, err := setupPlatformCluster(platformRestConfig, opts) + // init the "permanent" kcp cluster connection + kcpCluster, err := setupKcpCluster(kcpRestConfig, opts) if err != nil { - return fmt.Errorf("failed to initialize platform cluster: %w", err) + return fmt.Errorf("failed to initialize kcp cluster: %w", err) } - // start the platform cluster caches when the manager boots up + // start the kcp cluster caches when the manager boots up // (happens regardless of leader election status) - if err := mgr.Add(platformCluster); err != nil { - return fmt.Errorf("failed to add platform cluster runnable: %w", err) + if err := mgr.Add(kcpCluster); err != nil { + return fmt.Errorf("failed to add kcp cluster runnable: %w", err) } - if err := apiresourceschema.Add(mgr, platformCluster, lcName, log, 4, opts.AgentName, opts.APIExportRef, opts.PublishedResourceSelector); err != nil { + if err := apiresourceschema.Add(mgr, kcpCluster, lcName, log, 4, opts.AgentName, opts.APIExportRef, opts.PublishedResourceSelector); err != nil { return fmt.Errorf("failed to add apiresourceschema controller: %w", err) } - if err := apiexport.Add(mgr, platformCluster, lcName, log, opts.APIExportRef, opts.AgentName, opts.PublishedResourceSelector); err != nil { + if err := apiexport.Add(mgr, kcpCluster, lcName, log, opts.APIExportRef, opts.AgentName, opts.PublishedResourceSelector); err != nil { return fmt.Errorf("failed to add apiexport controller: %w", err) } - if err := syncmanager.Add(ctx, mgr, platformCluster, platformRestConfig, log, apiExport, opts.PublishedResourceSelector); err != nil { + if err := syncmanager.Add(ctx, mgr, kcpCluster, kcpRestConfig, log, apiExport, opts.PublishedResourceSelector); err != nil { return fmt.Errorf("failed to add syncmanager controller: %w", err) } @@ -231,9 +231,9 @@ func resolveAPIExport(ctx context.Context, restConfig *rest.Config, apiExportRef return apiExport, lcPath, lcName, nil } -// setupPlatformCluster sets up a plain, non-kcp-aware ctrl-runtime Cluster object +// setupKcpCluster sets up a plain, 
non-kcp-aware ctrl-runtime Cluster object // that is solvely used to interact with the APIExport and APIResourceSchemas. -func setupPlatformCluster(restConfig *rest.Config, opts *Options) (cluster.Cluster, error) { +func setupKcpCluster(restConfig *rest.Config, opts *Options) (cluster.Cluster, error) { scheme := runtime.NewScheme() if err := kcpdevv1alpha1.AddToScheme(scheme); err != nil { @@ -246,7 +246,7 @@ func setupPlatformCluster(restConfig *rest.Config, opts *Options) (cluster.Clust return cluster.New(restConfig, func(o *cluster.Options) { o.Scheme = scheme - // RBAC on the platform cluster is very tight and does not allow to list/watch all objects; + // RBAC in kcp might be very tight and might not allow to list/watch all objects; // restrict the cache's selectors accordingly so we can still make use of caching. o.Cache = cache.Options{ Scheme: scheme, diff --git a/cmd/api-syncagent/options.go b/cmd/api-syncagent/options.go index 6401424..2530d70 100644 --- a/cmd/api-syncagent/options.go +++ b/cmd/api-syncagent/options.go @@ -35,10 +35,10 @@ type Options struct { // work. // KubeconfigFile string - // PlatformKubeconfig is the kubeconfig that gives access to kcp. This + // KcpKubeconfig is the kubeconfig that gives access to kcp. This // kubeconfig's cluster URL has to point to the workspace where the APIExport // referenced via APIExportRef lives. - PlatformKubeconfig string + KcpKubeconfig string // Namespace is the namespace that the Sync Agent runs in. Namespace string @@ -49,7 +49,7 @@ type Options struct { // AgentName can be used to give this Sync Agent instance a custom name. This name is used // for the Sync Agent resource inside kcp. This value must not be changed after a Sync Agent - // has registered for the first time in the platform. + // has registered for the first time in kcp. // If not given, defaults to "-syncagent". 
AgentName string @@ -77,7 +77,7 @@ func NewOptions() *Options { func (o *Options) AddFlags(flags *pflag.FlagSet) { o.LogOptions.AddPFlags(flags) - flags.StringVar(&o.PlatformKubeconfig, "platform-kubeconfig", o.PlatformKubeconfig, "kubeconfig file of kcp") + flags.StringVar(&o.KcpKubeconfig, "kcp-kubeconfig", o.KcpKubeconfig, "kubeconfig file of kcp") flags.StringVar(&o.Namespace, "namespace", o.Namespace, "Kubernetes namespace the Sync Agent is running in") flags.StringVar(&o.AgentName, "agent-name", o.AgentName, "name of this Sync Agent, must not be changed after the first run, can be left blank to auto-generate a name") flags.StringVar(&o.APIExportRef, "apiexport-ref", o.APIExportRef, "name of the APIExport in kcp that this Sync Agent is powering") @@ -108,8 +108,8 @@ func (o *Options) Validate() error { errs = append(errs, errors.New("--apiexport-ref is required")) } - if len(o.PlatformKubeconfig) == 0 { - errs = append(errs, errors.New("--platform-kubeconfig is required")) + if len(o.KcpKubeconfig) == 0 { + errs = append(errs, errors.New("--kcp-kubeconfig is required")) } if s := o.PublishedResourceSelectorString; len(s) > 0 { diff --git a/deploy/crd/kcp.io/syncagent.kcp.io_publishedresources.yaml b/deploy/crd/kcp.io/syncagent.kcp.io_publishedresources.yaml index cfc66ad..fec0d4f 100644 --- a/deploy/crd/kcp.io/syncagent.kcp.io_publishedresources.yaml +++ b/deploy/crd/kcp.io/syncagent.kcp.io_publishedresources.yaml @@ -244,7 +244,7 @@ spec: description: ConfigMap or Secret type: string origin: - description: '"service" or "platform"' + description: '"service" or "kcp"' type: string reference: properties: diff --git a/docs/README.md b/docs/README.md index 0e45197..a46dc2a 100644 --- a/docs/README.md +++ b/docs/README.md @@ -17,10 +17,9 @@ The intended usecase follows roughly these steps: inside of kcp. 4. The service owner uses the Sync Agent Helm chart (or similar deployment technique) to install the Sync Agent in their cluster. -5. To actually make resources available in the platform, the service owner now has to create a - set of `PublishedResource` objects. The configuration happens from their point of view, meaning - they define how to publish a CRD to the platform, defining renaming rules and other projection - settings. +5. To actually make resources available in kcp, the service owner now has to create a set of + `PublishedResource` objects. The configuration happens from their point of view, meaning they + define how to publish a CRD to kcp, defining renaming rules and other projection settings. 6. Once a `PublishedResource` is created in the service cluster, the Sync Agent will pick it up, find the referenced CRD, convert/project this CRD into an `APIResourceSchema` (ARS) for kcp and then create the ARS in org workspace. @@ -28,7 +27,7 @@ The intended usecase follows roughly these steps: `APIExport` in the org workspace. This APIExport can then be bound in the org workspace itself (or later any workspaces (depending on permissions)) and be used there. 8. kcp automatically provides a virtual workspace for the `APIExport` and this is what the Sync Agent - then uses to watch all objects for the relevant resources in the platform (i.e. in all workspaces). + then uses to watch all objects for the relevant resources in kcp (i.e. in all workspaces). 9. The Sync Agent will now begin to synchronize objects back and forth between the service cluster and kcp. 
@@ -100,8 +99,8 @@ In addition to projecting (mapping) the GVK, the `PublishedResource` also contai rules, which influence how the local objects that the Sync Agent is creating are named. As a single Sync Agent serves a single service, the API group used in kcp is the same for all -`PublishedResources`. It's the API group configured in the `APIExport` inside the platform (created -in step 1 in the overview above). +`PublishedResources`. It's the API group configured in the `APIExport` inside kcp (created in step 1 +in the overview above). To prevent chaos, `PublishedResources` are immutable: handling the case that a PR first wants to publish `kubermatic.k8c.io/v1 Cluster` and then suddenly `kubermatic.k8c.io/v1 User` resources would diff --git a/docs/getting-started.md b/docs/getting-started.md index a2e4aef..835a55f 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -73,7 +73,7 @@ syncAgent: # Required: Name of the Kubernetes Secret that contains a "kubeconfig" key, with the kubeconfig # provided by kcp to access it. - platformKubeconfig: kcp-kubeconfig + kcpKubeconfig: kcp-kubeconfig # Create additional RBAC on the service cluster. These rules depend somewhat on the Sync Agent # configuration, but the following two rules are very common. If you configure the Sync Agent to diff --git a/docs/publish-resources.md b/docs/publish-resources.md index 8fe91a4..12e8fdb 100644 --- a/docs/publish-resources.md +++ b/docs/publish-resources.md @@ -1,9 +1,9 @@ # Publishing Resources The guide describes the process of making a resource (usually defined by a CustomResourceDefinition) -of one Kubernetes cluster (the "service cluster" or "local cluster") available for use in kcp (the -"platform cluster" or "workspaces"). This involves setting up an `APIExport` and then installing -the Sync Agent and defining `PublishedResources` in the local cluster. +of one Kubernetes cluster (the "service cluster" or "local cluster") available for use in kcp. This +involves setting up an `APIExport` and then installing the Sync Agent and defining +`PublishedResources` in the local cluster. All of the documentation and API types are worded and named from the perspective of a service owner, the person(s) who own a service and want to make it available to consumers in kcp. @@ -12,7 +12,7 @@ the person(s) who own a service and want to make it available to consumers in kc A "service" comprises a set of resources within a single Kubernetes API group. It doesn't need to be _all_ of the resources in that group, service owners are free and encouraged to only make a subset -of resources (i.e. a subset of CRDs) available for use in the platform. +of resources (i.e. a subset of CRDs) available for use in kcp. For each of the CRDs on the service cluster that should be published, the service owner creates a `PublishedResource` object, which will contain both which CRD to publish, as well as numerous other @@ -80,15 +80,15 @@ CRD. ### Projection For stronger separation of concerns and to enable whitelabelling of services, the type meta for -can be projected, i.e. changed between the local service cluster and the platform. You could for -example rename `Certificate` from cert-manager to `Sertifikat` inside the platform. +can be projected, i.e. changed between the local service cluster and kcp. You could for example +rename `Certificate` from cert-manager to `Sertifikat` inside kcp. 
Note that the API group of all published resources is always changed to the one defined in the -APIExport object (meaning 1 Sync Agent serves all the selected published resources under the -same API group). That is why changing the API group cannot be configured in the projection. +APIExport object (meaning 1 Sync Agent serves all the selected published resources under the same +API group). That is why changing the API group cannot be configured in the projection. Besides renaming the Kind and Version, dependent fields like Plural, ShortNames and Categories -can be adjusted to fit the desired naming scheme in the platform. The Plural name is computed +can be adjusted to fit the desired naming scheme in kcp. The Plural name is computed automatically, but can be overridden. ShortNames and Categories are copied unless overwritten in the `PublishedResource`. @@ -111,7 +111,7 @@ spec: # scope: Namespaced # change only when you know what you're doing ``` -Consumers (end users) in the platform would then ultimately see projected names only. Note that GVK +Consumers (end users) in kcp would then ultimately see projected names only. Note that GVK projection applies only to the synced object itself and has no effect on the contents of these objects. To change the contents, use external solutions like Crossplane to transform objects. @@ -134,8 +134,8 @@ are available: * `$remoteNameHash` – first 20 hex characters of the SHA-1 hash of `$remoteName` If nothing is configured, the default ensures that no collisions will happen: Each workspace in -the platform will create a namespace on the local cluster, with a combination of namespace and -name hashes used for the actual resource names. +kcp will create a namespace on the local cluster, with a combination of namespace and name hashes +used for the actual resource names. ```yaml apiVersion: syncagent.kcp.io/v1alpha1 @@ -162,7 +162,7 @@ Configuration happens `spec.mutation` and there are two fields: be other top-level fields) from the remote side to the local side. Use this to apply defaulting, normalising, and enforcing rules. * `status` contains the mutation rules when syncing the `status` subresource back from the local - cluster up into the platform. Use this to normalize names and values (e.g. if you rewrote + cluster up into kcp. Use this to normalize names and values (e.g. if you rewrote `.spec.secretName` from `"foo"` to `"dfkbssbfh"`, make sure the status does not "leak" this name by accident). @@ -285,7 +285,7 @@ spec: # "connection-details" or "credentials". identifier: tls-secret - # "service" or "platform" + # "service" or "kcp" origin: service # for now, only "Secret" and "ConfigMap" are supported; @@ -354,7 +354,7 @@ spec: name: "$remoteClusterName-$remoteNamespaceHash-$remoteNameHash" related: - - origin: service # service or platform + - origin: service # service or kcp kind: Secret # for now, only "Secret" and "ConfigMap" are supported; # there is no GVK projection for related resources @@ -383,10 +383,9 @@ The following sections go into more details of the behind the scenes magic. ### Synchronization Even though the whole configuration is written from the standpoint of the service owner, the actual -synchronization logic considers the platform side as the canonical source of truth. The Sync Agent -continuously tries to make the local objects look like the ones in the platform, while pushing -status updates back into the platform (if the given `PublishedResource` (i.e. CRD) has a `status` -subresource enabled). 
+synchronization logic considers the kcp side as the canonical source of truth. The Sync Agent +continuously tries to make the local objects look like the ones in kcp, while pushing status updates +back into kcp (if the given `PublishedResource` (i.e. CRD) has a `status` subresource enabled). ### Local <-> Remote Connection @@ -399,7 +398,7 @@ reconciliations, the (potentially costly, but probably not) renaming logic does applied again. This allows the Sync Agent to change defaults and also allows the service owner to make changes to the naming rules without breaking existing objects. -Since we do not want to store metadata on the platform side, we instead rely on label selectors on +Since we do not want to store metadata on the kcp side, we instead rely on label selectors on the local objects. Each object on the service cluster has a label for the remote cluster name, namespace and object name, and when trying to find the matching local object, the Sync Agent simply does a label-based search. @@ -432,8 +431,8 @@ service cluster is called the `destination object`. #### Phase 2: Handle Deletion -A finalizer is used in the platform workspaces to prevent orphans in the service cluster side. This -is the only real evidence in the platform side that the Sync Agent is even doing things. When a remote +A finalizer is used in the kcp workspaces to prevent orphans in the service cluster side. This +is the only real evidence in the kcp side that the Sync Agent is even doing things. When a remote (source) object is deleted, the corresponding local object is deleted as well. Once the local object is gone, the finalizer is removed from the source object. diff --git a/internal/controller/apiexport/controller.go b/internal/controller/apiexport/controller.go index 541c3b0..05c90d3 100644 --- a/internal/controller/apiexport/controller.go +++ b/internal/controller/apiexport/controller.go @@ -50,20 +50,20 @@ const ( ) type Reconciler struct { - localClient ctrlruntimeclient.Client - platformClient ctrlruntimeclient.Client - log *zap.SugaredLogger - recorder record.EventRecorder - lcName logicalcluster.Name - apiExportName string - agentName string - prFilter labels.Selector + localClient ctrlruntimeclient.Client + kcpClient ctrlruntimeclient.Client + log *zap.SugaredLogger + recorder record.EventRecorder + lcName logicalcluster.Name + apiExportName string + agentName string + prFilter labels.Selector } // Add creates a new controller and adds it to the given manager. func Add( mgr manager.Manager, - platformCluster cluster.Cluster, + kcpCluster cluster.Cluster, lcName logicalcluster.Name, log *zap.SugaredLogger, apiExportName string, @@ -71,14 +71,14 @@ func Add( prFilter labels.Selector, ) error { reconciler := &Reconciler{ - localClient: mgr.GetClient(), - platformClient: platformCluster.GetClient(), - lcName: lcName, - log: log.Named(ControllerName), - recorder: mgr.GetEventRecorderFor(ControllerName), - apiExportName: apiExportName, - agentName: agentName, - prFilter: prFilter, + localClient: mgr.GetClient(), + kcpClient: kcpCluster.GetClient(), + lcName: lcName, + log: log.Named(ControllerName), + recorder: mgr.GetEventRecorderFor(ControllerName), + apiExportName: apiExportName, + agentName: agentName, + prFilter: prFilter, } hasARS := predicate.NewPredicateFuncs(func(object ctrlruntimeclient.Object) bool { @@ -96,10 +96,10 @@ func Add( // we reconcile a single object in kcp, no need for parallel workers MaxConcurrentReconciles: 1, }). 
- // Watch for changes to APIExport on the platform side to start/restart the actual syncing controllers; + // Watch for changes to APIExport on the kcp side to start/restart the actual syncing controllers; // the cache is already restricted by a fieldSelector in the main.go to respect the RBC restrictions, // so there is no need here to add an additional filter. - WatchesRawSource(source.Kind(platformCluster.GetCache(), &kcpdevv1alpha1.APIExport{}, controllerutil.EnqueueConst[*kcpdevv1alpha1.APIExport]("dummy"))). + WatchesRawSource(source.Kind(kcpCluster.GetCache(), &kcpdevv1alpha1.APIExport{}, controllerutil.EnqueueConst[*kcpdevv1alpha1.APIExport]("dummy"))). // Watch for changes to PublishedResources on the local service cluster Watches(&syncagentv1alpha1.PublishedResource{}, controllerutil.EnqueueConst[ctrlruntimeclient.Object]("dummy"), builder.WithPredicates(predicateutil.ByLabels(prFilter), hasARS)). Build(reconciler) @@ -134,7 +134,7 @@ func (r *Reconciler) reconcile(ctx context.Context) error { // PublishedResources use kinds, but the PermissionClaims use resource names (plural), // so we must translate accordingly - mapper := r.platformClient.RESTMapper() + mapper := r.kcpClient.RESTMapper() for _, pubResource := range filteredPubResources { arsList.Insert(pubResource.Status.ResourceSchemaName) @@ -162,14 +162,14 @@ func (r *Reconciler) reconcile(ctx context.Context) error { return nil } - // reconcile an APIExport in the platform + // reconcile an APIExport in kcp factories := []reconciling.NamedAPIExportReconcilerFactory{ r.createAPIExportReconciler(arsList, claimedResources, r.agentName, r.apiExportName), } wsCtx := kontext.WithCluster(ctx, r.lcName) - if err := reconciling.ReconcileAPIExports(wsCtx, factories, "", r.platformClient); err != nil { + if err := reconciling.ReconcileAPIExports(wsCtx, factories, "", r.kcpClient); err != nil { return fmt.Errorf("failed to reconcile APIExport: %w", err) } @@ -180,7 +180,7 @@ func (r *Reconciler) reconcile(ctx context.Context) error { // apiExport := &kcpdevv1alpha1.APIExport{} // key := types.NamespacedName{Name: exportName} - // if err := r.platformClient.Get(wsCtx, key, apiExport); ctrlruntimeclient.IgnoreNotFound(err) != nil { + // if err := r.kcpClient.Get(wsCtx, key, apiExport); ctrlruntimeclient.IgnoreNotFound(err) != nil { // return false, err // } diff --git a/internal/controller/apiexport/doc.go b/internal/controller/apiexport/doc.go index 8f64a92..7cb319f 100644 --- a/internal/controller/apiexport/doc.go +++ b/internal/controller/apiexport/doc.go @@ -22,6 +22,6 @@ created by the accompanying controller in the Sync Agent. Note that for the time being, to prevent data loss, only new ARS will be added to the APIExport. Once an ARS is listed in the APIExport, it is supposed to remain -until an administrator/other process performs garbage collection in the platform. +until an administrator/other process performs garbage collection in kcp. 
*/ package apiexport diff --git a/internal/controller/apiresourceschema/controller.go b/internal/controller/apiresourceschema/controller.go index 3467ebd..a20ed7f 100644 --- a/internal/controller/apiresourceschema/controller.go +++ b/internal/controller/apiresourceschema/controller.go @@ -55,19 +55,19 @@ const ( ) type Reconciler struct { - localClient ctrlruntimeclient.Client - platformClient ctrlruntimeclient.Client - log *zap.SugaredLogger - recorder record.EventRecorder - lcName logicalcluster.Name - agentName string - apiExportName string + localClient ctrlruntimeclient.Client + kcpClient ctrlruntimeclient.Client + log *zap.SugaredLogger + recorder record.EventRecorder + lcName logicalcluster.Name + agentName string + apiExportName string } // Add creates a new controller and adds it to the given manager. func Add( mgr manager.Manager, - platformCluster cluster.Cluster, + kcpCluster cluster.Cluster, lcName logicalcluster.Name, log *zap.SugaredLogger, numWorkers int, @@ -76,13 +76,13 @@ func Add( prFilter labels.Selector, ) error { reconciler := &Reconciler{ - localClient: mgr.GetClient(), - platformClient: platformCluster.GetClient(), - lcName: lcName, - log: log.Named(ControllerName), - recorder: mgr.GetEventRecorderFor(ControllerName), - agentName: agentName, - apiExportName: apiExportName, + localClient: mgr.GetClient(), + kcpClient: kcpCluster.GetClient(), + lcName: lcName, + log: log.Named(ControllerName), + recorder: mgr.GetEventRecorderFor(ControllerName), + agentName: agentName, + apiExportName: apiExportName, } _, err := builder.ControllerManagedBy(mgr). @@ -143,7 +143,7 @@ func (r *Reconciler) reconcile(ctx context.Context, log *zap.SugaredLogger, pubR // service owners to somehow publish updated CRDs without changing their API version. wsCtx := kontext.WithCluster(ctx, r.lcName) ars := &kcpdevv1alpha1.APIResourceSchema{} - err = r.platformClient.Get(wsCtx, types.NamespacedName{Name: arsName}, ars, &ctrlruntimeclient.GetOptions{}) + err = r.kcpClient.Get(wsCtx, types.NamespacedName{Name: arsName}, ars, &ctrlruntimeclient.GetOptions{}) if apierrors.IsNotFound(err) { if err := r.createAPIResourceSchema(wsCtx, log, r.apiExportName, projectedCRD, arsName); err != nil { @@ -192,7 +192,7 @@ func (r *Reconciler) createAPIResourceSchema(ctx context.Context, log *zap.Sugar log.With("name", arsName).Info("Creating APIResourceSchema…") - return r.platformClient.Create(ctx, ars) + return r.kcpClient.Create(ctx, ars) } func (r *Reconciler) projectResourceNames(apiGroup string, crd *apiextensionsv1.CustomResourceDefinition, projection *syncagentv1alpha1.ResourceProjection) *apiextensionsv1.CustomResourceDefinition { diff --git a/internal/controller/apiresourceschema/doc.go b/internal/controller/apiresourceschema/doc.go index 7d752ac..ee46bc8 100644 --- a/internal/controller/apiresourceschema/doc.go +++ b/internal/controller/apiresourceschema/doc.go @@ -26,7 +26,7 @@ with an editor and re-applied, it won't turn into the same ARS, as we cannot sim turn an ARS for a Pod into an ARS for a StorageClass. There is no extra cleanup procedure in either of the clusters when a PublishedResource -is deleted. This is to prevent accidental data loss in the platform in case a -service owner accidentally (and temporarily) removed a PublishedResource. +is deleted. This is to prevent accidental data loss in kcp in case a service owner +accidentally (and temporarily) removed a PublishedResource. 
*/ package apiresourceschema diff --git a/internal/controller/sync/doc.go b/internal/controller/sync/doc.go index ebd3229..9692a0d 100644 --- a/internal/controller/sync/doc.go +++ b/internal/controller/sync/doc.go @@ -15,10 +15,9 @@ limitations under the License. */ /* -Package sync contains a controller that watches the APIExport we manage -in the platform cluster. Once the virtual workspace URL for said APIExport -is ready, the controller will begin to synchronize resources back and forth -between the platform cluster (i.e. all relevant workspaces) and the service -cluster. +Package sync contains a controller that watches the APIExport we manage in kcp. +Once the virtual workspace URL for said APIExport is ready, the controller will +begin to synchronize resources back and forth between kcp (i.e. all relevant +workspaces) and the service cluster. */ package sync diff --git a/internal/controller/syncmanager/controller.go b/internal/controller/syncmanager/controller.go index 49b4331..e99d3b4 100644 --- a/internal/controller/syncmanager/controller.go +++ b/internal/controller/syncmanager/controller.go @@ -62,13 +62,13 @@ type Reconciler struct { // also triggered. ctx context.Context - localManager manager.Manager - platformCluster cluster.Cluster - platformRestConfig *rest.Config - log *zap.SugaredLogger - recorder record.EventRecorder - discoveryClient *discovery.Client - prFilter labels.Selector + localManager manager.Manager + kcpCluster cluster.Cluster + kcpRestConfig *rest.Config + log *zap.SugaredLogger + recorder record.EventRecorder + discoveryClient *discovery.Client + prFilter labels.Selector apiExport *kcpdevv1alpha1.APIExport @@ -88,23 +88,23 @@ type Reconciler struct { func Add( ctx context.Context, localManager manager.Manager, - platformCluster cluster.Cluster, - platformRestConfig *rest.Config, + kcpCluster cluster.Cluster, + kcpRestConfig *rest.Config, log *zap.SugaredLogger, apiExport *kcpdevv1alpha1.APIExport, prFilter labels.Selector, ) error { reconciler := &Reconciler{ - ctx: ctx, - localManager: localManager, - apiExport: apiExport, - platformCluster: platformCluster, - platformRestConfig: platformRestConfig, - log: log, - recorder: localManager.GetEventRecorderFor(ControllerName), - syncWorkers: map[string]lifecycle.Controller{}, - discoveryClient: discovery.NewClient(localManager.GetClient()), - prFilter: prFilter, + ctx: ctx, + localManager: localManager, + apiExport: apiExport, + kcpCluster: kcpCluster, + kcpRestConfig: kcpRestConfig, + log: log, + recorder: localManager.GetEventRecorderFor(ControllerName), + syncWorkers: map[string]lifecycle.Controller{}, + discoveryClient: discovery.NewClient(localManager.GetClient()), + prFilter: prFilter, } _, err := builder.ControllerManagedBy(localManager). @@ -113,10 +113,10 @@ func Add( // this controller is meant to control others, so we only want 1 thread MaxConcurrentReconciles: 1, }). - // Watch for changes to APIExport on the platform side to start/restart the actual syncing controllers; + // Watch for changes to APIExport on the kcp side to start/restart the actual syncing controllers; // the cache is already restricted by a fieldSelector in the main.go to respect the RBC restrictions, // so there is no need here to add an additional filter. - WatchesRawSource(source.Kind(platformCluster.GetCache(), &kcpdevv1alpha1.APIExport{}, controllerutil.EnqueueConst[*kcpdevv1alpha1.APIExport]("dummy"))). 
+ WatchesRawSource(source.Kind(kcpCluster.GetCache(), &kcpdevv1alpha1.APIExport{}, controllerutil.EnqueueConst[*kcpdevv1alpha1.APIExport]("dummy"))). // Watch for changes to the PublishedResources Watches(&syncagentv1alpha1.PublishedResource{}, controllerutil.EnqueueConst[ctrlruntimeclient.Object]("dummy"), builder.WithPredicates(predicate.ByLabels(prFilter))). Build(reconciler) @@ -131,7 +131,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, _ reconcile.Request) (reconc key := types.NamespacedName{Name: r.apiExport.Name} apiExport := &kcpdevv1alpha1.APIExport{} - if err := r.platformCluster.GetClient().Get(wsCtx, key, apiExport); ctrlruntimeclient.IgnoreNotFound(err) != nil { + if err := r.kcpCluster.GetClient().Get(wsCtx, key, apiExport); ctrlruntimeclient.IgnoreNotFound(err) != nil { return reconcile.Result{}, fmt.Errorf("failed to retrieve APIExport: %w", err) } @@ -188,7 +188,7 @@ func (r *Reconciler) ensureVirtualWorkspaceCluster(log *zap.SugaredLogger, vwURL if r.vwCluster == nil { log.Info("Setting up virtual workspace cluster…") - stoppableCluster, err := lifecycle.NewCluster(vwURL, r.platformRestConfig) + stoppableCluster, err := lifecycle.NewCluster(vwURL, r.kcpRestConfig) if err != nil { return fmt.Errorf("failed to initialize cluster: %w", err) } diff --git a/internal/controller/syncmanager/doc.go b/internal/controller/syncmanager/doc.go index 22239d0..d0f2a91 100644 --- a/internal/controller/syncmanager/doc.go +++ b/internal/controller/syncmanager/doc.go @@ -16,9 +16,8 @@ limitations under the License. /* Package syncmanager contains a controller that watches the APIExport we manage -in the platform cluster. Once the virtual workspace URL for said APIExport -is ready, the controller will begin to synchronize resources back and forth -between the platform cluster (i.e. all relevant workspaces) and the service -cluster. +in kcp. Once the virtual workspace URL for said APIExport is ready, the +controller will begin to synchronize resources back and forth between kcp +(i.e. all relevant workspaces) and the service cluster. */ package syncmanager diff --git a/internal/projection/projection.go b/internal/projection/projection.go index 9d3ab6b..e01377b 100644 --- a/internal/projection/projection.go +++ b/internal/projection/projection.go @@ -34,7 +34,7 @@ func PublishedResourceSourceGVK(pubRes *syncagentv1alpha1.PublishedResource) sch // PublishedResourceProjectedGVK returns the effective GVK after the projection // rules have been applied according to the PublishedResource. 
-func PublishedResourceProjectedGVK(pubRes *syncagentv1alpha1.PublishedResource, platformAPIGroup string) schema.GroupVersionKind { +func PublishedResourceProjectedGVK(pubRes *syncagentv1alpha1.PublishedResource, kcpAPIGroup string) schema.GroupVersionKind { apiVersion := pubRes.Spec.Resource.Version kind := pubRes.Spec.Resource.Kind @@ -49,7 +49,7 @@ func PublishedResourceProjectedGVK(pubRes *syncagentv1alpha1.PublishedResource, } return schema.GroupVersionKind{ - Group: platformAPIGroup, + Group: kcpAPIGroup, Version: apiVersion, Kind: kind, } diff --git a/internal/sync/syncer.go b/internal/sync/syncer.go index e9b002f..17ef97f 100644 --- a/internal/sync/syncer.go +++ b/internal/sync/syncer.go @@ -152,7 +152,7 @@ func (s *ResourceSyncer) Process(ctx Context, remoteObj *unstructured.Unstructur // status subresource even exists whether an update happens) syncStatusBack: true, // perform cleanup on the service cluster side when the source object - // in the platform is deleted + // in kcp is deleted blockSourceDeletion: true, // use the configured mutations from the PublishedResource mutator: s.mutator, diff --git a/internal/sync/syncer_related.go b/internal/sync/syncer_related.go index 3e4594a..320f9d5 100644 --- a/internal/sync/syncer_related.go +++ b/internal/sync/syncer_related.go @@ -140,13 +140,13 @@ func (s *ResourceSyncer) processRelatedResource(log *zap.SugaredLogger, stateSto }, // ConfigMaps and Secrets have no subresources subresources: nil, - // only sync the status back if the object originates in the platform, + // only sync the status back if the object originates in kcp, // as the service side should never have to rely on new status infos coming - // from the platform side - syncStatusBack: relRes.Origin == "platform", + // from the kcp side + syncStatusBack: relRes.Origin == "kcp", // if the origin is on the remote side, we want to add a finalizer to make // sure we can clean up properly - blockSourceDeletion: relRes.Origin == "platform", + blockSourceDeletion: relRes.Origin == "kcp", // apply mutation rules configured for the related resource mutator: mutation.NewMutator(nil), // relRes.Mutation } diff --git a/sdk/apis/syncagent/v1alpha1/published_resource.go b/sdk/apis/syncagent/v1alpha1/published_resource.go index 320709b..6906ec5 100644 --- a/sdk/apis/syncagent/v1alpha1/published_resource.go +++ b/sdk/apis/syncagent/v1alpha1/published_resource.go @@ -165,7 +165,7 @@ type RelatedResourceSpec struct { // The identifier must be an alphanumeric string. Identifier string `json:"identifier"` - // "service" or "platform" + // "service" or "kcp" Origin string `json:"origin"` // ConfigMap or Secret
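For reference, the user-facing effect of this rename is that the agent is now configured via `--kcp-kubeconfig` (Helm value `kcpKubeconfig`) instead of `--platform-kubeconfig`, and related resources in a `PublishedResource` declare `origin: service` or `origin: kcp` rather than `origin: platform`. The following abbreviated sketch only illustrates the renamed terminology; the metadata name, resource type, and secret identifier are hypothetical, and the related resource's `reference` selector is omitted:

```yaml
# Abbreviated, hypothetical example – shows the renamed "kcp" origin value,
# not a complete PublishedResource.
apiVersion: syncagent.kcp.io/v1alpha1
kind: PublishedResource
metadata:
  name: publish-certificates
spec:
  resource:
    kind: Certificate
    apiGroup: cert-manager.io
    version: v1

  naming:
    name: "$remoteClusterName-$remoteNamespaceHash-$remoteNameHash"

  related:
    - identifier: tls-secret
      # "service" or "kcp" (previously "service" or "platform")
      origin: kcp
      kind: Secret
      # reference: … (how to locate the Secret; omitted in this sketch)
```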