Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 18 additions & 18 deletions cmd/api-syncagent/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -100,45 +100,45 @@ func run(ctx context.Context, log *zap.SugaredLogger, opts *Options) error {
}

// load the kcp kubeconfig
platformRestConfig, err := loadKubeconfig(opts.PlatformKubeconfig)
kcpRestConfig, err := loadKubeconfig(opts.KcpKubeconfig)
if err != nil {
return fmt.Errorf("failed to load platform kubeconfig: %w", err)
return fmt.Errorf("failed to load kcp kubeconfig: %w", err)
}

// sanity check
if !strings.Contains(platformRestConfig.Host, "/clusters/") {
return fmt.Errorf("platform kubeconfig does not point to a specific workspace")
if !strings.Contains(kcpRestConfig.Host, "/clusters/") {
return fmt.Errorf("kcp kubeconfig does not point to a specific workspace")
}

// We check if the APIExport exists and extract information we need to set up our platformCluster.
apiExport, lcPath, lcName, err := resolveAPIExport(ctx, platformRestConfig, opts.APIExportRef)
// We check if the APIExport exists and extract information we need to set up our kcpCluster.
apiExport, lcPath, lcName, err := resolveAPIExport(ctx, kcpRestConfig, opts.APIExportRef)
if err != nil {
return fmt.Errorf("failed to resolve APIExport: %w", err)
}

log.Infow("Resolved APIExport", "apigroup", opts.APIExportRef, "workspace", lcPath, "logicalcluster", lcName)

// init the "permanent" platform cluster connection
platformCluster, err := setupPlatformCluster(platformRestConfig, opts)
// init the "permanent" kcp cluster connection
kcpCluster, err := setupKcpCluster(kcpRestConfig, opts)
if err != nil {
return fmt.Errorf("failed to initialize platform cluster: %w", err)
return fmt.Errorf("failed to initialize kcp cluster: %w", err)
}

// start the platform cluster caches when the manager boots up
// start the kcp cluster caches when the manager boots up
// (happens regardless of leader election status)
if err := mgr.Add(platformCluster); err != nil {
return fmt.Errorf("failed to add platform cluster runnable: %w", err)
if err := mgr.Add(kcpCluster); err != nil {
return fmt.Errorf("failed to add kcp cluster runnable: %w", err)
}

if err := apiresourceschema.Add(mgr, platformCluster, lcName, log, 4, opts.AgentName, opts.APIExportRef, opts.PublishedResourceSelector); err != nil {
if err := apiresourceschema.Add(mgr, kcpCluster, lcName, log, 4, opts.AgentName, opts.APIExportRef, opts.PublishedResourceSelector); err != nil {
return fmt.Errorf("failed to add apiresourceschema controller: %w", err)
}

if err := apiexport.Add(mgr, platformCluster, lcName, log, opts.APIExportRef, opts.AgentName, opts.PublishedResourceSelector); err != nil {
if err := apiexport.Add(mgr, kcpCluster, lcName, log, opts.APIExportRef, opts.AgentName, opts.PublishedResourceSelector); err != nil {
return fmt.Errorf("failed to add apiexport controller: %w", err)
}

if err := syncmanager.Add(ctx, mgr, platformCluster, platformRestConfig, log, apiExport, opts.PublishedResourceSelector); err != nil {
if err := syncmanager.Add(ctx, mgr, kcpCluster, kcpRestConfig, log, apiExport, opts.PublishedResourceSelector); err != nil {
return fmt.Errorf("failed to add syncmanager controller: %w", err)
}

Expand Down Expand Up @@ -231,9 +231,9 @@ func resolveAPIExport(ctx context.Context, restConfig *rest.Config, apiExportRef
return apiExport, lcPath, lcName, nil
}

// setupPlatformCluster sets up a plain, non-kcp-aware ctrl-runtime Cluster object
// setupKcpCluster sets up a plain, non-kcp-aware ctrl-runtime Cluster object
// that is solely used to interact with the APIExport and APIResourceSchemas.
func setupPlatformCluster(restConfig *rest.Config, opts *Options) (cluster.Cluster, error) {
func setupKcpCluster(restConfig *rest.Config, opts *Options) (cluster.Cluster, error) {
scheme := runtime.NewScheme()

if err := kcpdevv1alpha1.AddToScheme(scheme); err != nil {
Expand All @@ -246,7 +246,7 @@ func setupPlatformCluster(restConfig *rest.Config, opts *Options) (cluster.Clust

return cluster.New(restConfig, func(o *cluster.Options) {
o.Scheme = scheme
// RBAC on the platform cluster is very tight and does not allow to list/watch all objects;
// RBAC in kcp might be very tight and might not allow to list/watch all objects;
// restrict the cache's selectors accordingly so we can still make use of caching.
o.Cache = cache.Options{
Scheme: scheme,
Expand Down
12 changes: 6 additions & 6 deletions cmd/api-syncagent/options.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,10 +35,10 @@ type Options struct {
// work.
// KubeconfigFile string

// PlatformKubeconfig is the kubeconfig that gives access to kcp. This
// KcpKubeconfig is the kubeconfig that gives access to kcp. This
// kubeconfig's cluster URL has to point to the workspace where the APIExport
// referenced via APIExportRef lives.
PlatformKubeconfig string
KcpKubeconfig string

// Namespace is the namespace that the Sync Agent runs in.
Namespace string
Expand All @@ -49,7 +49,7 @@ type Options struct {

// AgentName can be used to give this Sync Agent instance a custom name. This name is used
// for the Sync Agent resource inside kcp. This value must not be changed after a Sync Agent
// has registered for the first time in the platform.
// has registered for the first time in kcp.
// If not given, defaults to "<service ref>-syncagent".
AgentName string

Expand Down Expand Up @@ -77,7 +77,7 @@ func NewOptions() *Options {
func (o *Options) AddFlags(flags *pflag.FlagSet) {
o.LogOptions.AddPFlags(flags)

flags.StringVar(&o.PlatformKubeconfig, "platform-kubeconfig", o.PlatformKubeconfig, "kubeconfig file of kcp")
flags.StringVar(&o.KcpKubeconfig, "kcp-kubeconfig", o.KcpKubeconfig, "kubeconfig file of kcp")
flags.StringVar(&o.Namespace, "namespace", o.Namespace, "Kubernetes namespace the Sync Agent is running in")
flags.StringVar(&o.AgentName, "agent-name", o.AgentName, "name of this Sync Agent, must not be changed after the first run, can be left blank to auto-generate a name")
flags.StringVar(&o.APIExportRef, "apiexport-ref", o.APIExportRef, "name of the APIExport in kcp that this Sync Agent is powering")
Expand Down Expand Up @@ -108,8 +108,8 @@ func (o *Options) Validate() error {
errs = append(errs, errors.New("--apiexport-ref is required"))
}

if len(o.PlatformKubeconfig) == 0 {
errs = append(errs, errors.New("--platform-kubeconfig is required"))
if len(o.KcpKubeconfig) == 0 {
errs = append(errs, errors.New("--kcp-kubeconfig is required"))
}

if s := o.PublishedResourceSelectorString; len(s) > 0 {
Expand Down
2 changes: 1 addition & 1 deletion deploy/crd/kcp.io/syncagent.kcp.io_publishedresources.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -244,7 +244,7 @@ spec:
description: ConfigMap or Secret
type: string
origin:
description: '"service" or "platform"'
description: '"service" or "kcp"'
type: string
reference:
properties:
Expand Down
13 changes: 6 additions & 7 deletions docs/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -17,18 +17,17 @@ The intended usecase follows roughly these steps:
inside of kcp.
4. The service owner uses the Sync Agent Helm chart (or similar deployment technique) to install the
Sync Agent in their cluster.
5. To actually make resources available in the platform, the service owner now has to create a
set of `PublishedResource` objects. The configuration happens from their point of view, meaning
they define how to publish a CRD to the platform, defining renaming rules and other projection
settings.
5. To actually make resources available in kcp, the service owner now has to create a set of
`PublishedResource` objects. The configuration happens from their point of view, meaning they
define how to publish a CRD to kcp, defining renaming rules and other projection settings.
6. Once a `PublishedResource` is created in the service cluster, the Sync Agent will pick it up,
find the referenced CRD, convert/project this CRD into an `APIResourceSchema` (ARS) for kcp and
then create the ARS in the org workspace.
7. Finally the Sync Agent will take all `PublishedResources` and bundle them into the pre-existing
`APIExport` in the org workspace. This APIExport can then be bound in the org workspace itself
(or later any workspaces (depending on permissions)) and be used there.
8. kcp automatically provides a virtual workspace for the `APIExport` and this is what the Sync Agent
then uses to watch all objects for the relevant resources in the platform (i.e. in all workspaces).
then uses to watch all objects for the relevant resources in kcp (i.e. in all workspaces).
9. The Sync Agent will now begin to synchronize objects back and forth between the service cluster
and kcp.

Expand Down Expand Up @@ -100,8 +99,8 @@ In addition to projecting (mapping) the GVK, the `PublishedResource` also contai
rules, which influence how the local objects that the Sync Agent is creating are named.

As a single Sync Agent serves a single service, the API group used in kcp is the same for all
`PublishedResources`. It's the API group configured in the `APIExport` inside the platform (created
in step 1 in the overview above).
`PublishedResources`. It's the API group configured in the `APIExport` inside kcp (created in step 1
in the overview above).

To prevent chaos, `PublishedResources` are immutable: handling the case that a PR first wants to
publish `kubermatic.k8c.io/v1 Cluster` and then suddenly `kubermatic.k8c.io/v1 User` resources would
Expand Down
2 changes: 1 addition & 1 deletion docs/getting-started.md
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ syncAgent:

# Required: Name of the Kubernetes Secret that contains a "kubeconfig" key, with the kubeconfig
# provided by kcp to access it.
platformKubeconfig: kcp-kubeconfig
kcpKubeconfig: kcp-kubeconfig

# Create additional RBAC on the service cluster. These rules depend somewhat on the Sync Agent
# configuration, but the following two rules are very common. If you configure the Sync Agent to
Expand Down
43 changes: 21 additions & 22 deletions docs/publish-resources.md
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
# Publishing Resources

The guide describes the process of making a resource (usually defined by a CustomResourceDefinition)
of one Kubernetes cluster (the "service cluster" or "local cluster") available for use in kcp (the
"platform cluster" or "workspaces"). This involves setting up an `APIExport` and then installing
the Sync Agent and defining `PublishedResources` in the local cluster.
of one Kubernetes cluster (the "service cluster" or "local cluster") available for use in kcp. This
involves setting up an `APIExport` and then installing the Sync Agent and defining
`PublishedResources` in the local cluster.

All of the documentation and API types are worded and named from the perspective of a service owner,
the person(s) who own a service and want to make it available to consumers in kcp.
Expand All @@ -12,7 +12,7 @@ the person(s) who own a service and want to make it available to consumers in kc

A "service" comprises a set of resources within a single Kubernetes API group. It doesn't need to be
_all_ of the resources in that group, service owners are free and encouraged to only make a subset
of resources (i.e. a subset of CRDs) available for use in the platform.
of resources (i.e. a subset of CRDs) available for use in kcp.

For each of the CRDs on the service cluster that should be published, the service owner creates a
`PublishedResource` object, which will contain both which CRD to publish, as well as numerous other
Expand Down Expand Up @@ -80,15 +80,15 @@ CRD.
### Projection

For stronger separation of concerns and to enable whitelabelling of services, the type meta
can be projected, i.e. changed between the local service cluster and the platform. You could for
example rename `Certificate` from cert-manager to `Sertifikat` inside the platform.
can be projected, i.e. changed between the local service cluster and kcp. You could for example
rename `Certificate` from cert-manager to `Sertifikat` inside kcp.

Note that the API group of all published resources is always changed to the one defined in the
APIExport object (meaning 1 Sync Agent serves all the selected published resources under the
same API group). That is why changing the API group cannot be configured in the projection.
APIExport object (meaning 1 Sync Agent serves all the selected published resources under the same
API group). That is why changing the API group cannot be configured in the projection.

Besides renaming the Kind and Version, dependent fields like Plural, ShortNames and Categories
can be adjusted to fit the desired naming scheme in the platform. The Plural name is computed
can be adjusted to fit the desired naming scheme in kcp. The Plural name is computed
automatically, but can be overridden. ShortNames and Categories are copied unless overwritten in the
`PublishedResource`.

Expand All @@ -111,7 +111,7 @@ spec:
# scope: Namespaced # change only when you know what you're doing
```

Consumers (end users) in the platform would then ultimately see projected names only. Note that GVK
Consumers (end users) in kcp would then ultimately see projected names only. Note that GVK
projection applies only to the synced object itself and has no effect on the contents of these
objects. To change the contents, use external solutions like Crossplane to transform objects.
<!-- To change the contents, use *Mutations*. -->
Expand All @@ -134,8 +134,8 @@ are available:
* `$remoteNameHash` – first 20 hex characters of the SHA-1 hash of `$remoteName`

If nothing is configured, the default ensures that no collisions will happen: Each workspace in
the platform will create a namespace on the local cluster, with a combination of namespace and
name hashes used for the actual resource names.
kcp will create a namespace on the local cluster, with a combination of namespace and name hashes
used for the actual resource names.

```yaml
apiVersion: syncagent.kcp.io/v1alpha1
Expand All @@ -162,7 +162,7 @@ Configuration happens `spec.mutation` and there are two fields:
be other top-level fields) from the remote side to the local side. Use this to apply defaulting,
normalising, and enforcing rules.
* `status` contains the mutation rules when syncing the `status` subresource back from the local
cluster up into the platform. Use this to normalize names and values (e.g. if you rewrote
cluster up into kcp. Use this to normalize names and values (e.g. if you rewrote
`.spec.secretName` from `"foo"` to `"dfkbssbfh"`, make sure the status does not "leak" this name
by accident).

Expand Down Expand Up @@ -285,7 +285,7 @@ spec:
# "connection-details" or "credentials".
identifier: tls-secret

# "service" or "platform"
# "service" or "kcp"
origin: service

# for now, only "Secret" and "ConfigMap" are supported;
Expand Down Expand Up @@ -354,7 +354,7 @@ spec:
name: "$remoteClusterName-$remoteNamespaceHash-$remoteNameHash"

related:
- origin: service # service or platform
- origin: service # service or kcp
kind: Secret # for now, only "Secret" and "ConfigMap" are supported;
# there is no GVK projection for related resources

Expand Down Expand Up @@ -383,10 +383,9 @@ The following sections go into more details of the behind the scenes magic.
### Synchronization

Even though the whole configuration is written from the standpoint of the service owner, the actual
synchronization logic considers the platform side as the canonical source of truth. The Sync Agent
continuously tries to make the local objects look like the ones in the platform, while pushing
status updates back into the platform (if the given `PublishedResource` (i.e. CRD) has a `status`
subresource enabled).
synchronization logic considers the kcp side as the canonical source of truth. The Sync Agent
continuously tries to make the local objects look like the ones in kcp, while pushing status updates
back into kcp (if the given `PublishedResource` (i.e. CRD) has a `status` subresource enabled).

### Local <-> Remote Connection

Expand All @@ -399,7 +398,7 @@ reconciliations, the (potentially costly, but probably not) renaming logic does
applied again. This allows the Sync Agent to change defaults and also allows the service owner to make
changes to the naming rules without breaking existing objects.

Since we do not want to store metadata on the platform side, we instead rely on label selectors on
Since we do not want to store metadata on the kcp side, we instead rely on label selectors on
the local objects. Each object on the service cluster has a label for the remote cluster name,
namespace and object name, and when trying to find the matching local object, the Sync Agent simply
does a label-based search.
Expand Down Expand Up @@ -432,8 +431,8 @@ service cluster is called the `destination object`.

#### Phase 2: Handle Deletion

A finalizer is used in the platform workspaces to prevent orphans in the service cluster side. This
is the only real evidence in the platform side that the Sync Agent is even doing things. When a remote
A finalizer is used in the kcp workspaces to prevent orphans on the service cluster side. This
is the only real evidence on the kcp side that the Sync Agent is even doing things. When a remote
(source) object is deleted, the corresponding local object is deleted as well. Once the local object
is gone, the finalizer is removed from the source object.

Expand Down
Loading
Loading