diff --git a/.goreleaser.yaml b/.goreleaser.yaml
index 2925fcdfc91..38474cfb7de 100644
--- a/.goreleaser.yaml
+++ b/.goreleaser.yaml
@@ -120,8 +120,8 @@ krews:
    token: "{{ .Env.KREW_GITHUB_TOKEN }}"
    homepage: "https://kcp.io/"
    description: |
-      KCP cli plugin for kubectl. Enables you to work with KCP.
-    short_description: "KCP cli plugin for kubectl."
+      kcp cli plugin for kubectl. Enables you to work with kcp.
+    short_description: "kcp cli plugin for kubectl."
    skip_upload: auto
  - name: ws
    ids:
@@ -132,8 +132,8 @@ krews:
    token: "{{ .Env.KREW_GITHUB_TOKEN }}"
    homepage: "https://kcp.io/"
    description: |
-      KCP workspace cli plugin for kubectl. Enables you to manage your KCP workspaces.
-    short_description: "KCP workspace cli plugin for kubectl."
+      kcp workspace cli plugin for kubectl. Enables you to manage your kcp workspaces.
+    short_description: "kcp workspace cli plugin for kubectl."
    skip_upload: auto
  - name: create-workspace
    ids:
@@ -144,6 +144,6 @@ krews:
    token: "{{ .Env.KREW_GITHUB_TOKEN }}"
    homepage: "https://kcp.io/"
    description: |
-      KCP create workspace cli plugin for kubectl. Enables you to create KCP workspaces.
-    short_description: "KCP create workspace cli plugin for kubectl."
+      kcp create workspace cli plugin for kubectl. Enables you to create kcp workspaces.
+    short_description: "kcp create workspace cli plugin for kubectl."
    skip_upload: auto
diff --git a/ADOPTERS.md b/ADOPTERS.md
index 6861db810f6..81fd0d10eaf 100644
--- a/ADOPTERS.md
+++ b/ADOPTERS.md
@@ -8,4 +8,4 @@ Listed below are organizations that have adopted kcp in one way or another. We a
| Kubermatic | Kubermatic is building Kubermatic Developer Platform (KDP), an internal developer platform (IdP) product that uses kcp as its global API control plane. | Development | [Product Website](https://www.kubermatic.com/products/kubermatic-developer-platform/) |
| Faros.sh | Faros is building a control-plane-as-a-service to access & manage multiple Kubernetes clusters across public and private deployments. | Development | - |
| SAP | SAP is developing an open reference architecture (ApeiroRA) with a Platform Mesh that leverages kcp as its foundation, enabling service providers to seamlessly connect and interact through unified KRM-based APIs in a cloud-edge continuum. | Development | [Website](https://apeirora.eu/) |
-| Upbound | We use KCP within our Cloud Managed Control Planes product to provide multi-tenant access to the underlying hostcluster. | Production | - |
+| Upbound | We use kcp within our Cloud Managed Control Planes product to provide multi-tenant access to the underlying host cluster. | Production | - |
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 68bbd8df7a5..1a98c6f4c59 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -4,9 +4,9 @@ We're thrilled that you're interested in contributing to kcp! Please visit our
[full contributing guide](https://docs.kcp.io/kcp/main/contributing) on our documentation site.
Besides that, what governs the project and all contributions to it must follow
-the [KCP Project Governance](./GOVERNANCE.md).
+the [kcp Project Governance](./GOVERNANCE.md).
-From the KCP Project Governance, the following manifesto should guide the technical
+From the kcp Project Governance, the following manifesto should guide the technical
decisions throughout all contributions:
> kcp maintainers strive to be good citizens in the Kubernetes project. 
diff --git a/FAQ.md b/FAQ.md index 62114c5845d..ccf4fc2a3bb 100644 --- a/FAQ.md +++ b/FAQ.md @@ -8,12 +8,11 @@ kcp is a highly-multi-tenant Kubernetes control-plane, built for SaaS service-pr Check out our [concepts](https://github.com/kcp-dev/kcp/blob/main/docs/concepts.md) document and feel free to open an issue if something is not covered. - ## If kcp is a Kubernetes API server without pod-like APIs, how do resources like Deployments get scheduled? kcp has a concept called [syncer](https://github.com/kcp-dev/kcp/blob/main/docs/concepts.md#syncer) which is installed on each [SyncTarget](https://github.com/kcp-dev/kcp/blob/main/docs/concepts.md#workload-cluster). The [syncer](https://github.com/kcp-dev/kcp/blob/main/docs/concepts.md#syncer) negotiates, with kcp, a set of APIs to make accessible in the workspace. This may include things like [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) or other resources you may explicitly configure the syncer to synchronize to kcp. Once these APIs are made available in your [Workspace](https://github.com/kcp-dev/kcp/blob/main/docs/concepts.md#workspace) you may then create resources of that type. From there, the [Location and Placement](https://github.com/kcp-dev/kcp/blob/main/docs/concepts.md#location) APIs help determine which [Location](https://github.com/kcp-dev/kcp/blob/main/docs/concepts.md#location) your deployable resource lands on. -## Will KCP be able to pass the K8S conformance tests in [CNCF Conformance Suites](https://www.cncf.io/certification/software-conformance/)? +## Will kcp be able to pass the K8S conformance tests in [CNCF Conformance Suites](https://www.cncf.io/certification/software-conformance/)? No, the Kubernetes conformance suites require that all Kubernetes APIs are supported and kcp does not support all APIs out of the box (for instance, Pods). @@ -69,3 +68,6 @@ Shards in kcp represent a single apiserver and etcd/db instance. This is how kc You're in the right place. Clone this repo and run `make install WHAT=./cli/cmd/kubectl-kcp`. +## What does kcp stand for / how to spell it? + +`kcp` stands for "Kube for Control Plane" and should always be written in lowercase letters. diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 3c15927bd0f..576463e9531 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -1,4 +1,4 @@ -# KCP Project Governance +# kcp Project Governance The kcp project is dedicated to democratizing Control Planes beyond container orchestration. This governance explains how the project is run. @@ -177,7 +177,7 @@ mailing list and put on hold until the necessary majority has been reached. Any Maintainer may submit a [vote](#voting) to create a new subproject under the kcp-dev GitHub organization. Subprojects are governed by all Maintainers, but may -take on additional Subproject Maintainers that are only responsible for the +take on additional Subproject Maintainers that are only responsible for the specific subproject. 
It is the combined responsibility of Maintainers and Subproject Maintainers
diff --git a/cli/cmd/kubectl-kcp/cmd/kubectlKcp.go b/cli/cmd/kubectl-kcp/cmd/kubectlKcp.go
index 87e2ba3342f..297ccf664bc 100644
--- a/cli/cmd/kubectl-kcp/cmd/kubectlKcp.go
+++ b/cli/cmd/kubectl-kcp/cmd/kubectlKcp.go
@@ -38,9 +38,9 @@ import (
 func KubectlKcpCommand() *cobra.Command {
 root := &cobra.Command{
 Use: "kcp",
- Short: "kubectl plugin for KCP",
+ Short: "kubectl plugin for kcp",
 Long: help.Doc(`
- KCP is the easiest way to manage Kubernetes applications against one or
+ kcp is the easiest way to manage Kubernetes applications against one or
 more clusters, by giving you a personal control plane that schedules your
 workloads onto one or many clusters, and making it simple to pick up and
 move. Advanced use cases include spreading your apps across clusters for
@@ -48,7 +48,7 @@ func KubectlKcpCommand() *cobra.Command {
 and enabling collaboration for individual teams without having access to
 the underlying clusters.
- This command provides KCP specific sub-command for kubectl.
+ This command provides kcp-specific sub-commands for kubectl.
 `),
 SilenceUsage: true,
 SilenceErrors: true,
diff --git a/cli/pkg/workspace/cmd/cmd.go b/cli/pkg/workspace/cmd/cmd.go
index 3d32934eb78..4c259a8557d 100644
--- a/cli/pkg/workspace/cmd/cmd.go
+++ b/cli/pkg/workspace/cmd/cmd.go
@@ -89,14 +89,14 @@ func New(streams genericclioptions.IOStreams) (*cobra.Command, error) {
 cmd := &cobra.Command{
 Aliases: []string{"ws", "workspaces"},
 Use: "workspace [create|create-context|use|current||..|.|-|~|] [-i|--interactive]",
- Short: "Manages KCP workspaces",
+ Short: "Manages kcp workspaces",
 Example: fmt.Sprintf(workspaceExample, cliName),
 SilenceUsage: true,
 TraverseChildren: true,
 RunE: func(cmd *cobra.Command, args []string) error {
 if interactive {
 if len(args) != 0 {
- return fmt.Errorf("interactive mode does not accept arguments")
+ return errors.New("interactive mode does not accept arguments")
 }
 treeOpts.Interactive = true
 if err := treeOpts.Validate(); err != nil {
diff --git a/cli/pkg/workspace/plugin/scheme.go b/cli/pkg/workspace/plugin/scheme.go
index a774c26527d..2952c4c2b90 100644
--- a/cli/pkg/workspace/plugin/scheme.go
+++ b/cli/pkg/workspace/plugin/scheme.go
@@ -24,7 +24,7 @@ import (
 func init() {
 // The metav1.TableXXX types (that are in the metav1 scheme) are not added by default
- // to the generated KCP clientset scheme.
+ // to the generated kcp clientset scheme.
 // So when we want to get the result of a request done with this clientset as a table,
 // it doesn't know the Table types and returns an error.
 //
diff --git a/cmd/cache-server/main.go b/cmd/cache-server/main.go
index 7e490531ddb..08d541a581c 100644
--- a/cmd/cache-server/main.go
+++ b/cmd/cache-server/main.go
@@ -51,7 +51,7 @@ func main() {
 serverOptions := options.NewOptions(*rootDir)
 cmd := &cobra.Command{
 Use: "cache-server",
- Short: "Runs the cache server for KCP",
+ Short: "Runs the cache server for kcp",
 Long: help.Doc(`
 Starts a server that hosts data/resources that are required by shards. 
It serves as a cache helping to reduce the storage that would have to diff --git a/cmd/kcp-front-proxy/options/options.go b/cmd/kcp-front-proxy/options/options.go index 3881e197e40..a666ca07626 100644 --- a/cmd/kcp-front-proxy/options/options.go +++ b/cmd/kcp-front-proxy/options/options.go @@ -48,7 +48,7 @@ func (o *Options) AddFlags(fss *cliflag.NamedFlagSets) { logsapiv1.AddFlags(o.Logs, fss.FlagSet("logging")) // add flags that are filtered out from upstream, but overridden here with our own version - fss.FlagSet("KCP").Var(kcpfeatures.NewFlagValue(), "feature-gates", ""+ + fss.FlagSet("kcp").Var(kcpfeatures.NewFlagValue(), "feature-gates", ""+ "A set of key=value pairs that describe feature gates for alpha/experimental features. "+ "Options are:\n"+strings.Join(kcpfeatures.KnownFeatures(), "\n")) // hide kube-only gates } diff --git a/cmd/kcp/kcp.go b/cmd/kcp/kcp.go index 9bb78598a54..111a8ef9f92 100644 --- a/cmd/kcp/kcp.go +++ b/cmd/kcp/kcp.go @@ -48,18 +48,22 @@ import ( func main() { cmd := &cobra.Command{ Use: "kcp", - Short: "Kube for Control Plane (KCP)", + Short: "Kube for Control Plane (kcp)", Long: help.Doc(` - KCP is the easiest way to manage Kubernetes applications against one or - more clusters, by giving you a personal control plane that schedules your - workloads onto one or many clusters, and making it simple to pick up and - move. It supports advanced use cases such as spreading your apps across - clusters for resiliency, scheduling batch workloads onto clusters with - free capacity, and enabling collaboration for individual teams without - having access to the underlying clusters. - - To get started, launch a new cluster with 'kcp start', which will - initialize your personal control plane and write an admin kubeconfig file + kcp is a Kubernetes-like control plane focusing on + + * being a control plane for many independent, isolated "clusters" + known as workspaces, + * enabling API service providers to offer APIs centrally + using multi-tenant operators and + * easy API consumption for users in their workspaces. + + kcp can be a building block for SaaS service providers who need a + massively multi-tenant platform to offer services to a large number of + fully isolated tenants using Kubernetes-native APIs. + + To get started, launch a new instance with 'kcp start', which will + initialize your personal shard and write an admin kubeconfig file to disk. `), SilenceUsage: true, diff --git a/cmd/kcp/options/flags.go b/cmd/kcp/options/flags.go index 07b07d09113..f3b73a8b923 100644 --- a/cmd/kcp/options/flags.go +++ b/cmd/kcp/options/flags.go @@ -29,12 +29,12 @@ var ( "misc", "secure serving", "traces", - "KCP Authentication", - "KCP Authorization", - "KCP Virtual Workspaces", - "KCP Controllers", - "KCP Home Workspaces", - "KCP Cache Server", - "KCP", + "kcp Authentication", + "kcp Authorization", + "kcp Virtual Workspaces", + "kcp Controllers", + "kcp Home Workspaces", + "kcp Cache Server", + "kcp", } ) diff --git a/cmd/kcp/options/generic.go b/cmd/kcp/options/generic.go index 2eeae4c28fa..bacf245ba82 100644 --- a/cmd/kcp/options/generic.go +++ b/cmd/kcp/options/generic.go @@ -38,7 +38,7 @@ func NewGeneric(rootDir string) *GenericOptions { } func (o *GenericOptions) AddFlags(fss *cliflag.NamedFlagSets) { - fs := fss.FlagSet("KCP") + fs := fss.FlagSet("kcp") fs.StringVar(&o.RootDirectory, "root-directory", o.RootDirectory, "Root directory. Set to \"\" to disable file (e.g. 
certificates) generation in a root directory.") fs.StringVar(&o.MappingFile, "miniproxy-mapping-file", o.MappingFile, "DEVELOPMENT ONLY. Path to additional mapping file to be used by mini-front-proxy. This should not be used in production. For production usecase use front-proxy component instead.") } @@ -75,7 +75,7 @@ func (o *GenericOptions) Validate() []error { return nil } -// mkdirRoot creates the root configuration directory for the KCP +// mkdirRoot creates the root configuration directory for the kcp // server. This has to be done early before we start bringing up server // components to ensure that we set the initial permissions correctly, // since otherwise components will create it as a side-effect. diff --git a/cmd/virtual-workspaces/options/options.go b/cmd/virtual-workspaces/options/options.go index 160999e6946..05e3833df30 100644 --- a/cmd/virtual-workspaces/options/options.go +++ b/cmd/virtual-workspaces/options/options.go @@ -98,7 +98,7 @@ func (o *Options) AddFlags(flags *pflag.FlagSet) { flags.StringVar(&o.ShardExternalURL, "shard-external-url", o.ShardExternalURL, "URL used by outside clients to talk to the kcp shard this virtual workspace is related to") flags.StringVar(&o.KubeconfigFile, "kubeconfig", o.KubeconfigFile, - "The kubeconfig file of the KCP instance that hosts workspaces.") + "The kubeconfig file of the kcp instance that hosts workspaces.") _ = cobra.MarkFlagRequired(flags, "kubeconfig") flags.StringVar(&o.Context, "context", o.Context, "Name of the context in the kubeconfig file to use") diff --git a/config/crds/apis.kcp.io_apibindings.yaml b/config/crds/apis.kcp.io_apibindings.yaml index c2234fbd27f..c3309657137 100644 --- a/config/crds/apis.kcp.io_apibindings.yaml +++ b/config/crds/apis.kcp.io_apibindings.yaml @@ -79,7 +79,7 @@ spec: This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. - Note that one must look this up for a particular KCP instance. + Note that one must look this up for a particular kcp instance. type: string resource: description: |- @@ -193,7 +193,7 @@ spec: This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. - Note that one must look this up for a particular KCP instance. + Note that one must look this up for a particular kcp instance. type: string resource: description: |- @@ -373,7 +373,7 @@ spec: This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. - Note that one must look this up for a particular KCP instance. + Note that one must look this up for a particular kcp instance. type: string resource: description: |- @@ -493,7 +493,7 @@ spec: This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. - Note that one must look this up for a particular KCP instance. + Note that one must look this up for a particular kcp instance. type: string resource: description: |- @@ -643,7 +643,7 @@ spec: This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. - Note that one must look this up for a particular KCP instance. 
+ Note that one must look this up for a particular kcp instance. type: string resource: description: |- @@ -863,7 +863,7 @@ spec: This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. - Note that one must look this up for a particular KCP instance. + Note that one must look this up for a particular kcp instance. type: string resource: description: |- diff --git a/config/crds/apis.kcp.io_apiexports.yaml b/config/crds/apis.kcp.io_apiexports.yaml index dfdcf268350..41cc2e2cb0f 100644 --- a/config/crds/apis.kcp.io_apiexports.yaml +++ b/config/crds/apis.kcp.io_apiexports.yaml @@ -166,7 +166,7 @@ spec: This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. - Note that one must look this up for a particular KCP instance. + Note that one must look this up for a particular kcp instance. type: string resource: description: |- @@ -409,7 +409,7 @@ spec: This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. - Note that one must look this up for a particular KCP instance. + Note that one must look this up for a particular kcp instance. type: string resource: description: |- diff --git a/config/crds/core.kcp.io_shards.yaml b/config/crds/core.kcp.io_shards.yaml index f45e06eabc3..dc4744e3ae0 100644 --- a/config/crds/core.kcp.io_shards.yaml +++ b/config/crds/core.kcp.io_shards.yaml @@ -60,7 +60,7 @@ spec: properties: baseURL: description: |- - baseURL is the address of the KCP shard for direct connections, e.g. by some + baseURL is the address of the kcp shard for direct connections, e.g. by some front-proxy doing the fan-out to the shards. format: uri minLength: 1 diff --git a/config/root-phase0/apiexport-shards.core.kcp.io.yaml b/config/root-phase0/apiexport-shards.core.kcp.io.yaml index 1da67ceccda..e20b4d89131 100644 --- a/config/root-phase0/apiexport-shards.core.kcp.io.yaml +++ b/config/root-phase0/apiexport-shards.core.kcp.io.yaml @@ -7,7 +7,7 @@ spec: resources: - group: core.kcp.io name: shards - schema: v240903-d6797056a.shards.core.kcp.io + schema: v251020-aa1b96e5d.shards.core.kcp.io storage: crd: {} status: {} diff --git a/config/root-phase0/apiresourceschema-shards.core.kcp.io.yaml b/config/root-phase0/apiresourceschema-shards.core.kcp.io.yaml index cba3c352511..e4cfed78b55 100644 --- a/config/root-phase0/apiresourceschema-shards.core.kcp.io.yaml +++ b/config/root-phase0/apiresourceschema-shards.core.kcp.io.yaml @@ -2,7 +2,7 @@ apiVersion: apis.kcp.io/v1alpha1 kind: APIResourceSchema metadata: creationTimestamp: null - name: v240903-d6797056a.shards.core.kcp.io + name: v251020-aa1b96e5d.shards.core.kcp.io spec: group: core.kcp.io names: @@ -57,7 +57,7 @@ spec: properties: baseURL: description: |- - baseURL is the address of the KCP shard for direct connections, e.g. by some + baseURL is the address of the kcp shard for direct connections, e.g. by some front-proxy doing the fan-out to the shards. format: uri minLength: 1 diff --git a/contrib/kcp-dex/README.md b/contrib/kcp-dex/README.md index bc202db4494..c5be5e75646 100644 --- a/contrib/kcp-dex/README.md +++ b/contrib/kcp-dex/README.md @@ -1,4 +1,4 @@ -# KCP Dex +# kcp Dex How to run local kcp with dex. 
@@ -23,7 +23,7 @@ GOBIN=$(pwd)/bin go install github.com/mjudeikis/genkey * Run dex: `./bin/dex serve ../contrib/kcp-dex/kcp-config.yaml ` -### KCP +### kcp Start kcp with oidc enabled, you can either use the OIDC flags or structured authentication configuration from a file. Example configuration is shown in `auth-config.yaml`. diff --git a/contrib/kcp-dex/kcp-config.yaml b/contrib/kcp-dex/kcp-config.yaml index 9aabb71f19d..f6758e2fde2 100644 --- a/contrib/kcp-dex/kcp-config.yaml +++ b/contrib/kcp-dex/kcp-config.yaml @@ -13,7 +13,7 @@ staticClients: public: true redirectURIs: - http://localhost:8000 - name: 'KCP App' + name: 'kcp App' secret: Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== # Let dex keep a list of passwords which can be used to login to dex. diff --git a/contrib/logo/README.md b/contrib/logo/README.md index 7a3f1e96518..43f097ad21b 100644 --- a/contrib/logo/README.md +++ b/contrib/logo/README.md @@ -1,7 +1,7 @@ -# KCP Logo +# kcp Logo -The logo of KCP is a hypercube, viewed symmetrically in a way that it forms two -nested, equally oriented, centered regular hexagons with the connecting hypercube +The logo of kcp is a hypercube, viewed symmetrically in a way that it forms two +nested, equally oriented, centered regular hexagons with the connecting hypercube lines clearly visible, either through color contrast or a line. There are these variants, to be used depending on context: @@ -10,14 +10,14 @@ There are these variants, to be used depending on context: 1. high contrast Logo
2. glow Logo
-Moreover, there is a red-blue variant: +Moreover, there is a red-blue variant: 1. normal Logo
2. high contrast Logo
3. glow Logo
Depending on context one or the other variant makes more sense visually. -The KCP font is [Ubuntu](https://fonts.google.com/specimen/Ubuntu). +The kcp font is [Ubuntu](https://fonts.google.com/specimen/Ubuntu). The application used for the original files is [Amadine](https://amadine.com/). diff --git a/contrib/tilt/README.md b/contrib/tilt/README.md index dfa78d8aff2..ea356a5d475 100644 --- a/contrib/tilt/README.md +++ b/contrib/tilt/README.md @@ -1,7 +1,7 @@ # TILT -Tilt setup for KCP development. -The benefit of using Tilt here is that it can be used to build and deploy the KCP +Tilt setup for kcp development. +The benefit of using Tilt here is that it can be used to build and deploy the kcp automatically when code changes are detected. It also provides tools like Prometheus, Grafana, Loki and port forwarding into local machines for debugging. It uses a helm chart as a base and injects locally built images into kind cluster @@ -28,12 +28,12 @@ make tilt-kind-up # Output example: .... -Install KCP +Install kcp Tooling: Grafana: http://localhost:3333/ Prometheus: http://localhost:9091 -KCP API Server: https://localhost:9443 -KCP FrontProxy Server: https://localhost:9444 +kcp API Server: https://localhost:9443 +kcp FrontProxy Server: https://localhost:9444 Tilt started on http://localhost:10350/ v0.33.6, built 2023-09-29 diff --git a/contrib/tilt/Tiltfile b/contrib/tilt/Tiltfile index f490fbe30d4..31b54e5cf73 100644 --- a/contrib/tilt/Tiltfile +++ b/contrib/tilt/Tiltfile @@ -98,7 +98,7 @@ helm_remote( ) -# KCP development +# kcp development namespace_create('kcp-certs') namespace_create('kcp-proxy') namespace_create('kcp-cache') diff --git a/contrib/tilt/dex-values.yaml b/contrib/tilt/dex-values.yaml index 25a937e3c45..8bf2c1f9f75 100644 --- a/contrib/tilt/dex-values.yaml +++ b/contrib/tilt/dex-values.yaml @@ -32,7 +32,7 @@ config: redirectURIs: - https://idp.dev.local:6443/callback - http://localhost:8000 - name: 'KCP App' + name: 'kcp App' secret: Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== certmanager: diff --git a/contrib/tilt/kind.sh b/contrib/tilt/kind.sh index 1b425a2b112..ce669396d0f 100755 --- a/contrib/tilt/kind.sh +++ b/contrib/tilt/kind.sh @@ -141,13 +141,13 @@ helm upgrade -i \ # is to create some Issuers and/or ClusterIssuers. That is indeed # among the things that the kcp helm chart will do. -echo "Install KCP" +echo "Install kcp" echo "Tooling:" echo "Grafana: http://localhost:3333/" echo "Prometheus: http://localhost:9091" -echo "KCP API Server: https://localhost:9443" -echo "KCP FrontProxy Server: https://localhost:9444" +echo "kcp API Server: https://localhost:9443" +echo "kcp FrontProxy Server: https://localhost:9444" # must be last as will be blocking tilt up -f contrib/tilt/Tiltfile diff --git a/docs/content/concepts/authentication/index.md b/docs/content/concepts/authentication/index.md index d9b83ffa840..89143a6ad47 100644 --- a/docs/content/concepts/authentication/index.md +++ b/docs/content/concepts/authentication/index.md @@ -13,7 +13,7 @@ For detailed instructions on running kcp with a specific authentication strategy - [OIDC] -## KCP Front Proxy Authentication +## kcp Front Proxy Authentication kcp-front-proxy is a reverse proxy that accepts client certificates and forwards Common Name (as username) and Organizations (as groups) to the backend API servers in HTTP headers. The proxy terminates TLS and communicates with API servers via mTLS. Traffic is routed based on paths. 
@@ -50,7 +50,7 @@ These can be passed by setting `--authentication-pass-on-groups` and `--authenti
By default, kcp-front-proxy is configured to drop `system:masters` and `system:kcp:logical-cluster-admin`. This ensures that highly privileged users do not receive elevated access when passing through the proxy.
-## KCP Server Admin Authentication
+## kcp Server Admin Authentication
Admin Authenticator sets up user roles and groups and generates authentication tokens and an `admin.kubeconfig` file. The authentication process relies on the Kubernetes authenticated group authenticator.
To enable admin authentication in the kcp server, you need to run it in development mode with the `--batteries-included=admin` flag set.
@@ -58,22 +58,22 @@ This setting is currently enabled by default when running the `kcp` binary, but
### Users and Groups
-| **User Name**   | **Role**                                                                                                                             | **Groups**                           |
-|-----------------|------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------|
-| **shard-admin** | Member of the privileged system group. This user bypasses most kcp authorization checks.                                            | system:masters|
-| **kcp-admin**   | Member of the system:kcp:admin group. This user is subject to kcp authorization checks.                                             | system:kcp:admin |
-| **user**        | Regular non-admin user who is not a part of any predefined groups.                                                                  | None |
+| **User Name**   | **Role**                                                                                   | **Groups**        |
+|-----------------|--------------------------------------------------------------------------------------------|-------------------|
+| **shard-admin** | Member of the privileged system group. This user bypasses most kcp authorization checks.  | system:masters    |
+| **kcp-admin**   | Member of the system:kcp:admin group. This user is subject to kcp authorization checks.   | system:kcp:admin  |
+| **user**        | Regular non-admin user who is not a part of any predefined groups.                        | None              |
### Generated Kubeconfig Contexts
kcp server generates a kubeconfig file (admin.kubeconfig) containing credentials for the predefined users. This file allows users to authenticate into different logical clusters.
-| **Context Name** | **Cluster Endpoint** |
-|------------------|----------------------|
-| **root**         | /clusters/root |
-| **base**         | /clusters/base |
 | **system:admin** | /clusters/system:admin |
-| **shard-base**   | /clusters/base |
+| **Context Name** | **Cluster Endpoint**   |
+|------------------|------------------------|
+| **root**         | /clusters/root         |
+| **base**         | /clusters/base         |
+| **shard-base**   | /clusters/base         |
## Pages
diff --git a/docs/content/concepts/authorization/index.md b/docs/content/concepts/authorization/index.md
index 507334551c8..2dc9053050f 100644
--- a/docs/content/concepts/authorization/index.md
+++ b/docs/content/concepts/authorization/index.md
@@ -5,7 +5,7 @@ description: >
# Authorization
-Within workspaces, KCP implements the same RBAC-based authorization mechanism as Kubernetes.
+Within workspaces, kcp implements the same RBAC-based authorization mechanism as Kubernetes.
Other authorization schemes (e.g. ABAC) are not supported.
Generally, the same (cluster) role and (cluster) role binding principles apply exactly as in Kubernetes. 
diff --git a/docs/content/concepts/miscellaneous/braindump.md b/docs/content/concepts/miscellaneous/braindump.md index 727ece83805..5e0429d2037 100644 --- a/docs/content/concepts/miscellaneous/braindump.md +++ b/docs/content/concepts/miscellaneous/braindump.md @@ -1,21 +1,21 @@ --- description: > - A brain dump of thoughts behind KCP's architecture. + A brain dump of thoughts behind kcp's architecture. --- # Architecture – A Brain Dump !!! note - This document is a brain dump of thoughts behind KCP's architecture. + This document is a brain dump of thoughts behind kcp's architecture. It's a work in progress and may contain incomplete or unpolished ideas. It was recorded through ChatGPT (not generated), and hence might have a conversational tone (GPT's summarizing responses have been removed) and might contain mistakes. -## KCP Overview +## kcp Overview -KCP is an extension or a fork of the KubeAPI server, it's adding a concept +kcp is an extension or a fork of the KubeAPI server, it's adding a concept called a logical cluster, or merely it's called a workspace, a workspace concept -to one instance of KCP. When I talk about one instance, it's actually one shard, +to one instance of kcp. When I talk about one instance, it's actually one shard, and there can be multiple shards in a system. And the paper will be about the architecture to make that possible while giving the user @@ -29,26 +29,26 @@ certain kinds of APIs which are used to from Kubernetes, APIs which are like namespaces, config maps, secrets, and many more. And it has the semantics you would expect, like namespace deletion is implemented, garbage collection is implemented, airbag permission management is implemented the same way as in -Kubernetes. The big difference to Kubernetes is that one instance of KCP, we +Kubernetes. The big difference to Kubernetes is that one instance of kcp, we call that one shard in this context, can host an arbitrary number of workspaces, each being logically independent. -If you look on one shard, one instance of KCP and a number of workspaces hosted +If you look on one shard, one instance of kcp and a number of workspaces hosted by that shard. Between workspaces, there can be interactions. Interactions in the sense of one workspace can export APIs and another workspace can bind to those APIs. And the objects to define those -two concepts in KCP are named like that, API export and API binding. With a -small number of exceptions, everything in KCP is implemented using those API +two concepts in kcp are named like that, API export and API binding. With a +small number of exceptions, everything in kcp is implemented using those API export, API binding concepts. Even the workspace concept itself is based on an -API export. The workspaces of KCP have a structure, like they live in KCP as a +API export. The workspaces of kcp have a structure, like they live in kcp as a system, as a platform. They are ordered in a hierarchy, so they are placed in a hierarchy in a tree-like structure, similar to directories in a Linux or Windows file system. Every directory is here a workspace. Exports and bindings connect those. There's one very special workspace called the root workspace. The root -workspace hosts the API exports of the main KCP APIs. For example, the tendency -KCP or API group, with the workspace object, the workspace kind as a primary +workspace hosts the API exports of the main kcp APIs. 
For example, the tendency +kcp or API group, with the workspace object, the workspace kind as a primary type, is exported from the root workspace. So the root workspace plays a crucial -role in bootstrapping a KCP instance. +role in bootstrapping a kcp instance. The workspace hierarchy is not established through exports and bindings. The workspace hierarchy is established by defining a child workspace within a parent @@ -58,7 +58,7 @@ similar to a file system hierarchy in Linux. I want to dive a little bit into the Workspace concept, what is behind it, how it's implemented. As I described, Workspace objects within parent Workspaces define the hierarchy. Within the hierarchy, you get a path, a path like a file -system path. In KCP, we use a colon as separator. A normal example of a path is +system path. In kcp, we use a colon as separator. A normal example of a path is starting with root, colon, team name, colon, and, for example, application name. So it's root, team A, application Z, as an example. The path is constructed and also reconstructed just by this nesting of Workspaces. It's not inherent in how @@ -66,10 +66,10 @@ the data is stored, stored in the storage layer in etcd or in the kind-based SQL database. Behind the scenes, every Workspace path is mapped to a logical cluster. A logical cluster is identified by some hash value. So there's a hash, some random character string. And this is unique. So it's like UID. It's unique -throughout the KCP system. And that key is used within the etcd or the +throughout the kcp system. And that key is used within the etcd or the kind-based key structure. So it's part of the keys in the storage. This is used to separate values, so objects, of Workspaces which live in different logical -clusters. When you talk to KCP, you as a client from outside, so similarly as +clusters. When you talk to kcp, you as a client from outside, so similarly as you would talk to a kube cluster, but now you talk to a Workspace, you can talk to it through the path, or you can talk to it through the logical cluster, identify as this random UID string. @@ -77,11 +77,11 @@ identify as this random UID string. The mapping from a workspace path to the Logical Cluster ID is done through reading the leaf object of the path, so the last workspace object, the last component of the path. Inside of that, the UID of the Logical Cluster is stored. -So if you know all workspace objects in KCP, you can resolve every path which +So if you know all workspace objects in kcp, you can resolve every path which exists which points to a Logical Cluster by going through the workspace objects one by one up to the leaf, reading the Logical Cluster ID, and then accessing. -I want to talk about the consistency guarantees in KCP. A workspace is a main +I want to talk about the consistency guarantees in kcp. A workspace is a main unit of a consistent API space, like a Kubernetes cluster. So in a workspace, you have similar guarantees as in a Kubernetes cluster, which means per object kind, you have resource versions, which order changes to objects. Cross @@ -90,7 +90,7 @@ workspaces, in two logical clusters technically, and you look on objects and resource versions, there does not have to be a linear order between them. It can be, but it doesn't have to. 
-The fact that objects in one KCP shard are stored in the same storage, the same +The fact that objects in one kcp shard are stored in the same storage, the same etcd or kine storage, this can be exploited by listing objects across workspaces, across logical clusters which are stored on the same shard. There is a request type we call a wildcard request. And that request returns objects @@ -110,7 +110,7 @@ like a CRD or multiple CRDs, so it defines resources with things like resource name, of course, the kind name, and the list type name. If you have multiple workspaces, in theory, each could export the same kind in the same API group. So there's a problem of how to distinguish those. Different exports, same kind of -group. What KCP is introducing, basically adding to a CRD to an export, is a +group. What kcp is introducing, basically adding to a CRD to an export, is a concept called an identity of an export. The identity is a secret that the owner of the API export knows. When you know that secret, you can export the same object and workspaces which use a bind to that API will then be actually @@ -138,15 +138,15 @@ group name. That way, we get a safe system. ## API Exports Definition and Storage -An API export in KCP is similar to a CRD and defines resources by specifying +An API export in kcp is similar to a CRD and defines resources by specifying details like the resource name, kind name, and list type name. In scenarios -where multiple workspaces could export the same kind in the same API group, KCP +where multiple workspaces could export the same kind in the same API group, kcp introduces a way to distinguish between different exports of the same kind and group. ### Identity of an Export -KCP adds a concept called the identity of an export. This identity is +kcp adds a concept called the identity of an export. This identity is essentially a secret known only to the owner of the API export. This secret, when known, allows for the exporting of the same object. Workspaces that bind to this API are then actually bound to that export with the specified identity. The @@ -154,7 +154,7 @@ security implication here is that if this secret is leaked, an attacker could potentially export the same kind with the same identity and intercept information. Utilizing the Identity Hash: -To secure this system, KCP uses an SHA-256 hash of the identity as a critical +To secure this system, kcp uses an SHA-256 hash of the identity as a critical part of a binding. When a workspace wants to bind to an API export, it points the binding to the export by path (or other methods in the future) and uses the identity hash to ensure binding to the correct export. Within the etcd storage @@ -164,16 +164,16 @@ and Data Segregation in Storage: The identity hash becomes part of the key in the storage system. While client requests to a workspace use the resource name asynchronously, the binding -carries the identity hash, and KCP ensures that data objects are stored in keys +carries the identity hash, and kcp ensures that data objects are stored in keys incorporating this hash. For wildcard requests, this system allows the export owner to see all objects across all logical clusters on a shard that pertain to their export, using their identity hash. The wildcard request includes the identity hash as part of the request resource name, allowing segregation of wildcard requests for different exports of the same resource and group name. 
-This design presents a robust and secure system for managing API exports in KCP, +This design presents a robust and secure system for managing API exports in kcp, ensuring that only authorized entities can access the relevant data and preventing unauthorized access or data leaks. This approach to API export -management in KCP is both intricate and vital for the system's overall security +management in kcp is both intricate and vital for the system's overall security and functionality. ## Wildcard Requests @@ -191,27 +191,27 @@ clusters. This API service looks like the wildcard request we talked about earlier, but it is protected. For example, on that endpoint, the API export owner does not have to pass the identity hash. This is automatically added behind the scenes when proxying the request from the API export owner to the -actual KCP instance via the wildcard request we talked about earlier. Virtual +actual kcp instance via the wildcard request we talked about earlier. Virtual workspace API servers are a crucial tool. Here, we see them the first time for the first use of them in the system, but there are many more. We have built that -in KCP, but in theory, virtual workspace API servers can be built by third +in kcp, but in theory, virtual workspace API servers can be built by third parties and add further functionality which goes beyond a simple API service of one block. ## Sharding -Now it is time to extend the mental model of KCP, which we described until now, -to extend it to multiple shards. Imagine you have multiple instances of KCP +Now it is time to extend the mental model of kcp, which we described until now, +to extend it to multiple shards. Imagine you have multiple instances of kcp running. Let's say we have two, A and B. Let's call the first one, let's call it the root shard. So the A shard is the root shard. The root shard hosts the root workspace. By the way, small note, the root workspace is the only workspace which has a logical cluster, the identifier of its logical cluster, which matches the workspace name. So the logical cluster UID of the root workspace is root. And the root shard is the one hosting the root logical cluster. The root -logical cluster is a singleton in the KCP platform. On that root shard, there +logical cluster is a singleton in the kcp platform. On that root shard, there can be many more workspaces, many more logical clusters. The workspaces, or merely the logical clusters behind the workspace path, they are hosted on the -shards of the KCP system. If they are multiple, for every logical cluster, when +shards of the kcp system. If they are multiple, for every logical cluster, when you want to access it, you have to know on which shard that logical cluster is stored. Every logical cluster object, or let's go into some detail here, a logical cluster, as we described, is identified by a logical cluster UID. That @@ -229,11 +229,11 @@ Existence again is realized by creating the logical cluster object. The logical cluster object is always called cluster. There's just one name, one singleton per workspace, per logical cluster. -When sending a request to a multi-shard KCP, that request must be routed to the +When sending a request to a multi-shard kcp, that request must be routed to the right shard. As we have seen, the existence of the Logical Cluster object called Cluster tells the system that the given Logical Cluster identified by the UID -lives on that shard. 
In other words, a front proxy, as we call it in KCP, a -component sitting in front of the KCP system, if it watches all Logical Cluster +lives on that shard. In other words, a front proxy, as we call it in kcp, a +component sitting in front of the kcp system, if it watches all Logical Cluster objects on all shards, it knows how to route requests. Combining that with the resolution of WorkspacePath, as we have seen before, this front proxy watches Workspace objects and Logical Cluster objects. When resolving a request to a @@ -258,7 +258,7 @@ export, and the API export can live on different shards, as we said. In that case, it has to access a different shard to implement the functionality of the API. This is not a desired behavior, because if the target shard is unavailable for some time for reasons, all the other shards won't be able to bind APIs -anymore. To solve that, KCP introduced a concept called a cache server. A cache +anymore. To solve that, kcp introduced a concept called a cache server. A cache server stores objects which are needed to implement cross-shard functionality of APIs. For example, for the API binding process, the API exports are needed. What happens is, there is a second controller next to the API binding controller, and @@ -281,10 +281,10 @@ objects with a certain label for application, and application happens. This is a general pattern, which we will use for multi or cross workspace semantics of APIs. -The concept of a cache server introduces constraints. The cache server in KCP is +The concept of a cache server introduces constraints. The cache server in kcp is a regular Kube API server with workspace support. So it is bound to the scaling targets of Kube itself, the Kube API server itself. So imagine you have a giant -multi-talent KCP installation. The cache server has to hold all the exports. +multi-talent kcp installation. The cache server has to hold all the exports. This means that the cache server has to be able to store all exports in the system in roughly eight gigabytes of storage memory, in the case of etcd, including the airbag objects as well. While for API exports, this number doesn't @@ -303,25 +303,25 @@ offer services to other tenants. In such a platform, they would opt in into sharing their API export to the world. In such a setup, the cache server scalability would only be a limit for the APIs which are shared in that app store-like way. This doesn't seem to be a limit which limits the applicability -of KCP, because there will never be so many exports that eight gigabyte is not +of kcp, because there will never be so many exports that eight gigabyte is not enough. ## Multi-Shard Architecture and Controllers In a multi-shard setup You need multiple instances of a controller or alternatively make a controller -aware of multiple instances of KCP. A controller would in the second case have +aware of multiple instances of kcp. A controller would in the second case have multiple wildcard watches as described before not against the shards themselves but against the virtual workspace API server for API exports. And they would watch multiple of them at the same time and give behavior semantics to the API objects on all of those shards. And you can imagine that you want some kind of partitioning of course if the number of shards grows. But it's pretty clear that -some kind of awareness of KCP is necessary to run multi-workspace, +some kind of awareness of kcp is necessary to run multi-workspace, multi-cluster controllers. 
In particular for the API exports, we talked about having the URL of the virtual workspace API server in the status. In reality, this list of URLs or this is a list of URLs. It's not just one. There are multiple URLs to multiple virtual workspace API servers, one per shard. At least one per shard which has at least one workspace which binds against that export. -That way a controller which is KCP-enabled would have to watch the API export +That way a controller which is kcp-enabled would have to watch the API export status and spawn another instance either of an informer or even the whole controller per URL which pops up in the status of the export. @@ -355,7 +355,7 @@ workspace path may be unlocked. ## Workspace Access Authorization -To access a workspace, the KCP instance which receives the request, potentially +To access a workspace, the kcp instance which receives the request, potentially sent by the front proxy, will do an authentication of the user, whether this user is able to access the workspace as a whole, and in particular, the object which is accessed, whether it is allowed to access that using normal local @@ -391,10 +391,10 @@ existing yet. So this process of scheduling today needs privileged access, a privileged user which can create logic clusters, skipping the check whether the logic cluster actually exists. -## Bootstrapping a KCP platform and, in particular, a KCP shard in the light of multiple shards +## Bootstrapping a kcp platform and, in particular, a kcp shard in the light of multiple shards -Bootstrapping a KCP system, and in particular, a KCP shard. Bootstrapping a -single shard KCP means to create the root workspace, create the API shards for +Bootstrapping a kcp system, and in particular, a kcp shard. Bootstrapping a +single shard kcp means to create the root workspace, create the API shards for the main API groups, and that's basically it. A multi-shard setup requires to bootstrap the root shard, which is basically equivalent to what I just described, and then bootstrapping further shards in addition. When we talked @@ -403,24 +403,24 @@ The identity, or more completely, the hash of the identity string, is an important part of a resource, especially the resources which are defined in the root shards, the root workspace API exports. And those are particularly risky, so we really want this security feature of identities and identity hashes for -those, because they are central for KCP, for the security of KCP, of the whole -platform. By bootstrapping a KCP shard, it will need the identity hash of those -exports. And to do that, the KCP shard needs a bootstrapping root shard user, +those, because they are central for kcp, for the security of kcp, of the whole +platform. By bootstrapping a kcp shard, it will need the identity hash of those +exports. And to do that, the kcp shard needs a bootstrapping root shard user, which is able to read the identity hashes of the API exports. Which means, when bootstrapping a new shard for the first time the shard has started, it will need -access to the root shard. After that, every start of the KCP instance of that -shard, the KCP process, that root user is actually not needed anymore if no new +access to the root shard. After that, every start of the kcp instance of that +shard, the kcp process, that root user is actually not needed anymore if no new exports are added or something like that. Every shard caches its identities, its identity hashes, in a local contract map. 
So that contract maps allows to -restart every KCP shard, even when the root shard is down. What makes this -bootstrapping tricky is that to make, to start up the KCP shard and the core +restart every kcp shard, even when the root shard is down. What makes this +bootstrapping tricky is that to make, to start up the kcp shard and the core controllers, for example, the API binding controller, it has to start certain informers. And to start an informer, you have to know the identity hash of the exports of the resources you want to watch. So in the bootstrapping phase, before a shard is ready, it will need this root shard information, which is cached in the contract map, or if the contract map is not there, or incomplete, there's access to the root shard. When those informers are up, the core -controllers of the KCP shard are started and the KCP shard is ready to serve +controllers of the kcp shard are started and the kcp shard is ready to serve requests. ## Definitions of Shards @@ -430,7 +430,7 @@ starts up, it will attempt to create its object there and tries to keep status of that object up-to-date with the controller. In the future, this object might also give load information, which might control scheduling of new workspaces. Today, this is very simplistic and not really crucial for the architecture of -KCP. +kcp. ## Logical cluster path annotation @@ -468,7 +468,7 @@ workspace in the parent, because the parent doesn't exist. The front proxy will take the path annotation, and it will resolve the path from users colon username just by following the annotation. The annotation is enough to create a new root in the system. That way, we have rootless user home workspaces. In a previous -iteration of KCP, we had a hierarchy of user home workspaces in the main +iteration of kcp, we had a hierarchy of user home workspaces in the main hierarchy, which means we had to apply a multilevel hierarchy with a first letter or some hash of usernames and then multiple layers to guarantee that millions of users can store the workspace objects in the same parent. As you @@ -476,16 +476,16 @@ know in Kubernetes, there's a logical or technical limit of the number of objects of cluster wide cluster scope objects, which is probably in the 10,000 or something like that. All this complexity of a multilayer hierarchy for home workspaces goes away by having rootless workspaces. They can live anywhere in -the KCP system on every shard, and the front proxy implements them just by the +the kcp system on every shard, and the front proxy implements them just by the path annotation. To implement them, you need a privileged user which can create logical cluster objects in non-existing logical clusters, so very similar to the scheduler we talked about before. The user home workspace here is just an -example. You could have rootless logical clusters for tenants, `tenant:colon:tenant-name`, +example. You could have rootless logical clusters for tenants, `tenant:colon:tenant-name`, for example, or anything else you want to have as a start of a new hierarchy. # System Masters versus Cluster Admin Users -In a single-shard setup, when you launch KCP via KCP Start, for convenience, an +In a single-shard setup, when you launch kcp via kcp Start, for convenience, an admin user is created. An admin user has star access, wildcard access, to all resources and all verbs. In Kubernetes, there is a System Masters group in addition. System Masters is more than Cluster Admin. System Masters means that @@ -496,19 +496,19 @@ clusters. 
It is important to understand that every user in front of the front proxy, so a real user including cluster admins, should not be System Masters because System Masters can destroy the workspace hierarchy, bring it into an inconsistent state. Hence, System Masters users should only be used for very -specific high-risk operations on a shard. The KCP process will create a shard +specific high-risk operations on a shard. The kcp process will create a shard admin for that purpose. It's a shard local System Masters user. The admin user -which is created is not like that. The KCP Start command in a single shard setup +which is created is not like that. The kcp Start command in a single shard setup will create a token-based admin user. That token is only valid during runtime. It's not completely correct. That token is also stored locally, but the idea is that that user is just for convenience and a single shard setup. If you want to have a multi-shard setup, you have to create an admin user outside the -bootstrapping process. There's a flag for the KCP Start command to skip the +bootstrapping process. There's a flag for the kcp Start command to skip the admin user creation. You can use, for example, a client certificate which adds cluster admin permissions to a user and makes that client certificate accepted by all shards by passing the right client cert flags to the process. To -summarize, the bootstrapping of KCP in a multi-shard setup is a multistep -process. The KCP Start command alone is without any special parameters. It's +summarize, the bootstrapping of kcp in a multi-shard setup is a multistep +process. The kcp Start command alone is without any special parameters. It's really meant for a single shard and for that reason, pretty simplistic setup. This is intentional. The admin user must be created out of scope in a step on its own. @@ -533,4 +533,4 @@ let's collect some thoughts: events that actually are no deletions. It might be that extending the wildcard watch protocol with MOVE events would help here. After all this protocol is under kcp control as wildcard request semantics is not part of - Kubernetes conformance anyway. \ No newline at end of file + Kubernetes conformance anyway. diff --git a/docs/content/concepts/workspaces/workspace-types.md b/docs/content/concepts/workspaces/workspace-types.md index 7a80ba9e2c5..79d8d577395 100644 --- a/docs/content/concepts/workspaces/workspace-types.md +++ b/docs/content/concepts/workspaces/workspace-types.md @@ -5,7 +5,6 @@ description: > # Workspace Types - Workspaces have a type. A type is defined by a `WorkspaceType`. A type defines initializers. They are set on new Workspace objects and block the workspace from leaving the initializing phase. Both system components and @@ -15,7 +14,7 @@ e.g. to bootstrap resources inside the workspace, or to set up permission in its kcp comes with a built-in set of workspace types, and the admin may create objects that define additional types. -- **Root Workspace** is a singleton. It holds some data that applies +- **Root Workspace** is a singleton. It holds some data that applies to all workspaces, such as the set of defined workspace types (objects of type `WorkspaceType`). - **HomeRoot Workspace** is normally a singleton, holding the branch @@ -23,14 +22,14 @@ define additional types. Can only be a child of the root workspace, and can only have HomeBucket children. - **HomeBucket Workspace** are intermediate vertices in the hierarchy - between the HomeRoot and the user home workspaces. 
Can be a child
-  of the root or another HomeBucket workspace. Allowed children are
+  between the HomeRoot and the user home workspaces. Can be a child
+  of the root or another HomeBucket workspace. Allowed children are
   home and HomeBucket workspaces.
-- **Home Workspace** is a user's home workspace. These hold user
+- **Home Workspace** is a user's home workspace. These hold user
   resources such as applications with services, secrets, configmaps,
-  deployments, etc. Can only be a child of a HomeBucket workspace.
+  deployments, etc. Can only be a child of a HomeBucket workspace.
- **Universal Workspace** is a basic type of workspace with no
-  particular nature. Has no restrictions on parent or child workspace
+  particular nature. Has no restrictions on parent or child workspace
   types.
The following workspace types are created by kcp if the `workspace-types` battery
@@ -84,7 +83,7 @@ any bucket.
DO NOT set the bucket size to be longer than 2, as this will adversely impact performance. User-names have `(26 * [(26 + 10 + 2) * 61] * 36 = 2169648)` permutations, and buckets are made up of lowercase-alpha
-chars. Invalid configurations break the scale limit in sub-buckets or users. Valid configurations should target
+chars. Invalid configurations break the scale limit in sub-buckets or users. Valid configurations should target
having not more than ~1000 sub-buckets per bucket and at least 5 users per bucket.
### Valid Configurations
@@ -152,7 +151,8 @@ As an example, the `system:admin` workspace exists for administrative objects that are scoped to the local shard (e.g. `lease` objects for kcp internal controllers if leader election is enabled). It is accessible via `/clusters/system:admin`.
-# Workspace Type Extensions and Constraints
+## Workspace Type Extensions and Constraints
+
kcp offers extensions and constraints that enable you to inherit functionality from other workspace types and create custom workspace hierarchies for your organizational structure.
@@ -160,6 +160,7 @@ A `WorkspaceType` can extend one or more other `WorkspaceTypes` using the `spec.
field.
**Example**
+
```yaml
apiVersion: tenancy.kcp.io/v1alpha1
kind: WorkspaceType
metadata:
  name: sample
spec:
  extend:
    with:
    - name: universal
    - name: custom
```
+
In this example, the `sample` workspace type:
+
* inherits [initializers](./workspace-initialization.md) from the extended types
* is considered as an extended type during type constraint evaluation
@@ -188,8 +191,11 @@ spec:
    - name: standard
      path: root:base
```
-## Workspace Constraint Mechanisms
-KCP provides two primary constraint mechanisms for workspace types:
+
+### Workspace Constraint Mechanisms
+
+kcp provides two primary constraint mechanisms for workspace types:
+
* `limitAllowedChildren`: Controls which workspace types can be created as children.
* `limitAllowedParents`: Controls which workspace types can serve as parents.
@@ -205,7 +211,9 @@ spec:
    - name: custom
      path: root
```
+
You can also block all types from being used as children:
+
```yaml
apiVersion: tenancy.kcp.io/v1alpha1
kind: WorkspaceType
metadata:
  name: leaf-workspace
spec:
  limitAllowedChildren:
    none: true
```
+
This ensures that no other workspace type can be created as a child of `leaf-workspace`. 
diff --git a/docs/content/contributing/commit-tag.png b/docs/content/contributing/commit-tag.png
deleted file mode 100644
index 657c3b300af..00000000000
Binary files a/docs/content/contributing/commit-tag.png and /dev/null differ
diff --git a/docs/content/contributing/guides/rebasing-kubernetes/commit-tag.png b/docs/content/contributing/guides/rebasing-kubernetes/commit-tag.png
new file mode 100644
index 00000000000..8851cc15a80
Binary files /dev/null and b/docs/content/contributing/guides/rebasing-kubernetes/commit-tag.png differ
diff --git a/docs/content/contributing/guides/rebasing-kubernetes.md b/docs/content/contributing/guides/rebasing-kubernetes/index.md
similarity index 93%
rename from docs/content/contributing/guides/rebasing-kubernetes.md
rename to docs/content/contributing/guides/rebasing-kubernetes/index.md
index ee4cd713648..52cbc3b9f41 100644
--- a/docs/content/contributing/guides/rebasing-kubernetes.md
+++ b/docs/content/contributing/guides/rebasing-kubernetes/index.md
@@ -1,9 +1,13 @@
 # Rebasing Kubernetes
 
 This describes the process of rebasing kcp onto a new Kubernetes version. For the examples below, we'll be rebasing
-onto v1.33.3
+onto v1.33.3.
 
-# 1. Update kcp-dev/apimachinery
+!!! note
+    This guide was last used before the [monorepo](../../monorepo.md) transition. Beware that the
+    procedure has changed significantly since then.
+
+## 1. Update kcp-dev/apimachinery
 
 1. Create a new branch for the update, such as `1.31-prep`.
 2. Update go.mod:
@@ -22,7 +26,7 @@ onto v1.33.3
 6. Push to your fork.
 7. Open a PR; get it reviewed and merged.
 
-# 2. Update kcp-dev/code-generator
+## 2. Update kcp-dev/code-generator
 
 1. Create a new branch for the update, such as `1.26-prep`.
 2. Update `go.mod`:
@@ -47,7 +51,8 @@ onto v1.33.3
 8. Push to your fork.
 9. Open a PR; get it reviewed and merged.
 
-# 3. Update kcp-dev/client-go
+## 3. Update kcp-dev/client-go
+
 1. Create a new branch for the update, such as `1.26-prep`.
 2. Update go.mod:
    1. You may need to change the go version at the top of the file to match what's in go.mod in the root of the
@@ -70,9 +75,9 @@ onto v1.33.3
 7. Push to your fork.
 8. Open a PR; get it reviewed and merged.
 
-# 4. Update kcp-dev/kubernetes
+## 4. Update kcp-dev/kubernetes
 
-## Terminology
+### Terminology
 
 Commits merged into `kcp-dev/kubernetes` follow this commit message format:
 
@@ -94,7 +99,7 @@ Commits merged into `kcp-dev/kubernetes` follow this commit message format:
   - In general, these commits are used to maintain the codebase in ways that are branch-specific,
     like the update of generated files or dependencies.
 
-## Rebase Process
+### Rebase Process
 
 1. First and foremost, take notes of what worked/didn't work well. Update this guide based on your experiences!
 2. Remember, if you mess up, `git rebase --abort` and `git reflog` are your very good friends!
@@ -185,7 +190,7 @@ two kube versions.
 13. Commit the dependency updates:
    ```
    git add .
-   git commit -m 'CARRY: : Add KCP dependencies'
+   git commit -m 'CARRY: : Add kcp dependencies'
    ```
 14. Update the vendor directory:
@@ -227,7 +232,7 @@ two kube versions.
 18. Open a pull request for review **against the baseline branch, e.g. kcp-1.26-baseline**, but mark it `WIP` and
     maybe even open it in draft mode - you don't want to merge anything just yet.
 
-# 5. Update kcp-dev/kcp
+## 5. Update kcp-dev/kcp
 
 1. At this point, you're ready to try to integrate the updates into kcp proper.
There is still likely a good amount of work to do, so don't get discouraged if you encounter dozens or hundreds of compilation issues at @@ -244,13 +249,16 @@ two kube versions. 3. Go ahead and make a commit here, as the next change we'll be making is to point kcp at your local checkout of Kubernetes. 3. Point kcp at your local checkout of Kubernetes: - ``` - # Change KUBE per your local setup - KUBE=../../../go/src/k8s.io/kubernetes - gsed -i "s,k8s.io/\(.*\) => .*/kubernetes/.*,k8s.io/\1 => $KUBE/vendor/k8s.io/\1,;s,k8s.io/kubernetes => .*,k8s.io/kubernetes => $KUBE," go.mod - ``` - !!! warning - Don't commit your changes to go.mod/go.sum. They point to your local file system. + + ``` + # Change KUBE per your local setup + KUBE=../../../go/src/k8s.io/kubernetes + gsed -i "s,k8s.io/\(.*\) => .*/kubernetes/.*,k8s.io/\1 => $KUBE/vendor/k8s.io/\1,;s,k8s.io/kubernetes => .*,k8s.io/kubernetes => $KUBE," go.mod + ``` + + !!! warning + Don't commit your changes to go.mod/go.sum. They point to your local file system. + 4. Resolve any conflicts 5. Run `make modules` 6. Run `make codegen` @@ -258,7 +266,7 @@ two kube versions. 8. Get the `lint` and `test` make targets to pass 9. Get the `e2e-*` make targets to pass. -# 6. Test CI +## 6. Test CI 1. Undo your changes to go.mod and go.sum that point to your local checkout: ``` @@ -274,7 +282,7 @@ two kube versions. pointing to your fork of Kubernetes. This is expected, so don't worry. Your job at this point is to get all the other CI jobs to pass. -# 7. Get it Merged! +## 7. Get it Merged! 1. Once CI is passing (except for the `deps` job, as expected), we're ready to merge! 2. Coordinate with another project member - show them the test results, then get them to approve your rebase PR in @@ -290,6 +298,6 @@ two kube versions. any more), or drop the `UNDO` commit and replace it with this one. 6. Check on CI. Hopefully everything is green. If not, keep iterating on it. -# 7. Update the Default Branch in kcp-dev/kubernetes +## 8. Update the Default Branch in kcp-dev/kubernetes 1. Change it to your new rebase branch, e.g. `kcp-1.31` diff --git a/docs/content/setup/kubectl-plugin.md b/docs/content/setup/kubectl-plugin.md index 965148b194d..c843299a302 100644 --- a/docs/content/setup/kubectl-plugin.md +++ b/docs/content/setup/kubectl-plugin.md @@ -22,9 +22,9 @@ The plugins will be [automatically discovered by your current `kubectl` binary]( ```sh $ kubectl kcp -KCP is the easiest way to manage Kubernetes applications against one or more clusters, by giving you a personal control plane that schedules your workloads onto one or many clusters, and making it simple to pick up and move. Advanced use cases including spreading your apps across clusters for resiliency, scheduling batch workloads onto clusters with free capacity, and enabling collaboration for individual teams without having access to the underlying clusters. +kcp is the easiest way to manage Kubernetes applications against one or more clusters, by giving you a personal control plane that schedules your workloads onto one or many clusters, and making it simple to pick up and move. Advanced use cases including spreading your apps across clusters for resiliency, scheduling batch workloads onto clusters with free capacity, and enabling collaboration for individual teams without having access to the underlying clusters. -This command provides KCP specific sub-command for kubectl. +This command provides kcp-specific sub-command for kubectl. 
Usage: kcp [command] @@ -35,7 +35,7 @@ Available Commands: completion Generate the autocompletion script for the specified shell crd CRD related operations help Help about any command - workspace Manages KCP workspaces + workspace Manages kcp workspaces Flags: --add_dir_header If true, adds the file directory to the header of the log messages diff --git a/hack/build-image-docker.sh b/hack/build-image-docker.sh index 49d11dae09e..87905bef213 100755 --- a/hack/build-image-docker.sh +++ b/hack/build-image-docker.sh @@ -14,20 +14,20 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Build container images for KCP using Docker +# Build container images for kcp using Docker # # This script builds container images using Docker (with or without buildx). # # Usage examples: # # Build locally with default settings (uses current git commit hash) # ./hack/build-image-docker.sh -# +# # # Build locally with custom repository name # REPOSITORY=my-registry/kcp ./hack/build-image-docker.sh -# +# # # Build locally without pushing (dry run) # DRY_RUN=1 ./hack/build-image-docker.sh -# +# # # Build for specific architectures only # ARCHITECTURES="amd64" ./hack/build-image-docker.sh # @@ -38,7 +38,7 @@ # KCP_GHCR_USERNAME/KCP_GHCR_PASSWORD - Registry credentials for pushing # # Build tool support: -# - docker + buildx: Multi-arch support with intelligent platform handling +# - docker + buildx: Multi-arch support with intelligent platform handling # - docker only: Single architecture fallback set -euo pipefail @@ -105,7 +105,7 @@ echo "Building container image $image ..." # Function to build images with docker buildx build_with_docker_buildx() { echo "Building multi-arch image $image ..." - + # Create platforms string for buildx platforms="" for arch in $architectures; do @@ -115,7 +115,7 @@ build_with_docker_buildx() { platforms="linux/$arch" fi done - + # For push builds, use multi-platform; for local builds, build per arch if [ -z "${DRY_RUN:-}" ]; then # Building for push - use multi-platform with --push @@ -151,7 +151,7 @@ build_with_docker() { # Use only the first architecture for regular docker arch=$(echo $architectures | cut -d' ' -f1) fullTag="$image-$arch" - + echo "Building single-arch image $fullTag (docker without buildx) ..." docker build \ --file Dockerfile \ @@ -160,7 +160,7 @@ build_with_docker() { --build-arg "TARGETOS=linux" \ --build-arg "TARGETARCH=$arch" \ . - + # Tag it as the main image too docker tag "$fullTag" "$image" } @@ -191,7 +191,7 @@ fi # push images, except in dry runs if [ -z "${DRY_RUN:-}" ]; then echo "Logging into GHCR ..." - + if [ "$DOCKER_BUILDX" = true ]; then # buildx with --push already pushed during build echo "Images already pushed during buildx build" @@ -202,7 +202,7 @@ if [ -z "${DRY_RUN:-}" ]; then else echo "Skipping login (GHCR_USERNAME/GHCR_PASSWORD not provided)" fi - + echo "Pushing images ..." docker push "$image" diff --git a/pkg/admission/apiresourceschema/validation.go b/pkg/admission/apiresourceschema/validation.go index 1f4ee01b1e6..fc0fbe81fb8 100644 --- a/pkg/admission/apiresourceschema/validation.go +++ b/pkg/admission/apiresourceschema/validation.go @@ -101,7 +101,7 @@ func ValidateAPIResourceSchemaSpec(ctx context.Context, spec *apisv1alpha1.APIRe allErrs := field.ErrorList{} // HACK: Relax naming constraints when registering legacy schema resources through CRDs - // for the KCP scenario + // for the kcp scenario if spec.Group == "" { // pass. 
This is the core group } else if spec.Group == "core" { diff --git a/pkg/admission/crdnooverlappinggvr/crdnooverlappinggvr_admission.go b/pkg/admission/crdnooverlappinggvr/crdnooverlappinggvr_admission.go index a77271d3080..a67a4d9d22a 100644 --- a/pkg/admission/crdnooverlappinggvr/crdnooverlappinggvr_admission.go +++ b/pkg/admission/crdnooverlappinggvr/crdnooverlappinggvr_admission.go @@ -89,7 +89,7 @@ func (p *crdNoOverlappingGVRAdmission) ValidateInitialization() error { return fmt.Errorf(PluginName + " plugin needs an LogicalCluster lister") } if p.updateLogicalCluster == nil { - return fmt.Errorf(PluginName + " plugin needs a KCP cluster client") + return fmt.Errorf(PluginName + " plugin needs a kcp cluster client") } return nil } diff --git a/pkg/admission/plugins.go b/pkg/admission/plugins.go index 80a66b12cb3..fce07392073 100644 --- a/pkg/admission/plugins.go +++ b/pkg/admission/plugins.go @@ -144,7 +144,7 @@ var defaultOnPluginsInKcp = sets.New[string]( certsigning.PluginName, // CertificateSigning certsubjectrestriction.PluginName, // CertificateSubjectRestriction - // KCP + // kcp workspace.PluginName, logicalclusterfinalizer.PluginName, shard.PluginName, diff --git a/pkg/cache/server/bootstrap/bootstrap.go b/pkg/cache/server/bootstrap/bootstrap.go index 012bfcfe20d..2bf820392bd 100644 --- a/pkg/cache/server/bootstrap/bootstrap.go +++ b/pkg/cache/server/bootstrap/bootstrap.go @@ -35,7 +35,7 @@ import ( ) // SystemCRDLogicalCluster holds a logical cluster name under which we store system-related CRDs. -// We use the same name as the KCP for symmetry. +// We use the same name as the kcp for symmetry. var SystemCRDLogicalCluster = logicalcluster.Name("system:system-crds") // SystemCacheServerShard holds a default shard name. diff --git a/pkg/crdpuller/discovery.go b/pkg/crdpuller/discovery.go index 6e980af795c..0857323df1c 100644 --- a/pkg/crdpuller/discovery.go +++ b/pkg/crdpuller/discovery.go @@ -16,7 +16,7 @@ limitations under the License. package crdpuller -// We import the generic control plane scheme to provide access to the KCP control plane scheme, +// We import the generic control plane scheme to provide access to the kcp control plane scheme, // that gathers a minimal set of Kubernetes APIs without any workload-related APIs. // // We don't want to import, from physical clusters; resources that are already part of the control @@ -156,14 +156,14 @@ func (sp *schemaPuller) PullCRDs(ctx context.Context, resourceNames ...string) ( logger := logger.WithValues("resource", apiResource.Name) if kcpscheme.Scheme.IsGroupRegistered(gv.Group) && !kcpscheme.Scheme.IsVersionRegistered(gv) { - logger.Info("ignoring an apiVersion since it is part of the core KCP resources, but not compatible with KCP version") + logger.Info("ignoring an apiVersion since it is part of the core kcp resources, but not compatible with kcp version") continue } gvk := gv.WithKind(apiResource.Kind) logger = logger.WithValues("kind", apiResource.Kind) if (kcpscheme.Scheme.Recognizes(gvk) || extensionsapiserver.Scheme.Recognizes(gvk)) && !resourcesToPull.Has(groupResource.String()) { - logger.Info("ignoring a resource since it is part of the core KCP resources") + logger.Info("ignoring a resource since it is part of the core kcp resources") continue } @@ -305,9 +305,9 @@ func (sp *schemaPuller) PullCRDs(ctx context.Context, resourceNames ...string) ( // in an `api-approved.kubernetes.io` annotation. 
// Without this annotation, a CRD under the *.k8s.io or *.kubernetes.io domains is rejected by the API server // - // Of course here we're simply adding already-known resources of existing physical clusters as CRDs in KCP. + // Of course here we're simply adding already-known resources of existing physical clusters as CRDs in kcp. // But to please this Kubernetes approval requirement, let's add the required annotation in imported CRDs - // with one of the KCP PRs that hacked Kubernetes CRD support for KCP. + // with one of the kcp PRs that hacked Kubernetes CRD support for kcp. if apihelpers.IsProtectedCommunityGroup(gv.Group) { value := "https://github.com/kcp-dev/kubernetes/pull/4" if crd != nil { diff --git a/pkg/crdpuller/doc.go b/pkg/crdpuller/doc.go index 229bdee5c4a..2d5961f8b21 100644 --- a/pkg/crdpuller/doc.go +++ b/pkg/crdpuller/doc.go @@ -16,7 +16,7 @@ limitations under the License. // crdpuller package provides a library to pull API resource definitions // from existing Kubernetes clusters as Custom Resource Definitions that can then be applied -// to a KCP instance. +// to a kcp instance. // // - If a CRD already exists for a given resource in the targeted cluster, then it is reused. // - If no CRD exist in the targeted cluster, then the CRD OpenAPI v3 schema is built diff --git a/pkg/openapi/zz_generated.openapi.go b/pkg/openapi/zz_generated.openapi.go index 4c64c19c1e1..481ec0baa54 100644 --- a/pkg/openapi/zz_generated.openapi.go +++ b/pkg/openapi/zz_generated.openapi.go @@ -1406,7 +1406,7 @@ func schema_sdk_apis_apis_v1alpha1_AcceptablePermissionClaim(ref common.Referenc }, "identityHash": { SchemaProps: spec.SchemaProps{ - Description: "This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. Note that one must look this up for a particular KCP instance.", + Description: "This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. Note that one must look this up for a particular kcp instance.", Type: []string{"string"}, Format: "", }, @@ -1731,7 +1731,7 @@ func schema_sdk_apis_apis_v1alpha1_PermissionClaim(ref common.ReferenceCallback) }, "identityHash": { SchemaProps: spec.SchemaProps{ - Description: "This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. Note that one must look this up for a particular KCP instance.", + Description: "This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. Note that one must look this up for a particular kcp instance.", Type: []string{"string"}, Format: "", }, @@ -2376,7 +2376,7 @@ func schema_sdk_apis_apis_v1alpha2_AcceptablePermissionClaim(ref common.Referenc }, "identityHash": { SchemaProps: spec.SchemaProps{ - Description: "This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. Note that one must look this up for a particular KCP instance.", + Description: "This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. 
It will be empty for core types. Note that one must look this up for a particular kcp instance.", Type: []string{"string"}, Format: "", }, @@ -2676,7 +2676,7 @@ func schema_sdk_apis_apis_v1alpha2_PermissionClaim(ref common.ReferenceCallback) }, "identityHash": { SchemaProps: spec.SchemaProps{ - Description: "This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. Note that one must look this up for a particular KCP instance.", + Description: "This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. Note that one must look this up for a particular kcp instance.", Type: []string{"string"}, Format: "", }, @@ -2930,7 +2930,7 @@ func schema_sdk_apis_apis_v1alpha2_ScopedPermissionClaim(ref common.ReferenceCal }, "identityHash": { SchemaProps: spec.SchemaProps{ - Description: "This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. Note that one must look this up for a particular KCP instance.", + Description: "This is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types. Note that one must look this up for a particular kcp instance.", Type: []string{"string"}, Format: "", }, @@ -3987,7 +3987,7 @@ func schema_sdk_apis_core_v1alpha1_ShardSpec(ref common.ReferenceCallback) commo Properties: map[string]spec.Schema{ "baseURL": { SchemaProps: spec.SchemaProps{ - Description: "baseURL is the address of the KCP shard for direct connections, e.g. by some front-proxy doing the fan-out to the shards.", + Description: "baseURL is the address of the kcp shard for direct connections, e.g. by some front-proxy doing the fan-out to the shards.", Default: "", Type: []string{"string"}, Format: "", diff --git a/pkg/server/aggregatingcrdversiondiscovery/server.go b/pkg/server/aggregatingcrdversiondiscovery/server.go index 2e1dd8bb09d..9e394a2462f 100644 --- a/pkg/server/aggregatingcrdversiondiscovery/server.go +++ b/pkg/server/aggregatingcrdversiondiscovery/server.go @@ -177,7 +177,7 @@ func apiResourcesForGroupVersion(requestedGroup, requestedVersion string, crds [ continue } - // HACK: support the case when we add core resources through CRDs (KCP scenario) + // HACK: support the case when we add core resources through CRDs (kcp scenario) groupVersion := crd.Spec.Group + "/" + v.Name if crd.Spec.Group == "" { groupVersion = v.Name diff --git a/pkg/server/bootstrap/identity.go b/pkg/server/bootstrap/identity.go index 9abb2e9efa2..1f231f3368f 100644 --- a/pkg/server/bootstrap/identity.go +++ b/pkg/server/bootstrap/identity.go @@ -242,7 +242,7 @@ func (rt roundTripperFunc) WrappedRoundTripper() http.RoundTripper { return rt.delegate } -// injectKcpIdentities injects the KCP identities into the request URLs. +// injectKcpIdentities injects the kcp identities into the request URLs. 
func injectKcpIdentities(ids *identities) func(rt http.RoundTripper) http.RoundTripper { return func(rt http.RoundTripper) http.RoundTripper { return roundTripperFunc{ diff --git a/pkg/server/filters/filters_test.go b/pkg/server/filters/filters_test.go index 8d07fe6eb10..d0eb652c4f8 100644 --- a/pkg/server/filters/filters_test.go +++ b/pkg/server/filters/filters_test.go @@ -34,7 +34,7 @@ import ( var ( // reClusterName is a regular expression for cluster names. It is based on // modified RFC 1123. It allows for 63 characters for single name and includes - // KCP specific ':' separator for workspace nesting. We are not re-using k8s + // kcp specific ':' separator for workspace nesting. We are not re-using k8s // validation regex because its purpose is for single name validation. reClusterName = regexp.MustCompile(`^([a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?:)*[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$`) ) diff --git a/pkg/server/home_workspaces.go b/pkg/server/home_workspaces.go index 83134f040ab..1b157bf8c64 100644 --- a/pkg/server/home_workspaces.go +++ b/pkg/server/home_workspaces.go @@ -64,7 +64,7 @@ func init() { _ = tenancyv1alpha1.AddToScheme(homeWorkspaceScheme) } -// WithHomeWorkspaces implements an HTTP handler, in the KCP server, which: +// WithHomeWorkspaces implements an HTTP handler, in the kcp server, which: // // - supports a special 'kubectl get workspace ~' request which returns either // the old bucket-style workspace if it exists (= a LogicalCluster can be found) diff --git a/pkg/server/options/authentication.go b/pkg/server/options/authentication.go index 2ae6037647a..96a83b215a6 100644 --- a/pkg/server/options/authentication.go +++ b/pkg/server/options/authentication.go @@ -163,7 +163,7 @@ func (s *AdminAuthentication) WriteKubeConfig(config genericapiserver.CompletedC if shardAdminAuth := existingExternalKubeConfig.AuthInfos[shardAdminUserName]; shardAdminAuth != nil { kubeConfigTokenHash := sha256.Sum256([]byte(shardAdminAuth.Token)) if !bytes.Equal(kubeConfigTokenHash[:], shardAdminTokenHash) { - return fmt.Errorf("admin token in file %q is not valid anymore. Remove file %q and restart KCP", s.KubeConfigPath, s.ShardAdminTokenHashFilePath) + return fmt.Errorf("admin token in file %q is not valid anymore. Remove file %q and restart kcp", s.KubeConfigPath, s.ShardAdminTokenHashFilePath) } shardAdminToken = shardAdminAuth.Token diff --git a/pkg/server/options/options.go b/pkg/server/options/options.go index 17fd322c3cb..85882fdcd9c 100644 --- a/pkg/server/options/options.go +++ b/pkg/server/options/options.go @@ -147,14 +147,14 @@ func (o *Options) AddFlags(fss *cliflag.NamedFlagSets) { etcdServers.Usage += " By default an embedded etcd server is started." 
o.EmbeddedEtcd.AddFlags(fss.FlagSet("Embedded etcd")) - o.Controllers.AddFlags(fss.FlagSet("KCP Controllers")) - o.Authorization.AddFlags(fss.FlagSet("KCP Authorization")) - o.AdminAuthentication.AddFlags(fss.FlagSet("KCP Authentication")) - o.Virtual.AddFlags(fss.FlagSet("KCP Virtual Workspaces")) - o.HomeWorkspaces.AddFlags(fss.FlagSet("KCP Home Workspaces")) - o.Cache.AddFlags(fss.FlagSet("KCP Cache Server")) - - fs := fss.FlagSet("KCP") + o.Controllers.AddFlags(fss.FlagSet("kcp Controllers")) + o.Authorization.AddFlags(fss.FlagSet("kcp Authorization")) + o.AdminAuthentication.AddFlags(fss.FlagSet("kcp Authentication")) + o.Virtual.AddFlags(fss.FlagSet("kcp Virtual Workspaces")) + o.HomeWorkspaces.AddFlags(fss.FlagSet("kcp Home Workspaces")) + o.Cache.AddFlags(fss.FlagSet("kcp Cache Server")) + + fs := fss.FlagSet("kcp") fs.StringVar(&o.Extra.ProfilerAddress, "profiler-address", o.Extra.ProfilerAddress, "[Address]:port to bind the profiler to") fs.StringVar(&o.Extra.ShardKubeconfigFile, "shard-kubeconfig-file", o.Extra.ShardKubeconfigFile, "Kubeconfig holding admin(!) credentials to peer kcp shards.") fs.StringVar(&o.Extra.RootShardKubeconfigFile, "root-shard-kubeconfig-file", o.Extra.RootShardKubeconfigFile, "Kubeconfig holding admin(!) credentials to the root kcp shard.") diff --git a/pkg/virtual/apiexport/schemas/builtin/builtin.go b/pkg/virtual/apiexport/schemas/builtin/builtin.go index 014d79703fe..4c6fae595bd 100644 --- a/pkg/virtual/apiexport/schemas/builtin/builtin.go +++ b/pkg/virtual/apiexport/schemas/builtin/builtin.go @@ -48,7 +48,7 @@ func init() { schemes := []*runtime.Scheme{kcpscheme.Scheme} openAPIDefinitionsGetters := []common.GetOpenAPIDefinitions{ generatedopenapi.GetOpenAPIDefinitions, // core types - generatedkcpopenapi.GetOpenAPIDefinitions, // KCP core types for LogicalCluster + generatedkcpopenapi.GetOpenAPIDefinitions, // kcp core types for LogicalCluster } apis, err := internalapis.CreateAPIResourceSchemas(schemes, openAPIDefinitionsGetters, BuiltInAPIs...) diff --git a/pkg/virtual/framework/doc.go b/pkg/virtual/framework/doc.go index d17ab5728bc..94e8f0c9be2 100644 --- a/pkg/virtual/framework/doc.go +++ b/pkg/virtual/framework/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // Package framework provides a the required interfaces, structs and generic implementation -// that allow creating KCP virtual workspaces with a minimal amount of work. +// that allow creating kcp virtual workspaces with a minimal amount of work. // // To create virtual workspaces you have to: // diff --git a/pkg/virtual/framework/fixedgvs/apiserver/apiserver.go b/pkg/virtual/framework/fixedgvs/apiserver/apiserver.go index ba5678261be..a3a2ba63ecd 100644 --- a/pkg/virtual/framework/fixedgvs/apiserver/apiserver.go +++ b/pkg/virtual/framework/fixedgvs/apiserver/apiserver.go @@ -87,7 +87,7 @@ func (c completedConfig) New(virtualWorkspaceName string, groupManager discovery return } if vwName == virtualWorkspaceName { - // In the current KCP Kubernetes feature branch, some components (e.g.Discovery index) + // In the current kcp Kubernetes feature branch, some components (e.g.Discovery index) // don't support calls without a cluster set in the request context. // That's why we add a dummy cluster name here. 
// However we don't add it for the OpenAPI v2 endpoint since, on the contrary, diff --git a/pkg/virtual/framework/fixedgvs/register.go b/pkg/virtual/framework/fixedgvs/register.go index 1e5429773d5..a44039921bc 100644 --- a/pkg/virtual/framework/fixedgvs/register.go +++ b/pkg/virtual/framework/fixedgvs/register.go @@ -63,7 +63,7 @@ func (vw *FixedGroupVersionsVirtualWorkspace) Register(vwName string, rootAPISer if groupVersionAPISet.OpenAPIDefinitions != nil { cfg.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(groupVersionAPISet.OpenAPIDefinitions, openapi.NewDefinitionNamer(scheme)) - cfg.GenericConfig.OpenAPIConfig.Info.Title = "KCP Virtual Workspace for " + vwName + cfg.GenericConfig.OpenAPIConfig.Info.Title = "kcp Virtual Workspace for " + vwName cfg.GenericConfig.SkipOpenAPIInstallation = true } diff --git a/pkg/virtual/framework/forwardingregistry/doc.go b/pkg/virtual/framework/forwardingregistry/doc.go index 561c5981926..d93c518f78f 100644 --- a/pkg/virtual/framework/forwardingregistry/doc.go +++ b/pkg/virtual/framework/forwardingregistry/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // Package forwardingregistry provides a CRD-like REST storage implementation that can dynamically serve resources based -// on a given OpenAPI schema, and forward the requests to a KCP workspace-aware delegate client. +// on a given OpenAPI schema, and forward the requests to a kcp workspace-aware delegate client. // // It reuses as much as possible from k8s.io/apiextensions-apiserver/pkg/registry/customresource, but // replaces the underlying Store, using forwarding rather than access to etcd via genericregistry.Store. diff --git a/staging/src/github.com/kcp-dev/apimachinery/README.md b/staging/src/github.com/kcp-dev/apimachinery/README.md index 2ded71633b3..e4c21013a2e 100644 --- a/staging/src/github.com/kcp-dev/apimachinery/README.md +++ b/staging/src/github.com/kcp-dev/apimachinery/README.md @@ -1,10 +1,10 @@ -> ⚠️ **This is an automatically published staged repository for kcp**. -> Contributions, including issues and pull requests, should be made to the main kcp repository: [https://github.com/kcp-dev/kcp](https://github.com/kcp-dev/kcp). -> This repository is read-only for importing, and not used for direct contributions. +> ⚠️ **This is an automatically published staged repository for kcp**. +> Contributions, including issues and pull requests, should be made to the main kcp repository: [https://github.com/kcp-dev/kcp](https://github.com/kcp-dev/kcp). +> This repository is read-only for importing, and not used for direct contributions. > See the [monorepo structure document](https://docs.kcp.io/kcp/main/contributing/monorepo/) for more details. # apimachinery API machinery library for logical-cluster-aware code -For contributions, issues, or general discussion, please see the main KCP repository https://github.com/kcp-dev/kcp. +For contributions, issues, or general discussion, please see the main kcp repository https://github.com/kcp-dev/kcp. 
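The apimachinery staging module's tagline, "logical-cluster-aware code", is most visible in its cache key helpers, which the `shared_informer.go` changes below wire into informers in place of the upstream key functions. Here is a minimal sketch of the difference, assuming the `github.com/kcp-dev/apimachinery/v2/pkg/cache` import path, its `MetaClusterNamespaceKeyFunc`/`SplitMetaClusterNamespaceKey` helpers, and the `kcp.io/cluster` annotation convention for carrying the logical cluster on objects:

```go
package main

import (
	"fmt"

	kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "settings",
			Namespace: "default",
			// The logical cluster an object belongs to travels in this
			// annotation rather than in a separate field.
			Annotations: map[string]string{"kcp.io/cluster": "root:org:team"},
		},
	}

	// Upstream informers key objects as "namespace/name"; the cluster-aware
	// key function prefixes the logical cluster, e.g.
	// "root:org:team|default/settings", so one informer can serve all
	// workspaces without key collisions.
	key, err := kcpcache.MetaClusterNamespaceKeyFunc(cm)
	if err != nil {
		panic(err)
	}
	fmt.Println(key)

	// The matching split helper recovers all three components.
	cluster, namespace, name, err := kcpcache.SplitMetaClusterNamespaceKey(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(cluster, namespace, name)
}
```

This is why the shared informer changes below swap the key function passed to `NewIndexer` and `NewDeltaFIFOWithOptions`: every downstream index and work queue has to agree on the cluster-aware key format.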
diff --git a/staging/src/github.com/kcp-dev/apimachinery/third_party/informers/shared_informer.go b/staging/src/github.com/kcp-dev/apimachinery/third_party/informers/shared_informer.go index bbc61c16f2b..e93fc7d9404 100644 --- a/staging/src/github.com/kcp-dev/apimachinery/third_party/informers/shared_informer.go +++ b/staging/src/github.com/kcp-dev/apimachinery/third_party/informers/shared_informer.go @@ -88,7 +88,7 @@ func NewSharedIndexInformerWithOptions(lw cache.ListerWatcher, exampleObject run realClock := &clock.RealClock{} return &sharedIndexInformer{ - // KCP modification: We changed the keyfunction passed to NewIndexer + // kcp modification: We changed the keyfunction passed to NewIndexer indexer: cache.NewIndexer(kcpcache.MetaClusterNamespaceKeyFunc, options.Indexers), processor: &sharedProcessor{clock: realClock}, listerWatcher: lw, @@ -266,7 +266,7 @@ func (s *sharedIndexInformer) RunWithContext(ctx context.Context) { KnownObjects: s.indexer, EmitDeltaTypeReplaced: true, Transformer: s.transform, - // KCP modification: We changed the keyfunction passed to NewDeltaFIFOWithOptions + // kcp modification: We changed the keyfunction passed to NewDeltaFIFOWithOptions KeyFunction: kcpcache.MetaClusterNamespaceKeyFunc, }) } @@ -284,7 +284,7 @@ func (s *sharedIndexInformer) RunWithContext(ctx context.Context) { } s.controller = cache.New(cfg) - // KCP modification: we removed setting the s.controller.clock here as it's an unexported field we can't access + // kcp modification: we removed setting the s.controller.clock here as it's an unexported field we can't access s.started = true }() @@ -836,7 +836,7 @@ func (p *processorListener) setResyncPeriod(resyncPeriod time.Duration) { // Multiplexes updates in the form of a list of Deltas into a Store, and informs // a given handler of events OnUpdate, OnAdd, OnDelete // taken from k8s.io/client-go/tools/cache/controller.go -// KCP modification: we added this function from controller.go +// kcp modification: we added this function from controller.go func processDeltas( // Object which receives event notifications from the given deltas handler cache.ResourceEventHandler, diff --git a/staging/src/github.com/kcp-dev/code-generator/README.md b/staging/src/github.com/kcp-dev/code-generator/README.md index ccdabd9e399..a1bc673f89a 100644 --- a/staging/src/github.com/kcp-dev/code-generator/README.md +++ b/staging/src/github.com/kcp-dev/code-generator/README.md @@ -1,9 +1,9 @@ -> ⚠️ **This is an automatically published staged repository for kcp**. -> Contributions, including issues and pull requests, should be made to the main kcp repository: [https://github.com/kcp-dev/kcp](https://github.com/kcp-dev/kcp). -> This repository is read-only for importing, and not used for direct contributions. +> ⚠️ **This is an automatically published staged repository for kcp**. +> Contributions, including issues and pull requests, should be made to the main kcp repository: [https://github.com/kcp-dev/kcp](https://github.com/kcp-dev/kcp). +> This repository is read-only for importing, and not used for direct contributions. > See the [monorepo structure document](https://docs.kcp.io/kcp/main/contributing/monorepo/) for more details. -## Code Generators for KCP-aware clients, informers and listers +## Code Generators for kcp-aware clients, informers and listers This repository contains code generation tools analogous to the Kubernetes code-generator. 
It contains:
diff --git a/staging/src/github.com/kcp-dev/sdk/apis/apis/v1alpha1/types_apiexport.go b/staging/src/github.com/kcp-dev/sdk/apis/apis/v1alpha1/types_apiexport.go
index 7bfa572340c..fd5f42bb35b 100644
--- a/staging/src/github.com/kcp-dev/sdk/apis/apis/v1alpha1/types_apiexport.go
+++ b/staging/src/github.com/kcp-dev/sdk/apis/apis/v1alpha1/types_apiexport.go
@@ -222,7 +222,7 @@ type PermissionClaim struct {
 	// This is the identity for a given APIExport that the APIResourceSchema belongs to.
 	// The hash can be found on APIExport and APIResourceSchema's status.
 	// It will be empty for core types.
-	// Note that one must look this up for a particular KCP instance.
+	// Note that one must look this up for a particular kcp instance.
 	// +optional
 	IdentityHash string `json:"identityHash,omitempty"`
 }
diff --git a/staging/src/github.com/kcp-dev/sdk/apis/apis/v1alpha2/types_apiexport.go b/staging/src/github.com/kcp-dev/sdk/apis/apis/v1alpha2/types_apiexport.go
index 9ae42b6efa8..da42e3eb322 100644
--- a/staging/src/github.com/kcp-dev/sdk/apis/apis/v1alpha2/types_apiexport.go
+++ b/staging/src/github.com/kcp-dev/sdk/apis/apis/v1alpha2/types_apiexport.go
@@ -248,7 +248,7 @@ type PermissionClaim struct {
 	// This is the identity for a given APIExport that the APIResourceSchema belongs to.
 	// The hash can be found on APIExport and APIResourceSchema's status.
 	// It will be empty for core types.
-	// Note that one must look this up for a particular KCP instance.
+	// Note that one must look this up for a particular kcp instance.
 	//
 	// +kubebuilder:default:=""
 	// +optional
diff --git a/staging/src/github.com/kcp-dev/sdk/apis/core/v1alpha1/shard_types.go b/staging/src/github.com/kcp-dev/sdk/apis/core/v1alpha1/shard_types.go
index 9a461ab9483..1eda62fc822 100644
--- a/staging/src/github.com/kcp-dev/sdk/apis/core/v1alpha1/shard_types.go
+++ b/staging/src/github.com/kcp-dev/sdk/apis/core/v1alpha1/shard_types.go
@@ -64,7 +64,7 @@ var _ conditions.Setter = &Shard{}
 
 // ShardSpec holds the desired state of the Shard.
 type ShardSpec struct {
-	// baseURL is the address of the KCP shard for direct connections, e.g. by some
+	// baseURL is the address of the kcp shard for direct connections, e.g. by some
 	// front-proxy doing the fan-out to the shards.
 	//
 	// +required
diff --git a/staging/src/github.com/kcp-dev/sdk/testing/doc.go b/staging/src/github.com/kcp-dev/sdk/testing/doc.go
index 4ed15f71c21..f63f3ffd72c 100644
--- a/staging/src/github.com/kcp-dev/sdk/testing/doc.go
+++ b/staging/src/github.com/kcp-dev/sdk/testing/doc.go
@@ -14,6 +14,6 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Package testing provides utilities for testing of and against KCP. This code
-// to be used outside of the KCP repository is experimental and subject to change.
+// Package testing provides utilities for testing of and against kcp. Use of this
+// code outside of the kcp repository is experimental and subject to change.
package testing diff --git a/staging/src/github.com/kcp-dev/sdk/testing/server/fixture.go b/staging/src/github.com/kcp-dev/sdk/testing/server/fixture.go index af02ba5b4dc..b4bdd35d00c 100644 --- a/staging/src/github.com/kcp-dev/sdk/testing/server/fixture.go +++ b/staging/src/github.com/kcp-dev/sdk/testing/server/fixture.go @@ -302,7 +302,7 @@ func runExternal(ctx context.Context, t TestingT, cfg Config) (<-chan struct{}, return nil, fmt.Errorf("failed to build kcp args: %w", err) } - workdir, commandLine := StartKcpCommand("KCP") + workdir, commandLine := StartKcpCommand("kcp") commandLine = append(commandLine, args...) t.Logf("running: %v", strings.Join(commandLine, " ")) diff --git a/test/e2e/apibinding/cross_workspace_auth_test.go b/test/e2e/apibinding/cross_workspace_auth_test.go index 647eb1835b4..215ed136c03 100644 --- a/test/e2e/apibinding/cross_workspace_auth_test.go +++ b/test/e2e/apibinding/cross_workspace_auth_test.go @@ -228,7 +228,7 @@ func TestWorkspaceAuth(t *testing.T) { // // This test is functionally equivalent to TestWorkspaceAuth, but uses // a Service Account instead of a "real" user authenticated via OIDC. -// Background is that in KCP Service Accounts are sometimes handled +// Background is that in kcp Service Accounts are sometimes handled // differently to users and go through a different code path when // effective users are computed. func TestServiceAccount(t *testing.T) { @@ -393,7 +393,7 @@ func TestServiceAccount(t *testing.T) { // TestScopedUser tests that a user restricted to the consumer workspace // by scopes can bind an APIExport from the provider workspace. -// 1. The user is a global KCP user but is restricted by scopes to +// 1. The user is a global kcp user but is restricted by scopes to // the consumer workspace // 2. The user can manage APIBindings in the consumer workspace // 3. The provider workspace allows the binding of APIExports for users @@ -582,7 +582,7 @@ func TestScopedUser(t *testing.T) { // TestUserWithWarrants tests that a user with a warrant for another // user in the workspace can bind an APIExport from the provider // workspace. -// 1. The user is a global KCP user but can act as another user +// 1. The user is a global kcp user but can act as another user // in consumer workspace through a warrant. // 2. The user can manage APIBindings in the consumer workspace // 3. The provider workspace allows the binding of APIExports for users diff --git a/test/e2e/reconciler/cache/replication_test.go b/test/e2e/reconciler/cache/replication_test.go index 6b04c63be9a..ca6932b60b5 100644 --- a/test/e2e/reconciler/cache/replication_test.go +++ b/test/e2e/reconciler/cache/replication_test.go @@ -712,7 +712,7 @@ func (b *replicateResourceScenario) verifyResourceReplicationHelper(ctx context. unstructured.RemoveNestedField(cachedResource.Object, "metadata", "resourceVersion") // TODO(davidfestal): find out why the generation is not equal, specially for rbacv1. - // Is it a characteristic of all built-in KCP resources (which are not backed by CRDs) ? + // Is it a characteristic of all built-in kcp resources (which are not backed by CRDs) ? 
// Issue opened: https://github.com/kcp-dev/kcp/issues/2935 if b.gvr.Group == rbacv1.SchemeGroupVersion.Group { unstructured.RemoveNestedField(originalResource.Object, "metadata", "generation") diff --git a/test/integration/framework/leak_test.go b/test/integration/framework/leak_test.go index e796a3c251c..652dfdeaf2d 100644 --- a/test/integration/framework/leak_test.go +++ b/test/integration/framework/leak_test.go @@ -24,7 +24,7 @@ import ( ) var ( - // knownGoroutineLeaks are leaks from just running and stopping KCP + // knownGoroutineLeaks are leaks from just running and stopping kcp // collected and run through: // grep 'on top of the stack' output.log | cut -d, -f2- | cut -d' ' -f3 | sort | uniq knownGoroutineLeaks = []goleak.Option{ diff --git a/test/integration/framework/server.go b/test/integration/framework/server.go index 7c58c7fda59..f3ae4a1d09e 100644 --- a/test/integration/framework/server.go +++ b/test/integration/framework/server.go @@ -282,7 +282,7 @@ func (s *InProcessServer) CADirectory() string { return s.Config.DataDir } -// StartTestServer starts a KCP server for testing purposes. +// StartTestServer starts a kcp server for testing purposes. func StartTestServer(t kcptestingserver.TestingT, opts ...kcptestingserver.Option) (*InProcessServer, kcpclientset.ClusterInterface, kcpkubernetesclientset.ClusterInterface) { t.Helper() diff --git a/test/integration/workspace/leak_test.go b/test/integration/workspace/leak_test.go index 986dee6f492..2a5577cfe6a 100644 --- a/test/integration/workspace/leak_test.go +++ b/test/integration/workspace/leak_test.go @@ -90,7 +90,7 @@ func createAndDeleteWs(ctx context.Context, t *testing.T, kcpClient kcpclientset } var ( - // These are goroutines that can pop up randomly during KCP + // These are goroutines that can pop up randomly during kcp // operations and aren't indicative of a leak when deleting // a workspace. randomGoroutineSources = []goleak.Option{