.github/workflows/images.yaml (2 changes: 1 addition & 1 deletion)

@@ -35,7 +35,7 @@ jobs:
       # prepare .ko.yaml to inject build settings into all images
       entrypoints=(
         ./cmd/sharder
-        ./cmd/shard
+        ./cmd/checksum-controller
         ./webhosting-operator/cmd/experiment
         ./webhosting-operator/cmd/webhosting-operator
       )
.run/shard (kind).run.xml (8 changes: 4 additions & 4 deletions)

@@ -1,15 +1,15 @@
 <component name="ProjectRunConfigurationManager">
-  <configuration default="false" name="shard (kind)" type="GoApplicationRunConfiguration" factoryName="Go Application">
+  <configuration default="false" name="checksum-controller (kind)" type="GoApplicationRunConfiguration" factoryName="Go Application">
     <module name="kubernetes-controller-sharding" />
     <working_directory value="$PROJECT_DIR$" />
-    <parameters value="--zap-log-level=debug --shard=shard-host --lease-namespace=default" />
+    <parameters value="--zap-log-level=debug --shard-name=checksum-controller-host --lease-namespace=default" />
     <envs>
       <env name="KUBECONFIG" value="$PROJECT_DIR$/hack/kind_kubeconfig.yaml" />
     </envs>
     <kind value="PACKAGE" />
-    <package value="github.com/timebertt/kubernetes-controller-sharding/cmd/shard" />
+    <package value="github.com/timebertt/kubernetes-controller-sharding/cmd/checksum-controller" />
     <directory value="$PROJECT_DIR$" />
     <filePath value="$PROJECT_DIR$/webhosting-operator/cmd/experiment/main.go" />
     <method v="2" />
   </configuration>
-</component>
+</component>
Makefile (16 changes: 8 additions & 8 deletions)

@@ -4,7 +4,7 @@ PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
 TAG ?= latest
 GHCR_REPO ?= ghcr.io/timebertt/kubernetes-controller-sharding
 SHARDER_IMG ?= $(GHCR_REPO)/sharder:$(TAG)
-SHARD_IMG ?= $(GHCR_REPO)/shard:$(TAG)
+CHECKSUM_CONTROLLER_IMG ?= $(GHCR_REPO)/checksum-controller:$(TAG)
 WEBHOSTING_OPERATOR_IMG ?= $(GHCR_REPO)/webhosting-operator:$(TAG)
 EXPERIMENT_IMG ?= $(GHCR_REPO)/experiment:$(TAG)

@@ -142,20 +142,20 @@ run: $(KUBECTL) generate-fast ## Run the sharder from your host and deploy prere
 	$(KUBECTL) apply --server-side --force-conflicts -k hack/config/certificates/host
 	go run ./cmd/sharder --config=hack/config/sharder/host/config.yaml --zap-log-level=debug

-SHARD_NAME ?= shard-$(shell tr -dc bcdfghjklmnpqrstvwxz2456789 </dev/urandom | head -c 8)
+SHARD_NAME ?= checksum-controller-$(shell tr -dc bcdfghjklmnpqrstvwxz2456789 </dev/urandom | head -c 8)

-.PHONY: run-shard
-run-shard: $(KUBECTL) ## Run a shard from your host and deploy prerequisites.
-	$(KUBECTL) apply --server-side --force-conflicts -k hack/config/shard/controllerring
-	go run ./cmd/shard --shard=$(SHARD_NAME) --lease-namespace=default --zap-log-level=debug
+.PHONY: run-checksum-controller
+run-checksum-controller: $(KUBECTL) ## Run checksum-controller from your host and deploy prerequisites.
+	$(KUBECTL) apply --server-side --force-conflicts -k hack/config/checksum-controller/controllerring
+	go run ./cmd/checksum-controller --shard-name=$(SHARD_NAME) --lease-namespace=default --zap-log-level=debug

 PUSH ?= false
 images: export KO_DOCKER_REPO = $(GHCR_REPO)

 .PHONY: images
 images: $(KO) ## Build and push container images using ko.
 	$(KO) build --push=$(PUSH) --sbom none --base-import-paths -t $(TAG) --platform linux/amd64,linux/arm64 \
-		./cmd/sharder ./cmd/shard ./webhosting-operator/cmd/webhosting-operator
+		./cmd/sharder ./cmd/checksum-controller ./webhosting-operator/cmd/webhosting-operator

@@ -182,7 +182,7 @@ up dev: export SKAFFOLD_TAIL ?= true

 .PHONY: deploy
 deploy: $(SKAFFOLD) $(KUBECTL) $(YQ) ## Build all images and deploy everything to K8s cluster specified in $KUBECONFIG.
-	$(SKAFFOLD) deploy -i $(SHARDER_IMG) -i $(SHARD_IMG) -i $(WEBHOSTING_OPERATOR_IMG) -i $(EXPERIMENT_IMG)
+	$(SKAFFOLD) deploy -i $(SHARDER_IMG) -i $(CHECKSUM_CONTROLLER_IMG) -i $(WEBHOSTING_OPERATOR_IMG) -i $(EXPERIMENT_IMG)

 .PHONY: up
 up: $(SKAFFOLD) $(KUBECTL) $(YQ) ## Build all images, deploy everything to K8s cluster specified in $KUBECONFIG, start port-forward and tail logs.
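Two details worth noting in the Makefile hunks above: ko's `--base-import-paths` flag names each image after the last element of its entrypoint's import path, so renaming the directory from `./cmd/shard` to `./cmd/checksum-controller` is what renames the published image to `$(GHCR_REPO)/checksum-controller`. Also, because `SHARD_NAME` is assigned with `?=`, it can presumably be overridden per invocation, e.g. `make run-checksum-controller SHARD_NAME=checksum-controller-local` (the shard name here is a hypothetical example, not from the repository).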
cmd/shard/main.go → cmd/checksum-controller/main.go (21 changes: 10 additions & 11 deletions)

@@ -48,14 +48,13 @@ func main() {
 	opts := newOptions()

 	cmd := &cobra.Command{
-		Use:   "shard",
-		Short: "Run an example shard",
-		Long: `The shard command runs an example shard that fulfills the requirements of a controller that supports sharding.
+		Use:   "checksum-controller",
+		Short: "Run an example sharded controller",
+		Long: `The checksum-controller is an example for implementing the controller requirements for sharding.
 For this, it creates a shard Lease object and renews it periodically.
-It also starts a controller for ConfigMaps that are assigned to the shard and handles the drain operation as expected.
+It also starts a controller for Secrets that are assigned to the shard and handles the drain operation as expected.
 See https://github.com/timebertt/kubernetes-controller-sharding/blob/main/docs/implement-sharding.md for more details.
-This is basically a lightweight example controller which is useful for developing the sharding components without actually
-running a full controller that complies with the sharding requirements.`,
+This example sharded controller is also useful for developing the sharding components.`,

 		Args:          cobra.NoArgs,
 		SilenceErrors: true,

@@ -92,14 +91,14 @@ func newOptions() *options {
 			TimeEncoder: zapcore.ISO8601TimeEncoder,
 		},

-		controllerRingName: "example",
+		controllerRingName: "checksum-controller",
 	}
 }

 func (o *options) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&o.controllerRingName, "controllerring", o.controllerRingName, "Name of the ControllerRing the shard belongs to.")
 	fs.StringVar(&o.leaseNamespace, "lease-namespace", o.leaseNamespace, "Namespace to use for the shard lease. Defaults to the pod's namespace if running in-cluster.")
-	fs.StringVar(&o.shardName, "shard", o.shardName, "Name of the shard. Defaults to the instance's hostname.")
+	fs.StringVar(&o.shardName, "shard-name", o.shardName, "Name of the shard. Defaults to the instance's hostname.")

 	zapFlagSet := flag.NewFlagSet("zap", flag.ContinueOnError)
 	o.zapOptions.BindFlags(zapFlagSet)

@@ -154,11 +153,11 @@ func (o *options) run(ctx context.Context) error {

 		// FILTERED WATCH CACHE
 		Cache: cache.Options{
-			// This shard only acts on objects in the default namespace.
+			// This controller only acts on objects in the default namespace.
 			DefaultNamespaces: map[string]cache.Config{metav1.NamespaceDefault: {}},
 			// Configure cache to only watch objects that are assigned to this shard.
-			// This shard only watches sharded objects, so we can configure the label selector on the cache's global level.
-			// If your shard watches sharded objects as well as non-sharded objects, use cache.Options.ByObject to configure
+			// This controller only watches sharded objects, so we can configure the label selector on the cache's global level.
+			// If your controller watches sharded objects as well as non-sharded objects, use cache.Options.ByObject to configure
 			// the label selector on object level.
 			DefaultLabelSelector: labels.SelectorFromSet(labels.Set{
 				shardingv1alpha1.LabelShard(o.controllerRingName): shardLease.Identity(),
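The cache options touched in the last hunk are the core of the filtered-watch pattern: objects assigned to other shards never even enter this shard's cache. A minimal, self-contained sketch of the same pattern follows; the function name and the way the label key and shard name are passed in are illustrative assumptions, while the real controller derives the key via `shardingv1alpha1.LabelShard` and the value from its shard Lease identity, as the diff shows.

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// newShardManager builds a manager whose cache only contains objects in the
// default namespace that carry this shard's assignment label, e.g.
// shard.alpha.sharding.timebertt.dev/checksum-controller: <shard name>.
func newShardManager(shardLabelKey, shardName string) (manager.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Cache: cache.Options{
			// Only cache objects in the namespace this shard acts on.
			DefaultNamespaces: map[string]cache.Config{metav1.NamespaceDefault: {}},
			// Only watch objects assigned to this shard. Objects assigned to
			// other shards never enter the cache and therefore never trigger
			// reconciliations or consume memory here.
			DefaultLabelSelector: labels.SelectorFromSet(labels.Set{
				shardLabelKey: shardName,
			}),
		},
	})
}
```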
File renamed without changes.
docs/development.md (30 changes: 15 additions & 15 deletions)

@@ -26,7 +26,7 @@
 The development setup reuses the deployment manifests of the main sharding components developed in this repository, located in [`config`](../config).
 See [Install the Sharding Components](installation.md).

-It also includes the [example shard](../pkg/shard) (see [Implement Sharding in Your Controller](implement-sharding.md)) and the [webhosting-operator](../webhosting-operator/README.md) (see [Evaluating the Sharding Mechanism](evaluation.md)).
+It also includes the [checksum-controller](../cmd/checksum-controller) as an example sharded controller (see [Implement Sharding in Your Controller](implement-sharding.md)) and the [webhosting-operator](../webhosting-operator/README.md) (see [Evaluating the Sharding Mechanism](evaluation.md)).

 Apart from this, the development setup also includes some external components, located in [`hack/config`](../hack/config).
 This includes [cert-manager](https://cert-manager.io/), [ingress-nginx](https://kubernetes.github.io/ingress-nginx/), [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus), [kyverno](https://kyverno.io/), and [parca](https://parca.dev/).

@@ -83,29 +83,29 @@ Assuming a fresh kind cluster:
 make run
 ```

-Now, create the `example` `ControllerRing` and run a local shard:
+Now, create the `ControllerRing` and run a local `checksum-controller`:

 ```bash
-make run-shard
+make run-checksum-controller
 ```

 You should see that the shard successfully announced itself to the sharder:

 ```bash
 $ kubectl get lease -L alpha.sharding.timebertt.dev/controllerring,alpha.sharding.timebertt.dev/state
-NAME             HOLDER           AGE   CONTROLLERRING   STATE
-shard-5pv57c6c   shard-5pv57c6c   18s   example          ready
+NAME                           HOLDER                         AGE   CONTROLLERRING        STATE
+checksum-controller-lhrlt6h4   checksum-controller-lhrlt6h4   6s    checksum-controller   ready

 $ kubectl get controllerring
-NAME      READY   AVAILABLE   SHARDS   AGE
-example   True    1           1        34s
+NAME                  READY   AVAILABLE   SHARDS   AGE
+checksum-controller   True    1           1        13s
 ```

-Running the shard locally gives you the option to test non-graceful termination, i.e., a scenario where the shard fails to renew its lease in time.
+Running the `checksum-controller` locally gives you the option to test non-graceful termination, i.e., a scenario where the shard fails to renew its lease in time.
 Simply press `Ctrl-C` twice:

 ```bash
-make run-shard
+make run-checksum-controller
 ...
 ^C2023-11-24T15:16:50.948+0100 INFO Shutting down gracefully in 2 seconds, send another SIGINT or SIGTERM to shutdown non-gracefully
 ^Cexit status 1

@@ -114,18 +114,18 @@ make run-shard
 ## Testing the Sharding Setup

 Independent of the used setup (skaffold-based or running on the host machine), you should be able to create sharded `Secrets` in the `default` namespace as configured in the `example` `ControllerRing`.
-The `ConfigMaps` created by the example shard controller should be assigned to the same shard as the owning `Secret`:
+The `ConfigMaps` created by the `checksum-controller` should be assigned to the same shard as the owning `Secret`:

 ```bash
 $ kubectl create secret generic foo --from-literal foo=bar
 secret/foo created

-$ kubectl get cm,secret -L shard.alpha.sharding.timebertt.dev/example
-NAME                      DATA   AGE   EXAMPLE
-configmap/checksums-foo   1      1s    shard-5pv57c6c
+$ kubectl get cm,secret -L shard.alpha.sharding.timebertt.dev/checksum-controller
+NAME                      DATA   AGE   CHECKSUM-CONTROLLER
+configmap/checksums-foo   1      1s    checksum-controller-lhrlt6h4

-NAME         TYPE     DATA   AGE   EXAMPLE
-secret/foo   Opaque   1      1s    shard-5pv57c6c
+NAME         TYPE     DATA   AGE   CHECKSUM-CONTROLLER
+secret/foo   Opaque   1      1s    checksum-controller-lhrlt6h4
 ```

 ## Monitoring
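For orientation, the sketch below shows roughly the behavior that the testing steps above exercise. It is an illustrative reconstruction, not the repository's actual reconciler: the reconciler type, the hashing scheme, and the ConfigMap data layout are assumptions; only the `checksums-<secret>` naming, the owner relationship, and the shard-assignment behavior are taken from the documented output above.

```go
package checksum

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"sort"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// Reconciler writes a checksum of each Secret's data into an owned ConfigMap.
type Reconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	secret := &corev1.Secret{}
	if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// Hash the Secret's data deterministically (sorted keys).
	keys := make([]string, 0, len(secret.Data))
	for k := range secret.Data {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	h := sha256.New()
	for _, k := range keys {
		h.Write([]byte(k))
		h.Write(secret.Data[k])
	}

	// Create or update the checksums-<secret> ConfigMap with an owner
	// reference to the Secret. Because the ConfigMap is owned by a sharded
	// object, the sharder assigns it to the same shard as its owner.
	configMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{
		Name:      "checksums-" + secret.Name,
		Namespace: secret.Namespace,
	}}
	_, err := controllerutil.CreateOrUpdate(ctx, r.Client, configMap, func() error {
		configMap.Data = map[string]string{"checksum": hex.EncodeToString(h.Sum(nil))}
		return controllerutil.SetControllerReference(secret, configMap, r.Scheme)
	})
	return ctrl.Result{}, err
}
```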